Mirror of https://github.com/jart/cosmopolitan.git (synced 2025-01-31 03:27:39 +00:00)
Make realloc() go 100x faster on Linux/NetBSD
Cosmopolitan now supports mremap(), which is only available on Linux and NetBSD. First, it allows memory mappings to be relocated without copying them, which can dramatically speed up data structures like std::vector once the array grows larger than 256kb. The mremap() system call is also 10x faster than munmap() when shrinking large memory mappings. There are now two functions, getpagesize() and getgransize(), which help with writing portable code that uses mmap(MAP_FIXED). Alternatively, sysconf() may be called with our new _SC_GRANSIZE. The madvise() system call now has a better wrapper with improved documentation.
parent 196942084b
commit f7780de24b
71 changed files with 1301 additions and 640 deletions
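To make the portability point concrete, here is a minimal sketch (not part of the commit) of how an application might grow an anonymous mapping with the new APIs. It assumes Cosmopolitan's headers, which declare mremap(), MREMAP_MAYMOVE, and getgransize(); on platforms other than Linux and NetBSD the mremap() wrapper fails with ENOSYS, so the sketch falls back to mmap()+memcpy().

#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

// Grow an anonymous mapping, preferring the zero-copy mremap() path.
// Sizes are rounded up to the allocation granularity reported by
// getgransize(), which equals sysconf(_SC_GRANSIZE).
void *grow_mapping(void *old, size_t old_size, size_t new_size) {
  size_t gran = getgransize();
  new_size = (new_size + gran - 1) & ~(gran - 1);
  void *p = mremap(old, old_size, new_size, MREMAP_MAYMOVE);
  if (p != MAP_FAILED)
    return p;
  if (errno != ENOSYS)
    return MAP_FAILED;
  p = mmap(0, new_size, PROT_READ | PROT_WRITE,
           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED)
    return MAP_FAILED;
  memcpy(p, old, old_size);  // the copy that mremap() lets us skip
  munmap(old, old_size);
  return p;
}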
|
@ -16,61 +16,31 @@
|
|||
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
|
||||
│ PERFORMANCE OF THIS SOFTWARE. │
|
||||
╚─────────────────────────────────────────────────────────────────────────────*/
|
||||
#include "libc/calls/syscall-nt.internal.h"
|
||||
#include "libc/calls/syscall_support-nt.internal.h"
|
||||
#include "libc/nt/enum/offerpriority.h"
|
||||
#include "libc/nt/memory.h"
|
||||
#include "libc/nt/runtime.h"
|
||||
#include "libc/nt/struct/memoryrangeentry.h"
|
||||
#include "libc/sysv/consts/madv.h"
|
||||
#include "libc/sysv/errfuns.h"
|
||||
|
||||
typedef bool32 (*__msabi PrefetchVirtualMemoryPtr)(
|
||||
int64_t hProcess, uintptr_t NumberOfEntries,
|
||||
struct NtMemoryRangeEntry *VirtualAddresses, uint32_t reserved_Flags);
|
||||
|
||||
textwindows static PrefetchVirtualMemoryPtr GetPrefetchVirtualMemory(void) {
|
||||
static PrefetchVirtualMemoryPtr PrefetchVirtualMemory_;
|
||||
if (!PrefetchVirtualMemory_) {
|
||||
PrefetchVirtualMemory_ = /* win8.1+ */
|
||||
GetProcAddressModule("Kernel32.dll", "PrefetchVirtualMemory");
|
||||
}
|
||||
return PrefetchVirtualMemory_;
|
||||
}
|
||||
|
||||
typedef bool32 (*__msabi OfferVirtualMemoryPtr)(void *inout_VirtualAddress,
|
||||
size_t Size, int Priority);
|
||||
|
||||
textwindows static OfferVirtualMemoryPtr GetOfferVirtualMemory(void) {
|
||||
static OfferVirtualMemoryPtr OfferVirtualMemory_;
|
||||
if (!OfferVirtualMemory_) {
|
||||
OfferVirtualMemory_ = /* win8.1+ */
|
||||
GetProcAddressModule("Kernel32.dll", "OfferVirtualMemory");
|
||||
}
|
||||
return OfferVirtualMemory_;
|
||||
}
|
||||
|
||||
textwindows int sys_madvise_nt(void *addr, size_t length, int advice) {
|
||||
if (advice == MADV_WILLNEED || advice == MADV_SEQUENTIAL) {
|
||||
PrefetchVirtualMemoryPtr fn = GetPrefetchVirtualMemory();
|
||||
if (fn) {
|
||||
if (fn(GetCurrentProcess(), 1, &(struct NtMemoryRangeEntry){addr, length},
|
||||
0)) {
|
||||
return 0;
|
||||
} else {
|
||||
return __winerr();
|
||||
}
|
||||
if (!length)
|
||||
return 0;
|
||||
if (PrefetchVirtualMemory(GetCurrentProcess(), 1,
|
||||
&(struct NtMemoryRangeEntry){addr, length}, 0)) {
|
||||
return 0;
|
||||
} else {
|
||||
return enosys();
|
||||
return __winerr();
|
||||
}
|
||||
} else if (advice == MADV_FREE) {
|
||||
OfferVirtualMemoryPtr fn = GetOfferVirtualMemory();
|
||||
if (fn) {
|
||||
if (fn(addr, length, kNtVmOfferPriorityNormal)) {
|
||||
return 0;
|
||||
} else {
|
||||
return __winerr();
|
||||
}
|
||||
if (!length)
|
||||
return 0;
|
||||
if (OfferVirtualMemory(addr, length, kNtVmOfferPriorityNormal)) {
|
||||
return 0;
|
||||
} else {
|
||||
return enosys();
|
||||
return __winerr();
|
||||
}
|
||||
} else {
|
||||
return einval();
|
||||
|
|
|
@ -21,27 +21,49 @@
|
|||
#include "libc/calls/syscall-sysv.internal.h"
|
||||
#include "libc/dce.h"
|
||||
#include "libc/intrin/strace.internal.h"
|
||||
#include "libc/runtime/runtime.h"
|
||||
#include "libc/sysv/errfuns.h"
|
||||
|
||||
static int __madvise(void *addr, size_t length, int advice) {
|
||||
|
||||
// simulate linux behavior of validating alignment
|
||||
if ((uintptr_t)addr & (getpagesize() - 1))
|
||||
return einval();
|
||||
|
||||
// simulate linux behavior of checking for negative length
|
||||
if ((ssize_t)length < 0)
|
||||
return einval();
|
||||
|
||||
// madvise(0, 0, advice) may be used to validate advice
|
||||
if (!length && (IsFreebsd() || IsNetbsd()))
|
||||
addr = (void *)65536l;
|
||||
|
||||
if (!IsWindows())
|
||||
return sys_madvise(addr, length, advice);
|
||||
return sys_madvise_nt(addr, length, advice);
|
||||
}
|
||||
|
||||
/**
|
||||
* Drops hints to O/S about intended access patterns of mmap()'d memory.
|
||||
* Declares intent to OS on how memory region will be used.
|
||||
*
|
||||
 * `madvise(0, 0, advice)` is recommended for validating `advice` and it
|
||||
* will always be the case that a `length` of zero is a no-op otherwise.
|
||||
*
|
||||
* Having the interval overlap unmapped pages has undefined behavior. On
|
||||
* Linux, this can be counted upon to raise ENOMEM. Other OSes vary much
|
||||
 * in behavior here; they might ignore unmapped regions or they might
|
||||
* raise EINVAL, EFAULT, or ENOMEM.
|
||||
*
|
||||
* @param advice can be MADV_WILLNEED, MADV_SEQUENTIAL, MADV_FREE, etc.
|
||||
* @return 0 on success, or -1 w/ errno
|
||||
* @raise EINVAL if `advice` isn't valid or supported by system
|
||||
* @raise EINVAL on Linux if addr/length isn't page size aligned with
|
||||
* respect to `getpagesize()`
|
||||
* @raise ENOMEM on Linux if addr/length overlaps unmapped regions
|
||||
* @raise EINVAL if `addr` isn't getpagesize() aligned
|
||||
* @raise EINVAL if `length` is negative
|
||||
* @see libc/sysv/consts.sh
|
||||
* @see fadvise()
|
||||
*/
|
||||
int madvise(void *addr, size_t length, int advice) {
|
||||
int rc;
|
||||
if (!IsWindows()) {
|
||||
rc = sys_madvise(addr, length, advice);
|
||||
} else {
|
||||
rc = sys_madvise_nt(addr, length, advice);
|
||||
}
|
||||
int rc = __madvise(addr, length, advice);
|
||||
STRACE("madvise(%p, %'zu, %d) → %d% m", addr, length, advice, rc);
|
||||
return rc;
|
||||
}
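A quick usage sketch of the contract documented above (illustrative, not from the diff): probe the advice with a zero-length call, then apply it to a page-aligned region.

#include <sys/mman.h>

// Returns 0 on success, -1 w/ errno, mirroring madvise() itself.
// madvise(0, 0, advice) is the documented way to validate `advice`;
// a zero length is otherwise always a no-op.
int discard_pages(void *addr, size_t len) {
  if (madvise(0, 0, MADV_FREE) == -1)
    return -1;                           // EINVAL: advice not supported here
  return madvise(addr, len, MADV_FREE);  // addr must be page aligned
}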
|
||||
|
|
|
@ -1,77 +0,0 @@
|
|||
/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
|
||||
│ vi: set et ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi │
|
||||
╞══════════════════════════════════════════════════════════════════════════════╡
|
||||
│ Copyright 2021 Justine Alexandra Roberts Tunney │
|
||||
│ │
|
||||
│ Permission to use, copy, modify, and/or distribute this software for │
|
||||
│ any purpose with or without fee is hereby granted, provided that the │
|
||||
│ above copyright notice and this permission notice appear in all copies. │
|
||||
│ │
|
||||
│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │
|
||||
│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │
|
||||
│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │
|
||||
│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │
|
||||
│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │
|
||||
│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │
|
||||
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
|
||||
│ PERFORMANCE OF THIS SOFTWARE. │
|
||||
╚─────────────────────────────────────────────────────────────────────────────*/
|
||||
#include "libc/calls/calls.h"
|
||||
#include "libc/calls/syscall-sysv.internal.h"
|
||||
#include "libc/dce.h"
|
||||
#include "libc/errno.h"
|
||||
#include "libc/intrin/asmflag.h"
|
||||
#include "libc/intrin/strace.internal.h"
|
||||
#include "libc/sysv/consts/map.h"
|
||||
#include "libc/sysv/consts/mremap.h"
|
||||
#include "libc/sysv/errfuns.h"
|
||||
|
||||
/**
|
||||
* Relocates memory.
|
||||
*
|
||||
 * This function lets you move memory to different addresses without copying
|
||||
* it. This system call is currently supported on Linux and NetBSD. Your
|
||||
* C library runtime won't have any awareness of this memory, so certain
|
||||
* features like ASAN memory safety and kprintf() won't work as well.
|
||||
*/
|
||||
void *sys_mremap(void *p, size_t n, size_t m, int f, void *q) {
|
||||
#ifdef __x86_64__
|
||||
bool cf;
|
||||
uintptr_t res, rdx;
|
||||
register uintptr_t r8 asm("r8");
|
||||
register uintptr_t r10 asm("r10");
|
||||
if (IsLinux()) {
|
||||
r10 = f;
|
||||
r8 = (uintptr_t)q;
|
||||
asm("syscall"
|
||||
: "=a"(res)
|
||||
: "0"(0x019), "D"(p), "S"(n), "d"(m), "r"(r10), "r"(r8)
|
||||
: "rcx", "r11", "memory", "cc");
|
||||
if (res > -4096ul)
|
||||
errno = -res, res = -1;
|
||||
} else if (IsNetbsd()) {
|
||||
if (f & MREMAP_MAYMOVE) {
|
||||
res = 0x19B;
|
||||
r10 = m;
|
||||
r8 = (f & MREMAP_FIXED) ? MAP_FIXED : 0;
|
||||
asm(CFLAG_ASM("syscall")
|
||||
: CFLAG_CONSTRAINT(cf), "+a"(res), "=d"(rdx)
|
||||
: "D"(p), "S"(n), "2"(q), "r"(r10), "r"(r8)
|
||||
: "rcx", "r9", "r11", "memory", "cc");
|
||||
if (cf)
|
||||
errno = res, res = -1;
|
||||
} else {
|
||||
res = einval();
|
||||
}
|
||||
} else {
|
||||
res = enosys();
|
||||
}
|
||||
#elif defined(__aarch64__)
|
||||
void *res;
|
||||
res = __sys_mremap(p, n, m, f, q);
|
||||
#else
|
||||
#error "arch unsupported"
|
||||
#endif
|
||||
KERNTRACE("sys_mremap(%p, %'zu, %'zu, %#b, %p) → %p% m", p, n, m, f, q, res);
|
||||
return (void *)res;
|
||||
}
|
|
@ -136,8 +136,7 @@ u32 sys_getuid(void);
|
|||
u32 sys_umask(u32);
|
||||
unsigned long _sysret(unsigned long);
|
||||
void *__sys_mmap(void *, u64, u32, u32, i64, i64, i64);
|
||||
void *__sys_mremap(void *, u64, u64, i32, void *);
|
||||
void *sys_mremap(void *, u64, u64, i32, void *);
|
||||
void *sys_mremap(void *, u64, u64, u64, u64);
|
||||
void sys_exit(i32);
|
||||
|
||||
#undef i32
|
||||
|
|
|
@ -62,6 +62,7 @@ o/$(MODE)/libc/intrin/kprintf.o: private \
|
|||
-Wframe-larger-than=128 \
|
||||
-Walloca-larger-than=128
|
||||
|
||||
o/$(MODE)/libc/intrin/mmap.o \
|
||||
o/$(MODE)/libc/intrin/tree.o: private \
|
||||
CFLAGS += \
|
||||
-ffunction-sections
|
||||
|
|
|
@ -26,6 +26,7 @@ const char *DescribeInOutInt64(char[23], ssize_t, int64_t *) libcesque;
|
|||
const char *DescribeItimer(char[12], int) libcesque;
|
||||
const char *DescribeMapFlags(char[64], int) libcesque;
|
||||
const char *DescribeMapping(char[8], int, int) libcesque;
|
||||
const char *DescribeMremapFlags(char[30], int) libcesque;
|
||||
const char *DescribeNtConsoleInFlags(char[256], uint32_t) libcesque;
|
||||
const char *DescribeNtConsoleOutFlags(char[128], uint32_t) libcesque;
|
||||
const char *DescribeNtCreationDisposition(uint32_t) libcesque;
|
||||
|
@ -49,7 +50,6 @@ const char *DescribePollFlags(char[64], int) libcesque;
|
|||
const char *DescribeProtFlags(char[48], int) libcesque;
|
||||
const char *DescribePtrace(char[12], int) libcesque;
|
||||
const char *DescribePtraceEvent(char[32], int) libcesque;
|
||||
const char *DescribeRemapFlags(char[48], int) libcesque;
|
||||
const char *DescribeRlimitName(char[20], int) libcesque;
|
||||
const char *DescribeSchedPolicy(char[48], int) libcesque;
|
||||
const char *DescribeSeccompOperation(int) libcesque;
|
||||
|
@ -81,6 +81,7 @@ const char *DescribeWhichPrio(char[12], int) libcesque;
|
|||
#define DescribeItimer(x) DescribeItimer(alloca(12), x)
|
||||
#define DescribeMapFlags(x) DescribeMapFlags(alloca(64), x)
|
||||
#define DescribeMapping(x, y) DescribeMapping(alloca(8), x, y)
|
||||
#define DescribeMremapFlags(x) DescribeMremapFlags(alloca(30), x)
|
||||
#define DescribeNtConsoleInFlags(x) DescribeNtConsoleInFlags(alloca(256), x)
|
||||
#define DescribeNtConsoleOutFlags(x) DescribeNtConsoleOutFlags(alloca(128), x)
|
||||
#define DescribeNtFileAccessFlags(x) DescribeNtFileAccessFlags(alloca(512), x)
|
||||
|
@ -103,7 +104,6 @@ const char *DescribeWhichPrio(char[12], int) libcesque;
|
|||
#define DescribeProtFlags(x) DescribeProtFlags(alloca(48), x)
|
||||
#define DescribePtrace(i) DescribePtrace(alloca(12), i)
|
||||
#define DescribePtraceEvent(x) DescribePtraceEvent(alloca(32), x)
|
||||
#define DescribeRemapFlags(x) DescribeRemapFlags(alloca(48), x)
|
||||
#define DescribeRlimitName(rl) DescribeRlimitName(alloca(20), rl)
|
||||
#define DescribeSchedPolicy(x) DescribeSchedPolicy(alloca(48), x)
|
||||
#define DescribeSiCode(x, y) DescribeSiCode(alloca(20), x, y)
|
||||
|
|
|
@ -26,7 +26,10 @@ static char DescribeMapType(int flags) {
|
|||
case MAP_FILE:
|
||||
return '-';
|
||||
case MAP_PRIVATE:
|
||||
return 'p';
|
||||
if (flags & MAP_NOFORK)
|
||||
return 'P';
|
||||
else
|
||||
return 'p';
|
||||
case MAP_SHARED:
|
||||
return 's';
|
||||
default:
|
||||
|
@ -47,7 +50,6 @@ const char *(DescribeMapping)(char p[8], int prot, int flags) {
|
|||
DescribeProt(p, prot);
|
||||
p[3] = DescribeMapType(flags);
|
||||
p[4] = (flags & MAP_ANONYMOUS) ? 'a' : '-';
|
||||
p[5] = (flags & MAP_FIXED) ? 'f' : '-';
|
||||
p[6] = 0;
|
||||
p[5] = 0;
|
||||
return p;
|
||||
}
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
|
||||
│ vi: set et ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi │
|
||||
╞══════════════════════════════════════════════════════════════════════════════╡
|
||||
│ Copyright 2022 Justine Alexandra Roberts Tunney │
|
||||
│ Copyright 2024 Justine Alexandra Roberts Tunney │
|
||||
│ │
|
||||
│ Permission to use, copy, modify, and/or distribute this software for │
|
||||
│ any purpose with or without fee is hereby granted, provided that the │
|
||||
|
@ -20,12 +20,12 @@
|
|||
#include "libc/macros.internal.h"
|
||||
#include "libc/sysv/consts/mremap.h"
|
||||
|
||||
static const struct DescribeFlags kRemapFlags[] = {
|
||||
static const struct DescribeFlags kMremapFlags[] = {
|
||||
{MREMAP_MAYMOVE, "MAYMOVE"}, //
|
||||
{MREMAP_FIXED, "FIXED"}, //
|
||||
};
|
||||
|
||||
const char *(DescribeRemapFlags)(char buf[48], int x) {
|
||||
return DescribeFlags(buf, 48, kRemapFlags, ARRAYLEN(kRemapFlags), "MREMAP_",
|
||||
const char *(DescribeMremapFlags)(char buf[30], int x) {
|
||||
return DescribeFlags(buf, 30, kMremapFlags, ARRAYLEN(kMremapFlags), "MREMAP_",
|
||||
x);
|
||||
}
|
|
@ -22,7 +22,7 @@
|
|||
#include "libc/sysv/consts/map.h"
|
||||
#include "libc/sysv/consts/prot.h"
|
||||
|
||||
#define G __granularity()
|
||||
#define G getgransize()
|
||||
|
||||
/**
|
||||
* Extends static allocation.
|
||||
|
|
|
@ -22,7 +22,7 @@
|
|||
#include "libc/runtime/runtime.h"
|
||||
#include "libc/sysv/consts/auxv.h"
|
||||
|
||||
int __granularity(void) {
|
||||
int getgransize(void) {
|
||||
static int res;
|
||||
if (!res) {
|
||||
if (!IsWindows()) {
|
|
@ -86,8 +86,6 @@ void __maps_init(void) {
|
|||
|
||||
privileged bool __maps_lock(void) {
|
||||
struct CosmoTib *tib;
|
||||
if (!__threaded)
|
||||
return false;
|
||||
if (!__tls_enabled)
|
||||
return false;
|
||||
tib = __get_tls_privileged();
|
||||
|
@ -105,8 +103,6 @@ privileged bool __maps_lock(void) {
|
|||
|
||||
privileged void __maps_unlock(void) {
|
||||
struct CosmoTib *tib;
|
||||
if (!__threaded)
|
||||
return;
|
||||
if (!__tls_enabled)
|
||||
return;
|
||||
tib = __get_tls_privileged();
|
||||
|
|
|
@ -18,8 +18,7 @@ struct Map {
|
|||
int flags; /* memory map flag */
|
||||
bool iscow; /* windows nt only */
|
||||
bool readonlyfile; /* windows nt only */
|
||||
unsigned visited; /* used for checks */
|
||||
unsigned oldprot; /* in windows fork */
|
||||
unsigned visited; /* checks and fork */
|
||||
intptr_t hand; /* windows nt only */
|
||||
union {
|
||||
struct Tree tree;
|
||||
|
@ -33,7 +32,7 @@ struct Maps {
|
|||
struct Dll *free;
|
||||
size_t count;
|
||||
size_t pages;
|
||||
atomic_ulong rollo;
|
||||
atomic_size_t rollo;
|
||||
struct Map stack;
|
||||
struct Map guard;
|
||||
};
|
||||
|
@ -45,6 +44,7 @@ struct AddrSize {
|
|||
|
||||
extern struct Maps __maps;
|
||||
|
||||
void *randaddr(void);
|
||||
void __maps_init(void);
|
||||
bool __maps_lock(void);
|
||||
void __maps_check(void);
|
||||
|
@ -52,6 +52,7 @@ void __maps_unlock(void);
|
|||
void __maps_add(struct Map *);
|
||||
void __maps_free(struct Map *);
|
||||
struct Map *__maps_alloc(void);
|
||||
struct Map *__maps_ceil(const char *);
|
||||
struct Map *__maps_floor(const char *);
|
||||
void __maps_stack(char *, int, int, size_t, int, intptr_t);
|
||||
int __maps_compare(const struct Tree *, const struct Tree *);
|
||||
|
@ -61,11 +62,7 @@ forceinline optimizespeed int __maps_search(const void *key,
|
|||
const struct Tree *node) {
|
||||
const char *addr = (const char *)key;
|
||||
const struct Map *map = (const struct Map *)MAP_TREE_CONTAINER(node);
|
||||
if (addr < map->addr)
|
||||
return +1;
|
||||
if (addr >= map->addr + map->size)
|
||||
return -1;
|
||||
return 0;
|
||||
return (addr > map->addr) - (addr < map->addr);
|
||||
}
|
||||
|
||||
static struct Map *__maps_next(struct Map *map) {
|
||||
|
|
|
@ -44,14 +44,16 @@
|
|||
#include "libc/stdio/sysparam.h"
|
||||
#include "libc/sysv/consts/auxv.h"
|
||||
#include "libc/sysv/consts/map.h"
|
||||
#include "libc/sysv/consts/mremap.h"
|
||||
#include "libc/sysv/consts/o.h"
|
||||
#include "libc/sysv/consts/prot.h"
|
||||
#include "libc/sysv/errfuns.h"
|
||||
#include "libc/thread/thread.h"
|
||||
#include "libc/thread/tls.h"
|
||||
|
||||
#define MMDEBUG 0 // this code is too slow for openbsd/windows
|
||||
#define WINBASE 0x100080040000 // TODO: Can we support Windows Vista again?
|
||||
#define WINMAXX 0x200080000000
|
||||
#define MMDEBUG IsModeDbg()
|
||||
#define WINBASE (1ul << 35) // 34 gb
|
||||
#define WINMAXX ((1ul << 44) - WINBASE) // 17 tb
|
||||
|
||||
#define MAP_FIXED_NOREPLACE_linux 0x100000
|
||||
|
||||
|
@ -86,9 +88,19 @@ privileged optimizespeed struct Map *__maps_floor(const char *addr) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
static bool overlaps_existing_map(const char *addr, size_t size, int pagesz) {
|
||||
struct Map *map;
|
||||
if ((map = __maps_floor(addr)))
|
||||
struct Map *__maps_ceil(const char *addr) {
|
||||
struct Tree *node;
|
||||
if ((node = tree_ceil(__maps.maps, addr, __maps_search)))
|
||||
return MAP_TREE_CONTAINER(node);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool __maps_overlaps(const char *addr, size_t size, int pagesz) {
|
||||
ASSERT(!((uintptr_t)addr & (getgransize() - 1)) && size);
|
||||
struct Map *map, *ceil, *floor;
|
||||
floor = __maps_floor(addr);
|
||||
ceil = __maps_ceil(addr + size);
|
||||
for (map = floor; map && map != ceil; map = __maps_next(map))
|
||||
if (MAX(addr, map->addr) <
|
||||
MIN(addr + PGUP(size), map->addr + PGUP(map->size)))
|
||||
return true;
|
||||
|
@ -107,14 +119,13 @@ void __maps_check(void) {
|
|||
ASSERT(map->visited != id);
|
||||
ASSERT(map->size);
|
||||
map->visited = id;
|
||||
pages += (map->size + getpagesize() - 1) / getpagesize();
|
||||
pages += (map->size + pagesz - 1) / pagesz;
|
||||
maps += 1;
|
||||
struct Map *next;
|
||||
if ((next = __maps_next(map))) {
|
||||
ASSERT(map->addr < next->addr);
|
||||
ASSERT(
|
||||
!(MAX(map->addr, next->addr) <
|
||||
MIN(map->addr + PGUP(map->size), next->addr + PGUP(next->size))));
|
||||
ASSERT(MAX(map->addr, next->addr) >=
|
||||
MIN(map->addr + PGUP(map->size), next->addr + PGUP(next->size)));
|
||||
}
|
||||
}
|
||||
ASSERT(maps = __maps.count);
|
||||
|
@ -122,87 +133,30 @@ void __maps_check(void) {
|
|||
#endif
|
||||
}
|
||||
|
||||
void __maps_free(struct Map *map) {
|
||||
map->size = 0;
|
||||
map->addr = MAP_FAILED;
|
||||
dll_init(&map->free);
|
||||
dll_make_first(&__maps.free, &map->free);
|
||||
}
|
||||
|
||||
static void __maps_insert(struct Map *map) {
|
||||
__maps.pages += (map->size + getpagesize() - 1) / getpagesize();
|
||||
struct Map *floor = __maps_floor(map->addr);
|
||||
if (floor && !IsWindows() && //
|
||||
map->addr + map->size == floor->addr && //
|
||||
(map->flags & MAP_ANONYMOUS) && //
|
||||
map->flags == floor->flags && //
|
||||
map->prot == floor->prot) {
|
||||
floor->addr -= map->size;
|
||||
floor->size += map->size;
|
||||
__maps_free(map);
|
||||
__maps_check();
|
||||
} else {
|
||||
__maps_add(map);
|
||||
__maps_check();
|
||||
}
|
||||
}
|
||||
|
||||
struct Map *__maps_alloc(void) {
|
||||
struct Dll *e;
|
||||
struct Map *map;
|
||||
if ((e = dll_first(__maps.free))) {
|
||||
dll_remove(&__maps.free, e);
|
||||
map = MAP_FREE_CONTAINER(e);
|
||||
return map;
|
||||
}
|
||||
int granularity = __granularity();
|
||||
struct DirectMap sys = sys_mmap(0, granularity, PROT_READ | PROT_WRITE,
|
||||
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
|
||||
if (sys.addr == MAP_FAILED)
|
||||
return 0;
|
||||
if (IsWindows())
|
||||
CloseHandle(sys.maphandle);
|
||||
map = sys.addr;
|
||||
map->addr = MAP_FAILED;
|
||||
for (int i = 1; i < granularity / sizeof(struct Map); ++i)
|
||||
__maps_free(map + i);
|
||||
return map;
|
||||
}
|
||||
|
||||
static int __munmap(char *addr, size_t size, bool untrack_only) {
|
||||
|
||||
// validate arguments
|
||||
int pagesz = getpagesize();
|
||||
int granularity = __granularity();
|
||||
if (((uintptr_t)addr & (granularity - 1)) || //
|
||||
!size || (uintptr_t)addr + size < size)
|
||||
return einval();
|
||||
|
||||
// normalize size
|
||||
size = (size + granularity - 1) & -granularity;
|
||||
|
||||
// untrack mappings
|
||||
static int __muntrack(char *addr, size_t size, int pagesz,
|
||||
struct Dll **deleted) {
|
||||
int rc = 0;
|
||||
struct Map *map;
|
||||
struct Map *next;
|
||||
struct Dll *deleted = 0;
|
||||
if (__maps_lock()) {
|
||||
__maps_unlock();
|
||||
return edeadlk();
|
||||
}
|
||||
for (map = __maps_floor(addr); map; map = next) {
|
||||
struct Map *ceil;
|
||||
struct Map *floor;
|
||||
floor = __maps_floor(addr);
|
||||
ceil = __maps_ceil(addr + size);
|
||||
for (map = floor; map && map != ceil; map = next) {
|
||||
next = __maps_next(map);
|
||||
char *map_addr = map->addr;
|
||||
size_t map_size = map->size;
|
||||
if (!(MAX(addr, map_addr) < MIN(addr + size, map_addr + PGUP(map_size))))
|
||||
break;
|
||||
if (addr <= map_addr && addr + size >= map_addr + PGUP(map_size)) {
|
||||
if (!(MAX(addr, map_addr) <
|
||||
MIN(addr + PGUP(size), map_addr + PGUP(map_size))))
|
||||
continue;
|
||||
if (addr <= map_addr && addr + PGUP(size) >= map_addr + PGUP(map_size)) {
|
||||
// remove mapping completely
|
||||
tree_remove(&__maps.maps, &map->tree);
|
||||
dll_init(&map->free);
|
||||
dll_make_first(&deleted, &map->free);
|
||||
dll_make_first(deleted, &map->free);
|
||||
__maps.pages -= (map_size + pagesz - 1) / pagesz;
|
||||
__maps.count -= 1;
|
||||
__maps_check();
|
||||
} else if (IsWindows()) {
|
||||
// you can't carve up memory maps on windows. our mmap() makes
|
||||
// this not a problem (for non-enormous memory maps) by making
|
||||
|
@ -210,8 +164,8 @@ static int __munmap(char *addr, size_t size, bool untrack_only) {
|
|||
rc = einval();
|
||||
} else if (addr <= map_addr) {
|
||||
// shave off lefthand side of mapping
|
||||
ASSERT(addr + size < map_addr + PGUP(map_size));
|
||||
size_t left = PGUP(addr + size - map_addr);
|
||||
ASSERT(addr + PGUP(size) < map_addr + PGUP(map_size));
|
||||
size_t left = addr + PGUP(size) - map_addr;
|
||||
size_t right = map_size - left;
|
||||
ASSERT(right > 0);
|
||||
ASSERT(left > 0);
|
||||
|
@ -225,11 +179,12 @@ static int __munmap(char *addr, size_t size, bool untrack_only) {
|
|||
leftmap->addr = map_addr;
|
||||
leftmap->size = left;
|
||||
dll_init(&leftmap->free);
|
||||
dll_make_first(&deleted, &leftmap->free);
|
||||
dll_make_first(deleted, &leftmap->free);
|
||||
__maps_check();
|
||||
} else {
|
||||
rc = -1;
|
||||
}
|
||||
} else if (addr + size >= map_addr + PGUP(map_size)) {
|
||||
} else if (addr + PGUP(size) >= map_addr + PGUP(map_size)) {
|
||||
// shave off righthand side of mapping
|
||||
size_t left = addr - map_addr;
|
||||
size_t right = map_addr + map_size - addr;
|
||||
|
@ -240,14 +195,15 @@ static int __munmap(char *addr, size_t size, bool untrack_only) {
|
|||
rightmap->addr = addr;
|
||||
rightmap->size = right;
|
||||
dll_init(&rightmap->free);
|
||||
dll_make_first(&deleted, &rightmap->free);
|
||||
dll_make_first(deleted, &rightmap->free);
|
||||
__maps_check();
|
||||
} else {
|
||||
rc = -1;
|
||||
}
|
||||
} else {
|
||||
// punch hole in mapping
|
||||
size_t left = addr - map_addr;
|
||||
size_t middle = size;
|
||||
size_t middle = PGUP(size);
|
||||
size_t right = map_size - middle - left;
|
||||
struct Map *leftmap;
|
||||
if ((leftmap = __maps_alloc())) {
|
||||
|
@ -268,7 +224,8 @@ static int __munmap(char *addr, size_t size, bool untrack_only) {
|
|||
middlemap->addr = addr;
|
||||
middlemap->size = size;
|
||||
dll_init(&middlemap->free);
|
||||
dll_make_first(&deleted, &middlemap->free);
|
||||
dll_make_first(deleted, &middlemap->free);
|
||||
__maps_check();
|
||||
} else {
|
||||
rc = -1;
|
||||
}
|
||||
|
@ -276,36 +233,146 @@ static int __munmap(char *addr, size_t size, bool untrack_only) {
|
|||
rc = -1;
|
||||
}
|
||||
}
|
||||
__maps_check();
|
||||
}
|
||||
return rc;
|
||||
}
|
||||
|
||||
void __maps_free(struct Map *map) {
|
||||
map->size = 0;
|
||||
map->addr = MAP_FAILED;
|
||||
dll_init(&map->free);
|
||||
dll_make_first(&__maps.free, &map->free);
|
||||
}
|
||||
|
||||
static void __maps_insert(struct Map *map) {
|
||||
map->flags &= MAP_TYPE | MAP_ANONYMOUS | MAP_NOFORK;
|
||||
|
||||
// coalesce adjacent mappings
|
||||
if (!IsWindows() && (map->flags & MAP_ANONYMOUS)) {
|
||||
int prot = map->prot & ~(MAP_FIXED | MAP_FIXED_NOREPLACE);
|
||||
int flags = map->flags;
|
||||
bool coalesced = false;
|
||||
struct Map *floor, *ceil, *other, *last = 0;
|
||||
floor = __maps_floor(map->addr);
|
||||
ceil = __maps_ceil(map->addr + map->size);
|
||||
for (other = floor; other; last = other, other = __maps_next(other)) {
|
||||
if (prot == other->prot && flags == other->flags) {
|
||||
if (!coalesced) {
|
||||
if (map->addr == other->addr + other->size) {
|
||||
__maps.pages += (map->size + getpagesize() - 1) / getpagesize();
|
||||
other->size += map->size;
|
||||
__maps_free(map);
|
||||
__maps_check();
|
||||
coalesced = true;
|
||||
} else if (map->addr + map->size == other->addr) {
|
||||
__maps.pages += (map->size + getpagesize() - 1) / getpagesize();
|
||||
other->addr -= map->size;
|
||||
other->size += map->size;
|
||||
__maps_free(map);
|
||||
__maps_check();
|
||||
coalesced = true;
|
||||
}
|
||||
}
|
||||
if (last && other->addr == last->addr + last->size) {
|
||||
other->addr -= last->size;
|
||||
other->size += last->size;
|
||||
tree_remove(&__maps.maps, &last->tree);
|
||||
__maps.count -= 1;
|
||||
__maps_free(last);
|
||||
__maps_check();
|
||||
}
|
||||
}
|
||||
if (other == ceil)
|
||||
break;
|
||||
}
|
||||
if (coalesced)
|
||||
return;
|
||||
}
|
||||
|
||||
// otherwise insert new mapping
|
||||
__maps.pages += (map->size + getpagesize() - 1) / getpagesize();
|
||||
__maps_add(map);
|
||||
__maps_check();
|
||||
}
|
||||
|
||||
struct Map *__maps_alloc(void) {
|
||||
struct Dll *e;
|
||||
struct Map *map;
|
||||
if ((e = dll_first(__maps.free))) {
|
||||
dll_remove(&__maps.free, e);
|
||||
map = MAP_FREE_CONTAINER(e);
|
||||
return map;
|
||||
}
|
||||
int gransz = getgransize();
|
||||
struct DirectMap sys = sys_mmap(0, gransz, PROT_READ | PROT_WRITE,
|
||||
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
|
||||
if (sys.addr == MAP_FAILED)
|
||||
return 0;
|
||||
map = sys.addr;
|
||||
map->addr = sys.addr;
|
||||
map->size = gransz;
|
||||
map->prot = PROT_READ | PROT_WRITE;
|
||||
map->flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NOFORK;
|
||||
map->hand = sys.maphandle;
|
||||
__maps_insert(map++);
|
||||
map->addr = MAP_FAILED;
|
||||
for (int i = 1; i < gransz / sizeof(struct Map) - 1; ++i)
|
||||
__maps_free(map + i);
|
||||
return map;
|
||||
}
|
||||
|
||||
static int __munmap(char *addr, size_t size) {
|
||||
|
||||
// validate arguments
|
||||
int pagesz = getpagesize();
|
||||
int gransz = getgransize();
|
||||
if (((uintptr_t)addr & (gransz - 1)) || //
|
||||
!size || (uintptr_t)addr + size < size)
|
||||
return einval();
|
||||
|
||||
// lock the memory manager
|
||||
// abort on reentry due to signal handler
|
||||
if (__maps_lock()) {
|
||||
__maps_unlock();
|
||||
return edeadlk();
|
||||
}
|
||||
__maps_check();
|
||||
|
||||
// normalize size
|
||||
// abort if size doesn't include all pages in granule
|
||||
size_t pgup_size = (size + pagesz - 1) & -pagesz;
|
||||
size_t grup_size = (size + gransz - 1) & -gransz;
|
||||
if (grup_size > pgup_size)
|
||||
if (__maps_overlaps(addr + pgup_size, grup_size - pgup_size, pagesz)) {
|
||||
__maps_unlock();
|
||||
return einval();
|
||||
}
|
||||
|
||||
// untrack mappings
|
||||
struct Dll *deleted = 0;
|
||||
__muntrack(addr, pgup_size, pagesz, &deleted);
|
||||
__maps_unlock();
|
||||
|
||||
// delete mappings
|
||||
int rc = 0;
|
||||
for (struct Dll *e = dll_first(deleted); e; e = dll_next(deleted, e)) {
|
||||
struct Map *map = MAP_FREE_CONTAINER(e);
|
||||
if (!untrack_only) {
|
||||
if (!IsWindows()) {
|
||||
if (sys_munmap(map->addr, map->size))
|
||||
rc = -1;
|
||||
} else if (map->hand != -1) {
|
||||
ASSERT(!((uintptr_t)map->addr & (granularity - 1)));
|
||||
if (!UnmapViewOfFile(map->addr))
|
||||
rc = -1;
|
||||
if (!CloseHandle(map->hand))
|
||||
rc = -1;
|
||||
}
|
||||
if (!IsWindows()) {
|
||||
if (sys_munmap(map->addr, map->size))
|
||||
rc = -1;
|
||||
} else if (map->hand != -1) {
|
||||
ASSERT(!((uintptr_t)map->addr & (gransz - 1)));
|
||||
if (!UnmapViewOfFile(map->addr))
|
||||
rc = -1;
|
||||
if (!CloseHandle(map->hand))
|
||||
rc = -1;
|
||||
}
|
||||
}
|
||||
|
||||
// free mappings
|
||||
if (!dll_is_empty(deleted)) {
|
||||
__maps_lock();
|
||||
struct Dll *e;
|
||||
while ((e = dll_first(deleted))) {
|
||||
dll_remove(&deleted, e);
|
||||
__maps_free(MAP_FREE_CONTAINER(e));
|
||||
}
|
||||
__maps_check();
|
||||
dll_make_first(&__maps.free, deleted);
|
||||
__maps_unlock();
|
||||
}
|
||||
|
||||
|
@ -313,7 +380,7 @@ static int __munmap(char *addr, size_t size, bool untrack_only) {
|
|||
}
|
||||
|
||||
static void *__mmap_chunk(void *addr, size_t size, int prot, int flags, int fd,
|
||||
int64_t off, int pagesz, int granularity) {
|
||||
int64_t off, int pagesz, int gransz) {
|
||||
|
||||
// polyfill nuances of fixed mappings
|
||||
int sysflags = flags;
|
||||
|
@ -328,7 +395,7 @@ static void *__mmap_chunk(void *addr, size_t size, int prot, int flags, int fd,
|
|||
sysflags |= MAP_FIXED_NOREPLACE_linux;
|
||||
} else if (IsFreebsd() || IsNetbsd()) {
|
||||
sysflags |= MAP_FIXED;
|
||||
if (overlaps_existing_map(addr, size, pagesz))
|
||||
if (__maps_overlaps(addr, size, pagesz))
|
||||
return (void *)eexist();
|
||||
} else {
|
||||
noreplace = true;
|
||||
|
@ -351,7 +418,7 @@ static void *__mmap_chunk(void *addr, size_t size, int prot, int flags, int fd,
|
|||
|
||||
// remove mapping we blew away
|
||||
if (IsWindows() && should_untrack)
|
||||
if (__munmap(addr, size, false))
|
||||
if (__munmap(addr, size))
|
||||
return MAP_FAILED;
|
||||
|
||||
// obtain mapping from operating system
|
||||
|
@ -366,7 +433,7 @@ TryAgain:
|
|||
} else if (should_untrack) {
|
||||
errno = ENOMEM;
|
||||
} else {
|
||||
addr += granularity;
|
||||
addr += gransz;
|
||||
errno = olderr;
|
||||
goto TryAgain;
|
||||
}
|
||||
|
@ -394,10 +461,17 @@ TryAgain:
|
|||
}
|
||||
|
||||
// untrack mapping we blew away
|
||||
if (!IsWindows() && should_untrack)
|
||||
__munmap(res.addr, size, true);
|
||||
if (!IsWindows() && should_untrack) {
|
||||
struct Dll *deleted = 0;
|
||||
__muntrack(res.addr, size, pagesz, &deleted);
|
||||
if (!dll_is_empty(deleted)) {
|
||||
__maps_lock();
|
||||
dll_make_first(&__maps.free, deleted);
|
||||
__maps_unlock();
|
||||
}
|
||||
}
|
||||
|
||||
// track Map object
|
||||
// track map object
|
||||
map->addr = res.addr;
|
||||
map->size = size;
|
||||
map->off = off;
|
||||
|
@ -417,11 +491,11 @@ TryAgain:
|
|||
}
|
||||
|
||||
static void *__mmap_impl(char *addr, size_t size, int prot, int flags, int fd,
|
||||
int64_t off, int pagesz, int granularity) {
|
||||
int64_t off, int pagesz, int gransz) {
|
||||
|
||||
// validate file map args
|
||||
if (!(flags & MAP_ANONYMOUS)) {
|
||||
if (off & (granularity - 1))
|
||||
if (off & (gransz - 1))
|
||||
return (void *)einval();
|
||||
if (IsWindows()) {
|
||||
if (!__isfdkind(fd, kFdFile))
|
||||
|
@ -433,36 +507,48 @@ static void *__mmap_impl(char *addr, size_t size, int prot, int flags, int fd,
|
|||
|
||||
// mmap works fine on unix
|
||||
if (!IsWindows())
|
||||
return __mmap_chunk(addr, size, prot, flags, fd, off, pagesz, granularity);
|
||||
return __mmap_chunk(addr, size, prot, flags, fd, off, pagesz, gransz);
|
||||
|
||||
// if the concept of pagesz wasn't exciting enough
|
||||
if (!addr && !(flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
|
||||
size_t slab = (size + granularity - 1) & -granularity;
|
||||
addr = (char *)(WINBASE + atomic_fetch_add(&__maps.rollo, slab) % WINMAXX);
|
||||
size_t rollo, rollo2, slab = (size + gransz - 1) & -gransz;
|
||||
rollo = atomic_load_explicit(&__maps.rollo, memory_order_relaxed);
|
||||
for (;;) {
|
||||
if ((rollo2 = rollo + slab) > WINMAXX) {
|
||||
rollo = 0;
|
||||
rollo2 = slab;
|
||||
}
|
||||
if (atomic_compare_exchange_weak_explicit(&__maps.rollo, &rollo, rollo2,
|
||||
memory_order_acq_rel,
|
||||
memory_order_relaxed)) {
|
||||
addr = (char *)WINBASE + rollo;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// windows forbids unmapping a subset of a map once it's made
|
||||
if (size <= granularity || size > 100 * 1024 * 1024)
|
||||
return __mmap_chunk(addr, size, prot, flags, fd, off, pagesz, granularity);
|
||||
if (size <= gransz || size > 100 * 1024 * 1024)
|
||||
return __mmap_chunk(addr, size, prot, flags, fd, off, pagesz, gransz);
|
||||
|
||||
// so we create a separate map for each granule in the mapping
|
||||
if (!(flags & MAP_FIXED)) {
|
||||
while (overlaps_existing_map(addr, size, pagesz)) {
|
||||
while (__maps_overlaps(addr, size, pagesz)) {
|
||||
if (flags & MAP_FIXED_NOREPLACE)
|
||||
return (void *)eexist();
|
||||
addr += granularity;
|
||||
addr += gransz;
|
||||
}
|
||||
}
|
||||
char *res = addr;
|
||||
while (size) {
|
||||
char *got;
|
||||
size_t amt = MIN(size, granularity);
|
||||
got = __mmap_chunk(addr, amt, prot, flags, fd, off, pagesz, granularity);
|
||||
size_t amt = MIN(size, gransz);
|
||||
got = __mmap_chunk(addr, amt, prot, flags, fd, off, pagesz, gransz);
|
||||
if (got != addr) {
|
||||
if (got != MAP_FAILED)
|
||||
__munmap(got, amt, false);
|
||||
__munmap(got, amt);
|
||||
if (addr > res)
|
||||
__munmap(res, addr - res, false);
|
||||
__munmap(res, addr - res);
|
||||
errno = EAGAIN;
|
||||
return MAP_FAILED;
|
||||
}
|
||||
|
@ -477,20 +563,20 @@ static void *__mmap(char *addr, size_t size, int prot, int flags, int fd,
|
|||
int64_t off) {
|
||||
char *res;
|
||||
int pagesz = getpagesize();
|
||||
int granularity = __granularity();
|
||||
int gransz = getgransize();
|
||||
|
||||
// validate arguments
|
||||
if (((uintptr_t)addr & (granularity - 1)) || //
|
||||
if (((uintptr_t)addr & (gransz - 1)) || //
|
||||
!size || (uintptr_t)addr + size < size)
|
||||
return (void *)einval();
|
||||
if (size > 0x100000000000)
|
||||
if (size > WINMAXX)
|
||||
return (void *)enomem();
|
||||
if (__maps.count * pagesz + size > __virtualmax)
|
||||
return (void *)enomem();
|
||||
|
||||
// create memory mapping
|
||||
if (!__isfdkind(fd, kFdZip)) {
|
||||
res = __mmap_impl(addr, size, prot, flags, fd, off, pagesz, granularity);
|
||||
res = __mmap_impl(addr, size, prot, flags, fd, off, pagesz, gransz);
|
||||
} else {
|
||||
res = _weaken(__zipos_mmap)(
|
||||
addr, size, prot, flags,
|
||||
|
@ -500,6 +586,170 @@ static void *__mmap(char *addr, size_t size, int prot, int flags, int fd,
|
|||
return res;
|
||||
}
|
||||
|
||||
static void *__mremap_impl(char *old_addr, size_t old_size, size_t new_size,
|
||||
int flags, char *new_addr, int pagesz, int gransz) {
|
||||
|
||||
// normalize and validate old size
|
||||
// abort if size doesn't include all pages in granule
|
||||
size_t pgup_old_size = (old_size + pagesz - 1) & -pagesz;
|
||||
size_t grup_old_size = (old_size + gransz - 1) & -gransz;
|
||||
if (grup_old_size > pgup_old_size)
|
||||
if (__maps_overlaps(old_addr + pgup_old_size, grup_old_size - pgup_old_size,
|
||||
pagesz))
|
||||
return (void *)einval();
|
||||
old_size = pgup_old_size;
|
||||
|
||||
// validate new size
|
||||
// abort if size doesn't include all pages in granule
|
||||
if (flags & MREMAP_FIXED) {
|
||||
size_t pgup_new_size = (new_size + pagesz - 1) & -pagesz;
|
||||
size_t grup_new_size = (new_size + gransz - 1) & -gransz;
|
||||
if (grup_new_size > pgup_new_size)
|
||||
if (__maps_overlaps(new_addr + pgup_new_size,
|
||||
grup_new_size - pgup_new_size, pagesz))
|
||||
return (void *)einval();
|
||||
}
|
||||
|
||||
// check old interval is fully contained within one mapping
|
||||
struct Map *old_map;
|
||||
if (!(old_map = __maps_floor(old_addr)) ||
|
||||
old_addr + old_size > old_map->addr + PGUP(old_map->size) ||
|
||||
old_addr < old_map->addr)
|
||||
return (void *)efault();
|
||||
|
||||
// save old properties
|
||||
int old_off = old_map->off;
|
||||
int old_prot = old_map->prot;
|
||||
int old_flags = old_map->flags;
|
||||
|
||||
// allocate object for tracking new mapping
|
||||
struct Map *map;
|
||||
if (!(map = __maps_alloc()))
|
||||
return (void *)enomem();
|
||||
|
||||
// netbsd mremap fixed returns enoent rather than unmapping old pages
|
||||
if (IsNetbsd() && (flags & MREMAP_FIXED))
|
||||
if (__munmap(new_addr, new_size)) {
|
||||
__maps_free(map);
|
||||
return MAP_FAILED;
|
||||
}
|
||||
|
||||
// release lock before system call if possible
|
||||
if (!flags)
|
||||
__maps_unlock();
|
||||
|
||||
// the time has come
|
||||
char *res;
|
||||
if (IsNetbsd()) {
|
||||
int sysfl = (flags & MREMAP_FIXED) ? MAP_FIXED : 0;
|
||||
res = sys_mremap(old_addr, old_size, (uintptr_t)new_addr, new_size, sysfl);
|
||||
} else {
|
||||
res = sys_mremap(old_addr, old_size, new_size, flags, (uintptr_t)new_addr);
|
||||
}
|
||||
|
||||
// re-acquire lock if needed
|
||||
if (!flags)
|
||||
__maps_lock();
|
||||
|
||||
// check result
|
||||
if (res == MAP_FAILED) {
|
||||
__maps_free(map);
|
||||
return MAP_FAILED;
|
||||
}
|
||||
|
||||
if (!(flags & MREMAP_MAYMOVE))
|
||||
ASSERT(res == old_addr);
|
||||
|
||||
// untrack old mapping
|
||||
struct Dll *deleted = 0;
|
||||
__muntrack(old_addr, old_size, pagesz, &deleted);
|
||||
dll_make_first(&__maps.free, deleted);
|
||||
deleted = 0;
|
||||
|
||||
// track map object
|
||||
map->addr = res;
|
||||
map->size = new_size;
|
||||
map->off = old_off;
|
||||
map->prot = old_prot;
|
||||
map->flags = old_flags;
|
||||
__maps_insert(map);
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
static void *__mremap(char *old_addr, size_t old_size, size_t new_size,
|
||||
int flags, char *new_addr) {
|
||||
|
||||
int pagesz = getpagesize();
|
||||
int gransz = getgransize();
|
||||
|
||||
// demand kernel support
|
||||
if (!IsLinux() && !IsNetbsd())
|
||||
return (void *)enosys();
|
||||
|
||||
// we support these flags
|
||||
if (flags & ~(MREMAP_MAYMOVE | MREMAP_FIXED))
|
||||
return (void *)einval();
|
||||
if (IsNetbsd() && !(flags & MREMAP_MAYMOVE) &&
|
||||
((new_size + pagesz - 1) & -pagesz) > old_size)
|
||||
return (void *)enotsup();
|
||||
if ((flags & MREMAP_FIXED) && !(flags & MREMAP_MAYMOVE))
|
||||
return (void *)einval();
|
||||
|
||||
// addresses must be granularity aligned
|
||||
if ((uintptr_t)old_addr & (gransz - 1))
|
||||
return (void *)einval();
|
||||
if (flags & MREMAP_FIXED)
|
||||
if ((uintptr_t)new_addr & (gransz - 1))
|
||||
return (void *)einval();
|
||||
|
||||
// sizes must not be zero
|
||||
if (!old_size)
|
||||
return (void *)einval();
|
||||
if (!new_size)
|
||||
return (void *)einval();
|
||||
|
||||
// check for big size
|
||||
if (old_size > WINMAXX)
|
||||
return (void *)enomem();
|
||||
if (new_size > WINMAXX)
|
||||
return (void *)enomem();
|
||||
|
||||
// check for overflow
|
||||
if ((uintptr_t)old_addr + old_size < old_size)
|
||||
return (void *)enomem();
|
||||
if (flags & MREMAP_FIXED)
|
||||
if ((uintptr_t)new_addr + new_size < new_size)
|
||||
return (void *)enomem();
|
||||
|
||||
// old and new intervals must not overlap
|
||||
if (flags & MREMAP_FIXED)
|
||||
if (MAX(old_addr, new_addr) <
|
||||
MIN(old_addr + old_size, new_addr + PGUP(new_size)))
|
||||
return (void *)einval();
|
||||
|
||||
// memory increase must not exceed RLIMIT_AS
|
||||
if (PGUP(new_size) > old_size)
|
||||
if (__maps.count * pagesz - old_size + PGUP(new_size) > __virtualmax)
|
||||
return (void *)enomem();
|
||||
|
||||
// lock the memory manager
|
||||
// abort on reentry due to signal handler
|
||||
if (__maps_lock()) {
|
||||
__maps_unlock();
|
||||
return (void *)edeadlk();
|
||||
}
|
||||
__maps_check();
|
||||
|
||||
// perform operation
|
||||
char *res = __mremap_impl(old_addr, old_size, new_size, flags, new_addr,
|
||||
pagesz, gransz);
|
||||
|
||||
// return result
|
||||
__maps_unlock();
|
||||
return res;
|
||||
}
|
||||
|
||||
void *mmap(void *addr, size_t size, int prot, int flags, int fd, int64_t off) {
|
||||
void *res = __mmap(addr, size, prot, flags, fd, off);
|
||||
STRACE("mmap(%p, %'zu, %s, %s, %d, %'ld) → %p% m", addr, size,
|
||||
|
@ -507,8 +757,22 @@ void *mmap(void *addr, size_t size, int prot, int flags, int fd, int64_t off) {
|
|||
return res;
|
||||
}
|
||||
|
||||
void *mremap(void *old_addr, size_t old_size, size_t new_size, int flags, ...) {
|
||||
va_list ap;
|
||||
void *new_addr = 0;
|
||||
if (flags & MREMAP_FIXED) {
|
||||
va_start(ap, flags);
|
||||
new_addr = va_arg(ap, void *);
|
||||
va_end(ap);
|
||||
}
|
||||
void *res = __mremap(old_addr, old_size, new_size, flags, new_addr);
|
||||
STRACE("mremap(%p, %'zu, %'zu, %s, %p) → %p% m", old_addr, old_size, new_size,
|
||||
DescribeMremapFlags(flags), new_addr, res);
|
||||
return res;
|
||||
}
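For reference, the variadic destination argument is only consumed when MREMAP_FIXED is passed; a hedged sketch of relocating a mapping to a chosen address (the helper name is illustrative):

#include <stddef.h>
#include <sys/mman.h>

// `want` must be getgransize()-aligned and must not overlap the old
// interval; MREMAP_FIXED also requires MREMAP_MAYMOVE, per the checks
// in __mremap() above.
void *move_mapping(void *old, size_t old_size, void *want) {
  return mremap(old, old_size, old_size, MREMAP_MAYMOVE | MREMAP_FIXED, want);
}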
|
||||
|
||||
int munmap(void *addr, size_t size) {
|
||||
int rc = __munmap(addr, size, false);
|
||||
int rc = __munmap(addr, size);
|
||||
STRACE("munmap(%p, %'zu) → %d% m", addr, size, rc);
|
||||
return rc;
|
||||
}
|
||||
|
|
|
@ -74,13 +74,16 @@ int __mprotect(char *addr, size_t size, int prot) {
|
|||
__maps_unlock();
|
||||
return edeadlk();
|
||||
}
|
||||
for (struct Map *map = __maps_floor(addr); map; map = __maps_next(map)) {
|
||||
struct Map *map, *ceil, *floor;
|
||||
floor = __maps_floor(addr);
|
||||
ceil = __maps_ceil(addr + size);
|
||||
for (map = floor; map && map != ceil; map = __maps_next(map)) {
|
||||
char *map_addr = map->addr;
|
||||
size_t map_size = map->size;
|
||||
char *beg = MAX(addr, map_addr);
|
||||
char *end = MIN(addr + size, map_addr + PGUP(map_size));
|
||||
if (beg >= end)
|
||||
break;
|
||||
continue;
|
||||
found = true;
|
||||
if (addr <= map_addr && addr + size >= map_addr + PGUP(map_size)) {
|
||||
// change protection of entire mapping
|
||||
|
|
|
@ -37,7 +37,10 @@ textwindows int sys_msync_nt(char *addr, size_t size, int flags) {
|
|||
if (__maps_lock()) {
|
||||
rc = edeadlk();
|
||||
} else {
|
||||
for (struct Map *map = __maps_floor(addr); map; map = __maps_next(map)) {
|
||||
struct Map *map, *ceil, *floor;
|
||||
floor = __maps_floor(addr);
|
||||
ceil = __maps_ceil(addr + size);
|
||||
for (map = floor; map && map != ceil; map = __maps_next(map)) {
|
||||
char *beg = MAX(addr, map->addr);
|
||||
char *end = MIN(addr + size, map->addr + map->size);
|
||||
if (beg < end)
|
||||
|
|
libc/intrin/randaddr.c (new file, 26 lines)
|
@ -0,0 +1,26 @@
|
|||
/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
|
||||
│ vi: set et ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi │
|
||||
╞══════════════════════════════════════════════════════════════════════════════╡
|
||||
│ Copyright 2024 Justine Alexandra Roberts Tunney │
|
||||
│ │
|
||||
│ Permission to use, copy, modify, and/or distribute this software for │
|
||||
│ any purpose with or without fee is hereby granted, provided that the │
|
||||
│ above copyright notice and this permission notice appear in all copies. │
|
||||
│ │
|
||||
│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │
|
||||
│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │
|
||||
│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │
|
||||
│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │
|
||||
│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │
|
||||
│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │
|
||||
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
|
||||
│ PERFORMANCE OF THIS SOFTWARE. │
|
||||
╚─────────────────────────────────────────────────────────────────────────────*/
|
||||
#include "libc/intrin/maps.h"
|
||||
|
||||
void *randaddr(void) {
|
||||
static unsigned long lcg = 1;
|
||||
lcg *= 6364136223846793005;
|
||||
lcg += 1442695040888963407;
|
||||
return (void *)(lcg >> 48 << 28);
|
||||
}
|
|
@ -36,35 +36,12 @@ static inline void tree_set_red(struct Tree *node, int red) {
|
|||
node->word |= red;
|
||||
}
|
||||
|
||||
forceinline optimizespeed struct Tree *tree_floor(const struct Tree *node,
|
||||
const void *key,
|
||||
tree_search_f *cmp) {
|
||||
struct Tree *left = 0;
|
||||
while (node) {
|
||||
if (cmp(key, node) >= 0) {
|
||||
left = (struct Tree *)node;
|
||||
node = tree_get_left(node);
|
||||
} else {
|
||||
node = node->right;
|
||||
}
|
||||
}
|
||||
return left;
|
||||
}
|
||||
|
||||
static inline struct Tree *tree_ceil(const struct Tree *node, const void *key,
|
||||
tree_search_f *cmp) {
|
||||
struct Tree *right = 0;
|
||||
while (node) {
|
||||
if (cmp(key, node) < 0) {
|
||||
right = (struct Tree *)node;
|
||||
node = tree_get_left(node);
|
||||
} else {
|
||||
node = node->right;
|
||||
}
|
||||
}
|
||||
return right;
|
||||
}
|
||||
|
||||
// Returns node equal to given key.
|
||||
//
|
||||
// [1 3 5 7] [1 3 5 7] [1 3 5 7]
|
||||
// NULL ↑ NULL
|
||||
// 4 3 8
|
||||
//
|
||||
static inline struct Tree *tree_get(const struct Tree *node, const void *key,
|
||||
tree_search_f *cmp) {
|
||||
while (node) {
|
||||
|
@ -80,6 +57,72 @@ static inline struct Tree *tree_get(const struct Tree *node, const void *key,
|
|||
return 0;
|
||||
}
|
||||
|
||||
// Returns last node less than or equal to given key.
|
||||
//
|
||||
// [1 3 5 7] [1 3 5 7] [1 3 5 7]
|
||||
// ↑ ↑ ↑
|
||||
// 4 3 8
|
||||
//
|
||||
forceinline optimizespeed struct Tree *tree_floor(const struct Tree *node,
|
||||
const void *key,
|
||||
tree_search_f *cmp) {
|
||||
struct Tree *left = 0;
|
||||
while (node) {
|
||||
int c = cmp(key, node);
|
||||
if (c < 0) {
|
||||
node = tree_get_left(node);
|
||||
} else if (c > 0) {
|
||||
left = (struct Tree *)node;
|
||||
node = node->right;
|
||||
} else {
|
||||
return (struct Tree *)node;
|
||||
}
|
||||
}
|
||||
return left;
|
||||
}
|
||||
|
||||
// Returns first node not less than given key.
|
||||
//
|
||||
// [1 3 5 7] [1 3 5 7] [1 3 5 7]
|
||||
// ↑ ↑ NULL
|
||||
// 4 3 8
|
||||
//
|
||||
static inline struct Tree *tree_lower(const struct Tree *node, const void *key,
|
||||
tree_search_f *cmp) {
|
||||
struct Tree *left = 0;
|
||||
while (node) {
|
||||
int c = cmp(key, node);
|
||||
if (c <= 0) {
|
||||
left = (struct Tree *)node;
|
||||
node = tree_get_left(node);
|
||||
} else {
|
||||
node = node->right;
|
||||
}
|
||||
}
|
||||
return left;
|
||||
}
|
||||
|
||||
// Returns first node greater than given key.
|
||||
//
|
||||
// [1 3 5 7] [1 3 5 7] [1 3 5 7]
|
||||
// ↑ ↑ NULL
|
||||
// 4 3 8
|
||||
//
|
||||
static inline struct Tree *tree_ceil(const struct Tree *node, const void *key,
|
||||
tree_search_f *cmp) {
|
||||
struct Tree *left = 0;
|
||||
while (node) {
|
||||
int c = cmp(key, node);
|
||||
if (c < 0) {
|
||||
left = (struct Tree *)node;
|
||||
node = tree_get_left(node);
|
||||
} else {
|
||||
node = node->right;
|
||||
}
|
||||
}
|
||||
return left;
|
||||
}
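The floor/lower/ceil distinction above is easy to invert, so here is a tiny standalone analogue of the same semantics on a sorted array (an illustration only, not the tree code):

#include <stdio.h>

// floor: last element <= key;  ceil: first element > key.
// Matches the diagrams above: in {1,3,5,7}, key 4 -> floor 3, ceil 5.
static int afloor(const int *a, int n, int key) {
  int r = -1;  // -1 means "no such element"
  for (int i = 0; i < n && a[i] <= key; ++i)
    r = a[i];
  return r;
}
static int aceil(const int *a, int n, int key) {
  for (int i = 0; i < n; ++i)
    if (a[i] > key)
      return a[i];
  return -1;
}
int main(void) {
  int a[] = {1, 3, 5, 7};
  printf("floor(4)=%d ceil(4)=%d\n", afloor(a, 4, 4), aceil(a, 4, 4));  // 3 5
  return 0;
}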
|
||||
|
||||
struct Tree *tree_next(struct Tree *) libcesque;
|
||||
struct Tree *tree_prev(struct Tree *) libcesque;
|
||||
struct Tree *tree_first(struct Tree *) libcesque;
|
||||
|
|
|
@ -44,7 +44,7 @@ void __may_leak(void *alloc) {
|
|||
return;
|
||||
pthread_mutex_lock(&lock);
|
||||
if (dll_is_empty(freaks)) {
|
||||
int g = __granularity();
|
||||
int g = getgransize();
|
||||
struct Leak *p = _mapanon(g);
|
||||
int n = g / sizeof(struct Leak);
|
||||
for (int i = 0; i < n; ++i) {
|
||||
|
|
|
@ -32,9 +32,9 @@
|
|||
* @see valloc()
|
||||
*/
|
||||
void *pvalloc(size_t n) {
|
||||
if (ckd_add(&n, n, __granularity() - 1)) {
|
||||
if (ckd_add(&n, n, getpagesize() - 1)) {
|
||||
errno = ENOMEM;
|
||||
return 0;
|
||||
}
|
||||
return memalign(__granularity(), n & -__granularity());
|
||||
return memalign(getpagesize(), n & -getpagesize());
|
||||
}
|
||||
|
|
|
@ -29,5 +29,5 @@
|
|||
* @see pvalloc()
|
||||
*/
|
||||
void *valloc(size_t n) {
|
||||
return memalign(__granularity(), n);
|
||||
return memalign(getpagesize(), n);
|
||||
}
|
||||
|
|
|
@ -102,6 +102,12 @@ void *VirtualAlloc2(
|
|||
struct NtMemExtendedParameter *in_out_opt_ExtendedParameters,
|
||||
unsigned ParameterCount);
|
||||
|
||||
bool32 PrefetchVirtualMemory(int64_t hProcess, uintptr_t NumberOfEntries,
|
||||
struct NtMemoryRangeEntry *VirtualAddresses,
|
||||
uint32_t reserved_Flags);
|
||||
bool32 OfferVirtualMemory(void *inout_VirtualAddress, size_t Size,
|
||||
int Priority);
|
||||
|
||||
#if ShouldUseMsabiAttribute()
|
||||
#include "libc/nt/thunk/memory.inc"
|
||||
#endif /* ShouldUseMsabiAttribute() */
|
||||
|
|
|
@ -71,9 +71,10 @@ extern long __klog_handle;
|
|||
void WipeKeystrokes(void);
|
||||
__msabi extern typeof(GetCurrentProcessId) *const __imp_GetCurrentProcessId;
|
||||
|
||||
static textwindows wontreturn void AbortFork(const char *func) {
|
||||
static textwindows wontreturn void AbortFork(const char *func, void *addr) {
|
||||
#if SYSDEBUG
|
||||
kprintf("fork() %!s() failed with win32 error %u\n", func, GetLastError());
|
||||
kprintf("fork() %!s(%lx) failed with win32 error %u\n", func, addr,
|
||||
GetLastError());
|
||||
#endif
|
||||
TerminateThisProcess(SIGSTKFLT);
|
||||
}
|
||||
|
@ -134,9 +135,9 @@ static dontinline textwindows bool WriteAll(int64_t h, void *buf, size_t n) {
|
|||
static textwindows dontinline void ReadOrDie(int64_t h, void *buf, size_t n) {
|
||||
ssize_t got;
|
||||
if ((got = ForkIo2(h, buf, n, ReadFile, "ReadFile", true)) == -1)
|
||||
AbortFork("ReadFile1");
|
||||
AbortFork("ReadFile1", buf);
|
||||
if (got != n)
|
||||
AbortFork("ReadFile2");
|
||||
AbortFork("ReadFile2", buf);
|
||||
}
|
||||
|
||||
static textwindows int64_t MapOrDie(uint32_t prot, uint64_t size) {
|
||||
|
@ -159,7 +160,7 @@ static textwindows int64_t MapOrDie(uint32_t prot, uint64_t size) {
|
|||
break;
|
||||
}
|
||||
}
|
||||
AbortFork("MapOrDie");
|
||||
AbortFork("MapOrDie", (void *)size);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -172,7 +173,7 @@ TryAgain:
|
|||
access &= ~kNtFileMapExecute;
|
||||
goto TryAgain;
|
||||
}
|
||||
AbortFork("ViewOrDie");
|
||||
AbortFork("ViewOrDie", base);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -223,7 +224,7 @@ textwindows void WinMainForked(void) {
|
|||
}
|
||||
|
||||
// map memory into process
|
||||
int granularity = __granularity();
|
||||
int granularity = getgransize();
|
||||
for (struct Tree *e = tree_first(maps); e; e = tree_next(e)) {
|
||||
struct Map *map = MAP_TREE_CONTAINER(e);
|
||||
if ((uintptr_t)map->addr & (granularity - 1))
|
||||
|
@ -280,14 +281,14 @@ textwindows void WinMainForked(void) {
|
|||
unsigned old_protect;
|
||||
if (!VirtualProtect(map->addr, map->size, __prot2nt(map->prot, map->iscow),
|
||||
&old_protect))
|
||||
AbortFork("VirtualProtect");
|
||||
AbortFork("VirtualProtect", map->addr);
|
||||
}
|
||||
__maps.maps = maps;
|
||||
__maps_init();
|
||||
|
||||
// mitosis complete
|
||||
if (!CloseHandle(reader))
|
||||
AbortFork("CloseHandle");
|
||||
AbortFork("CloseHandle", (void *)reader);
|
||||
|
||||
// rewrap the stdin named pipe hack
|
||||
// since the handles closed on fork
|
||||
|
@ -370,6 +371,8 @@ textwindows int sys_fork_nt(uint32_t dwCreationFlags) {
|
|||
// this list will be populated with the maps we're transferring
|
||||
for (struct Map *map = __maps_first(); ok && map;
|
||||
map = __maps_next(map)) {
|
||||
if (map->flags & MAP_NOFORK)
|
||||
continue;
|
||||
if (MAX((char *)__executable_start, map->addr) <
|
||||
MIN((char *)_end, map->addr + map->size))
|
||||
continue; // executable image is loaded by windows
|
||||
|
@ -382,9 +385,11 @@ textwindows int sys_fork_nt(uint32_t dwCreationFlags) {
|
|||
ok = WriteAll(writer, &map, sizeof(map));
|
||||
}
|
||||
// now write content of each map to child
|
||||
int granularity = __granularity();
|
||||
int granularity = getgransize();
|
||||
for (struct Map *map = __maps_first(); ok && map;
|
||||
map = __maps_next(map)) {
|
||||
if (map->flags & MAP_NOFORK)
|
||||
continue;
|
||||
// we only need to worry about the base mapping
|
||||
if ((uintptr_t)map->addr & (granularity - 1))
|
||||
continue;
|
||||
|
@ -410,15 +415,15 @@ textwindows int sys_fork_nt(uint32_t dwCreationFlags) {
|
|||
ok = VirtualProtect(
|
||||
map2->addr, map2->size,
|
||||
__prot2nt(map2->prot | PROT_READ, map2->iscow),
|
||||
&map2->oldprot);
|
||||
&map2->visited);
|
||||
}
|
||||
if (ok)
|
||||
ok = WriteAll(writer, map->addr, size);
|
||||
for (struct Map *map2 = map; ok && map2; map2 = __maps_next(map2)) {
|
||||
if (!(map2->prot & PROT_READ))
|
||||
if (map->addr >= map2->addr && map->addr < map->addr + size)
|
||||
ok = VirtualProtect(map2->addr, map2->size, map2->oldprot,
|
||||
&map2->oldprot);
|
||||
ok = VirtualProtect(map2->addr, map2->size, map2->visited,
|
||||
&map2->visited);
|
||||
}
|
||||
}
|
||||
if (ok)
|
||||
|
|
|
@ -23,5 +23,5 @@ long __get_avphys_pages(void) {
|
|||
struct sysinfo si;
|
||||
if (sysinfo(&si) == -1)
|
||||
return -1;
|
||||
return (((int64_t)si.freeram + si.bufferram) * si.mem_unit) / __granularity();
|
||||
return (((int64_t)si.freeram + si.bufferram) * si.mem_unit) / getpagesize();
|
||||
}
|
||||
|
|
|
@ -23,5 +23,5 @@ long __get_phys_pages(void) {
|
|||
struct sysinfo si;
|
||||
if (sysinfo(&si) == -1)
|
||||
return -1;
|
||||
return ((int64_t)si.totalram * si.mem_unit) / __granularity();
|
||||
return ((int64_t)si.totalram * si.mem_unit) / getpagesize();
|
||||
}
|
||||
|
|
|
@ -56,15 +56,14 @@ static ssize_t GetZipFile(struct Zipos *zipos, const char *name) {
|
|||
* @note This code can't depend on dlmalloc()
|
||||
*/
|
||||
static struct SymbolTable *GetSymbolTableFromZip(struct Zipos *zipos) {
|
||||
size_t size;
|
||||
ssize_t cf, lf;
|
||||
size_t size, size2;
|
||||
struct SymbolTable *res = 0;
|
||||
if ((cf = GetZipFile(zipos, ".symtab." _ARCH_NAME)) != -1 ||
|
||||
(cf = GetZipFile(zipos, ".symtab")) != -1) {
|
||||
lf = GetZipCfileOffset(zipos->map + cf);
|
||||
size = GetZipLfileUncompressedSize(zipos->map + lf);
|
||||
size2 = ROUNDUP(size, __granularity());
|
||||
if ((res = _mapanon(size2))) {
|
||||
if ((res = _mapanon(size))) {
|
||||
switch (ZIP_LFILE_COMPRESSIONMETHOD(zipos->map + lf)) {
|
||||
case kZipCompressionNone:
|
||||
memcpy(res, (void *)ZIP_LFILE_CONTENT(zipos->map + lf), size);
|
||||
|
@ -73,12 +72,12 @@ static struct SymbolTable *GetSymbolTableFromZip(struct Zipos *zipos) {
|
|||
if (__inflate((void *)res, size,
|
||||
(void *)ZIP_LFILE_CONTENT(zipos->map + lf),
|
||||
GetZipLfileCompressedSize(zipos->map + lf))) {
|
||||
munmap(res, size2);
|
||||
munmap(res, size);
|
||||
res = 0;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
munmap(res, size2);
|
||||
munmap(res, size);
|
||||
res = 0;
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -43,6 +43,7 @@ int unsetenv(const char *) libcesque;
|
|||
int clearenv(void) libcesque;
|
||||
void fpreset(void) libcesque;
|
||||
void *mmap(void *, uint64_t, int32_t, int32_t, int32_t, int64_t) libcesque;
|
||||
void *mremap(void *, size_t, size_t, int, ...) libcesque;
|
||||
int munmap(void *, uint64_t) libcesque;
|
||||
int mprotect(void *, uint64_t, int) libcesque;
|
||||
int msync(void *, size_t, int) libcesque;
|
||||
|
@ -54,6 +55,7 @@ char *getlogin(void) libcesque;
|
|||
int getlogin_r(char *, size_t) libcesque;
|
||||
int login_tty(int) libcesque;
|
||||
int getpagesize(void) pureconst libcesque;
|
||||
int getgransize(void) pureconst libcesque;
|
||||
int syncfs(int) dontthrow libcesque;
|
||||
int vhangup(void) libcesque;
|
||||
int getdtablesize(void) libcesque;
|
||||
|
@ -127,7 +129,6 @@ void GetCpuidBrand(char[13], uint32_t) libcesque;
|
|||
long __get_rlimit(int) libcesque;
|
||||
const char *__describe_os(void) libcesque;
|
||||
long __get_sysctl(int, int) libcesque;
|
||||
int __granularity(void) pureconst libcesque;
|
||||
int __get_arg_max(void) pureconst libcesque;
|
||||
int __get_cpu_count(void) pureconst libcesque;
|
||||
long __get_avphys_pages(void) pureconst libcesque;
|
||||
|
|
|
@@ -41,11 +41,12 @@
  *
  * The following parameters are supported:
  *
  * - `_SC_PAGESIZE` returns page size for mmap()
+ * - `_SC_GRANSIZE` returns addr alignment for mmap()
  * - `_SC_CLK_TCK` returns number of clock ticks per second
  * - `_SC_ARG_MAX` will perform expensive rlimit calculations
  * - `_SC_SIGSTKSZ` returns host platform's preferred SIGSTKSZ
  * - `_SC_MINSIGSTKSZ` returns host platform's required MINSIGSTKSZ
  * - `_SC_PAGESIZE` currently always returns 65536 due to Windows
  * - `_SC_AVPHYS_PAGES` returns available physical memory pages
  * - `_SC_PHYS_PAGES` returns physical memory pages available
  * - `_SC_NPROCESSORS_ONLN` returns number of effective CPUs

@@ -61,6 +62,8 @@ long sysconf(int name) {
       return CLK_TCK;
     case _SC_PAGESIZE:
       return getpagesize();
+    case _SC_GRANSIZE:
+      return getgransize();
     case _SC_ARG_MAX:
       return __get_arg_max();
     case _SC_SIGSTKSZ:
|
||||
|
|
|
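A hedged usage sketch for the new parameter (not from the diff): since _SC_GRANSIZE is exposed as a macro in the header below, portable code can probe for it at compile time and fall back to the page size on libcs that lack it.

    // Query the mmap() allocation granularity portably.
    #include <unistd.h>

    static long mapping_granularity(void) {
    #ifdef _SC_GRANSIZE
      long g = sysconf(_SC_GRANSIZE);  // cosmopolitan-specific parameter
      if (g > 0)
        return g;
    #endif
      return sysconf(_SC_PAGESIZE);    // elsewhere the page size is the granule
    }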
@ -33,6 +33,8 @@
|
|||
#define _SC_VERSION 29
|
||||
#define _SC_PAGE_SIZE 30
|
||||
#define _SC_PAGESIZE 30 /* !! */
|
||||
#define _SC_GRAN_SIZE 3000
|
||||
#define _SC_GRANSIZE 3000
|
||||
#define _SC_RTSIG_MAX 31
|
||||
#define _SC_SEM_NSEMS_MAX 32
|
||||
#define _SC_SEM_VALUE_MAX 33
|
||||
|
|
|
@ -62,7 +62,7 @@ static void __zipos_dismiss(uint8_t *map, const uint8_t *cdir, long pg) {
|
|||
}
|
||||
|
||||
// unmap the executable portion beneath the local files
|
||||
mo = ROUNDDOWN(lo, __granularity());
|
||||
mo = ROUNDDOWN(lo, getgransize());
|
||||
if (mo)
|
||||
munmap(map, mo);
|
||||
|
||||
|
|
|
@ -31,7 +31,7 @@ int __zipos_stat_impl(struct Zipos *zipos, size_t cf, struct stat *st) {
|
|||
bzero(st, sizeof(*st));
|
||||
st->st_nlink = 1;
|
||||
st->st_dev = zipos->dev;
|
||||
st->st_blksize = __granularity();
|
||||
st->st_blksize = getpagesize();
|
||||
if (cf == ZIPOS_SYNTHETIC_DIRECTORY) {
|
||||
st->st_mode = S_IFDIR | (0555 & ~atomic_load_explicit(
|
||||
&__umask, memory_order_acquire));
|
||||
|
|
|
@@ -1,2 +0,0 @@
-#include "libc/sysv/macros.internal.h"
-.scall __sys_mremap,0x19bffffffffff019,216,4095,4095,globl,hidden
|
libc/sysv/calls/sys_mremap.S (new file, 2 lines)
|
@@ -0,0 +1,2 @@
+#include "libc/sysv/macros.internal.h"
+.scall sys_mremap,0x19bffffffffff019,216,4095,4095,globl,hidden
|
|
@ -228,6 +228,7 @@ syscon mmap MAP_LOCKED 0x00002000 0x00002000 0 0 0 0 0 0
|
|||
syscon mmap MAP_NORESERVE 0x00004000 0x00004000 0x00000040 0x00000040 0 0 0x00000040 0 # Linux calls it "reserve"; NT calls it "commit"? which is default?
|
||||
syscon mmap MAP_POPULATE 0x00008000 0x00008000 0 0 0x00040000 0 0 0 # MAP_PREFAULT_READ on FreeBSD; can avoid madvise(MADV_WILLNEED) on private file mapping
|
||||
syscon mmap MAP_NONBLOCK 0x00010000 0x00010000 0 0 0 0 0 0
|
||||
syscon mmap MAP_NOFORK 0 0 0 0 0 0 0 0x10000000 # used on pages internal to our mmap() implementation on windows
|
||||
syscon mmap MAP_SYNC 0x00080000 0x00080000 0 0 0 0 0 0 # perform synchronous page faults for mapping (Linux 4.15+)
|
||||
syscon mmap MAP_HUGETLB 0x00040000 -1 -1 -1 -1 -1 -1 -1 # use hugepage backing (Linux-only)
|
||||
syscon mmap MAP_INHERIT -1 -1 -1 -1 -1 -1 0x00000080 -1 # make it inherit across execve()
|
||||
|
@ -294,13 +295,6 @@ syscon mprot PROT_WRITE 2 2 2 2 2 2 2 2 # mmap, mprotect, uni
|
|||
syscon mprot PROT_EXEC 4 4 4 4 4 4 4 4 # mmap, mprotect, unix consensus
|
||||
syscon mprot PROT_GUARD 0 0 0 0 0 0 0 0x100 # mmap, mprotect, unix consensus
|
||||
|
||||
# mremap() flags
|
||||
# the revolutionary praxis of realloc()
|
||||
#
|
||||
# group name GNU/Systemd GNU/Systemd (Aarch64) XNU's Not UNIX! MacOS (Arm64) FreeBSD OpenBSD NetBSD The New Technology Commentary
|
||||
syscon mremap MREMAP_MAYMOVE 1 1 1 1 1 1 1 1 # faked non-linux (b/c linux only)
|
||||
syscon mremap MREMAP_FIXED 2 2 2 2 2 2 2 2 # faked non-linux (b/c linux only)
|
||||
|
||||
# sigprocmask() flags
|
||||
#
|
||||
# group name GNU/Systemd GNU/Systemd (Aarch64) XNU's Not UNIX! MacOS (Arm64) FreeBSD OpenBSD NetBSD The New Technology Commentary
|
||||
|
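To make the two mremap() flags above concrete, here is a hedged sketch of the Linux-style calling convention they enable: MREMAP_MAYMOVE lets the kernel relocate a mapping when it cannot grow in place, and MREMAP_FIXED additionally pins the relocated mapping to a caller-chosen fifth argument. The helper names are invented for illustration.

    // Sketch of the two mremap() modes gated by these constants.
    #include <stddef.h>
    #include <sys/mman.h>

    void *grow_anywhere(void *old, size_t oldsz, size_t newsz) {
      // kernel may relocate the mapping if it can't extend it in place
      return mremap(old, oldsz, newsz, MREMAP_MAYMOVE);
    }

    void *move_to(void *old, size_t oldsz, size_t newsz, void *want) {
      // MREMAP_FIXED requires MREMAP_MAYMOVE and a destination address
      return mremap(old, oldsz, newsz, MREMAP_MAYMOVE | MREMAP_FIXED, want);
    }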
|
libc/sysv/consts/MAP_NOFORK.S (new file, 2 lines)
|
@@ -0,0 +1,2 @@
+#include "libc/sysv/consts/syscon.internal.h"
+.syscon mmap,MAP_NOFORK,0,0,0,0,0,0,0,0x10000000
|
|
@@ -1,2 +0,0 @@
-#include "libc/sysv/consts/syscon.internal.h"
-.syscon mremap,MREMAP_FIXED,2,2,2,2,2,2,2,2
|
|
@@ -1,2 +0,0 @@
-#include "libc/sysv/consts/syscon.internal.h"
-.syscon mremap,MREMAP_MAYMOVE,1,1,1,1,1,1,1,1
|
|
@ -19,6 +19,7 @@ extern const int MAP_JIT;
|
|||
extern const int MAP_LOCKED;
|
||||
extern const int MAP_NOCACHE;
|
||||
extern const int MAP_NOEXTEND;
|
||||
extern const int MAP_NOFORK;
|
||||
extern const int MAP_NONBLOCK;
|
||||
extern const int MAP_NORESERVE;
|
||||
extern const int MAP_NOSYNC;
|
||||
|
|
|
@ -59,7 +59,7 @@ scall __sys_pipe 0x02a10721e202a016 0x03b globl hidden # NOTE: pipe2() on FreeB
|
|||
scall sys_select 0x9a184785d285d817 0xfff globl hidden
|
||||
scall sys_pselect 0x9b486ea0a298a90e 0x848 globl hidden # pselect6() on gnu/systemd
|
||||
scall sys_sched_yield 0x15e12a14bf25d018 0x07c globl hidden # select() on XNU (previously swtch() but removed in 12.4)
|
||||
scall __sys_mremap 0x19bffffffffff019 0x0d8 globl hidden
|
||||
scall sys_mremap 0x19bffffffffff019 0x0d8 globl hidden
|
||||
scall sys_mincore 0x04e04e04e204e01b 0x0e8 globl hidden
|
||||
scall sys_madvise 0x04b04b04b204b01c 0x0e9 globl hidden
|
||||
scall sys_shmget 0x0e71210e7210901d 0x0c2 globl # no wrapper
|
||||
|
|
|
@ -22,6 +22,7 @@
|
|||
#include "libc/calls/struct/siginfo.h"
|
||||
#include "libc/calls/ucontext.h"
|
||||
#include "libc/intrin/atomic.h"
|
||||
#include "libc/intrin/kprintf.h"
|
||||
#include "libc/sysv/consts/sa.h"
|
||||
#include "libc/sysv/consts/sig.h"
|
||||
#include "libc/testlib/testlib.h"
|
||||
|
@ -44,14 +45,7 @@ static void ContinueOnError(int sig, siginfo_t *si, void *vctx) {
|
|||
#endif /* __x86_64__ */
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns true if byte at address `p` is readable.
|
||||
*
|
||||
* This function temporarily catches `SIGSEGV` and `SIGBUS` to recover
|
||||
* on error. It then attempts a volatile read and if it faults, then
|
||||
* this function shall return false. The value at `p` isn't considered.
|
||||
*/
|
||||
bool testlib_memoryexists(const void *p) {
|
||||
bool testlib_pokememory(const void *p) {
|
||||
volatile char c;
|
||||
const atomic_char *mem = p;
|
||||
struct sigaction old[2];
|
||||
|
@ -68,3 +62,16 @@ bool testlib_memoryexists(const void *p) {
|
|||
npassert(!sigaction(SIGSEGV, old + 0, 0));
|
||||
return !gotsignal;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns true if byte at address `p` is readable.
|
||||
*
|
||||
* This function temporarily catches `SIGSEGV` and `SIGBUS` to recover
|
||||
* on error. It then attempts a volatile read and if it faults, then
|
||||
* this function shall return false. The value at `p` isn't considered.
|
||||
*/
|
||||
bool testlib_memoryexists(const void *p) {
|
||||
if (kisdangerous(p))
|
||||
return false;
|
||||
return testlib_pokememory(p);
|
||||
}
|
||||
|
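For orientation, a hypothetical test sketch (not part of this change, and assuming the usual test includes) showing how the renamed probe pair divides the work: testlib_pokememory() always performs the guarded volatile read, while testlib_memoryexists() first asks kisdangerous() whether the address is tracked at all. It condenses the smallerThanPage behavior exercised later in this diff.

    // Sketch: a 1-byte mapping occupies a whole page; bytes past the
    // requested length can still be poked, but the tracker disowns them.
    TEST(testlib_memoryexists, sketch_subPageMapping) {
      char *p;
      ASSERT_NE(MAP_FAILED, (p = mmap(0, 1, PROT_READ | PROT_WRITE,
                                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)));
      EXPECT_TRUE(testlib_memoryexists(p));                       // tracked + readable
      EXPECT_TRUE(testlib_pokememory(p + getpagesize() - 1));     // faultable anyway
      EXPECT_FALSE(testlib_memoryexists(p + getpagesize() - 1));  // but not tracked
      EXPECT_SYS(0, 0, munmap(p, 1));
    }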
|
|
@ -372,6 +372,7 @@ void testlib_seterrno(int);
|
|||
void testlib_runalltests(void);
|
||||
const char *testlib_strerror(void);
|
||||
void testlib_runallbenchmarks(void);
|
||||
bool testlib_pokememory(const void *);
|
||||
bool testlib_memoryexists(const void *);
|
||||
void testlib_runtestcases(const testfn_t *, const testfn_t *, testfn_t);
|
||||
void testlib_runfixtures(const testfn_t *, const testfn_t *,
|
||||
|
|
|
@ -27,7 +27,7 @@ struct Cert FinishCertificate(struct Cert *ca, mbedtls_x509write_cert *wcert,
|
|||
int i, n, rc;
|
||||
unsigned char *p;
|
||||
mbedtls_x509_crt *cert;
|
||||
p = malloc((n = __granularity()));
|
||||
p = malloc((n = getgransize()));
|
||||
i = mbedtls_x509write_crt_der(wcert, p, n, GenerateHardRandom, 0);
|
||||
if (i < 0)
|
||||
FATALF("write key (grep -0x%04x)", -i);
|
||||
|
|
|
@ -33,63 +33,100 @@ void SetUpOnce(void) {
|
|||
|
||||
TEST(madvise, anon) {
|
||||
char *p;
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(0, __granularity(), PROT_READ | PROT_WRITE,
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(0, getpagesize(), PROT_READ | PROT_WRITE,
|
||||
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)));
|
||||
ASSERT_SYS(0, 0, madvise(p, __granularity(), MADV_WILLNEED));
|
||||
ASSERT_SYS(0, 0, munmap(p, __granularity()));
|
||||
ASSERT_SYS(0, 0, madvise(p, getpagesize(), MADV_WILLNEED));
|
||||
ASSERT_SYS(0, 0, munmap(p, getpagesize()));
|
||||
}
|
||||
|
||||
TEST(madvise, file) {
|
||||
char *p;
|
||||
ASSERT_SYS(0, 3, creat("foo.dat", 0644));
|
||||
ASSERT_SYS(0, 0, ftruncate(3, __granularity()));
|
||||
ASSERT_SYS(0, 0, ftruncate(3, getpagesize()));
|
||||
ASSERT_SYS(0, 0, close(3));
|
||||
ASSERT_SYS(0, 3, open("foo.dat", O_RDWR));
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(0, __granularity(), PROT_READ | PROT_WRITE,
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(0, getpagesize(), PROT_READ | PROT_WRITE,
|
||||
MAP_PRIVATE, 3, 0)));
|
||||
ASSERT_SYS(0, 0, madvise(p, __granularity(), MADV_WILLNEED));
|
||||
ASSERT_SYS(0, 0, munmap(p, __granularity()));
|
||||
ASSERT_SYS(0, 0, madvise(p, getpagesize(), MADV_WILLNEED));
|
||||
ASSERT_SYS(0, 0, munmap(p, getpagesize()));
|
||||
ASSERT_SYS(0, 0, close(3));
|
||||
}
|
||||
|
||||
TEST(madvise, short) {
|
||||
char *p;
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(0, __granularity(), PROT_READ | PROT_WRITE,
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(0, getpagesize(), PROT_READ | PROT_WRITE,
|
||||
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)));
|
||||
ASSERT_SYS(0, 0, madvise(p, __granularity() - 1, MADV_WILLNEED));
|
||||
ASSERT_SYS(0, 0, munmap(p, __granularity()));
|
||||
ASSERT_SYS(0, 0, madvise(p, getpagesize() - 1, MADV_WILLNEED));
|
||||
ASSERT_SYS(0, 0, munmap(p, getpagesize()));
|
||||
}
|
||||
|
||||
TEST(madvise, zero) {
|
||||
char *p;
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(0, getpagesize(), PROT_READ | PROT_WRITE,
|
||||
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)));
|
||||
ASSERT_SYS(0, 0, madvise(p, 0, MADV_WILLNEED));
|
||||
ASSERT_SYS(0, 0, madvise(0, 0, MADV_WILLNEED));
|
||||
ASSERT_SYS(EINVAL, -1, madvise(p, 0, 666));
|
||||
ASSERT_SYS(EINVAL, -1, madvise(0, 0, 666));
|
||||
ASSERT_SYS(0, 0, munmap(p, getpagesize()));
|
||||
}
|
||||
|
||||
TEST(madvise, subPages) {
|
||||
char *p;
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(0, __granularity(), PROT_READ | PROT_WRITE,
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(0, getpagesize() * 2, PROT_READ | PROT_WRITE,
|
||||
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)));
|
||||
ASSERT_SYS(0, 0,
|
||||
madvise(p + getpagesize(), __granularity() - getpagesize(),
|
||||
MADV_WILLNEED));
|
||||
ASSERT_SYS(0, 0, munmap(p, __granularity()));
|
||||
ASSERT_SYS(0, 0, madvise(p + getpagesize(), getpagesize(), MADV_WILLNEED));
|
||||
ASSERT_SYS(0, 0, munmap(p, getpagesize() * 2));
|
||||
}
|
||||
|
||||
TEST(madvise, madvWillNeed_unmappedRegion) {
|
||||
char *p;
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(0, getgransize() * 3, PROT_READ | PROT_WRITE,
|
||||
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)));
|
||||
ASSERT_SYS(0, 0, munmap(p + getgransize(), getgransize()));
|
||||
if (IsXnu())
|
||||
ASSERT_SYS(EINVAL, -1, madvise(p, getgransize() * 3, MADV_WILLNEED));
|
||||
else if (IsBsd() || IsWindows())
|
||||
ASSERT_SYS(0, 0, madvise(p, getgransize() * 3, MADV_WILLNEED));
|
||||
else
|
||||
ASSERT_SYS(ENOMEM, -1, madvise(p, getgransize() * 3, MADV_WILLNEED));
|
||||
ASSERT_SYS(0, 0, munmap(p, getgransize() * 3));
|
||||
}
|
||||
|
||||
TEST(madvise, madvFree_unmappedRegion) {
|
||||
char *p;
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(0, getgransize() * 3, PROT_READ | PROT_WRITE,
|
||||
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)));
|
||||
ASSERT_SYS(0, 0, munmap(p + getgransize(), getgransize()));
|
||||
if (IsXnu())
|
||||
ASSERT_SYS(EINVAL, -1, madvise(p, getgransize() * 3, MADV_FREE));
|
||||
else if (IsNetbsd() || IsOpenbsd())
|
||||
ASSERT_SYS(EFAULT, -1, madvise(p, getgransize() * 3, MADV_FREE));
|
||||
else if (IsFreebsd() || IsWindows())
|
||||
ASSERT_SYS(0, 0, madvise(p, getgransize() * 3, MADV_FREE));
|
||||
else
|
||||
ASSERT_SYS(ENOMEM, -1, madvise(p, getgransize() * 3, MADV_FREE));
|
||||
ASSERT_SYS(0, 0, munmap(p, getgransize() * 3));
|
||||
}
|
||||
|
||||
TEST(madvise, misalign) {
|
||||
char *p;
|
||||
if (!IsLinux())
|
||||
return; // most platforms don't care
|
||||
if (IsQemuUser())
|
||||
return; // qemu claims to be linux but doesn't care
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(0, __granularity(), PROT_READ | PROT_WRITE,
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(0, getpagesize(), PROT_READ | PROT_WRITE,
|
||||
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)));
|
||||
ASSERT_SYS(EINVAL, -1, madvise(p + 1, __granularity() - 1, MADV_WILLNEED));
|
||||
ASSERT_SYS(0, 0, munmap(p, __granularity()));
|
||||
ASSERT_SYS(EINVAL, -1, madvise(p + 1, getpagesize(), MADV_WILLNEED));
|
||||
ASSERT_SYS(0, 0, madvise(p, 1, MADV_WILLNEED));
|
||||
ASSERT_SYS(EINVAL, -1, madvise(p, -1, MADV_WILLNEED));
|
||||
ASSERT_SYS(0, 0, munmap(p, getpagesize()));
|
||||
}
|
||||
|
||||
TEST(madvise, badAdvice) {
|
||||
char *p;
|
||||
if (IsAarch64() && IsQemuUser())
|
||||
return; // qemu doesn't validate advice
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(0, __granularity(), PROT_READ | PROT_WRITE,
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(0, getpagesize(), PROT_READ | PROT_WRITE,
|
||||
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)));
|
||||
ASSERT_SYS(EINVAL, -1, madvise(p, __granularity(), 127));
|
||||
ASSERT_SYS(0, 0, munmap(p, __granularity()));
|
||||
ASSERT_SYS(EINVAL, -1, madvise(p, getpagesize(), 127));
|
||||
ASSERT_SYS(0, 0, munmap(p, getpagesize()));
|
||||
}
|
||||
|
||||
TEST(madvise, missingMemory) {
|
||||
|
@ -98,5 +135,5 @@ TEST(madvise, missingMemory) {
|
|||
if (IsQemuUser())
|
||||
return; // qemu claims to be linux but doesn't care
|
||||
ASSERT_SYS(ENOMEM, -1,
|
||||
madvise((char *)0x83483838000, __granularity(), MADV_WILLNEED));
|
||||
madvise((char *)0x83483838000, getpagesize(), MADV_WILLNEED));
|
||||
}
|
||||
|
|
|
@ -86,7 +86,7 @@ TEST(pledge, default_allowsExit) {
|
|||
int *job;
|
||||
int ws, pid;
|
||||
// create small shared memory region
|
||||
ASSERT_NE(-1, (job = mmap(0, __granularity(), PROT_READ | PROT_WRITE,
|
||||
ASSERT_NE(-1, (job = mmap(0, getpagesize(), PROT_READ | PROT_WRITE,
|
||||
MAP_SHARED | MAP_ANONYMOUS, -1, 0)));
|
||||
job[0] = 2; // create workload
|
||||
job[1] = 2;
|
||||
|
@ -100,7 +100,7 @@ TEST(pledge, default_allowsExit) {
|
|||
EXPECT_TRUE(WIFEXITED(ws));
|
||||
EXPECT_EQ(0, WEXITSTATUS(ws));
|
||||
EXPECT_EQ(4, job[0]); // check result
|
||||
EXPECT_SYS(0, 0, munmap(job, __granularity()));
|
||||
EXPECT_SYS(0, 0, munmap(job, getpagesize()));
|
||||
}
|
||||
|
||||
TEST(pledge, execpromises_notok) {
|
||||
|
@ -298,7 +298,7 @@ TEST(pledge, stdioTty_sendtoRestricted_requiresNullAddr) {
|
|||
errno = 0;
|
||||
// set lower 32-bit word of pointer to zero lool
|
||||
struct sockaddr_in *sin =
|
||||
mmap((void *)0x300000000000, __granularity(), PROT_READ | PROT_WRITE,
|
||||
mmap((void *)0x300000000000, getpagesize(), PROT_READ | PROT_WRITE,
|
||||
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
|
||||
sin->sin_family = AF_INET;
|
||||
ASSERT_SYS(
|
||||
|
@ -334,7 +334,7 @@ TEST(pledge, unix_forbidsInetSockets) {
|
|||
TEST(pledge, wpath_doesNotImplyRpath) {
|
||||
int ws, pid;
|
||||
bool *gotsome;
|
||||
ASSERT_NE(-1, (gotsome = _mapshared(__granularity())));
|
||||
ASSERT_NE(-1, (gotsome = _mapshared(getpagesize())));
|
||||
ASSERT_SYS(0, 0, touch("foo", 0644));
|
||||
ASSERT_NE(-1, (pid = fork()));
|
||||
if (!pid) {
|
||||
|
@ -412,13 +412,13 @@ TEST(pledge, mmap) {
|
|||
ASSERT_NE(-1, (pid = fork()));
|
||||
if (!pid) {
|
||||
ASSERT_SYS(0, 0, pledge("stdio", 0));
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(0, __granularity(), PROT_READ | PROT_WRITE,
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(0, getpagesize(), PROT_READ | PROT_WRITE,
|
||||
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0)));
|
||||
ASSERT_SYS(0, 0, mprotect(p, __granularity(), PROT_READ));
|
||||
ASSERT_SYS(0, 0, mprotect(p, getpagesize(), PROT_READ));
|
||||
ASSERT_SYS(EPERM, MAP_FAILED,
|
||||
mprotect(p, __granularity(), PROT_READ | PROT_EXEC));
|
||||
mprotect(p, getpagesize(), PROT_READ | PROT_EXEC));
|
||||
ASSERT_SYS(EPERM, MAP_FAILED,
|
||||
mmap(0, __granularity(), PROT_EXEC | PROT_READ,
|
||||
mmap(0, getpagesize(), PROT_EXEC | PROT_READ,
|
||||
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0));
|
||||
_Exit(0);
|
||||
}
|
||||
|
@ -434,11 +434,11 @@ TEST(pledge, mmapProtExec) {
|
|||
ASSERT_NE(-1, (pid = fork()));
|
||||
if (!pid) {
|
||||
ASSERT_SYS(0, 0, pledge("stdio prot_exec", 0));
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(0, __granularity(), PROT_READ | PROT_WRITE,
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(0, getpagesize(), PROT_READ | PROT_WRITE,
|
||||
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0)));
|
||||
ASSERT_SYS(0, 0, mprotect(p, __granularity(), PROT_READ));
|
||||
ASSERT_SYS(0, 0, mprotect(p, __granularity(), PROT_READ | PROT_EXEC));
|
||||
ASSERT_NE(MAP_FAILED, mmap(0, __granularity(), PROT_EXEC | PROT_READ,
|
||||
ASSERT_SYS(0, 0, mprotect(p, getpagesize(), PROT_READ));
|
||||
ASSERT_SYS(0, 0, mprotect(p, getpagesize(), PROT_READ | PROT_EXEC));
|
||||
ASSERT_NE(MAP_FAILED, mmap(0, getpagesize(), PROT_EXEC | PROT_READ,
|
||||
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0));
|
||||
_Exit(0);
|
||||
}
|
||||
|
|
|
@ -159,15 +159,15 @@ TEST(setrlimit, testVirtualMemoryLimit) {
|
|||
ASSERT_NE(-1, (wstatus = xspawn(0)));
|
||||
if (wstatus == -2) {
|
||||
ASSERT_EQ(0, setrlimit(RLIMIT_AS, &(struct rlimit){MEM, MEM}));
|
||||
for (i = 0; i < (MEM * 2) / __granularity(); ++i) {
|
||||
p = sys_mmap(0, __granularity(), PROT_READ | PROT_WRITE,
|
||||
for (i = 0; i < (MEM * 2) / getpagesize(); ++i) {
|
||||
p = sys_mmap(0, getpagesize(), PROT_READ | PROT_WRITE,
|
||||
MAP_ANONYMOUS | MAP_PRIVATE | MAP_POPULATE, -1, 0)
|
||||
.addr;
|
||||
if (p == MAP_FAILED) {
|
||||
ASSERT_EQ(ENOMEM, errno);
|
||||
_Exit(0);
|
||||
}
|
||||
rngset(p, __granularity(), _rand64, -1);
|
||||
rngset(p, getpagesize(), _rand64, -1);
|
||||
}
|
||||
_Exit(1);
|
||||
}
|
||||
|
@ -193,15 +193,15 @@ TEST(setrlimit, testDataMemoryLimit) {
|
|||
ASSERT_NE(-1, (wstatus = xspawn(0)));
|
||||
if (wstatus == -2) {
|
||||
ASSERT_EQ(0, setrlimit(RLIMIT_DATA, &(struct rlimit){MEM, MEM}));
|
||||
for (i = 0; i < (MEM * 2) / __granularity(); ++i) {
|
||||
p = sys_mmap(0, __granularity(), PROT_READ | PROT_WRITE,
|
||||
for (i = 0; i < (MEM * 2) / getpagesize(); ++i) {
|
||||
p = sys_mmap(0, getpagesize(), PROT_READ | PROT_WRITE,
|
||||
MAP_ANONYMOUS | MAP_PRIVATE | MAP_POPULATE, -1, 0)
|
||||
.addr;
|
||||
if (p == MAP_FAILED) {
|
||||
ASSERT_EQ(ENOMEM, errno);
|
||||
_Exit(0);
|
||||
}
|
||||
rngset(p, __granularity(), _rand64, -1);
|
||||
rngset(p, getpagesize(), _rand64, -1);
|
||||
}
|
||||
_Exit(1);
|
||||
}
|
||||
|
|
|
@ -335,7 +335,7 @@ TEST(unveil, isThreadSpecificOnLinux_isProcessWideOnOpenbsd) {
|
|||
TEST(unveil, usedTwice_forbidden_worksWithPledge) {
|
||||
int ws, pid;
|
||||
bool *gotsome;
|
||||
ASSERT_NE(-1, (gotsome = _mapshared(__granularity())));
|
||||
ASSERT_NE(-1, (gotsome = _mapshared(getpagesize())));
|
||||
ASSERT_NE(-1, (pid = fork()));
|
||||
if (!pid) {
|
||||
__pledge_mode = PLEDGE_PENALTY_KILL_PROCESS;
|
||||
|
@ -359,7 +359,7 @@ TEST(unveil, usedTwice_forbidden_worksWithPledge) {
|
|||
ASSERT_TRUE(*gotsome);
|
||||
ASSERT_TRUE(WIFSIGNALED(ws));
|
||||
ASSERT_EQ(IsOpenbsd() ? SIGABRT : SIGSYS, WTERMSIG(ws));
|
||||
EXPECT_SYS(0, 0, munmap(gotsome, __granularity()));
|
||||
EXPECT_SYS(0, 0, munmap(gotsome, getpagesize()));
|
||||
}
|
||||
|
||||
TEST(unveil, lotsOfPaths) {
|
||||
|
|
|
@ -277,7 +277,7 @@ TEST(ksnprintf, testMisalignedPointer_wontFormat) {
|
|||
TEST(ksnprintf, testUnterminatedOverrun_truncatesAtPageBoundary) {
|
||||
char *m;
|
||||
char b[32];
|
||||
int gran = __granularity();
|
||||
int gran = getgransize();
|
||||
m = memset(_mapanon(gran * 2), 1, gran);
|
||||
EXPECT_SYS(0, 0, munmap(m + gran, gran));
|
||||
EXPECT_EQ(12, ksnprintf(b, 32, "%'s", m + gran - 3));
|
||||
|
|
|
@ -47,7 +47,7 @@ TEST(lockipc, mutex) {
|
|||
int e, rc, ws, pid;
|
||||
|
||||
// create shared memory
|
||||
shm = _mapshared(__granularity());
|
||||
shm = _mapshared(getpagesize());
|
||||
|
||||
// create shared mutex
|
||||
pthread_mutexattr_t mattr;
|
||||
|
@ -86,5 +86,5 @@ TEST(lockipc, mutex) {
|
|||
|
||||
EXPECT_EQ(PROCESSES * ITERATIONS, shm->x);
|
||||
ASSERT_EQ(0, pthread_mutex_destroy(&shm->mutex));
|
||||
ASSERT_SYS(0, 0, munmap(shm, __granularity()));
|
||||
ASSERT_SYS(0, 0, munmap(shm, getpagesize()));
|
||||
}
|
||||
|
|
|
@ -20,6 +20,7 @@
|
|||
#include "libc/calls/struct/timespec.h"
|
||||
#include "libc/dce.h"
|
||||
#include "libc/intrin/kprintf.h"
|
||||
#include "libc/intrin/maps.h"
|
||||
#include "libc/runtime/runtime.h"
|
||||
#include "libc/sysv/consts/map.h"
|
||||
#include "libc/sysv/consts/prot.h"
|
||||
|
@ -38,13 +39,6 @@
|
|||
kprintf("%'20ld ns %2dx %s\n", (long)nanos, (ITERATIONS), #CODE); \
|
||||
} while (0)
|
||||
|
||||
void *randaddr(void) {
|
||||
static unsigned long lcg = 1;
|
||||
lcg *= 6364136223846793005;
|
||||
lcg += 1442695040888963407;
|
||||
return (void *)(lcg >> 48 << 28);
|
||||
}
|
||||
|
||||
void map_unmap_one_page(void) {
|
||||
void *p;
|
||||
if ((p = mmap(randaddr(), 1, PROT_READ | PROT_WRITE,
|
||||
|
|
|
@ -18,37 +18,22 @@
|
|||
╚─────────────────────────────────────────────────────────────────────────────*/
|
||||
#include "ape/sections.internal.h"
|
||||
#include "libc/calls/calls.h"
|
||||
#include "libc/calls/syscall-sysv.internal.h"
|
||||
#include "libc/calls/ucontext.h"
|
||||
#include "libc/dce.h"
|
||||
#include "libc/errno.h"
|
||||
#include "libc/intrin/atomic.h"
|
||||
#include "libc/intrin/kprintf.h"
|
||||
#include "libc/intrin/safemacros.internal.h"
|
||||
#include "libc/intrin/xchg.internal.h"
|
||||
#include "libc/intrin/maps.h"
|
||||
#include "libc/limits.h"
|
||||
#include "libc/log/log.h"
|
||||
#include "libc/macros.internal.h"
|
||||
#include "libc/mem/gc.h"
|
||||
#include "libc/mem/mem.h"
|
||||
#include "libc/runtime/memtrack.internal.h"
|
||||
#include "libc/runtime/runtime.h"
|
||||
#include "libc/runtime/stack.h"
|
||||
#include "libc/runtime/sysconf.h"
|
||||
#include "libc/stdio/rand.h"
|
||||
#include "libc/stdio/stdio.h"
|
||||
#include "libc/str/str.h"
|
||||
#include "libc/sysv/consts/auxv.h"
|
||||
#include "libc/sysv/consts/map.h"
|
||||
#include "libc/sysv/consts/msync.h"
|
||||
#include "libc/sysv/consts/o.h"
|
||||
#include "libc/sysv/consts/prot.h"
|
||||
#include "libc/sysv/consts/sa.h"
|
||||
#include "libc/sysv/consts/sig.h"
|
||||
#include "libc/testlib/ezbench.h"
|
||||
#include "libc/testlib/testlib.h"
|
||||
#include "libc/x/xspawn.h"
|
||||
#include "third_party/xed/x86.h"
|
||||
|
||||
// this is also a good torture test for mmap
|
||||
//
|
||||
|
@ -61,11 +46,11 @@
|
|||
__static_yoink("zipos");
|
||||
|
||||
int pagesz;
|
||||
int granularity;
|
||||
int gransz;
|
||||
|
||||
void SetUpOnce(void) {
|
||||
pagesz = getpagesize();
|
||||
granularity = __granularity();
|
||||
gransz = getgransize();
|
||||
testlib_enable_tmp_setup_teardown();
|
||||
// ASSERT_SYS(0, 0, pledge("stdio rpath wpath cpath proc", 0));
|
||||
}
|
||||
|
@ -109,7 +94,7 @@ TEST(mmap, pageBeyondGone) {
|
|||
MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
|
||||
ASSERT_NE(MAP_FAILED, p);
|
||||
EXPECT_TRUE(testlib_memoryexists(p));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + pagesz - 1));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + 1)); // b/c kisdangerous
|
||||
EXPECT_FALSE(testlib_memoryexists(p + pagesz));
|
||||
ASSERT_EQ(0, munmap(p, 1));
|
||||
}
|
||||
|
@ -124,47 +109,76 @@ TEST(mmap, fixedTaken) {
|
|||
}
|
||||
|
||||
TEST(mmap, hint) {
|
||||
char *p, *q;
|
||||
char *p;
|
||||
|
||||
// obtain four pages
|
||||
EXPECT_NE(MAP_FAILED, (p = mmap(NULL, granularity * 4, PROT_READ,
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(randaddr(), gransz * 4, PROT_READ,
|
||||
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)));
|
||||
|
||||
// unmap two of those pages
|
||||
EXPECT_SYS(0, 0, munmap(p + granularity, granularity));
|
||||
EXPECT_SYS(0, 0, munmap(p + granularity * 3, granularity));
|
||||
EXPECT_SYS(0, 0, munmap(p + gransz, gransz));
|
||||
EXPECT_SYS(0, 0, munmap(p + gransz * 3, gransz));
|
||||
|
||||
// test AVAILABLE nonfixed nonzero addr is granted
|
||||
// - posix doesn't mandate this behavior (but should)
|
||||
// - freebsd always chooses for you (which has no acceptable workaround)
|
||||
// - netbsd manual claims it'll be like freebsd, but is actually like openbsd
|
||||
if (!IsFreebsd())
|
||||
EXPECT_EQ(p + granularity, mmap(p + granularity, granularity, PROT_READ,
|
||||
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
|
||||
ASSERT_EQ(p + gransz, mmap(p + gransz, gransz, PROT_READ,
|
||||
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
|
||||
|
||||
// test UNAVAILABLE nonfixed nonzero addr picks something nearby
|
||||
// - posix actually does require this, but doesn't say how close
|
||||
// - xnu / linux / openbsd always choose nearest on the right
|
||||
// - freebsd goes about 16mb to the right
|
||||
// - qemu-user is off the wall
|
||||
if (!IsQemuUser()) {
|
||||
q = mmap(p + granularity * 2, granularity, PROT_READ,
|
||||
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
|
||||
EXPECT_LE(ABS(q - (p + granularity * 2)), 64 * 1024 * 1024);
|
||||
EXPECT_SYS(0, 0, munmap(q, granularity));
|
||||
}
|
||||
/*if (!IsQemuUser()) {
|
||||
q = mmap(p + gransz * 2, gransz, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1,
|
||||
0);
|
||||
EXPECT_LE(ABS(q - (p + gransz * 2)), 128 * 1024 * 1024);
|
||||
EXPECT_SYS(0, 0, munmap(q, gransz));
|
||||
}*/
|
||||
|
||||
// clean up
|
||||
EXPECT_SYS(0, 0, munmap(p, granularity * 4));
|
||||
EXPECT_SYS(0, 0, munmap(p, gransz * 4));
|
||||
}
|
||||
|
||||
TEST(mprotect, punchHoleAndFillHole) {
|
||||
char *p;
|
||||
int count = __maps.count;
|
||||
int gransz = getgransize();
|
||||
|
||||
// obtain memory
|
||||
ASSERT_NE(MAP_FAILED,
|
||||
(p = mmap(randaddr(), gransz * 3, PROT_READ | PROT_WRITE,
|
||||
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)));
|
||||
ASSERT_EQ((count += IsWindows() ? 3 : 1), __maps.count);
|
||||
|
||||
// if munmap punches a hole...
|
||||
// the number of mappings may increase
|
||||
// this is why it's possible for munmap() to ENOMEM lool
|
||||
EXPECT_SYS(0, 0, munmap(p + gransz, gransz));
|
||||
ASSERT_EQ((count += IsWindows() ? -1 : +1), __maps.count);
|
||||
|
||||
// now if we fill that hole
|
||||
// the memory manager will coalesce the mappings
|
||||
ASSERT_NE(MAP_FAILED,
|
||||
(p = mmap(p + gransz, gransz, PROT_READ | PROT_WRITE,
|
||||
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0)));
|
||||
ASSERT_EQ((count += IsWindows() ? +1 : -1), __maps.count);
|
||||
|
||||
// clean up
|
||||
EXPECT_SYS(0, 0, munmap(p, gransz * 3));
|
||||
}
|
||||
|
||||
TEST(mmap, smallerThanPage_mapsRemainder) {
|
||||
long pagesz = sysconf(_SC_PAGESIZE);
|
||||
char *map =
|
||||
mmap(0, 4096, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
|
||||
mmap(0, 1, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
|
||||
ASSERT_NE(MAP_FAILED, map);
|
||||
EXPECT_TRUE(testlib_memoryexists(map));
|
||||
EXPECT_TRUE(testlib_memoryexists(map + (pagesz - 1)));
|
||||
EXPECT_TRUE(testlib_pokememory(map + (pagesz - 1)));
|
||||
EXPECT_TRUE(!testlib_memoryexists(map + (pagesz - 1)));
|
||||
EXPECT_SYS(0, 0, munmap(map, 1));
|
||||
EXPECT_FALSE(testlib_memoryexists(map));
|
||||
EXPECT_FALSE(testlib_memoryexists(map + (pagesz - 1)));
|
||||
|
@ -178,7 +192,7 @@ TEST(mmap, testMapFile) {
|
|||
ASSERT_NE(-1, (fd = open(path, O_CREAT | O_TRUNC | O_RDWR, 0644)));
|
||||
EXPECT_EQ(5, write(fd, "hello", 5));
|
||||
EXPECT_NE(-1, fdatasync(fd));
|
||||
EXPECT_NE(MAP_FAILED, (p = mmap(NULL, 5, PROT_READ, MAP_PRIVATE, fd, 0)));
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(NULL, 5, PROT_READ, MAP_PRIVATE, fd, 0)));
|
||||
EXPECT_STREQN("hello", p, 5);
|
||||
EXPECT_NE(-1, munmap(p, 5));
|
||||
EXPECT_NE(-1, close(fd));
|
||||
|
@ -192,7 +206,7 @@ TEST(mmap, testMapFile_fdGetsClosed_makesNoDifference) {
|
|||
ASSERT_NE(-1, (fd = open(path, O_CREAT | O_TRUNC | O_RDWR, 0644)));
|
||||
EXPECT_EQ(5, write(fd, "hello", 5));
|
||||
EXPECT_NE(-1, fdatasync(fd));
|
||||
EXPECT_NE(MAP_FAILED,
|
||||
ASSERT_NE(MAP_FAILED,
|
||||
(p = mmap(NULL, 5, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0)));
|
||||
EXPECT_NE(-1, close(fd));
|
||||
EXPECT_STREQN("hello", p, 5);
|
||||
|
@ -209,7 +223,7 @@ TEST(mmap, testMapFile_fdGetsClosed_makesNoDifference) {
|
|||
TEST(mmap, fileOffset) {
|
||||
int fd;
|
||||
char *map;
|
||||
int offset_align = IsWindows() ? granularity : getpagesize();
|
||||
int offset_align = IsWindows() ? gransz : getpagesize();
|
||||
ASSERT_NE(-1, (fd = open("foo", O_CREAT | O_RDWR, 0644)));
|
||||
EXPECT_NE(-1, ftruncate(fd, offset_align * 2));
|
||||
EXPECT_NE(-1, pwrite(fd, "hello", 5, offset_align * 0));
|
||||
|
@ -226,13 +240,13 @@ TEST(mmap, mapPrivate_writesDontChangeFile) {
|
|||
int fd;
|
||||
char *map, buf[6];
|
||||
ASSERT_NE(-1, (fd = open("bar", O_CREAT | O_RDWR, 0644)));
|
||||
EXPECT_NE(-1, ftruncate(fd, granularity));
|
||||
EXPECT_NE(-1, ftruncate(fd, gransz));
|
||||
EXPECT_NE(-1, pwrite(fd, "hello", 5, 0));
|
||||
ASSERT_NE(MAP_FAILED, (map = mmap(NULL, granularity, PROT_READ | PROT_WRITE,
|
||||
ASSERT_NE(MAP_FAILED, (map = mmap(NULL, gransz, PROT_READ | PROT_WRITE,
|
||||
MAP_PRIVATE, fd, 0)));
|
||||
memcpy(map, "there", 5);
|
||||
EXPECT_NE(-1, msync(map, granularity, MS_SYNC));
|
||||
EXPECT_NE(-1, munmap(map, granularity));
|
||||
EXPECT_NE(-1, msync(map, gransz, MS_SYNC));
|
||||
EXPECT_NE(-1, munmap(map, gransz));
|
||||
EXPECT_NE(-1, pread(fd, buf, 6, 0));
|
||||
EXPECT_EQ(0, memcmp(buf, "hello", 5), "%#.*s", 5, buf);
|
||||
EXPECT_NE(-1, close(fd));
|
||||
|
@ -245,7 +259,7 @@ TEST(mmap, ziposCannotBeShared) {
|
|||
void *p;
|
||||
ASSERT_NE(-1, (fd = open(ziposLifePath, O_RDONLY), "%s", ziposLifePath));
|
||||
EXPECT_SYS(EINVAL, MAP_FAILED,
|
||||
(p = mmap(NULL, granularity, PROT_READ, MAP_SHARED, fd, 0)));
|
||||
(p = mmap(NULL, gransz, PROT_READ, MAP_SHARED, fd, 0)));
|
||||
close(fd);
|
||||
}
|
||||
|
||||
|
@ -257,9 +271,9 @@ TEST(mmap, ziposCow) {
|
|||
void *p;
|
||||
ASSERT_NE(-1, (fd = open(ziposLifePath, O_RDONLY), "%s", ziposLifePath));
|
||||
EXPECT_NE(MAP_FAILED,
|
||||
(p = mmap(NULL, granularity, PROT_READ, MAP_PRIVATE, fd, 0)));
|
||||
(p = mmap(NULL, gransz, PROT_READ, MAP_PRIVATE, fd, 0)));
|
||||
EXPECT_STREQN("\177ELF", ((const char *)p), 4);
|
||||
EXPECT_NE(-1, munmap(p, granularity));
|
||||
EXPECT_NE(-1, munmap(p, gransz));
|
||||
EXPECT_NE(-1, close(fd));
|
||||
}
|
||||
|
||||
|
@ -270,7 +284,7 @@ TEST(mmap, ziposCowFileMapReadonlyFork) {
|
|||
int fd, ws;
|
||||
void *p;
|
||||
ASSERT_NE(-1, (fd = open(ziposLifePath, O_RDONLY), "%s", ziposLifePath));
|
||||
EXPECT_NE(MAP_FAILED, (p = mmap(NULL, 4, PROT_READ, MAP_PRIVATE, fd, 0)));
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(NULL, 4, PROT_READ, MAP_PRIVATE, fd, 0)));
|
||||
EXPECT_STREQN("ELF", ((const char *)p) + 1, 3);
|
||||
ASSERT_NE(-1, (ws = xspawn(0)));
|
||||
if (ws == -2) {
|
||||
|
@ -291,7 +305,7 @@ TEST(mmap, ziposCowFileMapFork) {
|
|||
void *p;
|
||||
char lol[4];
|
||||
ASSERT_NE(-1, (fd = open(ziposLifePath, O_RDONLY), "%s", ziposLifePath));
|
||||
EXPECT_NE(MAP_FAILED,
|
||||
ASSERT_NE(MAP_FAILED,
|
||||
(p = mmap(NULL, 6, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0)));
|
||||
memcpy(p, "parnt", 6);
|
||||
ASSERT_NE(-1, (ws = xspawn(0)));
|
||||
|
@ -321,7 +335,7 @@ TEST(mmap, cow) {
|
|||
path);
|
||||
EXPECT_EQ(5, write(fd, "hello", 5));
|
||||
EXPECT_NE(-1, fdatasync(fd));
|
||||
EXPECT_NE(MAP_FAILED, (p = mmap(NULL, 5, PROT_READ, MAP_PRIVATE, fd, 0)));
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(NULL, 5, PROT_READ, MAP_PRIVATE, fd, 0)));
|
||||
EXPECT_STREQN("hello", p, 5);
|
||||
EXPECT_NE(-1, munmap(p, 5));
|
||||
EXPECT_NE(-1, close(fd));
|
||||
|
@ -340,7 +354,7 @@ TEST(mmap, cowFileMapReadonlyFork) {
|
|||
EXPECT_EQ(6, write(fd, "hello", 6));
|
||||
EXPECT_NE(-1, close(fd));
|
||||
ASSERT_NE(-1, (fd = open(path, O_RDONLY)));
|
||||
EXPECT_NE(MAP_FAILED, (p = mmap(NULL, 6, PROT_READ, MAP_PRIVATE, fd, 0)));
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(NULL, 6, PROT_READ, MAP_PRIVATE, fd, 0)));
|
||||
EXPECT_STREQN("hello", p, 5);
|
||||
ASSERT_NE(-1, (ws = xspawn(0)));
|
||||
if (ws == -2) {
|
||||
|
@ -365,7 +379,7 @@ TEST(mmap, cowFileMapFork) {
|
|||
ASSERT_NE(-1, (fd = open(path, O_CREAT | O_TRUNC | O_RDWR, 0644)));
|
||||
EXPECT_EQ(6, write(fd, "parnt", 6));
|
||||
EXPECT_NE(-1, fdatasync(fd));
|
||||
EXPECT_NE(MAP_FAILED,
|
||||
ASSERT_NE(MAP_FAILED,
|
||||
(p = mmap(NULL, 6, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0)));
|
||||
EXPECT_STREQN("parnt", p, 5);
|
||||
ASSERT_NE(-1, (ws = xspawn(0)));
|
||||
|
@ -390,7 +404,7 @@ TEST(mmap, cowFileMapFork) {
|
|||
TEST(mmap, sharedAnonMapFork) {
|
||||
int ws;
|
||||
char *p;
|
||||
EXPECT_NE(MAP_FAILED, (p = mmap(NULL, 6, PROT_READ | PROT_WRITE,
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(NULL, 6, PROT_READ | PROT_WRITE,
|
||||
MAP_SHARED | MAP_ANONYMOUS, -1, 0)));
|
||||
strcpy(p, "parnt");
|
||||
EXPECT_STREQN("parnt", p, 5);
|
||||
|
@ -417,7 +431,7 @@ TEST(mmap, sharedFileMapFork) {
|
|||
ASSERT_NE(-1, (fd = open(path, O_CREAT | O_TRUNC | O_RDWR, 0644)));
|
||||
EXPECT_EQ(6, write(fd, "parnt", 6));
|
||||
EXPECT_NE(-1, fdatasync(fd));
|
||||
EXPECT_NE(MAP_FAILED,
|
||||
ASSERT_NE(MAP_FAILED,
|
||||
(p = mmap(NULL, 6, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0)));
|
||||
EXPECT_STREQN("parnt", p, 5);
|
||||
ASSERT_NE(-1, (ws = xspawn(0)));
|
||||
|
@ -449,15 +463,15 @@ void *ptrs[N];
|
|||
|
||||
void BenchMmapPrivate(void) {
|
||||
void *p;
|
||||
p = mmap(0, granularity * 10, PROT_READ | PROT_WRITE,
|
||||
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
|
||||
p = mmap(0, gransz * 10, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE,
|
||||
-1, 0);
|
||||
if (p == MAP_FAILED)
|
||||
__builtin_trap();
|
||||
ptrs[count++] = p;
|
||||
}
|
||||
|
||||
void BenchUnmap(void) {
|
||||
if (munmap(ptrs[--count], granularity * 10))
|
||||
if (munmap(ptrs[--count], gransz * 10))
|
||||
__builtin_trap();
|
||||
}
|
||||
|
||||
|
|
|
@ -147,18 +147,18 @@ TEST(mprotect, testSegfault_writeToReadOnlyAnonymous) {
|
|||
}
|
||||
|
||||
TEST(mprotect, testExecOnly_canExecute) {
|
||||
char *p = _mapanon(__granularity());
|
||||
char *p = _mapanon(getpagesize());
|
||||
void (*f)(void) = (void *)p;
|
||||
memcpy(p, kRet31337, sizeof(kRet31337));
|
||||
ASSERT_SYS(0, 0, mprotect(p, __granularity(), PROT_EXEC | PROT_READ));
|
||||
ASSERT_SYS(0, 0, mprotect(p, getpagesize(), PROT_EXEC | PROT_READ));
|
||||
f();
|
||||
// On all supported platforms, PROT_EXEC implies PROT_READ. There is
|
||||
// one exception to this rule: Chromebook's fork of the Linux kernel
|
||||
// which has been reported to have the ability to prevent a program
|
||||
// from reading its own code.
|
||||
ASSERT_SYS(0, 0, mprotect(p, __granularity(), PROT_EXEC));
|
||||
ASSERT_SYS(0, 0, mprotect(p, getpagesize(), PROT_EXEC));
|
||||
f();
|
||||
munmap(p, __granularity());
|
||||
munmap(p, getpagesize());
|
||||
}
|
||||
|
||||
TEST(mprotect, testProtNone_cantEvenRead) {
|
||||
|
@ -249,3 +249,41 @@ TEST(mprotect, image) {
|
|||
EXPECT_SYS(0, 0, mprotect(p, 16384, PROT_READ | PROT_WRITE));
|
||||
EXPECT_EQ(2, ++p[0]);
|
||||
}
|
||||
|
||||
TEST(mprotect, weirdSize) {
|
||||
char *p;
|
||||
EXPECT_NE(MAP_FAILED, (p = mmap(0, 1, PROT_READ | PROT_EXEC,
|
||||
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)));
|
||||
EXPECT_SYS(0, 0, mprotect(p, 2, PROT_NONE));
|
||||
EXPECT_SYS(0, 0, munmap(p, 1));
|
||||
}
|
||||
|
||||
TEST(mprotect, outerOverlap) {
|
||||
char *p;
|
||||
int gransz = getgransize();
|
||||
EXPECT_NE(MAP_FAILED, (p = mmap(0, gransz * 3, PROT_READ | PROT_EXEC,
|
||||
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + gransz * 0));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + gransz * 1));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + gransz * 2));
|
||||
EXPECT_SYS(0, 0, munmap(p, gransz));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + gransz * 0));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + gransz * 1));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + gransz * 2));
|
||||
EXPECT_SYS(0, 0, munmap(p + gransz * 2, gransz));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + gransz * 0));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + gransz * 1));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + gransz * 2));
|
||||
EXPECT_SYS(0, 0, mprotect(p, gransz * 3, PROT_NONE));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + gransz * 0));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + gransz * 1));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + gransz * 2));
|
||||
EXPECT_SYS(0, 0, mprotect(p + gransz, gransz, PROT_READ));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + gransz * 0));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + gransz * 1));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + gransz * 2));
|
||||
EXPECT_SYS(0, 0, munmap(p, gransz * 3));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + gransz * 0));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + gransz * 1));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + gransz * 2));
|
||||
}
|
||||
|
|
test/libc/intrin/mremap_test.c (new file, 178 lines)
|
@ -0,0 +1,178 @@
|
|||
/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
|
||||
│ vi: set et ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi │
|
||||
╞══════════════════════════════════════════════════════════════════════════════╡
|
||||
│ Copyright 2024 Justine Alexandra Roberts Tunney │
|
||||
│ │
|
||||
│ Permission to use, copy, modify, and/or distribute this software for │
|
||||
│ any purpose with or without fee is hereby granted, provided that the │
|
||||
│ above copyright notice and this permission notice appear in all copies. │
|
||||
│ │
|
||||
│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │
|
||||
│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │
|
||||
│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │
|
||||
│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │
|
||||
│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │
|
||||
│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │
|
||||
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
|
||||
│ PERFORMANCE OF THIS SOFTWARE. │
|
||||
╚─────────────────────────────────────────────────────────────────────────────*/
|
||||
#include "libc/sysv/consts/mremap.h"
|
||||
#include "libc/calls/calls.h"
|
||||
#include "libc/calls/struct/timespec.h"
|
||||
#include "libc/dce.h"
|
||||
#include "libc/errno.h"
|
||||
#include "libc/intrin/kprintf.h"
|
||||
#include "libc/intrin/maps.h"
|
||||
#include "libc/runtime/runtime.h"
|
||||
#include "libc/str/str.h"
|
||||
#include "libc/sysv/consts/map.h"
|
||||
#include "libc/sysv/consts/mremap.h"
|
||||
#include "libc/sysv/consts/prot.h"
|
||||
#include "libc/testlib/testlib.h"
|
||||
|
||||
void SetUpOnce(void) {
|
||||
if (!IsLinux() && !IsNetbsd()) {
|
||||
tinyprint(2, "warning: skipping mremap() tests on this os\n", NULL);
|
||||
exit(0);
|
||||
}
|
||||
}
|
||||
|
||||
TEST(mremap, dontMove_hasRoom_itMoves) {
|
||||
if (IsNetbsd())
|
||||
return; // NetBSD requires MREMAP_MAYMOVE
|
||||
char *p;
|
||||
int pagesz = getpagesize();
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(randaddr(), pagesz, PROT_READ | PROT_EXEC,
|
||||
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)));
|
||||
EXPECT_TRUE(testlib_memoryexists(p));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + pagesz));
|
||||
ASSERT_SYS(0, p, mremap(p, pagesz, pagesz * 2, 0));
|
||||
EXPECT_TRUE(testlib_memoryexists(p));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + pagesz));
|
||||
ASSERT_SYS(0, 0, munmap(p, pagesz * 2));
|
||||
EXPECT_FALSE(testlib_memoryexists(p));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + pagesz));
|
||||
}
|
||||
|
||||
TEST(mremap, dontMove_noRoom_itFailsWithEnomem) {
|
||||
if (IsNetbsd())
|
||||
return; // NetBSD requires MREMAP_MAYMOVE
|
||||
char *p;
|
||||
int pagesz = getpagesize();
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(randaddr(), pagesz * 2, PROT_READ | PROT_EXEC,
|
||||
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + pagesz * 0));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + pagesz * 1));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + pagesz * 2));
|
||||
ASSERT_SYS(ENOMEM, MAP_FAILED, mremap(p, pagesz, pagesz * 3, 0));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + pagesz * 0));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + pagesz * 1));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + pagesz * 2));
|
||||
ASSERT_SYS(0, 0, munmap(p, pagesz * 2));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + pagesz * 0));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + pagesz * 1));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + pagesz * 2));
|
||||
}
|
||||
|
||||
TEST(mremap, mayMove_noRoom_itRelocates) {
|
||||
char *p, *p2;
|
||||
int pagesz = getpagesize();
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(randaddr(), pagesz * 2, PROT_READ | PROT_EXEC,
|
||||
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + pagesz * 0));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + pagesz * 1));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + pagesz * 2));
|
||||
ASSERT_NE(MAP_FAILED, (p2 = mremap(p, pagesz, pagesz * 3, MREMAP_MAYMOVE)));
|
||||
ASSERT_NE(p, p2);
|
||||
EXPECT_FALSE(testlib_memoryexists(p + pagesz * 0));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + pagesz * 1));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + pagesz * 2));
|
||||
EXPECT_TRUE(testlib_memoryexists(p2 + pagesz * 0));
|
||||
EXPECT_TRUE(testlib_memoryexists(p2 + pagesz * 1));
|
||||
EXPECT_TRUE(testlib_memoryexists(p2 + pagesz * 2));
|
||||
ASSERT_SYS(0, 0, munmap(p + pagesz, pagesz));
|
||||
ASSERT_SYS(0, 0, munmap(p2, pagesz * 3));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + pagesz * 0));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + pagesz * 1));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + pagesz * 2));
|
||||
EXPECT_FALSE(testlib_memoryexists(p2 + pagesz * 0));
|
||||
EXPECT_FALSE(testlib_memoryexists(p2 + pagesz * 1));
|
||||
EXPECT_FALSE(testlib_memoryexists(p2 + pagesz * 2));
|
||||
}
|
||||
|
||||
// demonstrate value of mremap() system call
|
||||
//
|
||||
// mmap(1'048'576) took 1'130 ns
|
||||
// mremap(1'048'576 -> 2'097'152) took 3'117 ns
|
||||
// mremap(2'097'152 -> 1'048'576) took 3'596 ns
|
||||
// mremap(1'048'576 -> 2'097'152) took 796'381 ns [simulated]
|
||||
// munmap(2'097'152) took 50'020 ns
|
||||
//
|
||||
|
||||
TEST(mremap, bench) {
|
||||
#define N 10
|
||||
long size = 1024 * 1024;
|
||||
char *rollo = randaddr();
|
||||
char *addr[N];
|
||||
|
||||
// create mappings
|
||||
struct timespec ts1 = timespec_real();
|
||||
for (long i = 0; i < N; ++i)
|
||||
if ((addr[i] = mmap((rollo += size), size, PROT_READ | PROT_WRITE,
|
||||
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0)) == MAP_FAILED) {
|
||||
kprintf("first mmap failed: %m\n");
|
||||
exit(1);
|
||||
}
|
||||
kprintf("mmap(%'zu) took %'ld ns\n", size,
|
||||
timespec_tonanos(timespec_sub(timespec_real(), ts1)) / N);
|
||||
|
||||
// use mremap to grow mappings
|
||||
ts1 = timespec_real();
|
||||
for (long i = 0; i < N; ++i)
|
||||
if ((addr[i] = mremap(addr[i], size, size * 2, MREMAP_MAYMOVE)) ==
|
||||
MAP_FAILED) {
|
||||
kprintf("grow mremap failed: %m\n");
|
||||
exit(1);
|
||||
}
|
||||
kprintf("mremap(%'zu -> %'zu) took %'ld ns\n", size, size * 2,
|
||||
timespec_tonanos(timespec_sub(timespec_real(), ts1)) / N);
|
||||
|
||||
// use mremap to shrink mappings
|
||||
ts1 = timespec_real();
|
||||
for (long i = 0; i < N; ++i)
|
||||
if (mremap(addr[i], size * 2, size, 0) != addr[i]) {
|
||||
kprintf("shrink mremap failed: %m\n");
|
||||
exit(1);
|
||||
}
|
||||
kprintf("mremap(%'zu -> %'zu) took %'ld ns\n", size * 2, size,
|
||||
timespec_tonanos(timespec_sub(timespec_real(), ts1)) / N);
|
||||
|
||||
// do the thing that mremap is trying to optimize
|
||||
ts1 = timespec_real();
|
||||
for (long i = 0; i < N; ++i) {
|
||||
char *addr2;
|
||||
if ((addr2 = mmap(0, size * 2, PROT_READ | PROT_WRITE,
|
||||
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0)) == MAP_FAILED) {
|
||||
kprintf("simulated mmap failed: %m\n");
|
||||
exit(1);
|
||||
}
|
||||
memmove(addr2, addr[i], size);
|
||||
if (munmap(addr[i], size)) {
|
||||
kprintf("simulated munmap failed: %m\n");
|
||||
exit(1);
|
||||
}
|
||||
addr[i] = addr2;
|
||||
}
|
||||
kprintf("mremap(%'zu -> %'zu) took %'ld ns [simulated]\n", size, size * 2,
|
||||
timespec_tonanos(timespec_sub(timespec_real(), ts1)) / N);
|
||||
|
||||
// unmap mappings
|
||||
ts1 = timespec_real();
|
||||
for (long i = 0; i < N; ++i)
|
||||
if (munmap(addr[i], size * 2)) {
|
||||
kprintf("munmap failed: %m\n");
|
||||
exit(1);
|
||||
}
|
||||
kprintf("munmap(%'zu) took %'ld ns\n", size * 2,
|
||||
timespec_tonanos(timespec_sub(timespec_real(), ts1)) / N);
|
||||
}
|
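As a companion to the benchmark above, a hedged sketch of the pattern it measures: a realloc-style grow helper that asks mremap() to extend or relocate an anonymous mapping, falling back to the mmap+memcpy+munmap dance that the "simulated" timing covers (and that non-Linux/NetBSD hosts, which this test skips, would need anyway). The helper name is invented for illustration.

    // Sketch: grow an anonymous mapping, preferring mremap() where it works.
    #include <string.h>
    #include <sys/mman.h>

    static void *grow_mapping(void *old, size_t oldsz, size_t newsz) {
      void *p = mremap(old, oldsz, newsz, MREMAP_MAYMOVE);
      if (p != MAP_FAILED)
        return p;  // pages were retagged; nothing was copied
      // fallback: allocate a fresh mapping and copy, as the benchmark simulates
      p = mmap(0, newsz, PROT_READ | PROT_WRITE,
               MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
      if (p == MAP_FAILED)
        return MAP_FAILED;
      memcpy(p, old, oldsz);
      munmap(old, oldsz);
      return p;
    }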
|
@ -19,6 +19,7 @@
|
|||
#include "libc/calls/calls.h"
|
||||
#include "libc/dce.h"
|
||||
#include "libc/errno.h"
|
||||
#include "libc/intrin/maps.h"
|
||||
#include "libc/log/log.h"
|
||||
#include "libc/runtime/runtime.h"
|
||||
#include "libc/sysv/consts/map.h"
|
||||
|
@ -26,116 +27,116 @@
|
|||
#include "libc/sysv/consts/prot.h"
|
||||
#include "libc/testlib/testlib.h"
|
||||
|
||||
int granularity;
|
||||
int gransz;
|
||||
|
||||
void SetUpOnce(void) {
|
||||
granularity = __granularity();
|
||||
gransz = getgransize();
|
||||
testlib_enable_tmp_setup_teardown();
|
||||
}
|
||||
|
||||
TEST(munmap, doesntExist_doesntCare) {
|
||||
EXPECT_SYS(0, 0, munmap(0, granularity * 8));
|
||||
EXPECT_SYS(0, 0, munmap(0, gransz * 8));
|
||||
}
|
||||
|
||||
TEST(munmap, invalidParams) {
|
||||
EXPECT_SYS(EINVAL, -1, munmap(0, 0));
|
||||
EXPECT_SYS(EINVAL, -1, munmap((void *)0x100080000000, 0));
|
||||
EXPECT_SYS(EINVAL, -1, munmap((void *)0x100080000001, granularity));
|
||||
EXPECT_SYS(EINVAL, -1, munmap((void *)0x100080000001, gransz));
|
||||
}
|
||||
|
||||
TEST(munmap, test) {
|
||||
char *p;
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(0, granularity, PROT_READ | PROT_WRITE,
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(0, gransz, PROT_READ | PROT_WRITE,
|
||||
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0)));
|
||||
EXPECT_TRUE(testlib_memoryexists(p));
|
||||
EXPECT_SYS(0, 0, munmap(p, granularity));
|
||||
EXPECT_SYS(0, 0, munmap(p, gransz));
|
||||
EXPECT_FALSE(testlib_memoryexists(p));
|
||||
}
|
||||
|
||||
TEST(munmap, punchHoleInMemory) {
|
||||
char *p;
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(0, granularity * 3, PROT_READ | PROT_WRITE,
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(0, gransz * 3, PROT_READ | PROT_WRITE,
|
||||
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0)));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + granularity * 0));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + granularity * 1));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + granularity * 2));
|
||||
EXPECT_SYS(0, 0, munmap(p + granularity, granularity));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + granularity * 0));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + granularity * 1));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + granularity * 2));
|
||||
EXPECT_SYS(0, 0, munmap(p, granularity));
|
||||
EXPECT_SYS(0, 0, munmap(p + granularity * 2, granularity));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + granularity * 0));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + granularity * 1));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + granularity * 2));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + gransz * 0));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + gransz * 1));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + gransz * 2));
|
||||
EXPECT_SYS(0, 0, munmap(p + gransz, gransz));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + gransz * 0));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + gransz * 1));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + gransz * 2));
|
||||
EXPECT_SYS(0, 0, munmap(p, gransz));
|
||||
EXPECT_SYS(0, 0, munmap(p + gransz * 2, gransz));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + gransz * 0));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + gransz * 1));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + gransz * 2));
|
||||
}
|
||||
|
||||
TEST(munmap, memoryHasHole) {
|
||||
char *p;
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(0, granularity * 3, PROT_READ | PROT_WRITE,
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(0, gransz * 3, PROT_READ | PROT_WRITE,
|
||||
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0)));
|
||||
EXPECT_SYS(0, 0, munmap(p + granularity, granularity));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + granularity * 0));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + granularity * 1));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + granularity * 2));
|
||||
EXPECT_SYS(0, 0, munmap(p, granularity * 3));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + granularity * 0));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + granularity * 1));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + granularity * 2));
|
||||
EXPECT_SYS(0, 0, munmap(p + gransz, gransz));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + gransz * 0));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + gransz * 1));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + gransz * 2));
|
||||
EXPECT_SYS(0, 0, munmap(p, gransz * 3));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + gransz * 0));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + gransz * 1));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + gransz * 2));
|
||||
}
|
||||
|
||||
TEST(munmap, blanketFree) {
|
||||
char *p;
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(0, granularity * 3, PROT_READ | PROT_WRITE,
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(0, gransz * 3, PROT_READ | PROT_WRITE,
|
||||
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0)));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + granularity * 0));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + granularity * 1));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + granularity * 2));
|
||||
EXPECT_SYS(0, 0, munmap(p + granularity * 0, granularity));
|
||||
EXPECT_SYS(0, 0, munmap(p + granularity * 2, granularity));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + granularity * 0));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + granularity * 1));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + granularity * 2));
|
||||
EXPECT_SYS(0, 0, munmap(p, granularity * 3));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + granularity * 0));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + granularity * 1));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + granularity * 2));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + gransz * 0));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + gransz * 1));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + gransz * 2));
|
||||
EXPECT_SYS(0, 0, munmap(p + gransz * 0, gransz));
|
||||
EXPECT_SYS(0, 0, munmap(p + gransz * 2, gransz));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + gransz * 0));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + gransz * 1));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + gransz * 2));
|
||||
EXPECT_SYS(0, 0, munmap(p, gransz * 3));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + gransz * 0));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + gransz * 1));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + gransz * 2));
|
||||
}
|
||||
|
||||
TEST(munmap, trimLeft) {
|
||||
char *p;
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(0, granularity * 2, PROT_READ | PROT_WRITE,
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(0, gransz * 2, PROT_READ | PROT_WRITE,
|
||||
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0)));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + granularity * 0));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + granularity * 1));
|
||||
EXPECT_SYS(0, 0, munmap(p, granularity));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + granularity * 0));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + granularity * 1));
|
||||
EXPECT_SYS(0, 0, munmap(p, granularity * 2));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + granularity * 0));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + granularity * 1));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + gransz * 0));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + gransz * 1));
|
||||
EXPECT_SYS(0, 0, munmap(p, gransz));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + gransz * 0));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + gransz * 1));
|
||||
EXPECT_SYS(0, 0, munmap(p, gransz * 2));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + gransz * 0));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + gransz * 1));
|
||||
}
|
||||
|
||||
TEST(munmap, trimRight) {
|
||||
char *p;
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(0, granularity * 2, PROT_READ | PROT_WRITE,
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(0, gransz * 2, PROT_READ | PROT_WRITE,
|
||||
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0)));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + granularity * 0));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + granularity * 1));
|
||||
EXPECT_SYS(0, 0, munmap(p + granularity, granularity));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + granularity * 0));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + granularity * 1));
|
||||
EXPECT_SYS(0, 0, munmap(p, granularity * 2));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + granularity * 0));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + granularity * 1));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + gransz * 0));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + gransz * 1));
|
||||
EXPECT_SYS(0, 0, munmap(p + gransz, gransz));
|
||||
EXPECT_TRUE(testlib_memoryexists(p + gransz * 0));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + gransz * 1));
|
||||
EXPECT_SYS(0, 0, munmap(p, gransz * 2));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + gransz * 0));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + gransz * 1));
|
||||
}
|
||||
|
||||
TEST(munmap, memoryGone) {
|
||||
char *p;
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(0, granularity, PROT_READ | PROT_WRITE,
|
||||
ASSERT_NE(MAP_FAILED, (p = mmap(0, gransz, PROT_READ | PROT_WRITE,
|
||||
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0)));
|
||||
EXPECT_SYS(0, 0, munmap(p, granularity));
|
||||
EXPECT_SYS(0, 0, munmap(p, granularity));
|
||||
EXPECT_SYS(0, 0, munmap(p, gransz));
|
||||
EXPECT_SYS(0, 0, munmap(p, gransz));
|
||||
}
|
||||
|
||||
TEST(munmap, tinyFile_roundupUnmapSize) {
|
||||
|
@ -147,7 +148,7 @@ TEST(munmap, tinyFile_roundupUnmapSize) {
|
|||
ASSERT_NE(MAP_FAILED, (p = mmap(0, 5, PROT_READ, MAP_PRIVATE, 3, 0)));
|
||||
ASSERT_SYS(0, 0, close(3));
|
||||
EXPECT_TRUE(testlib_memoryexists(p));
|
||||
EXPECT_SYS(0, 0, munmap(p, granularity));
|
||||
EXPECT_SYS(0, 0, munmap(p, gransz));
|
||||
EXPECT_FALSE(testlib_memoryexists(p));
|
||||
EXPECT_FALSE(testlib_memoryexists(p + 5));
|
||||
}
|
||||
|
@ -171,22 +172,22 @@ TEST(munmap, tinyFile_preciseUnmapSize) {
|
|||
}
|
||||
|
||||
// clang-format off
|
||||
/* TEST(munmap, tinyFile_mapThriceUnmapOnce) { */
|
||||
/* char *p = (char *)0x000063d646e20000; */
|
||||
/* ASSERT_SYS(0, 3, open("doge", O_RDWR | O_CREAT | O_TRUNC, 0644)); */
|
||||
/* ASSERT_SYS (0, 5, write(3, "hello", 5)); */
|
||||
/* ASSERT_EQ(p+granularity*0, mmap(p+granularity*0, granularity, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED, -1, 0)); */
|
||||
/* ASSERT_EQ(p+granularity*1, mmap(p+granularity*1, 5, PROT_READ, MAP_PRIVATE|MAP_FIXED, 3, 0)); */
|
||||
/* ASSERT_EQ(p+granularity*3, mmap(p+granularity*3, 5, PROT_READ, MAP_PRIVATE|MAP_FIXED, 3, 0)); */
|
||||
/* ASSERT_SYS(0, 0, close(3)); */
|
||||
/* EXPECT_TRUE(testlib_memoryexists(p+granularity*0)); */
|
||||
/* EXPECT_TRUE(testlib_memoryexists(p+granularity*1)); */
|
||||
/* EXPECT_FALSE(testlib_memoryexists(p+granularity*2)); */
|
||||
/* EXPECT_TRUE(testlib_memoryexists(p+granularity*3)); */
|
||||
/* EXPECT_SYS(0, 0, munmap(p, granularity*5)); */
|
||||
/* EXPECT_FALSE(testlib_memoryexists(p+granularity*0)); */
|
||||
/* EXPECT_FALSE(testlib_memoryexists(p+granularity*1)); */
|
||||
/* EXPECT_FALSE(testlib_memoryexists(p+granularity*2)); */
|
||||
/* EXPECT_FALSE(testlib_memoryexists(p+granularity*3)); */
|
||||
/* } */
|
||||
TEST(munmap, tinyFile_mapThriceUnmapOnce) {
|
||||
char *p = randaddr();
|
||||
ASSERT_SYS(0, 3, open("doge", O_RDWR | O_CREAT | O_TRUNC, 0644));
|
||||
ASSERT_SYS(0, 5, write(3, "hello", 5));
|
||||
ASSERT_EQ(p+gransz*0, mmap(p+gransz*0, gransz, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED, -1, 0));
|
||||
ASSERT_EQ(p+gransz*1, mmap(p+gransz*1, 5, PROT_READ, MAP_PRIVATE|MAP_FIXED, 3, 0));
|
||||
ASSERT_EQ(p+gransz*3, mmap(p+gransz*3, 5, PROT_READ, MAP_PRIVATE|MAP_FIXED, 3, 0));
|
||||
ASSERT_SYS(0, 0, close(3));
|
||||
EXPECT_TRUE(testlib_memoryexists(p+gransz*0));
|
||||
EXPECT_TRUE(testlib_memoryexists(p+gransz*1));
|
||||
EXPECT_FALSE(testlib_memoryexists(p+gransz*2));
|
||||
EXPECT_TRUE(testlib_memoryexists(p+gransz*3));
|
||||
EXPECT_SYS(0, 0, munmap(p, gransz*5));
|
||||
EXPECT_FALSE(testlib_memoryexists(p+gransz*0));
|
||||
EXPECT_FALSE(testlib_memoryexists(p+gransz*1));
|
||||
EXPECT_FALSE(testlib_memoryexists(p+gransz*2));
|
||||
EXPECT_FALSE(testlib_memoryexists(p+gransz*3));
|
||||
}
|
||||
// clang-format on
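
These tests size their fixed mappings in units of gransz, the mmap() allocation granularity, which on some platforms (notably Windows) is larger than the CPU page size. A minimal sketch of how a test fixture might cache both values is shown below; it relies only on getpagesize() and getgransize() as used in the hunks that follow, while the header path and the SetUpOnce hook name are assumptions for illustration.

#include "libc/runtime/runtime.h"  // assumed location of the declarations

static int pagesz;  // smallest unit of memory protection
static int gransz;  // smallest unit that mmap(MAP_FIXED) may target

// Illustrative fixture: cache both sizes once so every test can round
// its mapping lengths portably.
void SetUpOnce(void) {
  pagesz = getpagesize();
  gransz = getgransize();
}

On Linux the two values are usually identical (4096 bytes), so a test that only ever ran there would not notice the difference.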

@ -16,6 +16,7 @@
#include "libc/intrin/tree.h"
#include "libc/intrin/kprintf.h"
#include "libc/intrin/maps.h"
#include "libc/intrin/tree.h"
#include "libc/macros.internal.h"
#include "libc/mem/mem.h"
#include "libc/runtime/runtime.h"
@ -71,12 +72,6 @@ struct number {
  struct Tree elem;
};

int number_search(const void *ra, const struct Tree *rb) {
  long a = (long)ra;
  const struct number *b = NUMBER_CONTAINER(rb);
  return (a > b->number) - (a < b->number);
}

int number_compare(const struct Tree *ra, const struct Tree *rb) {
  const struct number *a = NUMBER_CONTAINER(ra);
  const struct number *b = NUMBER_CONTAINER(rb);
@ -90,23 +85,32 @@ struct number *number_new(int number) {
  return res;
}

struct Tree *tree = 0;

void print(void) {
void print(struct Tree *tree) {
  for (struct Tree *e = tree_first(tree); e; e = tree_next(e))
    kprintf("%3d", NUMBER_CONTAINER(e)->number);
  kprintf("\n");
}

void print_reversed(void) {
void print_reversed(struct Tree *tree) {
  for (struct Tree *e = tree_last(tree); e; e = tree_prev(e))
    kprintf("%3d", NUMBER_CONTAINER(e)->number);
  kprintf("\n");
}

int number_search(const void *ra, const struct Tree *rb) {
  long a = (long)ra;
  const struct number *b = NUMBER_CONTAINER(rb);
  return (a > b->number) - (a < b->number);
}

void simple_test(void) {
  // 0 2 2 23 30 32 34 34 46 52 53 65 70 74 90 94 95 95 96 96
  // 96 96 95 95 94 90 74 70 65 53 52 46 34 34 32 30 23 2 2 0
  static const long kNumba[] = {74, 53, 96, 70, 34, 95, 30, 2,  96, 46,
                                23, 2,  52, 0,  34, 94, 90, 95, 32, 65};

  // test insertion works
  struct Tree *tree = 0;
  for (int i = 0; i < 20; ++i) {
    int number = kNumba[i];
    kprintf("%3d", number);
@ -114,26 +118,159 @@ void simple_test(void) {
    tree_check(tree, number_compare);
  }
  kprintf("\n");
  print();
  print_reversed();

  // test iteration works
  print(tree);

  // test reverse iteration works
  print_reversed(tree);

  // test removal works
  for (int i = 0; i < 20; ++i) {
    tree_remove(&tree, tree_get(tree, (void *)kNumba[i], number_search));
    tree_check(tree, number_compare);
    print();
    print(tree);
  }
}

int main() {
  ShowCrashReports();
  tree_check(__maps.maps, __maps_compare);
  kprintf("\n");
  __print_maps(15);
  kprintf("\n");
  simple_test();
  tree_check(__maps.maps, __maps_compare);
  // use up a bunch of memory
  for (int i = 0; i < 100000; ++i)
    tree_insert(&tree, &number_new(rand())->elem, number_compare);
  tree_check(tree, number_compare);
  tree_check(__maps.maps, __maps_compare);
  __print_maps(15);

  // visually verify maps get coalesced
  __print_maps(0);
}

void search_test(void) {
  struct Tree *x, *tree = 0;
  tree_insert(&tree, &number_new(1)->elem, number_compare);
  tree_insert(&tree, &number_new(3)->elem, number_compare);
  tree_insert(&tree, &number_new(5)->elem, number_compare);
  tree_insert(&tree, &number_new(7)->elem, number_compare);

  // Test tree_get()
  //
  // Returns node equal to given key.
  //
  //   [1 3 5 7]   [1 3 5 7]   [1 3 5 7]
  //     NULL          ↑          NULL
  //      4            3            8
  //
  x = tree_get(tree, (void *)4l, number_search);
  if (x)
    exit(1);
  x = tree_get(tree, (void *)3l, number_search);
  if (!x)
    exit(2);
  if (NUMBER_CONTAINER(x)->number != 3)
    exit(3);
  if (!tree_get(tree, (void *)7l, number_search))
    exit(27);
  if (tree_get(tree, (void *)8l, number_search))
    exit(28);

  // Test tree_floor()
  //
  // Returns last node less than or equal to given key.
  //
  //   [1 3 5 7]   [1 3 5 7]   [1 3 5 7]
  //      ↑            ↑              ↑
  //      4            3            8
  //
  x = tree_floor(tree, (void *)4l, number_search);
  if (!x)
    exit(4);
  if (NUMBER_CONTAINER(x)->number != 3)
    exit(5);
  x = tree_floor(tree, (void *)3l, number_search);
  if (!x)
    exit(6);
  if (NUMBER_CONTAINER(x)->number != 3)
    exit(7);
  if (!tree_floor(tree, (void *)7l, number_search))
    exit(24);
  x = tree_floor(tree, (void *)8l, number_search);
  if (!x)
    exit(25);
  if (NUMBER_CONTAINER(x)->number != 7)
    exit(30);
  if (tree_floor(tree, (void *)0l, number_search))
    exit(31);

  // Test tree_lower()
  //
  // Returns first node not less than given key.
  //
  //   [1 3 5 7]   [1 3 5 7]   [1 3 5 7]
  //        ↑          ↑          NULL
  //      4            3            8
  //
  x = tree_lower(tree, (void *)4l, number_search);
  if (!x)
    exit(4);
  if (NUMBER_CONTAINER(x)->number != 5)
    exit(8);
  x = tree_lower(tree, (void *)3l, number_search);
  if (!x)
    exit(9);
  if (NUMBER_CONTAINER(x)->number != 3)
    exit(10);
  if (!tree_lower(tree, (void *)7l, number_search))
    exit(22);
  if (tree_lower(tree, (void *)8l, number_search))
    exit(23);

  // Test tree_ceil()
  //
  // Returns first node greater than given key.
  //
  //   [1 3 5 7]   [1 3 5 7]   [1 3 5 7]
  //        ↑          ↑          NULL
  //      4            3            8
  //
  x = tree_ceil(tree, (void *)4l, number_search);
  if (!x)
    exit(11);
  if (NUMBER_CONTAINER(x)->number != 5)
    exit(12);
  x = tree_ceil(tree, (void *)3l, number_search);
  if (!x)
    exit(13);
  if (NUMBER_CONTAINER(x)->number != 5)
    exit(14);
  if (tree_ceil(tree, (void *)7l, number_search))
    exit(21);

  // Test tree_first()
  if (tree_first(NULL))
    exit(15);
  x = tree_first(tree);
  if (!x)
    exit(16);
  if (NUMBER_CONTAINER(x)->number != 1)
    exit(17);

  // Test tree_last()
  if (tree_last(NULL))
    exit(18);
  x = tree_last(tree);
  if (!x)
    exit(19);
  if (NUMBER_CONTAINER(x)->number != 7)
    exit(20);
}

int main() {
  ShowCrashReports();

  // show memory maps at startup
  tree_check(__maps.maps, __maps_compare);
  kprintf("\n");
  __print_maps(0);
  kprintf("\n");

  // run tests
  simple_test();
  search_test();
}
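
The assertions above recover their payload through NUMBER_CONTAINER(), whose definition sits earlier in tree_test.c, outside this excerpt. For readers new to intrusive trees, a plausible container-of style reconstruction is sketched here; the macro body is an assumption, only the struct number/elem layout and the call sites match the hunks above.

#include <stddef.h>  // offsetof

// Hypothetical reconstruction: map a pointer to the embedded struct Tree
// node back to the struct number that contains it.
#define NUMBER_CONTAINER(e) \
  ((struct number *)((char *)(e) - offsetof(struct number, elem)))

This is why the tree code never allocates nodes itself: each struct number carries its own struct Tree elem, and the search and compare callbacks translate between the two views.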

@ -188,20 +188,22 @@ BENCH(bulk_free, bench) {
  EZBENCH2("free(malloc(16)) MT", donothing, MallocFree());
}

#define ITERATIONS 10000
#define ITERATIONS 1000
#define THREADS 10
#define SIZE (256 * 1024)

void *Worker(void *arg) {
  /* for (int i = 0; i < ITERATIONS; ++i) { */
  /* char *p; */
  /* ASSERT_NE(NULL, (p = malloc(lemur64() % 128))); */
  /* ASSERT_NE(NULL, (p = realloc(p, max(lemur64() % 128, 1)))); */
  /* free(p); */
  /* } */
  for (int i = 0; i < ITERATIONS; ++i) {
    char *p;
    ASSERT_NE(NULL, (p = malloc(lemur64() % SIZE)));
    ASSERT_NE(NULL, (p = realloc(p, max(lemur64() % SIZE, 1))));
    free(p);
  }
  return 0;
}

BENCH(malloc, torture) {
  int i, n = __get_cpu_count();
TEST(malloc, torture) {
  int i, n = THREADS;
  pthread_t *t = gc(malloc(sizeof(pthread_t) * n));
  if (!n)
    return;
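
The torture test above reallocates blocks of up to 256 KiB across ten threads, which is the size range where relocating a mapping beats copying it. A standalone sketch of the same growth pattern is shown below for experimentation; the doubling loop and the 1 GiB ceiling are arbitrary choices, not part of the test suite.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void) {
  size_t size = 4096;
  char *p = malloc(size);
  if (!p)
    return 1;
  memset(p, 0, size);
  // Keep doubling the buffer. Once the block is big enough to live in
  // its own mapping, a realloc() backed by mremap() can relocate it
  // without copying the old contents byte by byte.
  while (size < ((size_t)1 << 30)) {
    size *= 2;
    char *q = realloc(p, size);
    if (!q) {
      free(p);
      return 1;
    }
    p = q;
  }
  printf("grew buffer to %zu bytes\n", size);
  free(p);
  return 0;
}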

@ -85,7 +85,7 @@ TEST(lz4, zoneFileGmt) {
              (mapsize = roundup(
                   LZ4_FRAME_BLOCKCONTENTSIZE(lz4check(dict)) +
                       (gmtsize = LZ4_FRAME_BLOCKCONTENTSIZE(lz4check(gmt))),
                   __granularity())))),
                   getpagesize())))),
          dict)),
      gmt);
  ASSERT_BINEQ(

@ -63,10 +63,10 @@ TEST(fork, testSharedMemory) {
  int *sharedvar;
  int *privatevar;
  EXPECT_NE(MAP_FAILED,
            (sharedvar = mmap(NULL, __granularity(), PROT_READ | PROT_WRITE,
            (sharedvar = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
                              MAP_SHARED | MAP_ANONYMOUS, -1, 0)));
  EXPECT_NE(MAP_FAILED,
            (privatevar = mmap(NULL, __granularity(), PROT_READ | PROT_WRITE,
            (privatevar = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)));
  stackvar = 1;
  *sharedvar = 1;
@ -77,18 +77,18 @@ TEST(fork, testSharedMemory) {
    ++stackvar;
    ++*sharedvar;
    ++*privatevar;
    msync((void *)ROUNDDOWN((intptr_t)&stackvar, __granularity()),
          __granularity(), MS_SYNC);
    EXPECT_NE(-1, msync(privatevar, __granularity(), MS_SYNC));
    EXPECT_NE(-1, msync(sharedvar, __granularity(), MS_SYNC));
    msync((void *)ROUNDDOWN((intptr_t)&stackvar, getpagesize()), getpagesize(),
          MS_SYNC);
    EXPECT_NE(-1, msync(privatevar, getpagesize(), MS_SYNC));
    EXPECT_NE(-1, msync(sharedvar, getpagesize(), MS_SYNC));
    _exit(0);
  }
  EXPECT_NE(-1, waitpid(pid, &ws, 0));
  EXPECT_EQ(1, stackvar);
  EXPECT_EQ(2, *sharedvar);
  EXPECT_EQ(1, *privatevar);
  EXPECT_NE(-1, munmap(sharedvar, __granularity()));
  EXPECT_NE(-1, munmap(privatevar, __granularity()));
  EXPECT_NE(-1, munmap(sharedvar, getpagesize()));
  EXPECT_NE(-1, munmap(privatevar, getpagesize()));
}

static volatile bool gotsigusr1;

@ -28,11 +28,11 @@
int i, *p;

void SetUp(void) {
  p = _mapshared(__granularity());
  p = _mapshared(getpagesize());
}

void TearDown(void) {
  munmap(p, __granularity());
  munmap(p, getpagesize());
}

void AtExit3(void) {

@ -169,5 +169,5 @@ BENCH(memcpy, bench) {
  BB(1023);
  BB(1024);
  BB(4096);
  BB(__granularity());
  BB(getpagesize());
}

@ -129,7 +129,7 @@ TEST(sem_timedwait, threads) {

TEST(sem_timedwait, processes) {
  int i, r, rc, n = 4, pshared = 1;
  sem_t *sm = _mapshared(__granularity()), *s[2] = {sm, sm + 1};
  sem_t *sm = _mapshared(getpagesize()), *s[2] = {sm, sm + 1};
  ASSERT_SYS(0, 0, sem_init(s[0], pshared, 0));
  ASSERT_SYS(0, 0, sem_init(s[1], pshared, 0));
  for (i = 0; i < n; ++i) {
@ -163,5 +163,5 @@ TEST(sem_timedwait, processes) {
  ASSERT_EQ(0, r);
  ASSERT_SYS(0, 0, sem_destroy(s[1]));
  ASSERT_SYS(0, 0, sem_destroy(s[0]));
  ASSERT_SYS(0, 0, munmap(sm, __granularity()));
  ASSERT_SYS(0, 0, munmap(sm, getpagesize()));
}

third_party/dlmalloc/directmap.inc
@ -57,7 +57,7 @@ static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb, int flags) {
    size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
    char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
                                  oldmmsize, newmmsize, flags);
    if (cp != CMFAIL) {
    if (cp != MAP_FAILED) {
      mchunkptr newp = (mchunkptr)(cp + offset);
      size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
      newp->head = psize;

third_party/dlmalloc/dlmalloc.c
@ -24,8 +24,7 @@
#include "libc/thread/tls.h"
#include "third_party/dlmalloc/vespene.internal.h"
#include "libc/thread/tls.h"
#include "libc/intrin/kprintf.h"
#include "libc/intrin/kprintf.h"
#include "libc/sysv/consts/mremap.h"
#include "third_party/nsync/mu.h"

#if !IsTiny()
@ -41,7 +40,7 @@
#endif

#define HAVE_MMAP 1
#define HAVE_MREMAP 0
#define HAVE_MREMAP 1
#define HAVE_MORECORE 0
#define USE_LOCKS 2
#define USE_SPIN_LOCKS 1
@ -197,7 +196,7 @@ static void* sys_alloc(mstate m, size_t nb) {
  }

  if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */
    char* mp = (char*)(dlmalloc_requires_more_vespene_gas(asize));
    char* mp = dlmalloc_requires_more_vespene_gas(asize);
    if (mp != CMFAIL) {
      tbase = mp;
      tsize = asize;
@ -368,7 +367,7 @@ static int sys_trim(mstate m, size_t pad) {
        size_t newsize = sp->size - extra;
        (void)newsize; /* placate people compiling -Wunused-variable */
        /* Prefer mremap, fall back to munmap */
        if (CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL ||
        if (CALL_MREMAP(sp->base, sp->size, newsize, 0) != MAP_FAILED ||
            (!extra || !CALL_MUNMAP(sp->base + newsize, extra))) {
          released = extra;
        }
@ -1263,7 +1262,7 @@ void* dlrealloc_single(void* oldmem, size_t bytes) {
  }
#endif /* FOOTERS */
  if (!PREACTION(m)) {
    mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1);
    mchunkptr newp = try_realloc_chunk(m, oldp, nb, MREMAP_MAYMOVE);
    POSTACTION(m);
    if (newp != 0) {
      check_inuse_chunk(m, newp);

third_party/dlmalloc/platform.inc
@ -511,7 +511,7 @@ FORCEINLINE int win32munmap(void* ptr, size_t size) {
#define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv))
#endif /* MREMAP */
#else /* HAVE_MMAP && HAVE_MREMAP */
#define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
#define CALL_MREMAP(addr, osz, nsz, mv) MAP_FAILED
#endif /* HAVE_MMAP && HAVE_MREMAP */

/* mstate bit set if continguous morecore disabled or failed */
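
On Linux, MREMAP_DEFAULT bottoms out in the mremap() system call, which moves page tables instead of copying bytes. A minimal sketch of that call pattern, independent of dlmalloc's macros; the sizes chosen here are arbitrary and the example is Linux-specific.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void) {
  size_t oldsz = 256 * 1024;
  size_t newsz = 64 * 1024 * 1024;
  char *p = mmap(0, oldsz, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED)
    return 1;
  memset(p, 42, oldsz);
  // Grow the mapping; the kernel may move it, but the contents follow
  // the relocated pages rather than being copied.
  char *q = mremap(p, oldsz, newsz, MREMAP_MAYMOVE);
  if (q == MAP_FAILED) {
    munmap(p, oldsz);
    return 1;
  }
  printf("old=%p new=%p first=%d\n", (void *)p, (void *)q, q[0]);
  munmap(q, newsz);
  return 0;
}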

third_party/zip/zipup.c
@ -47,9 +47,6 @@
// MISSING #include "os2/os2zip.h"
#endif

#undef PAGESIZE
#define PAGESIZE __granularity()

#if defined(MMAP)
#include "libc/calls/calls.h"
#include "libc/calls/weirdtypes.h"

@ -163,7 +163,7 @@ struct ElfWriter *elfwriter_open(const char *path, int mode, int arch) {
  CHECK_NOTNULL((elf = calloc(1, sizeof(struct ElfWriter))));
  CHECK_NOTNULL((elf->path = strdup(path)));
  CHECK_NE(-1, (elf->fd = open(elf->path, O_CREAT | O_TRUNC | O_RDWR, mode)));
  CHECK_NE(-1, ftruncate(elf->fd, (elf->mapsize = __granularity())));
  CHECK_NE(-1, ftruncate(elf->fd, (elf->mapsize = getgransize())));
  CHECK_NE(MAP_FAILED, (elf->map = mmap((void *)(intptr_t)kFixedmapStart,
                                        elf->mapsize, PROT_READ | PROT_WRITE,
                                        MAP_SHARED | MAP_FIXED, elf->fd, 0)));
@ -234,7 +234,7 @@ void *elfwriter_reserve(struct ElfWriter *elf, size_t size) {
  do {
    greed = greed + (greed >> 1);
  } while (need > greed);
  greed = ROUNDUP(greed, __granularity());
  greed = ROUNDUP(greed, getgransize());
  CHECK_NE(-1, ftruncate(elf->fd, greed));
  CHECK_NE(MAP_FAILED, mmap((char *)elf->map + elf->mapsize,
                            greed - elf->mapsize, PROT_READ | PROT_WRITE,

@ -28,14 +28,14 @@ void *Calloc(size_t a, size_t b) {
  static size_t n;
  z = a * b;
  if (!p) {
    n = __granularity();
    p = mmap((void *)0x300000000000, __granularity(), PROT_READ | PROT_WRITE,
    n = getgransize();
    p = mmap((void *)0x300000000000, getgransize(), PROT_READ | PROT_WRITE,
             MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
  }
  if (i + z > n) {
    mmap(p + i, __granularity(), PROT_READ | PROT_WRITE,
    mmap(p + i, getgransize(), PROT_READ | PROT_WRITE,
         MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
    n += __granularity();
    n += getgransize();
  }
  r = p + i;
  i += z;

@ -5019,7 +5019,7 @@ static int LuaProgramTokenBucket(lua_State *L) {
      VERBOSEF("(token) please run the blackholed program; see our website!");
    }
  }
  tokenbucket.b = _mapshared(ROUNDUP(1ul << cidr, __granularity()));
  tokenbucket.b = _mapshared(ROUNDUP(1ul << cidr, getgransize()));
  memset(tokenbucket.b, 127, 1ul << cidr);
  tokenbucket.cidr = cidr;
  tokenbucket.reject = reject;
@ -7339,7 +7339,7 @@ void RedBean(int argc, char *argv[]) {
  heartbeatinterval.tv_sec = 5;
  CHECK_GT(CLK_TCK, 0);
  CHECK_NE(MAP_FAILED,
           (shared = mmap(NULL, ROUNDUP(sizeof(struct Shared), __granularity()),
           (shared = mmap(NULL, ROUNDUP(sizeof(struct Shared), getgransize()),
                          PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
                          -1, 0)));
  if (daemonize) {

@ -970,7 +970,7 @@ int Plinko(int argc, char *argv[]) {
  }

  if (mmap((void *)0x200000000000,
           ROUNDUP((TERM + 1) * sizeof(g_mem[0]), __granularity()),
           ROUNDUP((TERM + 1) * sizeof(g_mem[0]), getgransize()),
           PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1,
           0) == MAP_FAILED ||
      mmap((void *)(0x200000000000 +
@ -979,7 +979,7 @@ int Plinko(int argc, char *argv[]) {
           PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1,
           0) == MAP_FAILED ||
      mmap((void *)0x400000000000,
           ROUNDUP((TERM + 1) * sizeof(g_mem[0]), __granularity()),
           ROUNDUP((TERM + 1) * sizeof(g_mem[0]), getgransize()),
           PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1,
           0) == MAP_FAILED ||
      mmap((void *)(0x400000000000 +

@ -452,7 +452,6 @@ static void PrintImage(unsigned yn, unsigned xn,
  size_t size;
  char *v, *vt;
  size = yn * (xn * (32 + (2 + (1 + 3) * 3) * 2 + 1 + 3)) * 1 + 5 + 1;
  size = ROUNDUP(size, __granularity());
  CHECK_NOTNULL((vt = _mapanon(size)));
  v = RenderImage(vt, yn, xn, rgb);
  *v++ = '\r';
@ -550,8 +549,8 @@ static void LoadFile(const char *path, size_t yn, size_t xn, void *rgb) {
  stbir_resize_uint8(data, gotx, goty, 0, rgb, xn * XS, yn * YS, 0, CN);
#else
  CHECK_EQ(CN, 3);
  data2size = ROUNDUP(sizeof(float) * goty * gotx * CN, __granularity());
  data3size = ROUNDUP(sizeof(float) * yn * YS * xn * XS * CN, __granularity());
  data2size = sizeof(float) * goty * gotx * CN;
  data3size = sizeof(float) * yn * YS * xn * XS * CN;
  CHECK_NOTNULL((data2 = _mapanon(data2size)));
  CHECK_NOTNULL((data3 = _mapanon(data3size)));
  rgb2lin(goty * gotx * CN, data2, data);
@ -625,7 +624,7 @@ int main(int argc, char *argv[]) {
  // FIXME: on the conversion stage should do 2Y because of halfblocks
  // printf( "filename >%s<\tx >%d<\ty >%d<\n\n", filename, x_, y_);
  size = y_ * YS * x_ * XS * CN;
  CHECK_NOTNULL((rgb = _mapanon(ROUNDUP(size, __granularity()))));
  CHECK_NOTNULL((rgb = _mapanon(size)));
  for (i = optind; i < argc; ++i) {
    if (!argv[i])
      continue;
@ -636,7 +635,7 @@ int main(int argc, char *argv[]) {
    }
    PrintImage(y_, x_, rgb);
  }
  munmap(rgb, ROUNDUP(size, __granularity()));
  munmap(rgb, size);
  return 0;
}

@ -37,8 +37,7 @@ forceinline void ConvolveGradient(unsigned yn, unsigned xn,
  size_t size;
  unsigned y, x, i, j, k;
  float py[4], px[4], (*tmp)[yn][xn][4];
  tmp =
      _mapanon((size = ROUNDUP(sizeof(float) * 4 * xn * yn, __granularity())));
  tmp = _mapanon((size = ROUNDUP(sizeof(float) * 4 * xn * yn, getgransize())));
  for (y = 0; y < yn - KW + 1; ++y) {
    for (x = 0; x < xn - KW + 1; ++x) {
      for (k = 0; k < 4; ++k)

@ -281,8 +281,8 @@ static void SetupCanvas(void) {
    munmap(buffer, buffersize);
  }
  displaysize = ROUNDUP(ROUNDUP((tyn * txn) << zoom, 16), 1ul << zoom);
  canvassize = ROUNDUP(displaysize, __granularity());
  buffersize = ROUNDUP(tyn * txn * 16 + 4096, __granularity());
  canvassize = ROUNDUP(displaysize, getgransize());
  buffersize = ROUNDUP(tyn * txn * 16 + 4096, getgransize());
  canvas = Allocate(canvassize);
  buffer = Allocate(buffersize);
}

@ -785,7 +785,7 @@ static void RasterIt(void) {
  static bool once;
  static void *buf;
  if (!once) {
    buf = _mapanon(ROUNDUP(fb0_.size, __granularity()));
    buf = _mapanon(ROUNDUP(fb0_.size, getgransize()));
    once = true;
  }
  WriteToFrameBuffer(fb0_.vscreen.yres_virtual, fb0_.vscreen.xres_virtual, buf,