Simplify memory manager

Justine Tunney 2024-07-04 10:52:16 -07:00
parent 5a9a08d1cf
commit 01587de761
No known key found for this signature in database
GPG key ID: BE714B4575D6E328
40 changed files with 451 additions and 311 deletions

ctl/is_void.h Normal file

@ -0,0 +1,24 @@
// -*-mode:c++;indent-tabs-mode:nil;c-basic-offset:4;tab-width:8;coding:utf-8-*-
// vi: set et ft=cpp ts=4 sts=4 sw=4 fenc=utf-8 :vi
#ifndef CTL_IS_VOID_H_
#define CTL_IS_VOID_H_
#include "integral_constant.h"
#include "remove_cv.h"
namespace ctl {
template<typename>
struct is_void_ : public ctl::false_type
{};
template<>
struct is_void_<void> : public ctl::true_type
{};
template<typename _Tp>
struct is_void : public is_void_<typename ctl::remove_cv<_Tp>::type>::type
{};
} // namespace ctl
#endif // CTL_IS_VOID_H_
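
As an aside, here is a minimal sketch (not part of the diff) of how the new
trait could be exercised; the "ctl/is_void.h" include path and the standard
value member inherited from ctl::integral_constant are assumptions based on
the header shown above.

// sketch: compile-time checks for the new ctl::is_void trait
#include "ctl/is_void.h"

static_assert(ctl::is_void<void>::value, "plain void is void");
static_assert(ctl::is_void<const volatile void>::value, "cv qualifiers get stripped");
static_assert(!ctl::is_void<int>::value, "non-void types are rejected");

int main()
{
    return 0;
}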


@ -525,7 +525,7 @@ class vector
capacity_ = new_capacity;
}
Allocator alloc_;
[[no_unique_address]] Allocator alloc_;
pointer data_;
size_type size_;
size_type capacity_;


@ -24,6 +24,7 @@
#include <sys/socket.h>
#include <time.h>
#include "libc/mem/leaks.h"
#include "libc/runtime/runtime.h"
/**
* @fileoverview greenbean lightweight threaded web server
@ -336,10 +337,9 @@ int main(int argc, char *argv[]) {
sigaddset(&block, SIGHUP);
sigaddset(&block, SIGQUIT);
pthread_attr_t attr;
int pagesz = getauxval(AT_PAGESZ);
unassert(!pthread_attr_init(&attr));
unassert(!pthread_attr_setstacksize(&attr, 65536));
unassert(!pthread_attr_setguardsize(&attr, pagesz));
unassert(!pthread_attr_setguardsize(&attr, getpagesize()));
unassert(!pthread_attr_setsigmask_np(&attr, &block));
unassert(!pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, 0));
pthread_t *th = gc(calloc(threads, sizeof(pthread_t)));
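
For context, a standalone sketch of the same thread-attribute pattern used by
the server above (not part of the diff); the 65536-byte stack and one-page
guard mirror the values in the hunk, with getpagesize() standing in for the
removed getauxval(AT_PAGESZ) call.

// sketch: spawn one thread with a 64 KiB stack and a single guard page
#include <pthread.h>
#include <unistd.h>

static void *worker(void *arg) {
  return arg;
}

int main(void) {
  pthread_t th;
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setstacksize(&attr, 65536);
  pthread_attr_setguardsize(&attr, getpagesize());
  pthread_create(&th, &attr, worker, 0);
  pthread_join(th, 0);
  pthread_attr_destroy(&attr);
  return 0;
}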


@ -30,7 +30,7 @@
* @return 0 on success, or -1 w/ errno
* @raise EINVAL if `advice` isn't valid or supported by system
* @raise EINVAL on Linux if addr/length isn't page size aligned with
* respect to `getauxval(AT_PAGESZ)`
* respect to `getpagesize()`
* @raise ENOMEM on Linux if addr/length overlaps unmapped regions
* @see libc/sysv/consts.sh
* @see fadvise()
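
To illustrate the alignment rule documented above, a hedged sketch (not from
the commit): both the address returned by mmap() and the length are multiples
of getpagesize(), so the Linux-specific EINVAL case described here is avoided.

// sketch: advise the kernel about a page-aligned anonymous mapping
#include <sys/mman.h>
#include <unistd.h>

int main(void) {
  size_t len = 4 * getpagesize();
  void *p = mmap(0, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED)
    return 1;
  if (madvise(p, len, MADV_WILLNEED))
    return 2;
  return munmap(p, len);
}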


@ -25,7 +25,7 @@
* @return 0 on success, or errno on error
* @raise EINVAL if `advice` isn't valid or supported by system
* @raise EINVAL on Linux if addr/length isn't page size aligned with
* respect to `getauxval(AT_PAGESZ)`
* respect to `getpagesize()`
* @raise ENOMEM on Linux if addr/length overlaps unmapped regions
* @returnserrno
*/


@ -21,6 +21,7 @@
#include "libc/calls/struct/rlimit.internal.h"
#include "libc/calls/syscall-sysv.internal.h"
#include "libc/dce.h"
#include "libc/errno.h"
#include "libc/intrin/describeflags.internal.h"
#include "libc/intrin/strace.internal.h"
#include "libc/macros.internal.h"
@ -77,6 +78,7 @@
*/
int setrlimit(int resource, const struct rlimit *rlim) {
int rc;
int olde = errno;
if (resource == 127) {
rc = einval();
} else if (!rlim) {
@ -85,18 +87,16 @@ int setrlimit(int resource, const struct rlimit *rlim) {
rc = _sysret(__syslib->__setrlimit(resource, rlim));
} else if (!IsWindows()) {
rc = sys_setrlimit(resource, rlim);
if (IsXnu() && !rc && resource == RLIMIT_AS) {
// TODO(jart): What's up with XNU and NetBSD?
__virtualmax = rlim->rlim_cur;
}
} else if (resource == RLIMIT_STACK) {
rc = enotsup();
} else if (resource == RLIMIT_AS) {
__virtualmax = rlim->rlim_cur;
rc = 0;
} else {
rc = einval();
}
if (resource == RLIMIT_AS) {
__virtualmax = rlim->rlim_cur;
errno = olde;
rc = 0;
}
STRACE("setrlimit(%s, %s) → %d% m", DescribeRlimitName(resource),
DescribeRlimit(0, rlim), rc);
return rc;
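
A hedged illustration of the behavioral change (a sketch, not part of the
commit): with this patch setrlimit(RLIMIT_AS, ...) always records the cap in
cosmo's own __virtualmax and returns 0, even where the native call fails or is
unsupported, so callers no longer need platform-specific fallbacks for the
address-space limit.

// sketch: cap the virtual address space at 1 GiB
#include <stdio.h>
#include <sys/resource.h>

int main(void) {
  struct rlimit rl;
  rl.rlim_cur = rl.rlim_max = 1ull << 30;
  if (setrlimit(RLIMIT_AS, &rl)) {
    perror("setrlimit");
    return 1;
  }
  puts("RLIMIT_AS applied");
  return 0;
}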


@ -303,7 +303,7 @@ static wontreturn dontinstrument void foreign_helper(void **p) {
static dontinline void elf_exec(const char *file, char **envp) {
// get microprocessor page size
long pagesz = getauxval(AT_PAGESZ);
long pagesz = getpagesize();
// load helper executable into address space
struct Loaded prog;


@ -16,8 +16,11 @@
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
*/
#include "libc/dce.h"
#include "libc/errno.h"
#include "libc/intrin/getauxval.internal.h"
#include "libc/nt/struct/systeminfo.h"
#include "libc/nt/systeminfo.h"
#include "libc/runtime/runtime.h"
#include "libc/sysv/consts/auxv.h"
@ -35,11 +38,17 @@ unsigned long getauxval(unsigned long key) {
x = __getauxval(key);
if (key == AT_PAGESZ) {
if (!x.isfound) {
if (!IsWindows()) {
#ifdef __aarch64__
x.value = 16384;
x.value = 16384;
#else
x.value = 4096;
x.value = 4096;
#endif
} else {
struct NtSystemInfo si;
GetSystemInfo(&si);
x.value = si.dwPageSize;
}
}
x.isfound = true;
}


@ -105,7 +105,7 @@ static size_t __get_stack_size(int pagesz, uintptr_t start, uintptr_t top) {
* This function works on every OS except Windows.
*/
struct AddrSize __get_main_stack(void) {
int pagesz = getauxval(AT_PAGESZ);
int pagesz = getpagesize();
uintptr_t start = (uintptr_t)__argv;
uintptr_t top = __get_main_top(pagesz);
uintptr_t bot = top - __get_stack_size(pagesz, start, top);


@ -17,11 +17,12 @@
PERFORMANCE OF THIS SOFTWARE.
*/
#include "libc/runtime/runtime.h"
#include "libc/sysv/consts/auxv.h"
/**
* Returns granularity of memory manager.
* @see sysconf(_SC_PAGE_SIZE) which is portable
*/
int getpagesize(void) {
return __granularity();
return getauxval(AT_PAGESZ);
}


@ -17,14 +17,21 @@
PERFORMANCE OF THIS SOFTWARE.
*/
#include "libc/dce.h"
#include "libc/nt/struct/systeminfo.h"
#include "libc/nt/systeminfo.h"
#include "libc/runtime/runtime.h"
#include "libc/sysv/consts/auxv.h"
int __granularity(void) {
if (IsWindows())
return 65536;
static int res;
if (!res)
res = getauxval(AT_PAGESZ);
if (!res) {
if (!IsWindows()) {
res = getpagesize();
} else {
struct NtSystemInfo si;
GetSystemInfo(&si);
res = si.dwAllocationGranularity;
}
}
return res;
}
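
A quick probe (sketch only, not in the commit) of the distinction this change
sharpens: getpagesize() now reports the CPU page size, while __granularity()
reports the mapping granularity, and the two differ only on Windows, where the
allocation granularity is typically 64 KiB. The hand-written extern "C"
declaration of the internal __granularity symbol is an assumption made for
this sketch.

// sketch: print the page size versus the memory manager granularity
#include <stdio.h>
#include <unistd.h>

extern "C" int __granularity(void);  // internal cosmo API, declared by hand

int main(void) {
  printf("getpagesize()         = %d\n", getpagesize());
  printf("__granularity()       = %d\n", __granularity());
  printf("sysconf(_SC_PAGESIZE) = %ld\n", sysconf(_SC_PAGESIZE));
  return 0;
}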


@ -155,17 +155,20 @@ __funline bool kischarmisaligned(const char *p, signed char t) {
}
privileged static bool32 kisdangerous_unlocked(const char *addr) {
struct Dll *e, *e2;
for (e = dll_first(__maps.used); e; e = e2) {
e2 = dll_next(__maps.used, e);
struct Map *map = MAP_CONTAINER(e);
if (map->addr <= addr && addr < map->addr + map->size) {
dll_remove(&__maps.used, e);
dll_make_first(&__maps.used, e);
return !(map->prot & PROT_READ);
}
struct Dll *e;
if ((e = dll_first(__maps.used))) {
do {
struct Map *map = MAP_CONTAINER(e);
if (map->addr <= addr && addr < map->addr + map->size) {
dll_remove(&__maps.used, e);
dll_make_first(&__maps.used, e);
return !(map->prot & PROT_READ);
}
} while ((e = dll_next(__maps.used, e)));
return true;
} else {
return false;
}
return true;
}
privileged bool32 kisdangerous(const void *addr) {


@ -34,8 +34,6 @@ struct Maps __maps;
void __maps_add(struct Map *map) {
dll_init(&map->elem);
dll_make_first(&__maps.used, &map->elem);
map->next = __maps.maps;
__maps.maps = map;
++__maps.count;
}
@ -44,23 +42,31 @@ static void __maps_adder(struct Map *map, int pagesz) {
__maps_add(map);
}
void __maps_stack(void *stackaddr, int pagesz, size_t stacksize, int stackprot,
intptr_t stackhand) {
__maps.stack.addr = stackaddr;
__maps.stack.size = stacksize;
void __maps_stack(char *stackaddr, int pagesz, int guardsize, size_t stacksize,
int stackprot, intptr_t stackhand) {
__maps.stack.addr = stackaddr + guardsize;
__maps.stack.size = stacksize - guardsize;
__maps.stack.prot = stackprot;
__maps.stack.h = stackhand;
__maps.stack.hand = -1;
__maps_adder(&__maps.stack, pagesz);
if (guardsize) {
__maps.guard.addr = stackaddr;
__maps.guard.size = guardsize;
__maps.guard.prot = PROT_NONE;
__maps.guard.hand = stackhand;
__maps_adder(&__maps.guard, pagesz);
}
}
void __maps_init(void) {
int pagesz = getauxval(AT_PAGESZ);
int pagesz = getpagesize();
// record _start() stack mapping
if (!IsWindows()) {
struct AddrSize stack;
stack = __get_main_stack();
__maps_stack(stack.addr, pagesz, stack.size, (uintptr_t)ape_stack_prot, 0);
__maps_stack(stack.addr, pagesz, 0, stack.size, (uintptr_t)ape_stack_prot,
0);
}
// record .text and .data mappings
@ -78,15 +84,15 @@ void __maps_init(void) {
__maps_adder(&text, pagesz);
}
privileged void __maps_lock(void) {
privileged bool __maps_lock(void) {
struct CosmoTib *tib;
if (!__threaded)
return;
return false;
if (!__tls_enabled)
return;
return false;
tib = __get_tls_privileged();
if (tib->tib_relock_maps++)
return;
return true;
while (atomic_exchange_explicit(&__maps.lock, 1, memory_order_acquire)) {
#if defined(__GNUC__) && defined(__aarch64__)
__asm__ volatile("yield");
@ -94,6 +100,7 @@ privileged void __maps_lock(void) {
__asm__ volatile("pause");
#endif
}
return false;
}
privileged void __maps_unlock(void) {


@ -8,7 +8,6 @@ COSMOPOLITAN_C_START_
#define MAP_CONTAINER(e) DLL_CONTAINER(struct Map, elem, e)
struct Map {
struct Map *next; /* for __maps.maps */
char *addr; /* granule aligned */
size_t size; /* must be nonzero */
struct Dll elem; /* for __maps.free */
@ -18,18 +17,20 @@ struct Map {
bool iscow; /* windows nt only */
bool readonlyfile; /* windows nt only */
unsigned visited; /* used for checks */
intptr_t h; /* windows nt only */
intptr_t hand; /* windows nt only */
};
struct Maps {
unsigned mono;
atomic_int lock;
struct Map *maps;
struct Dll *free;
struct Map stack;
struct Dll *used;
size_t count;
size_t pages;
struct Map stack;
struct Map guard;
bool once;
atomic_ulong rollo;
};
struct AddrSize {
@ -40,16 +41,15 @@ struct AddrSize {
extern struct Maps __maps;
void __maps_init(void);
void __maps_lock(void);
bool __maps_lock(void);
void __maps_check(void);
void __maps_unlock(void);
void __maps_add(struct Map *);
struct Map *__maps_alloc(void);
void __maps_free(struct Map *);
void __maps_insert(struct Map *);
int __munmap(char *, size_t, bool);
void *__mmap(char *, size_t, int, int, int, int64_t);
void __maps_stack(void *, int, size_t, int, intptr_t);
void __maps_stack(char *, int, int, size_t, int, intptr_t);
struct AddrSize __get_main_stack(void);
COSMOPOLITAN_C_END_


@ -24,6 +24,7 @@
#include "libc/calls/state.internal.h"
#include "libc/calls/struct/sigset.internal.h"
#include "libc/calls/syscall-sysv.internal.h"
#include "libc/cosmo.h"
#include "libc/dce.h"
#include "libc/errno.h"
#include "libc/intrin/atomic.h"
@ -54,9 +55,9 @@
#define MAP_FIXED_NOREPLACE_linux 0x100000
#define PGUP(x) (((x) + granularity - 1) & -granularity)
#define PGUP(x) (((x) + pagesz - 1) & -pagesz)
#if MMDEBUG
#if !MMDEBUG
#define ASSERT(x) (void)0
#else
#define ASSERT(x) \
@ -72,14 +73,14 @@
} while (0)
#endif
static atomic_ulong rollo;
static bool overlaps_existing_map(const char *addr, size_t size) {
int granularity = __granularity();
for (struct Map *map = __maps.maps; map; map = map->next)
static bool overlaps_existing_map(const char *addr, size_t size, int pagesz) {
for (struct Dll *e = dll_first(__maps.used); e;
e = dll_next(__maps.used, e)) {
struct Map *map = MAP_CONTAINER(e);
if (MAX(addr, map->addr) <
MIN(addr + PGUP(size), map->addr + PGUP(map->size)))
return true;
}
return false;
}
@ -87,43 +88,44 @@ void __maps_check(void) {
#if MMDEBUG
size_t maps = 0;
size_t pages = 0;
int granularity = getauxval(AT_PAGESZ);
unsigned id = __maps.mono++;
for (struct Map *map = __maps.maps; map; map = map->next) {
int pagesz = getpagesize();
unsigned id = ++__maps.mono;
for (struct Dll *e = dll_first(__maps.used); e;
e = dll_next(__maps.used, e)) {
struct Map *map = MAP_CONTAINER(e);
ASSERT(map->addr != MAP_FAILED);
ASSERT(map->visited != id);
ASSERT(map->size);
map->visited = id;
pages += PGUP(map->size) / granularity;
pages += (map->size + getpagesize() - 1) / getpagesize();
maps += 1;
}
ASSERT(maps = __maps.count);
ASSERT(pages == __maps.pages);
for (struct Dll *e = dll_first(__maps.used); e;
e = dll_next(__maps.used, e)) {
ASSERT(MAP_CONTAINER(e)->visited == id);
--maps;
}
ASSERT(maps == 0);
for (struct Map *m1 = __maps.maps; m1; m1 = m1->next)
for (struct Map *m2 = m1->next; m2; m2 = m2->next)
struct Map *m1 = MAP_CONTAINER(e);
for (struct Dll *f = dll_next(__maps.used, e); f;
f = dll_next(__maps.used, f)) {
struct Map *m2 = MAP_CONTAINER(f);
ASSERT(MAX(m1->addr, m2->addr) >=
MIN(m1->addr + PGUP(m1->size), m2->addr + PGUP(m2->size)));
}
}
#endif
}
void __maps_free(struct Map *map) {
map->next = 0;
map->size = 0;
map->addr = MAP_FAILED;
ASSERT(dll_is_alone(&map->elem));
dll_make_last(&__maps.free, &map->elem);
}
void __maps_insert(struct Map *map) {
struct Map *last = __maps.maps;
int granularity = getauxval(AT_PAGESZ);
__maps.pages += PGUP(map->size) / granularity;
static void __maps_insert(struct Map *map) {
struct Dll *e = dll_first(__maps.used);
struct Map *last = e ? MAP_CONTAINER(e) : 0;
__maps.pages += (map->size + getpagesize() - 1) / getpagesize();
if (last && !IsWindows() && //
map->addr == last->addr + last->size && //
(map->flags & MAP_ANONYMOUS) && //
@ -155,7 +157,6 @@ struct Map *__maps_alloc(void) {
if ((e = dll_first(__maps.free))) {
dll_remove(&__maps.free, e);
map = MAP_CONTAINER(e);
map->next = 0;
return map;
}
int granularity = __granularity();
@ -172,40 +173,43 @@ struct Map *__maps_alloc(void) {
dll_init(&map[i].elem);
__maps_free(map + i);
}
map->next = 0;
return map;
}
int __munmap(char *addr, size_t size, bool untrack_only) {
// validate arguments
int pagesz = getauxval(AT_PAGESZ);
int pagesz = getpagesize();
int granularity = __granularity();
if (((uintptr_t)addr & (granularity - 1)) || //
!size || (uintptr_t)addr + size < size)
return einval();
// normalize size
size = (size + granularity - 1) & -granularity;
// untrack mappings
int rc = 0;
struct Dll *cur;
struct Dll *next;
struct Dll *delete = 0;
__maps_lock();
struct Map *map = __maps.maps;
struct Map **prev = &__maps.maps;
while (map) {
if (__maps_lock()) {
__maps_unlock();
return edeadlk();
}
for (cur = dll_first(__maps.used); cur; cur = next) {
next = dll_next(__maps.used, cur);
struct Map *map = MAP_CONTAINER(cur);
char *map_addr = map->addr;
size_t map_size = map->size;
struct Map *next = map->next;
if (MAX(addr, map_addr) <
MIN(addr + PGUP(size), map_addr + PGUP(map_size))) {
if (addr <= map_addr && addr + PGUP(size) >= map_addr + PGUP(map_size)) {
if (MAX(addr, map_addr) < MIN(addr + size, map_addr + PGUP(map_size))) {
if (addr <= map_addr && addr + size >= map_addr + PGUP(map_size)) {
// remove mapping completely
dll_remove(&__maps.used, &map->elem);
dll_make_first(&delete, &map->elem);
*prev = next;
dll_remove(&__maps.used, cur);
dll_make_first(&delete, cur);
__maps.pages -= (map_size + pagesz - 1) / pagesz;
__maps.count -= 1;
map = next;
continue;
__maps_check();
} else if (IsWindows()) {
// you can't carve up memory maps on windows. our mmap() makes
// this not a problem (for non-enormous memory maps) by making
@ -225,14 +229,14 @@ int __munmap(char *addr, size_t size, bool untrack_only) {
if (!(map->flags & MAP_ANONYMOUS))
map->off += left;
__maps.pages -= (left + pagesz - 1) / pagesz;
__maps_check();
leftmap->addr = map_addr;
leftmap->size = left;
dll_make_first(&delete, &leftmap->elem);
__maps_check();
} else {
rc = -1;
}
} else if (addr + PGUP(size) >= map_addr + PGUP(map_size)) {
} else if (addr + size >= map_addr + PGUP(map_size)) {
// shave off righthand side of mapping
size_t left = addr - map_addr;
size_t right = map_addr + map_size - addr;
@ -240,23 +244,22 @@ int __munmap(char *addr, size_t size, bool untrack_only) {
if ((rightmap = __maps_alloc())) {
map->size = left;
__maps.pages -= (right + pagesz - 1) / pagesz;
__maps_check();
rightmap->addr = addr;
rightmap->size = right;
dll_make_first(&delete, &rightmap->elem);
__maps_check();
} else {
rc = -1;
}
} else {
// punch hole in mapping
size_t left = addr - map_addr;
size_t middle = PGUP(size);
size_t middle = size;
size_t right = map_size - middle - left;
struct Map *leftmap;
if ((leftmap = __maps_alloc())) {
struct Map *middlemap;
if ((middlemap = __maps_alloc())) {
leftmap->next = map;
leftmap->addr = map_addr;
leftmap->size = left;
leftmap->off = map->off;
@ -267,13 +270,12 @@ int __munmap(char *addr, size_t size, bool untrack_only) {
if (!(map->flags & MAP_ANONYMOUS))
map->off += left + middle;
dll_make_first(&__maps.used, &leftmap->elem);
*prev = leftmap;
__maps.pages -= (middle + pagesz - 1) / pagesz;
__maps.count += 1;
__maps_check();
middlemap->addr = addr;
middlemap->size = size;
dll_make_first(&delete, &middlemap->elem);
__maps_check();
} else {
rc = -1;
}
@ -282,22 +284,21 @@ int __munmap(char *addr, size_t size, bool untrack_only) {
}
}
}
prev = &map->next;
map = next;
}
__maps_unlock();
// delete mappings
for (struct Dll *e = dll_first(delete); e; e = dll_next(delete, e)) {
map = MAP_CONTAINER(e);
struct Map *map = MAP_CONTAINER(e);
if (!untrack_only) {
if (!IsWindows()) {
if (sys_munmap(map->addr, map->size))
rc = -1;
} else {
} else if (map->hand != -1) {
ASSERT(!((uintptr_t)map->addr & (granularity - 1)));
if (!UnmapViewOfFile(map->addr))
rc = -1;
if (!CloseHandle(map->h))
if (!CloseHandle(map->hand))
rc = -1;
}
}
@ -319,7 +320,7 @@ int __munmap(char *addr, size_t size, bool untrack_only) {
}
static void *__mmap_chunk(void *addr, size_t size, int prot, int flags, int fd,
int64_t off, int granularity) {
int64_t off, int pagesz, int granularity) {
// polyfill nuances of fixed mappings
int sysflags = flags;
@ -334,7 +335,7 @@ static void *__mmap_chunk(void *addr, size_t size, int prot, int flags, int fd,
sysflags |= MAP_FIXED_NOREPLACE_linux;
} else if (IsFreebsd() || IsNetbsd()) {
sysflags |= MAP_FIXED;
if (overlaps_existing_map(addr, size))
if (overlaps_existing_map(addr, size, pagesz))
return (void *)eexist();
} else {
noreplace = true;
@ -345,7 +346,10 @@ static void *__mmap_chunk(void *addr, size_t size, int prot, int flags, int fd,
// allocate Map object
struct Map *map;
__maps_lock();
if (__maps_lock()) {
__maps_unlock();
return (void *)edeadlk();
}
map = __maps_alloc();
__maps_unlock();
if (!map)
@ -405,7 +409,7 @@ TryAgain:
map->off = off;
map->prot = prot;
map->flags = flags;
map->h = res.maphandle;
map->hand = res.maphandle;
if (IsWindows()) {
map->iscow = (flags & MAP_TYPE) != MAP_SHARED && fd != -1;
map->readonlyfile = (flags & MAP_TYPE) == MAP_SHARED && fd != -1 &&
@ -419,10 +423,10 @@ TryAgain:
}
static void *__mmap_impl(char *addr, size_t size, int prot, int flags, int fd,
int64_t off, int granularity) {
int64_t off, int pagesz, int granularity) {
// validate file map args
if (fd != -1) {
if (!(flags & MAP_ANONYMOUS)) {
if (off & (granularity - 1))
return (void *)einval();
if (IsWindows()) {
@ -435,19 +439,21 @@ static void *__mmap_impl(char *addr, size_t size, int prot, int flags, int fd,
// mmap works fine on unix
if (!IsWindows())
return __mmap_chunk(addr, size, prot, flags, fd, off, granularity);
return __mmap_chunk(addr, size, prot, flags, fd, off, pagesz, granularity);
// if the concept of granularity wasn't exciting enough
if (!addr && !(flags & (MAP_FIXED | MAP_FIXED_NOREPLACE)))
addr = (char *)(WINBASE + atomic_fetch_add(&rollo, PGUP(size)) % WINMAXX);
// if the concept of pagesz wasn't exciting enough
if (!addr && !(flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
size_t slab = (size + granularity - 1) & -granularity;
addr = (char *)(WINBASE + atomic_fetch_add(&__maps.rollo, slab) % WINMAXX);
}
// windows forbids unmapping a subset of a map once it's made
if (size <= granularity || size > 100 * 1024 * 1024)
return __mmap_chunk(addr, size, prot, flags, fd, off, granularity);
return __mmap_chunk(addr, size, prot, flags, fd, off, pagesz, granularity);
// so we create a separate map for each granule in the mapping
if (!(flags & MAP_FIXED)) {
while (overlaps_existing_map(addr, size)) {
while (overlaps_existing_map(addr, size, pagesz)) {
if (flags & MAP_FIXED_NOREPLACE)
return (void *)eexist();
addr += granularity;
@ -457,7 +463,7 @@ static void *__mmap_impl(char *addr, size_t size, int prot, int flags, int fd,
while (size) {
char *got;
size_t amt = MIN(size, granularity);
got = __mmap_chunk(addr, amt, prot, flags, fd, off, granularity);
got = __mmap_chunk(addr, amt, prot, flags, fd, off, pagesz, granularity);
if (got != addr) {
if (got != MAP_FAILED)
__munmap(got, amt, false);
@ -476,6 +482,7 @@ static void *__mmap_impl(char *addr, size_t size, int prot, int flags, int fd,
void *__mmap(char *addr, size_t size, int prot, int flags, int fd,
int64_t off) {
char *res;
int pagesz = getpagesize();
int granularity = __granularity();
// validate arguments
@ -484,17 +491,12 @@ void *__mmap(char *addr, size_t size, int prot, int flags, int fd,
return (void *)einval();
if (size > 0x100000000000)
return (void *)enomem();
// normalize arguments
if (flags & MAP_ANONYMOUS) {
fd = -1;
off = 0;
size = PGUP(size);
}
if (__maps.count * pagesz + size > __virtualmax)
return (void *)enomem();
// create memory mappping
if (!__isfdkind(fd, kFdZip)) {
res = __mmap_impl(addr, size, prot, flags, fd, off, granularity);
res = __mmap_impl(addr, size, prot, flags, fd, off, pagesz, granularity);
} else {
res = _weaken(__zipos_mmap)(
addr, size, prot, flags,
@ -505,24 +507,14 @@ void *__mmap(char *addr, size_t size, int prot, int flags, int fd,
}
void *mmap(void *addr, size_t size, int prot, int flags, int fd, int64_t off) {
void *res;
BLOCK_SIGNALS;
BLOCK_CANCELATION;
res = __mmap(addr, size, prot, flags, fd, off);
ALLOW_CANCELATION;
ALLOW_SIGNALS;
void *res = __mmap(addr, size, prot, flags, fd, off);
STRACE("mmap(%p, %'zu, %s, %s, %d, %'ld) → %p% m", addr, size,
DescribeProtFlags(prot), DescribeMapFlags(flags), fd, off, res);
return res;
}
int munmap(void *addr, size_t size) {
int rc;
BLOCK_SIGNALS;
BLOCK_CANCELATION;
rc = __munmap(addr, size, false);
ALLOW_CANCELATION;
ALLOW_SIGNALS;
int rc = __munmap(addr, size, false);
STRACE("munmap(%p, %'zu) → %d% m", addr, size, rc);
return rc;
}


@ -59,56 +59,57 @@ int __mprotect(char *addr, size_t size, int prot) {
return 0;
// unix checks prot before checking size
int pagesz = getauxval(AT_PAGESZ);
if ((intptr_t)addr & (pagesz - 1))
int pagesz = getpagesize();
if (((intptr_t)addr & (pagesz - 1)) || (uintptr_t)addr + size < size)
return einval();
// normalize size
size = (size + pagesz - 1) & -pagesz;
// change mappings
int rc = 0;
__maps_lock();
struct Dll *cur;
bool found = false;
struct Map *map = __maps.maps;
struct Map **prev = &__maps.maps;
while (map) {
if (__maps_lock()) {
__maps_unlock();
return edeadlk();
}
for (cur = dll_first(__maps.used); cur; cur = dll_next(__maps.used, cur)) {
struct Map *map = MAP_CONTAINER(cur);
char *map_addr = map->addr;
size_t map_size = map->size;
struct Map *next = map->next;
char *beg = MAX(addr, map_addr);
char *end = MIN(addr + PGUP(size), map_addr + PGUP(map_size));
char *end = MIN(addr + size, map_addr + PGUP(map_size));
if (beg < end) {
found = true;
if (addr <= map_addr && addr + PGUP(size) >= map_addr + PGUP(map_size)) {
if (addr <= map_addr && addr + size >= map_addr + PGUP(map_size)) {
// change protection of entire mapping
if (!__mprotect_chunk(map_addr, map_size, prot, map->iscow)) {
map->prot = prot;
} else {
rc = -1;
}
} else if (IsWindows()) {
// windows does allow changing protection at 4096 byte chunks
// however we currently don't have data structures that track
// this within the 64 kb map granules that can't be broken up
if (__mprotect_chunk(beg, end - beg, prot, map->iscow) == -1)
rc = -1;
} else if (addr <= map_addr) {
// cleave lefthand side of mapping
// change lefthand side of mapping
size_t left = PGUP(addr + size - map_addr);
size_t right = map_size - left;
struct Map *leftmap;
if ((leftmap = __maps_alloc())) {
if (!__mprotect_chunk(map_addr, left, prot, false)) {
leftmap->next = map;
leftmap->addr = map_addr;
leftmap->size = left;
leftmap->prot = prot;
leftmap->off = map->off;
leftmap->flags = map->flags;
leftmap->iscow = map->iscow;
leftmap->readonlyfile = map->readonlyfile;
leftmap->hand = map->hand;
map->addr += left;
map->size = right;
map->hand = -1;
if (!(map->flags & MAP_ANONYMOUS))
map->off += left;
dll_make_first(&__maps.used, &leftmap->elem);
*prev = leftmap;
__maps.count += 1;
__maps_check();
} else {
@ -118,26 +119,28 @@ int __mprotect(char *addr, size_t size, int prot) {
} else {
rc = -1;
}
} else if (addr + PGUP(size) >= map_addr + PGUP(map_size)) {
// cleave righthand side of mapping
} else if (addr + size >= map_addr + PGUP(map_size)) {
// change righthand side of mapping
size_t left = addr - map_addr;
size_t right = map_addr + map_size - addr;
struct Map *leftmap;
if ((leftmap = __maps_alloc())) {
if (!__mprotect_chunk(map_addr + left, right, prot, false)) {
leftmap->next = map;
leftmap->addr = map_addr;
leftmap->size = left;
leftmap->off = map->off;
leftmap->prot = map->prot;
leftmap->flags = map->flags;
leftmap->iscow = map->iscow;
leftmap->readonlyfile = map->readonlyfile;
leftmap->hand = map->hand;
map->addr += left;
map->size = right;
map->prot = prot;
map->hand = -1;
if (!(map->flags & MAP_ANONYMOUS))
map->off += left;
dll_make_first(&__maps.used, &leftmap->elem);
*prev = leftmap;
__maps.count += 1;
__maps_check();
} else {
@ -148,34 +151,36 @@ int __mprotect(char *addr, size_t size, int prot) {
rc = -1;
}
} else {
// punch hole in mapping
// change middle of mapping
size_t left = addr - map_addr;
size_t middle = PGUP(size);
size_t middle = size;
size_t right = map_size - middle - left;
struct Map *leftmap;
if ((leftmap = __maps_alloc())) {
struct Map *midlmap;
if ((midlmap = __maps_alloc())) {
if (!__mprotect_chunk(map_addr + left, middle, prot, false)) {
leftmap->next = midlmap;
leftmap->addr = map_addr;
leftmap->size = left;
leftmap->off = map->off;
leftmap->prot = map->prot;
leftmap->flags = map->flags;
midlmap->next = map;
leftmap->iscow = map->iscow;
leftmap->readonlyfile = map->readonlyfile;
leftmap->hand = map->hand;
midlmap->addr = map_addr + left;
midlmap->size = middle;
midlmap->off = (map->flags & MAP_ANONYMOUS) ? 0 : map->off + left;
midlmap->prot = prot;
midlmap->flags = map->flags;
midlmap->hand = -1;
map->addr += left + middle;
map->size = right;
map->hand = -1;
if (!(map->flags & MAP_ANONYMOUS))
map->off += left + middle;
dll_make_first(&__maps.used, &leftmap->elem);
dll_make_first(&__maps.used, &midlmap->elem);
*prev = leftmap;
dll_make_first(&__maps.used, &leftmap->elem);
__maps.count += 2;
__maps_check();
} else {
@ -192,8 +197,6 @@ int __mprotect(char *addr, size_t size, int prot) {
}
}
}
prev = &map->next;
map = next;
}
// allow user to change mappings unknown to cosmo runtime
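
To see the new splitting logic from user level, a sketch (assuming a unix host
and an anonymous private mapping; not part of the commit): changing protection
on only the tail page takes the righthand-side path above, which splits the
tracked mapping into two Map entries with different protections.

// sketch: write-protect just the second page of a two-page mapping
#include <sys/mman.h>
#include <unistd.h>

int main(void) {
  size_t pagesz = getpagesize();
  char *p = (char *)mmap(0, 2 * pagesz, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED)
    return 1;
  if (mprotect(p + pagesz, pagesz, PROT_READ))  // tail page becomes read-only
    return 2;
  p[0] = 1;  // head page stays writable
  return munmap(p, 2 * pagesz);
}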


@ -27,7 +27,7 @@
textwindows int sys_msync_nt(char *addr, size_t size, int flags) {
int pagesz = getauxval(AT_PAGESZ);
int pagesz = getpagesize();
size = (size + pagesz - 1) & -pagesz;
if ((uintptr_t)addr & (pagesz - 1))
@ -35,7 +35,9 @@ textwindows int sys_msync_nt(char *addr, size_t size, int flags) {
int rc = 0;
__maps_lock();
for (struct Map *map = __maps.maps; map; map = map->next) {
for (struct Dll *e = dll_first(__maps.used); e;
e = dll_next(__maps.used, e)) {
struct Map *map = MAP_CONTAINER(e);
char *beg = MAX(addr, map->addr);
char *end = MIN(addr + size, map->addr + map->size);
if (beg < end)


@ -30,7 +30,7 @@
* Prints memory mappings.
*/
void __print_maps(void) {
int limit = 13;
int limit = 15;
long maptally = 0;
char mappingbuf[8], sb[16];
__maps_lock();
@ -43,8 +43,8 @@ void __print_maps(void) {
(DescribeMapping)(mappingbuf, map->prot, map->flags));
sizefmt(sb, map->size, 1024);
kprintf(" %!sb", sb);
if (map->h && map->h != -1)
kprintf(" h=%ld", map->h);
if (map->hand && map->hand != -1)
kprintf(" hand=%ld", map->hand);
if (map->iscow)
kprintf(" cow");
if (map->readonlyfile)
@ -53,7 +53,7 @@ void __print_maps(void) {
if (!--limit)
break;
}
kprintf("# %'zu bytes in %'zu mappings\n",
__maps.pages * getauxval(AT_PAGESZ), __maps.count);
kprintf("# %'zu bytes in %'zu mappings\n", __maps.pages * getpagesize(),
__maps.count);
__maps_unlock();
}


@ -5,7 +5,7 @@
#define SYSDEBUG 0
#endif
#define _NTTRACE 1 /* not configurable w/ flag yet */
#define _NTTRACE 0 /* not configurable w/ flag yet */
#define _POLLTRACE 0 /* not configurable w/ flag yet */
#define _DATATRACE 1 /* not configurable w/ flag yet */
#define _LOCKTRACE 0 /* not configurable w/ flag yet */


@ -16,7 +16,6 @@
#include "libc/assert.h"
#include "libc/dce.h"
#include "libc/errno.h"
#include "libc/intrin/kprintf.h"
#include "libc/mem/mem.h"
#include "libc/stdalign.internal.h"
#include "libc/stdckdint.h"
@ -44,8 +43,6 @@ static void tinymalloc_init(void) {
align = TINYMALLOC_MAX_ALIGN;
heap.memory = (char *)(((uintptr_t)heap.bits + align - 1) & -align);
heap.size = sizeof(heap.bits) - (heap.memory - heap.bits);
kprintf("heap.memory = %p\n", heap.memory);
kprintf("heap.size = %p\n", heap.size);
heap.once = 1;
}


@ -72,7 +72,7 @@ __msabi extern typeof(GetCurrentProcessId) *const __imp_GetCurrentProcessId;
static textwindows wontreturn void AbortFork(const char *func) {
#if SYSDEBUG
kprintf("fork() %s() failed with win32 error %d\n", func, GetLastError());
kprintf("fork() %!s() failed with win32 error %u\n", func, GetLastError());
#endif
TerminateThisProcess(SIGSTKFLT);
}
@ -94,57 +94,45 @@ static inline textwindows ssize_t ForkIo(int64_t h, char *p, size_t n,
struct NtOverlapped *)) {
size_t i;
uint32_t x;
for (i = 0; i < n; i += x)
for (i = 0; i < n; i += x) {
if (!f(h, p + i, n - i, &x, NULL))
return __winerr();
if (!x)
break;
}
return i;
}
static dontinline textwindows bool ForkIo2(int64_t h, void *buf, size_t n,
bool32 (*fn)(int64_t, void *,
uint32_t, uint32_t *,
struct NtOverlapped *),
const char *sf, bool ischild) {
static dontinline textwindows ssize_t ForkIo2(
int64_t h, void *buf, size_t n,
bool32 (*fn)(int64_t, void *, uint32_t, uint32_t *, struct NtOverlapped *),
const char *sf, bool ischild) {
ssize_t rc = ForkIo(h, buf, n, fn);
if (ischild) {
__tls_enabled_set(false); // prevent tls crash in kprintf
__pid = __imp_GetCurrentProcessId();
__klog_handle = 0;
__maps.used = 0;
}
NTTRACE("%s(%ld, %p, %'zu) → %'zd% m", sf, h, buf, n, rc);
return rc != -1;
return rc;
}
static dontinline textwindows bool WriteAll(int64_t h, void *buf, size_t n) {
bool ok;
ok = ForkIo2(h, buf, n, (void *)WriteFile, "WriteFile", false);
#ifndef NDEBUG
if (ok)
ok = ForkIo2(h, &n, sizeof(n), (void *)WriteFile, "WriteFile", false);
#endif
#if SYSDEBUG
if (!ok) {
kprintf("failed to write %zu bytes to forked child: %d\n", n,
GetLastError());
}
#endif
ok = ForkIo2(h, buf, n, (void *)WriteFile, "WriteFile", false) != -1;
if (!ok)
AbortFork("WriteAll");
// Sleep(10);
return ok;
}
static textwindows dontinline void ReadOrDie(int64_t h, void *buf, size_t n) {
if (!ForkIo2(h, buf, n, ReadFile, "ReadFile", true)) {
ssize_t got;
if ((got = ForkIo2(h, buf, n, ReadFile, "ReadFile", true)) == -1)
AbortFork("ReadFile1");
}
#ifndef NDEBUG
size_t got;
if (!ForkIo2(h, &got, sizeof(got), ReadFile, "ReadFile", true)) {
if (got != n)
AbortFork("ReadFile2");
}
if (got != n) {
AbortFork("ReadFile_SIZE_CHECK");
}
#endif
}
static textwindows int64_t MapOrDie(uint32_t prot, uint64_t size) {
@ -197,12 +185,35 @@ static textwindows void *Malloc(size_t size) {
return HeapAlloc(GetProcessHeap(), 0, size);
}
static textwindows void Free(void *addr) {
HeapFree(GetProcessHeap(), 0, addr);
}
static int CountMaps(struct Dll *maps) {
int count = 0;
for (struct Dll *e = dll_first(maps); e; e = dll_next(maps, e))
++count;
return count;
}
static struct Map **SortMaps(struct Dll *maps, int count) {
int j, i = 0;
struct Map **sorted = Malloc(count * sizeof(struct Map *));
for (struct Dll *e = dll_first(maps); e; e = dll_next(maps, e)) {
struct Map *map = MAP_CONTAINER(e);
for (j = i; j > 0 && sorted[j - 1]->addr > map->addr; --j)
sorted[j] = sorted[j - 1];
sorted[j] = map;
++i;
}
return sorted;
}
textwindows void WinMainForked(void) {
jmp_buf jb;
int64_t reader;
int64_t savetsc;
struct Map *map;
uint32_t varlen, oldprot;
uint32_t varlen;
char16_t fvar[21 + 1 + 21 + 1];
struct Fds *fds = __veil("r", &g_fds);
@ -222,29 +233,55 @@ textwindows void WinMainForked(void) {
ReadOrDie(reader, jb, sizeof(jb));
// read memory mappings from parent process
struct Map *maps = __maps.maps;
int n = 0;
struct Dll *maps = 0;
for (;;) {
map = Malloc(sizeof(*map));
ReadOrDie(reader, map, sizeof(*map));
struct Map *map = Malloc(sizeof(struct Map));
ReadOrDie(reader, map, sizeof(struct Map));
if (map->addr == MAP_FAILED) {
Free(map);
break;
}
dll_init(&map->elem);
dll_make_first(&maps, &map->elem);
++n;
}
// created sorted array of maps
struct Map **sorted = SortMaps(maps, n);
// map memory into process
int granularity = __granularity();
for (int i = 0; i < n; ++i) {
struct Map *map = sorted[i];
if ((uintptr_t)map->addr & (granularity - 1))
continue;
size_t size = map->size;
// get true length in case mprotect() chopped up actual win32 map
for (int j = i + 1;
j < n && sorted[j]->hand == -1 && map->addr + size == sorted[j]->addr;
++j) {
size += sorted[j]->size;
}
// obtain the most permissive access possible
unsigned prot, access;
if (map->readonlyfile) {
prot = kNtPageExecuteRead;
access = kNtFileMapRead | kNtFileMapExecute;
} else {
prot = kNtPageExecuteReadwrite;
access = kNtFileMapWrite | kNtFileMapExecute;
}
if ((map->flags & MAP_TYPE) != MAP_SHARED) {
// we don't need to close the map handle because sys_mmap_nt
// doesn't mark it inheritable across fork() for MAP_PRIVATE
ViewOrDie((map->h = MapOrDie(kNtPageExecuteReadwrite, map->size)),
kNtFileMapWrite | kNtFileMapExecute, 0, map->size, map->addr);
ReadOrDie(reader, map->addr, map->size);
map->hand = MapOrDie(prot, size);
ViewOrDie(map->hand, access, 0, size, map->addr);
ReadOrDie(reader, map->addr, size);
} else {
// we can however safely inherit MAP_SHARED with zero copy
ViewOrDie(map->h,
map->readonlyfile ? kNtFileMapRead | kNtFileMapExecute
: kNtFileMapWrite | kNtFileMapExecute,
map->off, map->size, map->addr);
ViewOrDie(map->hand, access, map->off, size, map->addr);
}
dll_init(&map->elem);
bool isdone = !map->next;
map->next = maps;
maps = map;
if (isdone)
break;
}
// read the .data and .bss program image sections
@ -259,19 +296,19 @@ textwindows void WinMainForked(void) {
// fixup memory manager
__maps.free = 0;
__maps.used = 0;
__maps.maps = maps;
__maps.count = 0;
__maps.pages = 0;
dll_init(&__maps.stack.elem);
dll_make_first(&__maps.used, &__maps.stack.elem);
for (struct Map *map = maps; map; map = map->next) {
for (int i = 0; i < n; ++i) {
struct Map *map = sorted[i];
__maps.count += 1;
__maps.pages += (map->size + 4095) / 4096;
dll_make_last(&__maps.used, &map->elem);
__maps.pages += (map->size + getpagesize() - 1) / getpagesize();
unsigned old_protect;
if (!VirtualProtect(map->addr, map->size, __prot2nt(map->prot, map->iscow),
&oldprot))
&old_protect))
AbortFork("VirtualProtect");
}
Free(sorted);
__maps.used = maps;
__maps_init();
// mitosis complete
@ -288,9 +325,8 @@ textwindows void WinMainForked(void) {
#if SYSDEBUG
RemoveVectoredExceptionHandler(oncrash);
#endif
if (_weaken(__sig_init)) {
if (_weaken(__sig_init))
_weaken(__sig_init)();
}
// jump back into function below
longjmp(jb, 1);
@ -299,7 +335,6 @@ textwindows void WinMainForked(void) {
textwindows int sys_fork_nt(uint32_t dwCreationFlags) {
char ok;
jmp_buf jb;
uint32_t op;
char **args;
int rc = -1;
struct Proc *proc;
@ -358,18 +393,62 @@ textwindows int sys_fork_nt(uint32_t dwCreationFlags) {
if (spawnrc != -1) {
CloseHandle(procinfo.hThread);
ok = WriteAll(writer, jb, sizeof(jb));
for (struct Map *map = __maps.maps; ok && map; map = map->next) {
int count = 0;
// this list will be populated with the maps we're transferring
struct Dll *e2, *maps = 0;
for (struct Dll *e = dll_first(__maps.used); ok && e; e = e2) {
e2 = dll_next(__maps.used, e);
struct Map *map = MAP_CONTAINER(e);
if (MAX((char *)__executable_start, map->addr) <
MIN((char *)_end, map->addr + map->size))
continue; // executable image is loaded by windows
dll_remove(&__maps.used, e);
dll_make_last(&maps, e);
ok = WriteAll(writer, map, sizeof(*map));
if (ok && (map->flags & MAP_TYPE) != MAP_SHARED) {
// XXX: forking destroys thread guard pages currently
VirtualProtect(map->addr, map->size,
__prot2nt(map->prot | PROT_READ, map->iscow), &op);
ok = WriteAll(writer, map->addr, map->size);
}
++count;
}
// send a terminating Map struct to child
if (ok) {
struct Map map;
map.addr = MAP_FAILED;
ok = WriteAll(writer, &map, sizeof(map));
}
// now write content of each map to child
int granularity = __granularity();
struct Map **sorted = SortMaps(maps, count);
uint32_t *old_protect = Malloc(count * 4);
for (int i = 0; ok && i < count; ++i) {
struct Map *map = sorted[i];
// we only need to worry about the base mapping
if ((uintptr_t)map->addr & (granularity - 1))
continue;
// shared mappings don't need to be copied
if ((map->flags & MAP_TYPE) == MAP_SHARED)
continue;
// get true length in case mprotect() chopped up actual win32 map
int j;
size_t size = map->size;
for (j = i + 1; j < count && sorted[j]->hand == -1 &&
map->addr + size == sorted[j]->addr;
++j) {
size += sorted[j]->size;
}
for (int k = i; ok && k < j; ++k)
if (!(sorted[k]->prot & PROT_READ))
ok = VirtualProtect(
sorted[k]->addr, sorted[k]->size,
__prot2nt(sorted[k]->prot | PROT_READ, map->iscow),
&old_protect[k]);
if (ok)
ok = WriteAll(writer, map->addr, size);
for (int k = i; ok && k < j; ++k)
if (!(sorted[k]->prot & PROT_READ))
ok = VirtualProtect(sorted[k]->addr, sorted[k]->size,
old_protect[k], &old_protect[k]);
}
Free(old_protect);
Free(sorted);
dll_make_first(&__maps.used, maps);
if (ok)
ok = WriteAll(writer, __data_start, __data_end - __data_start);
if (ok)


@ -35,5 +35,5 @@ char __is_stack_overflow(siginfo_t *si, void *arg) {
return false;
intptr_t sp = uc->uc_mcontext.SP;
intptr_t fp = (intptr_t)si->si_addr;
return ABS(fp - sp) < getauxval(AT_PAGESZ);
return ABS(fp - sp) < getpagesize();
}


@ -49,7 +49,7 @@ static struct SymbolTable *OpenSymbolTableImpl(const char *filename) {
size_t n, m, tsz, size;
const Elf64_Sym *symtab, *sym;
ptrdiff_t names_offset, name_base_offset, stp_offset;
long pagesz = getauxval(AT_PAGESZ);
long pagesz = getpagesize();
map = MAP_FAILED;
if ((fd = open(filename, O_RDONLY | O_CLOEXEC)) == -1)
return 0;


@ -53,7 +53,7 @@ int sethostid(long) libcesque;
char *getlogin(void) libcesque;
int getlogin_r(char *, size_t) libcesque;
int login_tty(int) libcesque;
int getpagesize(void) libcesque;
int getpagesize(void) pureconst libcesque;
int syncfs(int) dontthrow libcesque;
int vhangup(void) libcesque;
int getdtablesize(void) libcesque;


@ -22,10 +22,10 @@
#include "libc/calls/struct/sysinfo.h"
#include "libc/calls/struct/sysinfo.internal.h"
#include "libc/dce.h"
#include "libc/intrin/maps.h"
#include "libc/limits.h"
#include "libc/macros.internal.h"
#include "libc/runtime/clktck.h"
#include "libc/intrin/maps.h"
#include "libc/runtime/runtime.h"
#include "libc/runtime/sysconf.h"
#include "libc/sysv/consts/_posix.h"
@ -60,7 +60,7 @@ long sysconf(int name) {
case _SC_CLK_TCK:
return CLK_TCK;
case _SC_PAGESIZE:
return __granularity();
return getpagesize();
case _SC_ARG_MAX:
return __get_arg_max();
case _SC_SIGSTKSZ:


@ -38,6 +38,8 @@
#include "libc/nt/process.h"
#include "libc/nt/runtime.h"
#include "libc/nt/signals.h"
#include "libc/nt/struct/systeminfo.h"
#include "libc/nt/systeminfo.h"
#include "libc/nt/thunk/msabi.h"
#include "libc/runtime/internal.h"
#include "libc/runtime/memtrack.internal.h"
@ -64,6 +66,7 @@ __msabi extern typeof(GetEnvironmentStrings) *const __imp_GetEnvironmentStringsW
__msabi extern typeof(GetEnvironmentVariable) *const __imp_GetEnvironmentVariableW;
__msabi extern typeof(GetFileAttributes) *const __imp_GetFileAttributesW;
__msabi extern typeof(GetStdHandle) *const __imp_GetStdHandle;
__msabi extern typeof(GetSystemInfo) *const __imp_GetSystemInfo;
__msabi extern typeof(GetUserName) *const __imp_GetUserNameW;
__msabi extern typeof(MapViewOfFileEx) *const __imp_MapViewOfFileEx;
__msabi extern typeof(SetConsoleCP) *const __imp_SetConsoleCP;
@ -209,8 +212,12 @@ static abi wontreturn void WinInit(const char16_t *cmdline) {
uint32_t oldattr;
__imp_VirtualProtect(stackaddr, GetGuardSize(),
kNtPageReadwrite | kNtPageGuard, &oldattr);
if (_weaken(__maps_stack))
_weaken(__maps_stack)(stackaddr, 4096, stacksize, stackprot, stackhand);
if (_weaken(__maps_stack)) {
struct NtSystemInfo si;
__imp_GetSystemInfo(&si);
_weaken(__maps_stack)(stackaddr, si.dwPageSize, GetGuardSize(), stacksize,
stackprot, stackhand);
}
struct WinArgs *wa =
(struct WinArgs *)(stackaddr + (stacksize - sizeof(struct WinArgs)));


@ -128,7 +128,7 @@ static void __zipos_init(void) {
if (!fstat(fd, &st) && (map = mmap(0, st.st_size, PROT_READ, MAP_SHARED,
fd, 0)) != MAP_FAILED) {
if ((cdir = GetZipEocd(map, st.st_size, &err))) {
long pagesz = getauxval(AT_PAGESZ);
long pagesz = getpagesize();
__zipos_dismiss(map, cdir, pagesz);
__zipos.map = map;
__zipos.cdir = cdir;


@ -38,7 +38,7 @@
errno_t pthread_attr_init(pthread_attr_t *attr) {
*attr = (pthread_attr_t){
.__stacksize = GetStackSize(),
.__guardsize = getauxval(AT_PAGESZ),
.__guardsize = getpagesize(),
};
return 0;
}


@ -144,7 +144,7 @@ static int FixupCustomStackOnOpenbsd(pthread_attr_t *attr) {
size_t n;
uintptr_t x, y;
int e, rc, pagesz;
pagesz = getauxval(AT_PAGESZ);
pagesz = getpagesize();
n = attr->__stacksize;
x = (uintptr_t)attr->__stackaddr;
y = ROUNDUP(x, pagesz);
@ -210,7 +210,7 @@ static errno_t pthread_create_impl(pthread_t *thread,
}
} else {
// cosmo is managing the stack
int pagesize = getauxval(AT_PAGESZ);
int pagesize = getpagesize();
pt->pt_attr.__guardsize = ROUNDUP(pt->pt_attr.__guardsize, pagesize);
pt->pt_attr.__stacksize = pt->pt_attr.__stacksize;
if (pt->pt_attr.__guardsize + pagesize > pt->pt_attr.__stacksize) {


@ -46,7 +46,7 @@ void SetUpOnce(void) {
exit(0);
}
testlib_enable_tmp_setup_teardown();
pagesize = (size_t)getauxval(AT_PAGESZ);
pagesize = (size_t)getpagesize();
// ASSERT_SYS(0, 0, pledge("stdio rpath wpath cpath", 0));
}


@ -65,8 +65,8 @@ TEST(madvise, subPages) {
ASSERT_NE(MAP_FAILED, (p = mmap(0, __granularity(), PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)));
ASSERT_SYS(0, 0,
madvise(p + getauxval(AT_PAGESZ),
__granularity() - getauxval(AT_PAGESZ), MADV_WILLNEED));
madvise(p + getpagesize(), __granularity() - getpagesize(),
MADV_WILLNEED));
ASSERT_SYS(0, 0, munmap(p, __granularity()));
}


@ -136,8 +136,8 @@ TEST(setrlimit, testMemoryLimit) {
ASSERT_NE(-1, (wstatus = xspawn(0)));
if (wstatus == -2) {
ASSERT_EQ(0, SetKernelEnforcedMemoryLimit(MEM));
for (gotsome = false, i = 0; i < (MEM * 2) / getauxval(AT_PAGESZ); ++i) {
p = mmap(0, getauxval(AT_PAGESZ), PROT_READ | PROT_WRITE,
for (gotsome = false, i = 0; i < (MEM * 2) / __granularity(); ++i) {
p = mmap(0, __granularity(), PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE | MAP_POPULATE, -1, 0);
if (p != MAP_FAILED) {
gotsome = true;
@ -149,7 +149,7 @@ TEST(setrlimit, testMemoryLimit) {
ASSERT_EQ(ENOMEM, errno);
_Exit(0);
}
rngset(p, getauxval(AT_PAGESZ), _rand64, -1);
rngset(p, __granularity(), _rand64, -1);
}
_Exit(1);
}
@ -162,24 +162,18 @@ TEST(setrlimit, testMemoryLimit) {
TEST(setrlimit, testVirtualMemoryLimit) {
char *p;
int i, wstatus;
if (IsXnu())
return; /* doesn't work on darwin */
if (IsOpenbsd())
return; /* unavailable on openbsd */
if (IsWindows())
return; /* of course it doesn't work on windows */
ASSERT_NE(-1, (wstatus = xspawn(0)));
if (wstatus == -2) {
ASSERT_EQ(0, setrlimit(RLIMIT_AS, &(struct rlimit){MEM, MEM}));
for (i = 0; i < (MEM * 2) / getauxval(AT_PAGESZ); ++i) {
p = sys_mmap(0, getauxval(AT_PAGESZ), PROT_READ | PROT_WRITE,
for (i = 0; i < (MEM * 2) / __granularity(); ++i) {
p = sys_mmap(0, __granularity(), PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE | MAP_POPULATE, -1, 0)
.addr;
if (p == MAP_FAILED) {
ASSERT_EQ(ENOMEM, errno);
_Exit(0);
}
rngset(p, getauxval(AT_PAGESZ), _rand64, -1);
rngset(p, __granularity(), _rand64, -1);
}
_Exit(1);
}
@ -205,15 +199,15 @@ TEST(setrlimit, testDataMemoryLimit) {
ASSERT_NE(-1, (wstatus = xspawn(0)));
if (wstatus == -2) {
ASSERT_EQ(0, setrlimit(RLIMIT_DATA, &(struct rlimit){MEM, MEM}));
for (i = 0; i < (MEM * 2) / getauxval(AT_PAGESZ); ++i) {
p = sys_mmap(0, getauxval(AT_PAGESZ), PROT_READ | PROT_WRITE,
for (i = 0; i < (MEM * 2) / __granularity(); ++i) {
p = sys_mmap(0, __granularity(), PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE | MAP_POPULATE, -1, 0)
.addr;
if (p == MAP_FAILED) {
ASSERT_EQ(ENOMEM, errno);
_Exit(0);
}
rngset(p, getauxval(AT_PAGESZ), _rand64, -1);
rngset(p, __granularity(), _rand64, -1);
}
_Exit(1);
}


@ -75,7 +75,7 @@ TEST(sigbus, test) {
// map two pages of the file into memory
char *map;
long pagesz = getauxval(AT_PAGESZ);
long pagesz = getpagesize();
ASSERT_NE(MAP_FAILED,
(map = mmap(0, pagesz * 2, PROT_READ, MAP_PRIVATE, 3, 0)));


@ -228,7 +228,7 @@ TEST(ksnprintf, fuzzTheUnbreakable) {
char *f, b[32];
_Alignas(65536) static const char weasel[65535];
f = (void *)__veil("r", weasel);
EXPECT_SYS(0, 0, mprotect(f, __granularity(), PROT_READ | PROT_WRITE));
EXPECT_SYS(0, 0, mprotect(f, getpagesize(), PROT_READ | PROT_WRITE));
strcpy(f, "hello %s\n");
EXPECT_EQ(12, ksnprintf(b, sizeof(b), f, "world"));
EXPECT_STREQ("hello world\n", b);
@ -240,7 +240,7 @@ TEST(ksnprintf, fuzzTheUnbreakable) {
f[Rando() & 15] = '%';
ksnprintf(b, sizeof(b), f, lemur64(), lemur64(), lemur64());
}
EXPECT_SYS(0, 0, mprotect(f, __granularity(), PROT_READ));
EXPECT_SYS(0, 0, mprotect(f, getpagesize(), PROT_READ));
}
TEST(kprintf, testFailure_wontClobberErrnoAndBypassesSystemCallSupport) {


@ -60,9 +60,11 @@
__static_yoink("zipos");
int pagesz;
int granularity;
void SetUpOnce(void) {
pagesz = getpagesize();
granularity = __granularity();
testlib_enable_tmp_setup_teardown();
// ASSERT_SYS(0, 0, pledge("stdio rpath wpath cpath proc", 0));
@ -84,27 +86,41 @@ TEST(mmap, overflow) {
TEST(mmap, noreplaceImage) {
ASSERT_SYS(EEXIST, MAP_FAILED,
mmap(__executable_start, granularity, PROT_READ,
mmap(__executable_start, 1, PROT_READ,
MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED_NOREPLACE, -1, 0));
}
TEST(mmap, noreplaceExistingMap) {
char *p;
ASSERT_NE(MAP_FAILED, (p = mmap(0, granularity, PROT_READ,
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0)));
ASSERT_NE(MAP_FAILED,
(p = mmap(0, 1, PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0)));
ASSERT_SYS(EEXIST, MAP_FAILED,
mmap(p, granularity, PROT_READ,
mmap(p, 1, PROT_READ,
MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED_NOREPLACE, -1, 0));
EXPECT_SYS(0, 0, munmap(p, granularity));
EXPECT_SYS(0, 0, munmap(p, 1));
}
TEST(mmap, pageBeyondGone) {
int pagesz = getpagesize();
char *p = mmap(0, pagesz * 2, PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
ASSERT_EQ(0, munmap(p, pagesz * 2));
p = mmap(p, 1, PROT_READ | PROT_WRITE,
MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
ASSERT_NE(MAP_FAILED, p);
EXPECT_TRUE(testlib_memoryexists(p));
EXPECT_TRUE(testlib_memoryexists(p + pagesz - 1));
EXPECT_FALSE(testlib_memoryexists(p + pagesz));
ASSERT_EQ(0, munmap(p, 1));
}
TEST(mmap, fixedTaken) {
char *p;
ASSERT_NE(MAP_FAILED, (p = mmap(0, granularity, PROT_READ,
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0)));
ASSERT_NE(MAP_FAILED, mmap(p, granularity, PROT_READ,
ASSERT_NE(MAP_FAILED,
(p = mmap(0, 1, PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0)));
ASSERT_NE(MAP_FAILED, mmap(p, 1, PROT_READ,
MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0));
EXPECT_SYS(0, 0, munmap(p, granularity));
EXPECT_SYS(0, 0, munmap(p, 1));
}
TEST(mmap, hint) {
@ -181,7 +197,7 @@ TEST(mmap, testMapFile_fdGetsClosed_makesNoDifference) {
EXPECT_NE(-1, close(fd));
EXPECT_STREQN("hello", p, 5);
p[1] = 'a';
EXPECT_NE(-1, msync(p, getauxval(AT_PAGESZ), MS_SYNC));
EXPECT_NE(-1, msync(p, getpagesize(), MS_SYNC));
ASSERT_NE(-1, (fd = open(path, O_RDONLY)));
EXPECT_EQ(5, read(fd, buf, 5));
EXPECT_STREQN("hallo", buf, 5);
@ -193,7 +209,7 @@ TEST(mmap, testMapFile_fdGetsClosed_makesNoDifference) {
TEST(mmap, fileOffset) {
int fd;
char *map;
int offset_align = IsWindows() ? granularity : getauxval(AT_PAGESZ);
int offset_align = IsWindows() ? granularity : getpagesize();
ASSERT_NE(-1, (fd = open("foo", O_CREAT | O_RDWR, 0644)));
EXPECT_NE(-1, ftruncate(fd, offset_align * 2));
EXPECT_NE(-1, pwrite(fd, "hello", 5, offset_align * 0));
@ -433,15 +449,15 @@ void *ptrs[N];
void BenchMmapPrivate(void) {
void *p;
p = mmap(0, granularity, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE,
-1, 0);
p = mmap(0, granularity * 10, PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
if (p == MAP_FAILED)
__builtin_trap();
ptrs[count++] = p;
}
void BenchUnmap(void) {
if (munmap(ptrs[--count], granularity))
if (munmap(ptrs[--count], granularity * 10))
__builtin_trap();
}


@ -120,9 +120,9 @@ void TearDown(void) {
}
TEST(mprotect, testOkMemory) {
char *p = gc(memalign(getauxval(AT_PAGESZ), getauxval(AT_PAGESZ)));
char *p = gc(memalign(getpagesize(), getpagesize()));
p[0] = 0;
ASSERT_NE(-1, mprotect(p, getauxval(AT_PAGESZ), PROT_READ | PROT_WRITE));
ASSERT_NE(-1, mprotect(p, getpagesize(), PROT_READ | PROT_WRITE));
p[0] = 1;
EXPECT_EQ(1, p[0]);
EXPECT_FALSE(gotsegv);
@ -131,20 +131,19 @@ TEST(mprotect, testOkMemory) {
TEST(mprotect, testSegfault_writeToReadOnlyAnonymous) {
volatile char *p;
p = gc(memalign(getauxval(AT_PAGESZ), getauxval(AT_PAGESZ)));
p = gc(memalign(getpagesize(), getpagesize()));
EXPECT_FALSE(gotsegv);
p[0] = 1;
EXPECT_FALSE(gotsegv);
EXPECT_FALSE(gotbusted);
EXPECT_NE(-1, mprotect((void *)p, getauxval(AT_PAGESZ), PROT_READ));
EXPECT_NE(-1, mprotect((void *)p, getpagesize(), PROT_READ));
__expropriate(p[0]);
EXPECT_FALSE(gotsegv);
EXPECT_FALSE(gotbusted);
p[0] = 2;
EXPECT_TRUE(gotsegv | gotbusted);
EXPECT_EQ(1, p[0]);
EXPECT_NE(-1,
mprotect((void *)p, getauxval(AT_PAGESZ), PROT_READ | PROT_WRITE));
EXPECT_NE(-1, mprotect((void *)p, getpagesize(), PROT_READ | PROT_WRITE));
}
TEST(mprotect, testExecOnly_canExecute) {
@ -164,22 +163,21 @@ TEST(mprotect, testExecOnly_canExecute) {
TEST(mprotect, testProtNone_cantEvenRead) {
volatile char *p;
p = gc(memalign(getauxval(AT_PAGESZ), getauxval(AT_PAGESZ)));
EXPECT_NE(-1, mprotect((void *)p, getauxval(AT_PAGESZ), PROT_NONE));
p = gc(memalign(getpagesize(), getpagesize()));
EXPECT_NE(-1, mprotect((void *)p, getpagesize(), PROT_NONE));
__expropriate(p[0]);
EXPECT_TRUE(gotsegv | gotbusted);
EXPECT_NE(-1,
mprotect((void *)p, getauxval(AT_PAGESZ), PROT_READ | PROT_WRITE));
EXPECT_NE(-1, mprotect((void *)p, getpagesize(), PROT_READ | PROT_WRITE));
}
TEST(mprotect, testExecJit_actuallyWorks) {
int (*p)(void) = gc(memalign(getauxval(AT_PAGESZ), getauxval(AT_PAGESZ)));
int (*p)(void) = gc(memalign(getpagesize(), getpagesize()));
memcpy(p, kRet31337, sizeof(kRet31337));
EXPECT_NE(-1, mprotect(p, getauxval(AT_PAGESZ), PROT_EXEC));
EXPECT_NE(-1, mprotect(p, getpagesize(), PROT_EXEC));
EXPECT_EQ(31337, p());
EXPECT_FALSE(gotsegv);
EXPECT_FALSE(gotbusted);
EXPECT_NE(-1, mprotect(p, getauxval(AT_PAGESZ), PROT_READ | PROT_WRITE));
EXPECT_NE(-1, mprotect(p, getpagesize(), PROT_READ | PROT_WRITE));
}
TEST(mprotect, testRwxMap_vonNeumannRules) {
@ -187,14 +185,13 @@ TEST(mprotect, testRwxMap_vonNeumannRules) {
return; // boo
if (IsXnuSilicon())
return; // boo
int (*p)(void) = gc(memalign(getauxval(AT_PAGESZ), getauxval(AT_PAGESZ)));
int (*p)(void) = gc(memalign(getpagesize(), getpagesize()));
memcpy(p, kRet31337, sizeof(kRet31337));
EXPECT_NE(-1, mprotect(p, getauxval(AT_PAGESZ),
PROT_READ | PROT_WRITE | PROT_EXEC));
EXPECT_NE(-1, mprotect(p, getpagesize(), PROT_READ | PROT_WRITE | PROT_EXEC));
EXPECT_EQ(31337, p());
EXPECT_FALSE(gotsegv);
EXPECT_FALSE(gotbusted);
EXPECT_NE(-1, mprotect(p, getauxval(AT_PAGESZ), PROT_READ | PROT_WRITE));
EXPECT_NE(-1, mprotect(p, getpagesize(), PROT_READ | PROT_WRITE));
}
TEST(mprotect, testExecuteFlatFileMapOpenedAsReadonly) {
@ -231,13 +228,13 @@ TEST(mprotect, testFileMap_canChangeToExecWhileOpenInRdwrMode) {
}
TEST(mprotect, testBadProt_failsEinval) {
volatile char *p = gc(memalign(getauxval(AT_PAGESZ), getauxval(AT_PAGESZ)));
volatile char *p = gc(memalign(getpagesize(), getpagesize()));
EXPECT_EQ(-1, mprotect((void *)p, 9999, -1));
EXPECT_EQ(EINVAL, errno);
}
TEST(mprotect, testZeroSize_doesNothing) {
volatile char *p = gc(memalign(getauxval(AT_PAGESZ), getauxval(AT_PAGESZ)));
volatile char *p = gc(memalign(getpagesize(), getpagesize()));
EXPECT_NE(-1, mprotect((void *)p, 0, PROT_READ));
p[0] = 1;
EXPECT_FALSE(gotsegv);


@ -1,5 +1,6 @@
#include "libc/sysv/consts/auxv.h"
#include "libc/runtime/runtime.h"
#include "libc/runtime/runtime.h"
/* ---------------------------- setting mparams -------------------------- */
@ -48,7 +49,7 @@ __attribute__((__constructor__(49))) int init_mparams(void) {
size_t gsize;
#if defined(__COSMOPOLITAN__)
psize = getauxval(AT_PAGESZ);
psize = getpagesize();
gsize = DEFAULT_GRANULARITY ? DEFAULT_GRANULARITY : psize;
#elif !defined(WIN32)
psize = malloc_getpagesize;


@ -244,7 +244,7 @@ int main(int argc, char *argv[]) {
sigset_t block;
sigfillset(&block);
pthread_attr_t attr;
int pagesz = getauxval(AT_PAGESZ);
int pagesz = getpagesize();
pthread_t *threads = calloc(nthreads, sizeof(pthread_t));
unassert(!pthread_attr_init(&attr));
unassert(!pthread_attr_setstacksize(&attr, 65536));


@ -364,6 +364,7 @@ int main(int argc, char *argv[]) {
CANIUSE(TM2);
CANIUSE(TME);
CANIUSE(TSC);
CANIUSE(INVTSC);
CANIUSE(TSC_ADJUST);
CANIUSE(TSC_DEADLINE_TIMER);
CANIUSE(TSX_FORCE_ABORT);


@ -497,18 +497,18 @@ static void *NewBoard(size_t *out_size) {
char *p;
size_t s, n, k;
s = (byn * bxn) >> 3;
k = getauxval(AT_PAGESZ) + ROUNDUP(s, getauxval(AT_PAGESZ));
n = ROUNDUP(k + getauxval(AT_PAGESZ), sysconf(_SC_PAGESIZE));
k = getpagesize() + ROUNDUP(s, getpagesize());
n = ROUNDUP(k + getpagesize(), sysconf(_SC_PAGESIZE));
p = _mapanon(n);
mprotect(p, getauxval(AT_PAGESZ), 0);
mprotect(p, getpagesize(), 0);
mprotect(p + k, n - k, 0);
if (out_size)
*out_size = n;
return p + getauxval(AT_PAGESZ);
return p + getpagesize();
}
static void FreeBoard(void *p, size_t n) {
munmap((char *)p - getauxval(AT_PAGESZ), n);
munmap((char *)p - getpagesize(), n);
}
static void AllocateBoardsWithHardwareAcceleratedMemorySafety(void) {