Mirror of https://github.com/jart/cosmopolitan.git (synced 2025-06-27 14:58:30 +00:00)
Make mmap() scalable
It's now possible to create thousands and thousands of sparse, independent memory mappings without any slowdown. The memory manager is now better at tracking memory protection, particularly on Windows, where it does so precisely enough to be restored during fork(). You now have the highest quality memory manager possible. It's even better than what some OSes like XNU offer, where mmap() is implemented as an O(n) operation, which sadly means things aren't much improved over there. With this change, the llamafile HTTP server's /tokenize endpoint with a 50-token prompt can now handle 2.6 million requests per second.
This commit is contained in:
parent 3756870635
commit 8c645fa1ee
59 changed files with 1238 additions and 1067 deletions
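The scalability claim comes from replacing the old linked-list scan of tracked mappings with a red-black tree keyed on start address, so queries like "which mapping contains this address?" (used by kisdangerous(), munmap(), and mprotect() below) become a logarithmic floor lookup via __maps_floor() and __maps_search(). The following standalone sketch is not code from this commit; it illustrates the same floor-then-containment idea over a sorted array with binary search, which has the same O(log n) cost as the tree without the balancing machinery.

// Minimal sketch (not the cosmopolitan implementation): mappings are keyed
// by start address, and containment is answered with one floor lookup
// instead of walking every mapping.
#include <stddef.h>
#include <stdio.h>

struct Mapping {
  char *addr;  /* start of mapping */
  size_t size; /* length in bytes */
};

/* Returns the mapping whose start address is the greatest one <= p,
   or NULL if every mapping starts above p. Runs in O(log n). */
static struct Mapping *mapping_floor(struct Mapping *maps, size_t n, char *p) {
  struct Mapping *best = NULL;
  size_t lo = 0, hi = n;
  while (lo < hi) {
    size_t mid = lo + (hi - lo) / 2;
    if (maps[mid].addr <= p) {
      best = &maps[mid];
      lo = mid + 1;
    } else {
      hi = mid;
    }
  }
  return best;
}

/* A containment check in the spirit of kisdangerous(): one floor lookup,
   then a bounds test against that single candidate. */
static int mapping_contains(struct Mapping *maps, size_t n, char *p) {
  struct Mapping *m = mapping_floor(maps, n, p);
  return m && p < m->addr + m->size;
}

int main(void) {
  static char arena[4096];
  struct Mapping maps[] = {
      {arena + 0, 1024},
      {arena + 2048, 512},
  };
  printf("%d\n", mapping_contains(maps, 2, arena + 100));  /* prints 1 */
  printf("%d\n", mapping_contains(maps, 2, arena + 1500)); /* prints 0 */
  return 0;
}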
@@ -85,7 +85,7 @@ int setrlimit(int resource, const struct rlimit *rlim) {
rc = efault();
} else if (IsXnuSilicon()) {
rc = _sysret(__syslib->__setrlimit(resource, rlim));
} else if (!IsWindows()) {
} else if (!IsWindows() && !(IsNetbsd() && resource == RLIMIT_AS)) {
rc = sys_setrlimit(resource, rlim);
} else if (resource == RLIMIT_STACK) {
rc = enotsup();
@@ -62,6 +62,10 @@ o/$(MODE)/libc/intrin/kprintf.o: private \
-Wframe-larger-than=128 \
-Walloca-larger-than=128

o/$(MODE)/libc/intrin/tree.o: private \
CFLAGS += \
-ffunction-sections

o//libc/intrin/memmove.o: private \
CFLAGS += \
-fno-toplevel-reorder
@@ -24,7 +24,7 @@
static char DescribeMapType(int flags) {
switch (flags & MAP_TYPE) {
case MAP_FILE:
return 'f';
return '-';
case MAP_PRIVATE:
return 'p';
case MAP_SHARED:

@@ -47,7 +47,7 @@ const char *(DescribeMapping)(char p[8], int prot, int flags) {
DescribeProt(p, prot);
p[3] = DescribeMapType(flags);
p[4] = (flags & MAP_ANONYMOUS) ? 'a' : '-';
p[5] = (flags & MAP_FIXED) ? 'F' : '-';
p[5] = (flags & MAP_FIXED) ? 'f' : '-';
p[6] = 0;
return p;
}
@@ -20,12 +20,12 @@
#include "libc/macros.internal.h"
#include "libc/sysv/consts/prot.h"

static const struct DescribeFlags kProtFlags[] = {
{PROT_READ, "READ"},   //
{PROT_WRITE, "WRITE"}, //
{PROT_EXEC, "EXEC"},   //
};

const char *(DescribeProtFlags)(char buf[48], int x) {
const struct DescribeFlags kProtFlags[] = {
{PROT_READ, "READ"},   //
{PROT_WRITE, "WRITE"}, //
{PROT_EXEC, "EXEC"},   //
{PROT_GUARD, "GUARD"}, //
};
return DescribeFlags(buf, 48, kProtFlags, ARRAYLEN(kProtFlags), "PROT_", x);
}
@@ -18,8 +18,11 @@
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/calls/struct/rlimit.h"
#include "libc/dce.h"
#include "libc/fmt/itoa.h"
#include "libc/intrin/kprintf.h"
#include "libc/intrin/strace.internal.h"
#include "libc/str/str.h"
#include "libc/sysv/consts/rlim.h"

const char *DescribeRlimit(char buf[64], int rc, const struct rlimit *rlim) {
if (rc == -1)

@@ -29,7 +32,18 @@ const char *DescribeRlimit(char buf[64], int rc, const struct rlimit *rlim) {
if (kisdangerous(rlim)) {
ksnprintf(buf, 64, "%p", rlim);
} else {
ksnprintf(buf, 64, "{%'ld, %'ld}", rlim->rlim_cur, rlim->rlim_max);
char str[2][21];
if (rlim->rlim_cur == RLIM_INFINITY) {
strcpy(str[0], "RLIM_INFINITY");
} else {
FormatInt64(str[0], rlim->rlim_cur);
}
if (rlim->rlim_max == RLIM_INFINITY) {
strcpy(str[1], "RLIM_INFINITY");
} else {
FormatInt64(str[1], rlim->rlim_max);
}
ksnprintf(buf, 64, "{%s, %s}", str[0], str[1]);
}
return buf;
}
@@ -86,9 +86,8 @@ TryAgain:
if ((dm.addr = MapViewOfFileEx(dm.maphandle, fl.flags2, off >> 32, off,
size, addr))) {
uint32_t oldprot;
if (VirtualProtect(dm.addr, size, __prot2nt(prot, iscow), &oldprot)) {
if (VirtualProtect(dm.addr, size, __prot2nt(prot, iscow), &oldprot))
return dm;
}
UnmapViewOfFile(dm.addr);
}
CloseHandle(dm.maphandle);
@@ -28,7 +28,7 @@
*
* It's required that `elem` and `succ` aren't part of the same list.
*/
privileged void dll_splice_after(struct Dll *elem, struct Dll *succ) {
void dll_splice_after(struct Dll *elem, struct Dll *succ) {
struct Dll *tmp1, *tmp2;
tmp1 = elem->next;
tmp2 = succ->prev;

@@ -43,7 +43,7 @@ privileged void dll_splice_after(struct Dll *elem, struct Dll *succ) {
*
* @param list is a doubly-linked list, where `!*list` means empty
*/
privileged void dll_remove(struct Dll **list, struct Dll *elem) {
void dll_remove(struct Dll **list, struct Dll *elem) {
if (*list == elem) {
if ((*list)->prev == *list) {
*list = 0;

@@ -66,7 +66,7 @@ privileged void dll_remove(struct Dll **list, struct Dll *elem) {
* @param list is a doubly-linked list, where `!*list` means empty
* @param elem must not be a member of `list`, or null for no-op
*/
privileged void dll_make_first(struct Dll **list, struct Dll *elem) {
void dll_make_first(struct Dll **list, struct Dll *elem) {
if (elem) {
if (!*list) {
*list = elem->prev;

@@ -85,7 +85,7 @@ privileged void dll_make_first(struct Dll **list, struct Dll *elem) {
* @param list is a doubly-linked list, where `!*list` means empty
* @param elem must not be a member of `list`, or null for no-op
*/
privileged void dll_make_last(struct Dll **list, struct Dll *elem) {
void dll_make_last(struct Dll **list, struct Dll *elem) {
if (elem) {
dll_make_first(list, elem->next);
*list = elem;
@ -25,7 +25,6 @@
|
|||
#include "libc/fmt/magnumstrs.internal.h"
|
||||
#include "libc/intrin/asmflag.h"
|
||||
#include "libc/intrin/atomic.h"
|
||||
#include "libc/intrin/dll.h"
|
||||
#include "libc/intrin/getenv.internal.h"
|
||||
#include "libc/intrin/kprintf.h"
|
||||
#include "libc/intrin/likely.h"
|
||||
|
@ -154,27 +153,18 @@ __funline bool kischarmisaligned(const char *p, signed char t) {
|
|||
return false;
|
||||
}
|
||||
|
||||
privileged static bool32 kisdangerous_unlocked(const char *addr) {
|
||||
struct Dll *e;
|
||||
if ((e = dll_first(__maps.used))) {
|
||||
do {
|
||||
struct Map *map = MAP_CONTAINER(e);
|
||||
if (map->addr <= addr && addr < map->addr + map->size) {
|
||||
dll_remove(&__maps.used, e);
|
||||
dll_make_first(&__maps.used, e);
|
||||
return !(map->prot & PROT_READ);
|
||||
}
|
||||
} while ((e = dll_next(__maps.used, e)));
|
||||
return true;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
privileged bool32 kisdangerous(const void *addr) {
|
||||
bool32 res;
|
||||
bool32 res = true;
|
||||
__maps_lock();
|
||||
res = kisdangerous_unlocked(addr);
|
||||
if (__maps.maps) {
|
||||
struct Map *map;
|
||||
if ((map = __maps_floor(addr)))
|
||||
if ((const char *)addr >= map->addr &&
|
||||
(const char *)addr < map->addr + map->size)
|
||||
res = false;
|
||||
} else {
|
||||
res = false;
|
||||
}
|
||||
__maps_unlock();
|
||||
return res;
|
||||
}
|
||||
|
|
|
@@ -20,6 +20,7 @@
#include "ape/sections.internal.h"
#include "libc/dce.h"
#include "libc/intrin/dll.h"
#include "libc/intrin/maps.h"
#include "libc/runtime/runtime.h"
#include "libc/runtime/stack.h"
#include "libc/sysv/consts/auxv.h"

@@ -32,8 +33,7 @@ __static_yoink("_init_maps");
struct Maps __maps;

void __maps_add(struct Map *map) {
dll_init(&map->elem);
dll_make_first(&__maps.used, &map->elem);
tree_insert(&__maps.maps, &map->tree, __maps_compare);
++__maps.count;
}
@ -1,36 +1,41 @@
|
|||
#ifndef COSMOPOLITAN_LIBC_RUNTIME_MAPS_H_
|
||||
#define COSMOPOLITAN_LIBC_RUNTIME_MAPS_H_
|
||||
#ifndef COSMOPOLITAN_MAPS_H_
|
||||
#define COSMOPOLITAN_MAPS_H_
|
||||
#include "libc/intrin/atomic.h"
|
||||
#include "libc/intrin/dll.h"
|
||||
#include "libc/intrin/tree.h"
|
||||
#include "libc/runtime/runtime.h"
|
||||
#include "libc/thread/tls2.internal.h"
|
||||
COSMOPOLITAN_C_START_
|
||||
|
||||
#define MAP_CONTAINER(e) DLL_CONTAINER(struct Map, elem, e)
|
||||
#define MAP_TREE_CONTAINER(e) TREE_CONTAINER(struct Map, tree, e)
|
||||
#define MAP_FREE_CONTAINER(e) DLL_CONTAINER(struct Map, free, e)
|
||||
|
||||
struct Map {
|
||||
char *addr; /* granule aligned */
|
||||
size_t size; /* must be nonzero */
|
||||
struct Dll elem; /* for __maps.free */
|
||||
int64_t off; /* -1 if anonymous */
|
||||
int64_t off; /* ignore for anon */
|
||||
int prot; /* memory protects */
|
||||
int flags; /* memory map flag */
|
||||
bool iscow; /* windows nt only */
|
||||
bool readonlyfile; /* windows nt only */
|
||||
unsigned visited; /* used for checks */
|
||||
unsigned oldprot; /* in windows fork */
|
||||
intptr_t hand; /* windows nt only */
|
||||
union {
|
||||
struct Tree tree;
|
||||
struct Dll free;
|
||||
};
|
||||
};
|
||||
|
||||
struct Maps {
|
||||
unsigned mono;
|
||||
atomic_int lock;
|
||||
struct Tree *maps;
|
||||
struct Dll *free;
|
||||
struct Dll *used;
|
||||
size_t count;
|
||||
size_t pages;
|
||||
atomic_ulong rollo;
|
||||
struct Map stack;
|
||||
struct Map guard;
|
||||
bool once;
|
||||
atomic_ulong rollo;
|
||||
};
|
||||
|
||||
struct AddrSize {
|
||||
|
@ -45,10 +50,37 @@ bool __maps_lock(void);
|
|||
void __maps_check(void);
|
||||
void __maps_unlock(void);
|
||||
void __maps_add(struct Map *);
|
||||
struct Map *__maps_alloc(void);
|
||||
void __maps_free(struct Map *);
|
||||
struct Map *__maps_alloc(void);
|
||||
struct Map *__maps_floor(const char *);
|
||||
void __maps_stack(char *, int, int, size_t, int, intptr_t);
|
||||
int __maps_compare(const struct Tree *, const struct Tree *);
|
||||
struct AddrSize __get_main_stack(void);
|
||||
|
||||
forceinline optimizespeed int __maps_search(const void *key,
|
||||
const struct Tree *node) {
|
||||
const char *addr = (const char *)key;
|
||||
const struct Map *map = (const struct Map *)MAP_TREE_CONTAINER(node);
|
||||
if (addr < map->addr)
|
||||
return +1;
|
||||
if (addr >= map->addr + map->size)
|
||||
return -1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct Map *__maps_next(struct Map *map) {
|
||||
struct Tree *node;
|
||||
if ((node = tree_next(&map->tree)))
|
||||
return MAP_TREE_CONTAINER(node);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct Map *__maps_first(void) {
|
||||
struct Tree *node;
|
||||
if ((node = tree_first(__maps.maps)))
|
||||
return MAP_TREE_CONTAINER(node);
|
||||
return 0;
|
||||
}
|
||||
|
||||
COSMOPOLITAN_C_END_
|
||||
#endif /* COSMOPOLITAN_LIBC_RUNTIME_MAPS_H_ */
|
||||
#endif /* COSMOPOLITAN_MAPS_H_ */
|
||||
|
|
|
@ -67,20 +67,31 @@
|
|||
struct StackFrame *bp = __builtin_frame_address(0); \
|
||||
kprintf("%!s:%d: assertion failed: %!s\n", __FILE__, __LINE__, #x); \
|
||||
kprintf("bt %!s\n", (DescribeBacktrace)(bt, bp)); \
|
||||
__print_maps(); \
|
||||
_Exit(99); \
|
||||
__print_maps(0); \
|
||||
__builtin_trap(); \
|
||||
} \
|
||||
} while (0)
|
||||
#endif
|
||||
|
||||
int __maps_compare(const struct Tree *ra, const struct Tree *rb) {
|
||||
const struct Map *a = (const struct Map *)MAP_TREE_CONTAINER(ra);
|
||||
const struct Map *b = (const struct Map *)MAP_TREE_CONTAINER(rb);
|
||||
return (a->addr > b->addr) - (a->addr < b->addr);
|
||||
}
|
||||
|
||||
privileged optimizespeed struct Map *__maps_floor(const char *addr) {
|
||||
struct Tree *node;
|
||||
if ((node = tree_floor(__maps.maps, addr, __maps_search)))
|
||||
return MAP_TREE_CONTAINER(node);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool overlaps_existing_map(const char *addr, size_t size, int pagesz) {
|
||||
for (struct Dll *e = dll_first(__maps.used); e;
|
||||
e = dll_next(__maps.used, e)) {
|
||||
struct Map *map = MAP_CONTAINER(e);
|
||||
struct Map *map;
|
||||
if ((map = __maps_floor(addr)))
|
||||
if (MAX(addr, map->addr) <
|
||||
MIN(addr + PGUP(size), map->addr + PGUP(map->size)))
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -89,66 +100,51 @@ void __maps_check(void) {
|
|||
size_t maps = 0;
|
||||
size_t pages = 0;
|
||||
int pagesz = getpagesize();
|
||||
unsigned id = ++__maps.mono;
|
||||
for (struct Dll *e = dll_first(__maps.used); e;
|
||||
e = dll_next(__maps.used, e)) {
|
||||
struct Map *map = MAP_CONTAINER(e);
|
||||
static unsigned mono;
|
||||
unsigned id = ++mono;
|
||||
for (struct Map *map = __maps_first(); map; map = __maps_next(map)) {
|
||||
ASSERT(map->addr != MAP_FAILED);
|
||||
ASSERT(map->visited != id);
|
||||
ASSERT(map->size);
|
||||
map->visited = id;
|
||||
pages += (map->size + getpagesize() - 1) / getpagesize();
|
||||
maps += 1;
|
||||
struct Map *next;
|
||||
if ((next = __maps_next(map))) {
|
||||
ASSERT(map->addr < next->addr);
|
||||
ASSERT(
|
||||
!(MAX(map->addr, next->addr) <
|
||||
MIN(map->addr + PGUP(map->size), next->addr + PGUP(next->size))));
|
||||
}
|
||||
}
|
||||
ASSERT(maps = __maps.count);
|
||||
ASSERT(pages == __maps.pages);
|
||||
for (struct Dll *e = dll_first(__maps.used); e;
|
||||
e = dll_next(__maps.used, e)) {
|
||||
struct Map *m1 = MAP_CONTAINER(e);
|
||||
for (struct Dll *f = dll_next(__maps.used, e); f;
|
||||
f = dll_next(__maps.used, f)) {
|
||||
struct Map *m2 = MAP_CONTAINER(f);
|
||||
ASSERT(MAX(m1->addr, m2->addr) >=
|
||||
MIN(m1->addr + PGUP(m1->size), m2->addr + PGUP(m2->size)));
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
void __maps_free(struct Map *map) {
|
||||
map->size = 0;
|
||||
map->addr = MAP_FAILED;
|
||||
ASSERT(dll_is_alone(&map->elem));
|
||||
dll_make_last(&__maps.free, &map->elem);
|
||||
dll_init(&map->free);
|
||||
dll_make_first(&__maps.free, &map->free);
|
||||
}
|
||||
|
||||
static void __maps_insert(struct Map *map) {
|
||||
struct Dll *e = dll_first(__maps.used);
|
||||
struct Map *last = e ? MAP_CONTAINER(e) : 0;
|
||||
__maps.pages += (map->size + getpagesize() - 1) / getpagesize();
|
||||
if (last && !IsWindows() && //
|
||||
map->addr == last->addr + last->size && //
|
||||
struct Map *floor = __maps_floor(map->addr);
|
||||
if (floor && !IsWindows() && //
|
||||
map->addr + map->size == floor->addr && //
|
||||
(map->flags & MAP_ANONYMOUS) && //
|
||||
map->flags == last->flags && //
|
||||
map->prot == last->prot) {
|
||||
last->size += map->size;
|
||||
dll_remove(&__maps.used, &last->elem);
|
||||
dll_make_first(&__maps.used, &last->elem);
|
||||
__maps_free(map);
|
||||
} else if (last && !IsWindows() && //
|
||||
map->addr + map->size == last->addr && //
|
||||
(map->flags & MAP_ANONYMOUS) && //
|
||||
map->flags == last->flags && //
|
||||
map->prot == last->prot) {
|
||||
last->addr -= map->size;
|
||||
last->size += map->size;
|
||||
dll_remove(&__maps.used, &last->elem);
|
||||
dll_make_first(&__maps.used, &last->elem);
|
||||
map->flags == floor->flags && //
|
||||
map->prot == floor->prot) {
|
||||
floor->addr -= map->size;
|
||||
floor->size += map->size;
|
||||
__maps_free(map);
|
||||
__maps_check();
|
||||
} else {
|
||||
__maps_add(map);
|
||||
__maps_check();
|
||||
}
|
||||
__maps_check();
|
||||
}
|
||||
|
||||
struct Map *__maps_alloc(void) {
|
||||
|
@ -156,7 +152,7 @@ struct Map *__maps_alloc(void) {
|
|||
struct Map *map;
|
||||
if ((e = dll_first(__maps.free))) {
|
||||
dll_remove(&__maps.free, e);
|
||||
map = MAP_CONTAINER(e);
|
||||
map = MAP_FREE_CONTAINER(e);
|
||||
return map;
|
||||
}
|
||||
int granularity = __granularity();
|
||||
|
@ -168,11 +164,8 @@ struct Map *__maps_alloc(void) {
|
|||
CloseHandle(sys.maphandle);
|
||||
map = sys.addr;
|
||||
map->addr = MAP_FAILED;
|
||||
dll_init(&map->elem);
|
||||
for (int i = 1; i < granularity / sizeof(struct Map); ++i) {
|
||||
dll_init(&map[i].elem);
|
||||
for (int i = 1; i < granularity / sizeof(struct Map); ++i)
|
||||
__maps_free(map + i);
|
||||
}
|
||||
return map;
|
||||
}
|
||||
|
||||
|
@ -190,106 +183,106 @@ static int __munmap(char *addr, size_t size, bool untrack_only) {
|
|||
|
||||
// untrack mappings
|
||||
int rc = 0;
|
||||
struct Dll *cur;
|
||||
struct Dll *next;
|
||||
struct Dll *delete = 0;
|
||||
struct Map *map;
|
||||
struct Map *next;
|
||||
struct Dll *deleted = 0;
|
||||
if (__maps_lock()) {
|
||||
__maps_unlock();
|
||||
return edeadlk();
|
||||
}
|
||||
for (cur = dll_first(__maps.used); cur; cur = next) {
|
||||
next = dll_next(__maps.used, cur);
|
||||
struct Map *map = MAP_CONTAINER(cur);
|
||||
for (map = __maps_floor(addr); map; map = next) {
|
||||
next = __maps_next(map);
|
||||
char *map_addr = map->addr;
|
||||
size_t map_size = map->size;
|
||||
if (MAX(addr, map_addr) < MIN(addr + size, map_addr + PGUP(map_size))) {
|
||||
if (addr <= map_addr && addr + size >= map_addr + PGUP(map_size)) {
|
||||
// remove mapping completely
|
||||
dll_remove(&__maps.used, cur);
|
||||
dll_make_first(&delete, cur);
|
||||
__maps.pages -= (map_size + pagesz - 1) / pagesz;
|
||||
__maps.count -= 1;
|
||||
__maps_check();
|
||||
} else if (IsWindows()) {
|
||||
// you can't carve up memory maps on windows. our mmap() makes
|
||||
// this not a problem (for non-enormous memory maps) by making
|
||||
// independent mappings for each 64 kb granule, under the hood
|
||||
rc = einval();
|
||||
} else if (addr <= map_addr) {
|
||||
// shave off lefthand side of mapping
|
||||
ASSERT(addr + size < map_addr + PGUP(map_size));
|
||||
size_t left = PGUP(addr + size - map_addr);
|
||||
size_t right = map_size - left;
|
||||
ASSERT(right > 0);
|
||||
ASSERT(left > 0);
|
||||
struct Map *leftmap;
|
||||
if ((leftmap = __maps_alloc())) {
|
||||
map->addr += left;
|
||||
map->size = right;
|
||||
if (!(map->flags & MAP_ANONYMOUS))
|
||||
map->off += left;
|
||||
__maps.pages -= (left + pagesz - 1) / pagesz;
|
||||
if (!(MAX(addr, map_addr) < MIN(addr + size, map_addr + PGUP(map_size))))
|
||||
break;
|
||||
if (addr <= map_addr && addr + size >= map_addr + PGUP(map_size)) {
|
||||
// remove mapping completely
|
||||
tree_remove(&__maps.maps, &map->tree);
|
||||
dll_init(&map->free);
|
||||
dll_make_first(&deleted, &map->free);
|
||||
__maps.pages -= (map_size + pagesz - 1) / pagesz;
|
||||
__maps.count -= 1;
|
||||
} else if (IsWindows()) {
|
||||
// you can't carve up memory maps on windows. our mmap() makes
|
||||
// this not a problem (for non-enormous memory maps) by making
|
||||
// independent mappings for each 64 kb granule, under the hood
|
||||
rc = einval();
|
||||
} else if (addr <= map_addr) {
|
||||
// shave off lefthand side of mapping
|
||||
ASSERT(addr + size < map_addr + PGUP(map_size));
|
||||
size_t left = PGUP(addr + size - map_addr);
|
||||
size_t right = map_size - left;
|
||||
ASSERT(right > 0);
|
||||
ASSERT(left > 0);
|
||||
struct Map *leftmap;
|
||||
if ((leftmap = __maps_alloc())) {
|
||||
map->addr += left;
|
||||
map->size = right;
|
||||
if (!(map->flags & MAP_ANONYMOUS))
|
||||
map->off += left;
|
||||
__maps.pages -= (left + pagesz - 1) / pagesz;
|
||||
leftmap->addr = map_addr;
|
||||
leftmap->size = left;
|
||||
dll_init(&leftmap->free);
|
||||
dll_make_first(&deleted, &leftmap->free);
|
||||
} else {
|
||||
rc = -1;
|
||||
}
|
||||
} else if (addr + size >= map_addr + PGUP(map_size)) {
|
||||
// shave off righthand side of mapping
|
||||
size_t left = addr - map_addr;
|
||||
size_t right = map_addr + map_size - addr;
|
||||
struct Map *rightmap;
|
||||
if ((rightmap = __maps_alloc())) {
|
||||
map->size = left;
|
||||
__maps.pages -= (right + pagesz - 1) / pagesz;
|
||||
rightmap->addr = addr;
|
||||
rightmap->size = right;
|
||||
dll_init(&rightmap->free);
|
||||
dll_make_first(&deleted, &rightmap->free);
|
||||
} else {
|
||||
rc = -1;
|
||||
}
|
||||
} else {
|
||||
// punch hole in mapping
|
||||
size_t left = addr - map_addr;
|
||||
size_t middle = size;
|
||||
size_t right = map_size - middle - left;
|
||||
struct Map *leftmap;
|
||||
if ((leftmap = __maps_alloc())) {
|
||||
struct Map *middlemap;
|
||||
if ((middlemap = __maps_alloc())) {
|
||||
leftmap->addr = map_addr;
|
||||
leftmap->size = left;
|
||||
dll_make_first(&delete, &leftmap->elem);
|
||||
__maps_check();
|
||||
} else {
|
||||
rc = -1;
|
||||
}
|
||||
} else if (addr + size >= map_addr + PGUP(map_size)) {
|
||||
// shave off righthand side of mapping
|
||||
size_t left = addr - map_addr;
|
||||
size_t right = map_addr + map_size - addr;
|
||||
struct Map *rightmap;
|
||||
if ((rightmap = __maps_alloc())) {
|
||||
map->size = left;
|
||||
__maps.pages -= (right + pagesz - 1) / pagesz;
|
||||
rightmap->addr = addr;
|
||||
rightmap->size = right;
|
||||
dll_make_first(&delete, &rightmap->elem);
|
||||
__maps_check();
|
||||
leftmap->off = map->off;
|
||||
leftmap->prot = map->prot;
|
||||
leftmap->flags = map->flags;
|
||||
map->addr += left + middle;
|
||||
map->size = right;
|
||||
if (!(map->flags & MAP_ANONYMOUS))
|
||||
map->off += left + middle;
|
||||
tree_insert(&__maps.maps, &leftmap->tree, __maps_compare);
|
||||
__maps.pages -= (middle + pagesz - 1) / pagesz;
|
||||
__maps.count += 1;
|
||||
middlemap->addr = addr;
|
||||
middlemap->size = size;
|
||||
dll_init(&middlemap->free);
|
||||
dll_make_first(&deleted, &middlemap->free);
|
||||
} else {
|
||||
rc = -1;
|
||||
}
|
||||
} else {
|
||||
// punch hole in mapping
|
||||
size_t left = addr - map_addr;
|
||||
size_t middle = size;
|
||||
size_t right = map_size - middle - left;
|
||||
struct Map *leftmap;
|
||||
if ((leftmap = __maps_alloc())) {
|
||||
struct Map *middlemap;
|
||||
if ((middlemap = __maps_alloc())) {
|
||||
leftmap->addr = map_addr;
|
||||
leftmap->size = left;
|
||||
leftmap->off = map->off;
|
||||
leftmap->prot = map->prot;
|
||||
leftmap->flags = map->flags;
|
||||
map->addr += left + middle;
|
||||
map->size = right;
|
||||
if (!(map->flags & MAP_ANONYMOUS))
|
||||
map->off += left + middle;
|
||||
dll_make_first(&__maps.used, &leftmap->elem);
|
||||
__maps.pages -= (middle + pagesz - 1) / pagesz;
|
||||
__maps.count += 1;
|
||||
middlemap->addr = addr;
|
||||
middlemap->size = size;
|
||||
dll_make_first(&delete, &middlemap->elem);
|
||||
__maps_check();
|
||||
} else {
|
||||
rc = -1;
|
||||
}
|
||||
} else {
|
||||
rc = -1;
|
||||
}
|
||||
rc = -1;
|
||||
}
|
||||
}
|
||||
__maps_check();
|
||||
}
|
||||
__maps_unlock();
|
||||
|
||||
// delete mappings
|
||||
for (struct Dll *e = dll_first(delete); e; e = dll_next(delete, e)) {
|
||||
struct Map *map = MAP_CONTAINER(e);
|
||||
for (struct Dll *e = dll_first(deleted); e; e = dll_next(deleted, e)) {
|
||||
struct Map *map = MAP_FREE_CONTAINER(e);
|
||||
if (!untrack_only) {
|
||||
if (!IsWindows()) {
|
||||
if (sys_munmap(map->addr, map->size))
|
||||
|
@ -305,12 +298,12 @@ static int __munmap(char *addr, size_t size, bool untrack_only) {
|
|||
}
|
||||
|
||||
// free mappings
|
||||
if (!dll_is_empty(delete)) {
|
||||
if (!dll_is_empty(deleted)) {
|
||||
__maps_lock();
|
||||
struct Dll *e;
|
||||
while ((e = dll_first(delete))) {
|
||||
dll_remove(&delete, e);
|
||||
__maps_free(MAP_CONTAINER(e));
|
||||
while ((e = dll_first(deleted))) {
|
||||
dll_remove(&deleted, e);
|
||||
__maps_free(MAP_FREE_CONTAINER(e));
|
||||
}
|
||||
__maps_check();
|
||||
__maps_unlock();
|
||||
|
@ -350,6 +343,7 @@ static void *__mmap_chunk(void *addr, size_t size, int prot, int flags, int fd,
|
|||
__maps_unlock();
|
||||
return (void *)edeadlk();
|
||||
}
|
||||
__maps_check();
|
||||
map = __maps_alloc();
|
||||
__maps_unlock();
|
||||
if (!map)
|
||||
|
|
|
@ -25,6 +25,7 @@
|
|||
#include "libc/intrin/kprintf.h"
|
||||
#include "libc/intrin/maps.h"
|
||||
#include "libc/intrin/strace.internal.h"
|
||||
#include "libc/intrin/tree.h"
|
||||
#include "libc/nt/memory.h"
|
||||
#include "libc/runtime/internal.h"
|
||||
#include "libc/runtime/runtime.h"
|
||||
|
@ -51,7 +52,7 @@ static int __mprotect_chunk(char *addr, size_t size, int prot, bool iscow) {
|
|||
int __mprotect(char *addr, size_t size, int prot) {
|
||||
|
||||
// unix checks prot before checking size
|
||||
if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
|
||||
if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_GUARD))
|
||||
return einval();
|
||||
|
||||
// make new technology consistent with unix
|
||||
|
@ -68,64 +69,97 @@ int __mprotect(char *addr, size_t size, int prot) {
|
|||
|
||||
// change mappings
|
||||
int rc = 0;
|
||||
struct Dll *cur;
|
||||
bool found = false;
|
||||
if (__maps_lock()) {
|
||||
__maps_unlock();
|
||||
return edeadlk();
|
||||
}
|
||||
for (cur = dll_first(__maps.used); cur; cur = dll_next(__maps.used, cur)) {
|
||||
struct Map *map = MAP_CONTAINER(cur);
|
||||
for (struct Map *map = __maps_floor(addr); map; map = __maps_next(map)) {
|
||||
char *map_addr = map->addr;
|
||||
size_t map_size = map->size;
|
||||
char *beg = MAX(addr, map_addr);
|
||||
char *end = MIN(addr + size, map_addr + PGUP(map_size));
|
||||
if (beg < end) {
|
||||
found = true;
|
||||
if (addr <= map_addr && addr + size >= map_addr + PGUP(map_size)) {
|
||||
// change protection of entire mapping
|
||||
if (!__mprotect_chunk(map_addr, map_size, prot, map->iscow)) {
|
||||
if (beg >= end)
|
||||
break;
|
||||
found = true;
|
||||
if (addr <= map_addr && addr + size >= map_addr + PGUP(map_size)) {
|
||||
// change protection of entire mapping
|
||||
if (!__mprotect_chunk(map_addr, map_size, prot, map->iscow)) {
|
||||
map->prot = prot;
|
||||
} else {
|
||||
rc = -1;
|
||||
}
|
||||
} else if (addr <= map_addr) {
|
||||
// change lefthand side of mapping
|
||||
size_t left = PGUP(addr + size - map_addr);
|
||||
size_t right = map_size - left;
|
||||
struct Map *leftmap;
|
||||
if ((leftmap = __maps_alloc())) {
|
||||
if (!__mprotect_chunk(map_addr, left, prot, false)) {
|
||||
leftmap->addr = map_addr;
|
||||
leftmap->size = left;
|
||||
leftmap->prot = prot;
|
||||
leftmap->off = map->off;
|
||||
leftmap->flags = map->flags;
|
||||
leftmap->iscow = map->iscow;
|
||||
leftmap->readonlyfile = map->readonlyfile;
|
||||
leftmap->hand = map->hand;
|
||||
map->addr += left;
|
||||
map->size = right;
|
||||
map->hand = -1;
|
||||
if (!(map->flags & MAP_ANONYMOUS))
|
||||
map->off += left;
|
||||
tree_insert(&__maps.maps, &leftmap->tree, __maps_compare);
|
||||
__maps.count += 1;
|
||||
__maps_check();
|
||||
} else {
|
||||
__maps_free(leftmap);
|
||||
rc = -1;
|
||||
}
|
||||
} else {
|
||||
rc = -1;
|
||||
}
|
||||
} else if (addr + size >= map_addr + PGUP(map_size)) {
|
||||
// change righthand side of mapping
|
||||
size_t left = addr - map_addr;
|
||||
size_t right = map_addr + map_size - addr;
|
||||
struct Map *leftmap;
|
||||
if ((leftmap = __maps_alloc())) {
|
||||
if (!__mprotect_chunk(map_addr + left, right, prot, false)) {
|
||||
leftmap->addr = map_addr;
|
||||
leftmap->size = left;
|
||||
leftmap->off = map->off;
|
||||
leftmap->prot = map->prot;
|
||||
leftmap->flags = map->flags;
|
||||
leftmap->iscow = map->iscow;
|
||||
leftmap->readonlyfile = map->readonlyfile;
|
||||
leftmap->hand = map->hand;
|
||||
map->addr += left;
|
||||
map->size = right;
|
||||
map->prot = prot;
|
||||
map->hand = -1;
|
||||
if (!(map->flags & MAP_ANONYMOUS))
|
||||
map->off += left;
|
||||
tree_insert(&__maps.maps, &leftmap->tree, __maps_compare);
|
||||
__maps.count += 1;
|
||||
__maps_check();
|
||||
} else {
|
||||
__maps_free(leftmap);
|
||||
rc = -1;
|
||||
}
|
||||
} else if (addr <= map_addr) {
|
||||
// change lefthand side of mapping
|
||||
size_t left = PGUP(addr + size - map_addr);
|
||||
size_t right = map_size - left;
|
||||
struct Map *leftmap;
|
||||
if ((leftmap = __maps_alloc())) {
|
||||
if (!__mprotect_chunk(map_addr, left, prot, false)) {
|
||||
leftmap->addr = map_addr;
|
||||
leftmap->size = left;
|
||||
leftmap->prot = prot;
|
||||
leftmap->off = map->off;
|
||||
leftmap->flags = map->flags;
|
||||
leftmap->iscow = map->iscow;
|
||||
leftmap->readonlyfile = map->readonlyfile;
|
||||
leftmap->hand = map->hand;
|
||||
map->addr += left;
|
||||
map->size = right;
|
||||
map->hand = -1;
|
||||
if (!(map->flags & MAP_ANONYMOUS))
|
||||
map->off += left;
|
||||
dll_make_first(&__maps.used, &leftmap->elem);
|
||||
__maps.count += 1;
|
||||
__maps_check();
|
||||
} else {
|
||||
__maps_free(leftmap);
|
||||
rc = -1;
|
||||
}
|
||||
} else {
|
||||
rc = -1;
|
||||
}
|
||||
} else if (addr + size >= map_addr + PGUP(map_size)) {
|
||||
// change righthand side of mapping
|
||||
size_t left = addr - map_addr;
|
||||
size_t right = map_addr + map_size - addr;
|
||||
struct Map *leftmap;
|
||||
if ((leftmap = __maps_alloc())) {
|
||||
if (!__mprotect_chunk(map_addr + left, right, prot, false)) {
|
||||
} else {
|
||||
rc = -1;
|
||||
}
|
||||
} else {
|
||||
// change middle of mapping
|
||||
size_t left = addr - map_addr;
|
||||
size_t middle = size;
|
||||
size_t right = map_size - middle - left;
|
||||
struct Map *leftmap;
|
||||
if ((leftmap = __maps_alloc())) {
|
||||
struct Map *midlmap;
|
||||
if ((midlmap = __maps_alloc())) {
|
||||
if (!__mprotect_chunk(map_addr + left, middle, prot, false)) {
|
||||
leftmap->addr = map_addr;
|
||||
leftmap->size = left;
|
||||
leftmap->off = map->off;
|
||||
|
@ -134,67 +168,32 @@ int __mprotect(char *addr, size_t size, int prot) {
|
|||
leftmap->iscow = map->iscow;
|
||||
leftmap->readonlyfile = map->readonlyfile;
|
||||
leftmap->hand = map->hand;
|
||||
map->addr += left;
|
||||
midlmap->addr = map_addr + left;
|
||||
midlmap->size = middle;
|
||||
midlmap->off = (map->flags & MAP_ANONYMOUS) ? 0 : map->off + left;
|
||||
midlmap->prot = prot;
|
||||
midlmap->flags = map->flags;
|
||||
midlmap->hand = -1;
|
||||
map->addr += left + middle;
|
||||
map->size = right;
|
||||
map->prot = prot;
|
||||
map->hand = -1;
|
||||
if (!(map->flags & MAP_ANONYMOUS))
|
||||
map->off += left;
|
||||
dll_make_first(&__maps.used, &leftmap->elem);
|
||||
__maps.count += 1;
|
||||
map->off += left + middle;
|
||||
tree_insert(&__maps.maps, &leftmap->tree, __maps_compare);
|
||||
tree_insert(&__maps.maps, &midlmap->tree, __maps_compare);
|
||||
__maps.count += 2;
|
||||
__maps_check();
|
||||
} else {
|
||||
__maps_free(midlmap);
|
||||
__maps_free(leftmap);
|
||||
rc = -1;
|
||||
}
|
||||
} else {
|
||||
__maps_free(leftmap);
|
||||
rc = -1;
|
||||
}
|
||||
} else {
|
||||
// change middle of mapping
|
||||
size_t left = addr - map_addr;
|
||||
size_t middle = size;
|
||||
size_t right = map_size - middle - left;
|
||||
struct Map *leftmap;
|
||||
if ((leftmap = __maps_alloc())) {
|
||||
struct Map *midlmap;
|
||||
if ((midlmap = __maps_alloc())) {
|
||||
if (!__mprotect_chunk(map_addr + left, middle, prot, false)) {
|
||||
leftmap->addr = map_addr;
|
||||
leftmap->size = left;
|
||||
leftmap->off = map->off;
|
||||
leftmap->prot = map->prot;
|
||||
leftmap->flags = map->flags;
|
||||
leftmap->iscow = map->iscow;
|
||||
leftmap->readonlyfile = map->readonlyfile;
|
||||
leftmap->hand = map->hand;
|
||||
midlmap->addr = map_addr + left;
|
||||
midlmap->size = middle;
|
||||
midlmap->off = (map->flags & MAP_ANONYMOUS) ? 0 : map->off + left;
|
||||
midlmap->prot = prot;
|
||||
midlmap->flags = map->flags;
|
||||
midlmap->hand = -1;
|
||||
map->addr += left + middle;
|
||||
map->size = right;
|
||||
map->hand = -1;
|
||||
if (!(map->flags & MAP_ANONYMOUS))
|
||||
map->off += left + middle;
|
||||
dll_make_first(&__maps.used, &midlmap->elem);
|
||||
dll_make_first(&__maps.used, &leftmap->elem);
|
||||
__maps.count += 2;
|
||||
__maps_check();
|
||||
} else {
|
||||
__maps_free(midlmap);
|
||||
__maps_free(leftmap);
|
||||
rc = -1;
|
||||
}
|
||||
} else {
|
||||
__maps_free(leftmap);
|
||||
rc = -1;
|
||||
}
|
||||
} else {
|
||||
rc = -1;
|
||||
}
|
||||
rc = -1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -34,16 +34,17 @@ textwindows int sys_msync_nt(char *addr, size_t size, int flags) {
|
|||
return einval();
|
||||
|
||||
int rc = 0;
|
||||
__maps_lock();
|
||||
for (struct Dll *e = dll_first(__maps.used); e;
|
||||
e = dll_next(__maps.used, e)) {
|
||||
struct Map *map = MAP_CONTAINER(e);
|
||||
char *beg = MAX(addr, map->addr);
|
||||
char *end = MIN(addr + size, map->addr + map->size);
|
||||
if (beg < end)
|
||||
if (!FlushViewOfFile(beg, end - beg))
|
||||
rc = -1;
|
||||
// TODO(jart): FlushFileBuffers too on g_fds handle if MS_SYNC?
|
||||
if (__maps_lock()) {
|
||||
rc = edeadlk();
|
||||
} else {
|
||||
for (struct Map *map = __maps_floor(addr); map; map = __maps_next(map)) {
|
||||
char *beg = MAX(addr, map->addr);
|
||||
char *end = MIN(addr + size, map->addr + map->size);
|
||||
if (beg < end)
|
||||
if (!FlushViewOfFile(beg, end - beg))
|
||||
rc = -1;
|
||||
// TODO(jart): FlushFileBuffers too on g_fds handle if MS_SYNC?
|
||||
}
|
||||
}
|
||||
__maps_unlock();
|
||||
|
||||
|
|
|
@ -29,16 +29,11 @@
|
|||
/**
|
||||
* Prints memory mappings.
|
||||
*/
|
||||
void __print_maps(void) {
|
||||
int limit = 15;
|
||||
long maptally = 0;
|
||||
void __print_maps(size_t limit) {
|
||||
char mappingbuf[8], sb[16];
|
||||
__maps_lock();
|
||||
struct Dll *e, *e2;
|
||||
for (e = dll_first(__maps.used); e; e = e2) {
|
||||
e2 = dll_next(__maps.used, e);
|
||||
struct Map *map = MAP_CONTAINER(e);
|
||||
maptally += map->size;
|
||||
for (struct Tree *e = tree_first(__maps.maps); e; e = tree_next(e)) {
|
||||
struct Map *map = MAP_TREE_CONTAINER(e);
|
||||
kprintf("%012lx-%012lx %!s", map->addr, map->addr + map->size,
|
||||
(DescribeMapping)(mappingbuf, map->prot, map->flags));
|
||||
sizefmt(sb, map->size, 1024);
|
||||
|
|
|
@ -42,6 +42,8 @@ privileged int __prot2nt(int prot, int iscow) {
|
|||
return kNtPageExecuteReadwrite;
|
||||
}
|
||||
default:
|
||||
if (prot & PROT_GUARD)
|
||||
return kNtPageReadwrite | kNtPageGuard;
|
||||
return kNtPageNoaccess;
|
||||
}
|
||||
}
|
||||
|
|
40
libc/intrin/pthread_delay_np.c
Normal file
40
libc/intrin/pthread_delay_np.c
Normal file
|
@ -0,0 +1,40 @@
|
|||
/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
|
||||
│ vi: set et ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi │
|
||||
╞══════════════════════════════════════════════════════════════════════════════╡
|
||||
│ Copyright 2024 Justine Alexandra Roberts Tunney │
|
||||
│ │
|
||||
│ Permission to use, copy, modify, and/or distribute this software for │
|
||||
│ any purpose with or without fee is hereby granted, provided that the │
|
||||
│ above copyright notice and this permission notice appear in all copies. │
|
||||
│ │
|
||||
│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │
|
||||
│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │
|
||||
│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │
|
||||
│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │
|
||||
│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │
|
||||
│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │
|
||||
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
|
||||
│ PERFORMANCE OF THIS SOFTWARE. │
|
||||
╚─────────────────────────────────────────────────────────────────────────────*/
|
||||
#include "libc/intrin/strace.internal.h"
|
||||
#include "libc/thread/thread.h"
|
||||
|
||||
/**
|
||||
* Delays execution for brief moment.
|
||||
*
|
||||
* @param symbol may be used to strace names of static locks
|
||||
* @param backoff should start at zero and be fed back in
|
||||
* @return new value for backoff
|
||||
*/
|
||||
int pthread_delay_np(const void *symbol, int backoff) {
|
||||
if (backoff < 7) {
|
||||
volatile int i;
|
||||
for (i = 0; i != 1 << backoff; i++) {
|
||||
}
|
||||
backoff++;
|
||||
} else {
|
||||
STRACE("pthread_delay_np(%t)", symbol);
|
||||
pthread_yield_np();
|
||||
}
|
||||
return backoff;
|
||||
}
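A minimal usage sketch of the calling convention documented above: backoff starts at zero and the return value is fed back in on each retry. The spinlock below is hypothetical and not part of this commit, and it assumes pthread_delay_np() and pthread_yield_np() are declared by libc/thread/thread.h, as the include in the new file suggests.

// Hypothetical test-and-set spinlock illustrating how the backoff value
// returned by pthread_delay_np() is meant to be threaded through a loop.
#include <stdatomic.h>
#include "libc/thread/thread.h"

static void spin_acquire(atomic_int *lock) {
  int backoff = 0; /* per the contract, start at zero */
  while (atomic_exchange_explicit(lock, 1, memory_order_acquire))
    backoff = pthread_delay_np(lock, backoff); /* feed the result back in */
}

static void spin_release(atomic_int *lock) {
  atomic_store_explicit(lock, 0, memory_order_release);
}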
|
|
@ -18,8 +18,10 @@
|
|||
╚─────────────────────────────────────────────────────────────────────────────*/
|
||||
#include "libc/calls/calls.h"
|
||||
#include "libc/calls/state.internal.h"
|
||||
#include "libc/dce.h"
|
||||
#include "libc/errno.h"
|
||||
#include "libc/intrin/atomic.h"
|
||||
#include "libc/intrin/describeflags.internal.h"
|
||||
#include "libc/intrin/strace.internal.h"
|
||||
#include "libc/intrin/weaken.h"
|
||||
#include "libc/runtime/internal.h"
|
||||
|
@ -27,6 +29,66 @@
|
|||
#include "libc/thread/thread.h"
|
||||
#include "third_party/nsync/mu.h"
|
||||
|
||||
static errno_t pthread_mutex_lock_impl(pthread_mutex_t *mutex) {
|
||||
int me;
|
||||
int backoff = 0;
|
||||
uint64_t word, lock;
|
||||
|
||||
// get current state of lock
|
||||
word = atomic_load_explicit(&mutex->_word, memory_order_relaxed);
|
||||
|
||||
// use fancy nsync mutex if possible
|
||||
if (MUTEX_TYPE(word) == PTHREAD_MUTEX_NORMAL && //
|
||||
MUTEX_PSHARED(word) == PTHREAD_PROCESS_PRIVATE && //
|
||||
_weaken(nsync_mu_lock)) {
|
||||
_weaken(nsync_mu_lock)((nsync_mu *)mutex);
|
||||
return 0;
|
||||
}
|
||||
|
||||
// implement barebones normal mutexes
|
||||
if (MUTEX_TYPE(word) == PTHREAD_MUTEX_NORMAL) {
|
||||
for (;;) {
|
||||
word = MUTEX_UNLOCK(word);
|
||||
lock = MUTEX_LOCK(word);
|
||||
if (atomic_compare_exchange_weak_explicit(&mutex->_word, &word, lock,
|
||||
memory_order_acquire,
|
||||
memory_order_relaxed))
|
||||
return 0;
|
||||
backoff = pthread_delay_np(mutex, backoff);
|
||||
}
|
||||
}
|
||||
|
||||
// implement recursive mutexes
|
||||
me = gettid();
|
||||
for (;;) {
|
||||
if (MUTEX_OWNER(word) == me) {
|
||||
if (MUTEX_TYPE(word) != PTHREAD_MUTEX_ERRORCHECK) {
|
||||
if (MUTEX_DEPTH(word) < MUTEX_DEPTH_MAX) {
|
||||
if (atomic_compare_exchange_weak_explicit(
|
||||
&mutex->_word, &word, MUTEX_INC_DEPTH(word),
|
||||
memory_order_relaxed, memory_order_relaxed))
|
||||
return 0;
|
||||
continue;
|
||||
} else {
|
||||
return EAGAIN;
|
||||
}
|
||||
} else {
|
||||
return EDEADLK;
|
||||
}
|
||||
}
|
||||
word = MUTEX_UNLOCK(word);
|
||||
lock = MUTEX_LOCK(word);
|
||||
lock = MUTEX_SET_OWNER(lock, me);
|
||||
if (atomic_compare_exchange_weak_explicit(&mutex->_word, &word, lock,
|
||||
memory_order_acquire,
|
||||
memory_order_relaxed)) {
|
||||
mutex->_pid = __pid;
|
||||
return 0;
|
||||
}
|
||||
backoff = pthread_delay_np(mutex, backoff);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Locks mutex.
|
||||
*
|
||||
|
@ -65,65 +127,10 @@
|
|||
* @vforksafe
|
||||
*/
|
||||
errno_t pthread_mutex_lock(pthread_mutex_t *mutex) {
|
||||
int me;
|
||||
uint64_t word, lock;
|
||||
|
||||
LOCKTRACE("pthread_mutex_lock(%t)", mutex);
|
||||
|
||||
if (__vforked)
|
||||
return 0;
|
||||
|
||||
// get current state of lock
|
||||
word = atomic_load_explicit(&mutex->_word, memory_order_relaxed);
|
||||
|
||||
// use fancy nsync mutex if possible
|
||||
if (MUTEX_TYPE(word) == PTHREAD_MUTEX_NORMAL && //
|
||||
MUTEX_PSHARED(word) == PTHREAD_PROCESS_PRIVATE && //
|
||||
_weaken(nsync_mu_lock)) {
|
||||
_weaken(nsync_mu_lock)((nsync_mu *)mutex);
|
||||
return 0;
|
||||
}
|
||||
|
||||
// implement barebones normal mutexes
|
||||
if (MUTEX_TYPE(word) == PTHREAD_MUTEX_NORMAL) {
|
||||
for (;;) {
|
||||
word = MUTEX_UNLOCK(word);
|
||||
lock = MUTEX_LOCK(word);
|
||||
if (atomic_compare_exchange_weak_explicit(&mutex->_word, &word, lock,
|
||||
memory_order_acquire,
|
||||
memory_order_relaxed))
|
||||
return 0;
|
||||
pthread_pause_np();
|
||||
}
|
||||
}
|
||||
|
||||
// implement recursive mutexes
|
||||
me = gettid();
|
||||
for (;;) {
|
||||
if (MUTEX_OWNER(word) == me) {
|
||||
if (MUTEX_TYPE(word) != PTHREAD_MUTEX_ERRORCHECK) {
|
||||
if (MUTEX_DEPTH(word) < MUTEX_DEPTH_MAX) {
|
||||
if (atomic_compare_exchange_weak_explicit(
|
||||
&mutex->_word, &word, MUTEX_INC_DEPTH(word),
|
||||
memory_order_relaxed, memory_order_relaxed))
|
||||
return 0;
|
||||
continue;
|
||||
} else {
|
||||
return EAGAIN;
|
||||
}
|
||||
} else {
|
||||
return EDEADLK;
|
||||
}
|
||||
}
|
||||
word = MUTEX_UNLOCK(word);
|
||||
lock = MUTEX_LOCK(word);
|
||||
lock = MUTEX_SET_OWNER(lock, me);
|
||||
if (atomic_compare_exchange_weak_explicit(&mutex->_word, &word, lock,
|
||||
memory_order_acquire,
|
||||
memory_order_relaxed)) {
|
||||
mutex->_pid = __pid;
|
||||
return 0;
|
||||
}
|
||||
pthread_pause_np();
|
||||
}
|
||||
LOCKTRACE("acquiring %t...", mutex);
|
||||
errno_t err = pthread_mutex_lock_impl(mutex);
|
||||
LOCKTRACE("pthread_mutex_lock(%t) → %s", mutex, DescribeErrno(err));
|
||||
return err;
|
||||
}
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
│ PERFORMANCE OF THIS SOFTWARE. │
|
||||
╚─────────────────────────────────────────────────────────────────────────────*/
|
||||
#include "libc/calls/calls.h"
|
||||
#include "libc/dce.h"
|
||||
#include "libc/errno.h"
|
||||
#include "libc/intrin/atomic.h"
|
||||
#include "libc/intrin/weaken.h"
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
│ PERFORMANCE OF THIS SOFTWARE. │
|
||||
╚─────────────────────────────────────────────────────────────────────────────*/
|
||||
#include "libc/calls/calls.h"
|
||||
#include "libc/dce.h"
|
||||
#include "libc/errno.h"
|
||||
#include "libc/intrin/atomic.h"
|
||||
#include "libc/intrin/strace.internal.h"
|
||||
|
|
|
@ -29,6 +29,6 @@ intptr_t _pthread_syshand(struct PosixThread *pt) {
|
|||
syshand = atomic_load_explicit(&pt->tib->tib_syshand, memory_order_acquire);
|
||||
if (syshand)
|
||||
return syshand;
|
||||
pthread_pause_np();
|
||||
pthread_yield_np();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -24,6 +24,6 @@
|
|||
int _pthread_tid(struct PosixThread *pt) {
|
||||
int tid = 0;
|
||||
while (pt && !(tid = atomic_load_explicit(&pt->ptid, memory_order_acquire)))
|
||||
pthread_pause_np();
|
||||
pthread_yield_np();
|
||||
return tid;
|
||||
}
|
||||
|
|
|
@ -1,312 +0,0 @@
|
|||
// Copyright 2024 Justine Alexandra Roberts Tunney
|
||||
//
|
||||
// Permission to use, copy, modify, and/or distribute this software for
|
||||
// any purpose with or without fee is hereby granted, provided that the
|
||||
// above copyright notice and this permission notice appear in all copies.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
|
||||
// WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
// WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
|
||||
// AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
|
||||
// DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
|
||||
// PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
// TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
// PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
#include "rbtree.h"
|
||||
#include "libc/dce.h"
|
||||
#include "libc/str/str.h"
|
||||
|
||||
#define RBTREE_DEBUG
|
||||
|
||||
struct rbtree *rbtree_next(struct rbtree *node) {
|
||||
if (!node)
|
||||
return 0;
|
||||
if (node->right)
|
||||
return rbtree_first(node->right);
|
||||
struct rbtree *parent = node->parent;
|
||||
while (parent && node == parent->right) {
|
||||
node = parent;
|
||||
parent = parent->parent;
|
||||
}
|
||||
return parent;
|
||||
}
|
||||
|
||||
struct rbtree *rbtree_prev(struct rbtree *node) {
|
||||
if (!node)
|
||||
return 0;
|
||||
if (rbtree_get_left(node))
|
||||
return rbtree_last(rbtree_get_left(node));
|
||||
struct rbtree *parent = node->parent;
|
||||
while (parent && node == rbtree_get_left(parent)) {
|
||||
node = parent;
|
||||
parent = parent->parent;
|
||||
}
|
||||
return parent;
|
||||
}
|
||||
|
||||
struct rbtree *rbtree_first(struct rbtree *node) {
|
||||
while (node && rbtree_get_left(node))
|
||||
node = rbtree_get_left(node);
|
||||
return node;
|
||||
}
|
||||
|
||||
struct rbtree *rbtree_last(struct rbtree *node) {
|
||||
while (node && node->right)
|
||||
node = node->right;
|
||||
return node;
|
||||
}
|
||||
|
||||
static void rbtree_rotate_left(struct rbtree **root, struct rbtree *x) {
|
||||
struct rbtree *y = x->right;
|
||||
x->right = rbtree_get_left(y);
|
||||
if (rbtree_get_left(y))
|
||||
rbtree_get_left(y)->parent = x;
|
||||
y->parent = x->parent;
|
||||
if (!x->parent) {
|
||||
*root = y;
|
||||
} else if (x == rbtree_get_left(x->parent)) {
|
||||
rbtree_set_left(x->parent, y);
|
||||
} else {
|
||||
x->parent->right = y;
|
||||
}
|
||||
rbtree_set_left(y, x);
|
||||
x->parent = y;
|
||||
}
|
||||
|
||||
static void rbtree_rotate_right(struct rbtree **root, struct rbtree *y) {
|
||||
struct rbtree *x = rbtree_get_left(y);
|
||||
rbtree_set_left(y, x->right);
|
||||
if (x->right)
|
||||
x->right->parent = y;
|
||||
x->parent = y->parent;
|
||||
if (!y->parent) {
|
||||
*root = x;
|
||||
} else if (y == y->parent->right) {
|
||||
y->parent->right = x;
|
||||
} else {
|
||||
rbtree_set_left(y->parent, x);
|
||||
}
|
||||
x->right = y;
|
||||
y->parent = x;
|
||||
}
|
||||
|
||||
static void rbtree_insert_fixup(struct rbtree **root, struct rbtree *node) {
|
||||
rbtree_set_red(node, 1);
|
||||
while (node != *root && rbtree_get_red(node->parent)) {
|
||||
if (node->parent == rbtree_get_left(node->parent->parent)) {
|
||||
struct rbtree *uncle = node->parent->parent->right;
|
||||
if (uncle && rbtree_get_red(uncle)) {
|
||||
rbtree_set_red(node->parent, 0);
|
||||
rbtree_set_red(uncle, 0);
|
||||
rbtree_set_red(node->parent->parent, 1);
|
||||
node = node->parent->parent;
|
||||
} else {
|
||||
if (node == node->parent->right) {
|
||||
node = node->parent;
|
||||
rbtree_rotate_left(root, node);
|
||||
}
|
||||
rbtree_set_red(node->parent, 0);
|
||||
rbtree_set_red(node->parent->parent, 1);
|
||||
rbtree_rotate_right(root, node->parent->parent);
|
||||
}
|
||||
} else {
|
||||
struct rbtree *uncle = rbtree_get_left(node->parent->parent);
|
||||
if (uncle && rbtree_get_red(uncle)) {
|
||||
rbtree_set_red(node->parent, 0);
|
||||
rbtree_set_red(uncle, 0);
|
||||
rbtree_set_red(node->parent->parent, 1);
|
||||
node = node->parent->parent;
|
||||
} else {
|
||||
if (node == rbtree_get_left(node->parent)) {
|
||||
node = node->parent;
|
||||
rbtree_rotate_right(root, node);
|
||||
}
|
||||
rbtree_set_red(node->parent, 0);
|
||||
rbtree_set_red(node->parent->parent, 1);
|
||||
rbtree_rotate_left(root, node->parent->parent);
|
||||
}
|
||||
}
|
||||
}
|
||||
rbtree_set_red(*root, 0);
|
||||
}
|
||||
|
||||
void rbtree_insert(struct rbtree **root, struct rbtree *node,
|
||||
rbtree_cmp_f *cmp) {
|
||||
bzero(node, sizeof(*node));
|
||||
if (!*root) {
|
||||
*root = node;
|
||||
} else {
|
||||
struct rbtree *search = *root;
|
||||
struct rbtree *parent = 0;
|
||||
do {
|
||||
parent = search;
|
||||
if (cmp(node, search) < 0) {
|
||||
search = rbtree_get_left(search);
|
||||
} else {
|
||||
search = search->right;
|
||||
}
|
||||
} while (search);
|
||||
if (cmp(node, parent) < 0) {
|
||||
rbtree_set_left(parent, node);
|
||||
} else {
|
||||
parent->right = node;
|
||||
}
|
||||
node->parent = parent;
|
||||
rbtree_insert_fixup(root, node);
|
||||
}
|
||||
}
|
||||
|
||||
static void rbtree_transplant(struct rbtree **root, struct rbtree *u,
|
||||
struct rbtree *v) {
|
||||
if (!u->parent) {
|
||||
*root = v;
|
||||
} else if (u == rbtree_get_left(u->parent)) {
|
||||
rbtree_set_left(u->parent, v);
|
||||
} else {
|
||||
u->parent->right = v;
|
||||
}
|
||||
if (v)
|
||||
v->parent = u->parent;
|
||||
}
|
||||
|
||||
static void rbtree_remove_fixup(struct rbtree **root, struct rbtree *node,
|
||||
struct rbtree *parent) {
|
||||
while (node != *root && (!node || !rbtree_get_red(node))) {
|
||||
if (node == rbtree_get_left(parent)) {
|
||||
struct rbtree *sibling = parent->right;
|
||||
if (rbtree_get_red(sibling)) {
|
||||
rbtree_set_red(sibling, 0);
|
||||
rbtree_set_red(parent, 1);
|
||||
rbtree_rotate_left(root, parent);
|
||||
sibling = parent->right;
|
||||
}
|
||||
if ((!rbtree_get_left(sibling) ||
|
||||
!rbtree_get_red(rbtree_get_left(sibling))) &&
|
||||
(!sibling->right || !rbtree_get_red(sibling->right))) {
|
||||
rbtree_set_red(sibling, 1);
|
||||
node = parent;
|
||||
parent = node->parent;
|
||||
} else {
|
||||
if (!sibling->right || !rbtree_get_red(sibling->right)) {
|
||||
rbtree_set_red(rbtree_get_left(sibling), 0);
|
||||
rbtree_set_red(sibling, 1);
|
||||
rbtree_rotate_right(root, sibling);
|
||||
sibling = parent->right;
|
||||
}
|
||||
rbtree_set_red(sibling, rbtree_get_red(parent));
|
||||
rbtree_set_red(parent, 0);
|
||||
rbtree_set_red(sibling->right, 0);
|
||||
rbtree_rotate_left(root, parent);
|
||||
node = *root;
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
struct rbtree *sibling = rbtree_get_left(parent);
|
||||
if (rbtree_get_red(sibling)) {
|
||||
rbtree_set_red(sibling, 0);
|
||||
rbtree_set_red(parent, 1);
|
||||
rbtree_rotate_right(root, parent);
|
||||
sibling = rbtree_get_left(parent);
|
||||
}
|
||||
if ((!sibling->right || !rbtree_get_red(sibling->right)) &&
|
||||
(!rbtree_get_left(sibling) ||
|
||||
!rbtree_get_red(rbtree_get_left(sibling)))) {
|
||||
rbtree_set_red(sibling, 1);
|
||||
node = parent;
|
||||
parent = node->parent;
|
||||
} else {
|
||||
if (!rbtree_get_left(sibling) ||
|
||||
!rbtree_get_red(rbtree_get_left(sibling))) {
|
||||
rbtree_set_red(sibling->right, 0);
|
||||
rbtree_set_red(sibling, 1);
|
||||
rbtree_rotate_left(root, sibling);
|
||||
sibling = rbtree_get_left(parent);
|
||||
}
|
||||
rbtree_set_red(sibling, rbtree_get_red(parent));
|
||||
rbtree_set_red(parent, 0);
|
||||
rbtree_set_red(rbtree_get_left(sibling), 0);
|
||||
rbtree_rotate_right(root, parent);
|
||||
node = *root;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (node)
|
||||
rbtree_set_red(node, 0);
|
||||
}
|
||||
|
||||
void rbtree_remove(struct rbtree **root, struct rbtree *node) {
|
||||
struct rbtree *y = node;
|
||||
struct rbtree *x = 0;
|
||||
struct rbtree *x_parent = 0;
|
||||
int y_original_color = rbtree_get_red(y);
|
||||
if (!rbtree_get_left(node)) {
|
||||
x = node->right;
|
||||
rbtree_transplant(root, node, node->right);
|
||||
x_parent = node->parent;
|
||||
} else if (!node->right) {
|
||||
x = rbtree_get_left(node);
|
||||
rbtree_transplant(root, node, rbtree_get_left(node));
|
||||
x_parent = node->parent;
|
||||
} else {
|
||||
y = rbtree_first(node->right);
|
||||
y_original_color = rbtree_get_red(y);
|
||||
x = y->right;
|
||||
if (y->parent == node) {
|
||||
if (x)
|
||||
x->parent = y;
|
||||
x_parent = y;
|
||||
} else {
|
||||
rbtree_transplant(root, y, y->right);
|
||||
y->right = node->right;
|
||||
y->right->parent = y;
|
||||
x_parent = y->parent;
|
||||
}
|
||||
rbtree_transplant(root, node, y);
|
||||
rbtree_set_left(y, rbtree_get_left(node));
|
||||
rbtree_get_left(y)->parent = y;
|
||||
rbtree_set_red(y, rbtree_get_red(node));
|
||||
}
|
||||
if (!y_original_color)
|
||||
rbtree_remove_fixup(root, x, x_parent);
|
||||
}
|
||||
|
||||
struct rbtree *rbtree_get(const struct rbtree *node, const struct rbtree *key,
|
||||
rbtree_cmp_f *cmp) {
|
||||
while (node) {
|
||||
int c = cmp(key, node);
|
||||
if (c < 0) {
|
||||
node = rbtree_get_left(node);
|
||||
} else if (c > 0) {
|
||||
node = node->right;
|
||||
} else {
|
||||
return (struct rbtree *)node;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct rbtree *rbtree_floor(const struct rbtree *node, const struct rbtree *key,
|
||||
rbtree_cmp_f *cmp) {
|
||||
while (node) {
|
||||
if (cmp(key, node) < 0) {
|
||||
node = rbtree_get_left(node);
|
||||
} else {
|
||||
node = node->right;
|
||||
}
|
||||
}
|
||||
return (struct rbtree *)node;
|
||||
}
|
||||
|
||||
struct rbtree *rbtree_ceil(const struct rbtree *node, const struct rbtree *key,
|
||||
rbtree_cmp_f *cmp) {
|
||||
while (node) {
|
||||
if (cmp(node, key) < 0) {
|
||||
node = rbtree_get_left(node);
|
||||
} else {
|
||||
node = node->right;
|
||||
}
|
||||
}
|
||||
return (struct rbtree *)node;
|
||||
}
|
|
@ -1,57 +0,0 @@
|
|||
#ifdef _COSMO_SOURCE
|
||||
#ifndef COSMOPOLITAN_RBTREE_H_
|
||||
#define COSMOPOLITAN_RBTREE_H_
|
||||
#define rbtree_ceil __rbtree_ceil
|
||||
#define rbtree_first __rbtree_first
|
||||
#define rbtree_floor __rbtree_floor
|
||||
#define rbtree_get __rbtree_get
|
||||
#define rbtree_insert __rbtree_insert
|
||||
#define rbtree_last __rbtree_last
|
||||
#define rbtree_next __rbtree_next
|
||||
#define rbtree_prev __rbtree_prev
|
||||
#define rbtree_remove __rbtree_remove
|
||||
COSMOPOLITAN_C_START_
|
||||
|
||||
#define RBTREE_CONTAINER(t, f, p) ((t *)(((char *)(p)) - offsetof(t, f)))
|
||||
|
||||
struct rbtree {
|
||||
uintptr_t word;
|
||||
struct rbtree *right;
|
||||
struct rbtree *parent;
|
||||
};
|
||||
|
||||
typedef int rbtree_cmp_f(const struct rbtree *, const struct rbtree *);
|
||||
|
||||
static inline struct rbtree *rbtree_get_left(const struct rbtree *node) {
|
||||
return (struct rbtree *)(node->word & -2);
|
||||
}
|
||||
|
||||
static inline void rbtree_set_left(struct rbtree *node, struct rbtree *left) {
|
||||
node->word = (uintptr_t)left | (node->word & 1);
|
||||
}
|
||||
|
||||
static inline int rbtree_get_red(const struct rbtree *node) {
|
||||
return node->word & 1;
|
||||
}
|
||||
|
||||
static inline void rbtree_set_red(struct rbtree *node, int red) {
|
||||
node->word &= -2;
|
||||
node->word |= red;
|
||||
}
|
||||
|
||||
struct rbtree *rbtree_next(struct rbtree *) libcesque;
|
||||
struct rbtree *rbtree_prev(struct rbtree *) libcesque;
|
||||
struct rbtree *rbtree_first(struct rbtree *) libcesque;
|
||||
struct rbtree *rbtree_last(struct rbtree *) libcesque;
|
||||
void rbtree_remove(struct rbtree **, struct rbtree *) libcesque;
|
||||
void rbtree_insert(struct rbtree **, struct rbtree *, rbtree_cmp_f *) libcesque;
|
||||
struct rbtree *rbtree_get(const struct rbtree *, const struct rbtree *,
|
||||
rbtree_cmp_f *) libcesque;
|
||||
struct rbtree *rbtree_ceil(const struct rbtree *, const struct rbtree *,
|
||||
rbtree_cmp_f *) libcesque;
|
||||
struct rbtree *rbtree_floor(const struct rbtree *, const struct rbtree *,
|
||||
rbtree_cmp_f *) libcesque;
|
||||
|
||||
COSMOPOLITAN_C_END_
|
||||
#endif /* COSMOPOLITAN_RBTREE_H_ */
|
||||
#endif /* _COSMO_SOURCE */
|
271 libc/intrin/tree.c Normal file
@@ -0,0 +1,271 @@
// Copyright 2024 Justine Alexandra Roberts Tunney
//
// Permission to use, copy, modify, and/or distribute this software for
// any purpose with or without fee is hereby granted, provided that the
// above copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
// WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
// AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
// DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
// PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
// TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.

#include "tree.h"

struct Tree *tree_last(struct Tree *node) {
  while (node && node->right)
    node = node->right;
  return node;
}

struct Tree *tree_first(struct Tree *node) {
  while (node && tree_get_left(node))
    node = tree_get_left(node);
  return node;
}

struct Tree *tree_next(struct Tree *node) {
  if (!node)
    return 0;
  if (node->right)
    return tree_first(node->right);
  struct Tree *parent = node->parent;
  while (parent && node == parent->right) {
    node = parent;
    parent = parent->parent;
  }
  return parent;
}

struct Tree *tree_prev(struct Tree *node) {
  struct Tree *parent;
  if (!node)
    return 0;
  if (tree_get_left(node))
    return tree_last(tree_get_left(node));
  parent = node->parent;
  while (parent && node == tree_get_left(parent)) {
    node = parent;
    parent = parent->parent;
  }
  return parent;
}

static void tree_rotate_left(struct Tree **root, struct Tree *x) {
  struct Tree *y = x->right;
  x->right = tree_get_left(y);
  if (tree_get_left(y))
    tree_get_left(y)->parent = x;
  y->parent = x->parent;
  if (!x->parent) {
    *root = y;
  } else if (x == tree_get_left(x->parent)) {
    tree_set_left(x->parent, y);
  } else {
    x->parent->right = y;
  }
  tree_set_left(y, x);
  x->parent = y;
}

static void tree_rotate_right(struct Tree **root, struct Tree *y) {
  struct Tree *x = tree_get_left(y);
  tree_set_left(y, x->right);
  if (x->right)
    x->right->parent = y;
  x->parent = y->parent;
  if (!y->parent) {
    *root = x;
  } else if (y == y->parent->right) {
    y->parent->right = x;
  } else {
    tree_set_left(y->parent, x);
  }
  y->parent = x;
  x->right = y;
}

static void tree_rebalance_insert(struct Tree **root, struct Tree *node) {
  struct Tree *uncle;
  tree_set_red(node, 1);
  while (node != *root && tree_get_red(node->parent)) {
    if (node->parent == tree_get_left(node->parent->parent)) {
      uncle = node->parent->parent->right;
      if (uncle && tree_get_red(uncle)) {
        tree_set_red(node->parent, 0);
        tree_set_red(uncle, 0);
        tree_set_red(node->parent->parent, 1);
        node = node->parent->parent;
      } else {
        if (node == node->parent->right) {
          node = node->parent;
          tree_rotate_left(root, node);
        }
        tree_set_red(node->parent, 0);
        tree_set_red(node->parent->parent, 1);
        tree_rotate_right(root, node->parent->parent);
      }
    } else {
      uncle = tree_get_left(node->parent->parent);
      if (uncle && tree_get_red(uncle)) {
        tree_set_red(node->parent, 0);
        tree_set_red(uncle, 0);
        tree_set_red(node->parent->parent, 1);
        node = node->parent->parent;
      } else {
        if (node == tree_get_left(node->parent)) {
          node = node->parent;
          tree_rotate_right(root, node);
        }
        tree_set_red(node->parent, 0);
        tree_set_red(node->parent->parent, 1);
        tree_rotate_left(root, node->parent->parent);
      }
    }
  }
  tree_set_red(*root, 0);
}

void tree_insert(struct Tree **root, struct Tree *node, tree_cmp_f *cmp) {
  struct Tree *search, *parent;
  node->word = 0;
  node->right = 0;
  node->parent = 0;
  if (!*root) {
    *root = node;
  } else {
    search = *root;
    parent = 0;
    do {
      parent = search;
      if (cmp(node, search) < 0) {
        search = tree_get_left(search);
      } else {
        search = search->right;
      }
    } while (search);
    if (cmp(node, parent) < 0) {
      tree_set_left(parent, node);
    } else {
      parent->right = node;
    }
    node->parent = parent;
    tree_rebalance_insert(root, node);
  }
}

static void tree_transplant(struct Tree **root, struct Tree *u,
                            struct Tree *v) {
  if (!u->parent) {
    *root = v;
  } else if (u == tree_get_left(u->parent)) {
    tree_set_left(u->parent, v);
  } else {
    u->parent->right = v;
  }
  if (v)
    v->parent = u->parent;
}

static void tree_rebalance_remove(struct Tree **root, struct Tree *node,
                                  struct Tree *parent) {
  struct Tree *sibling;
  while (node != *root && (!node || !tree_get_red(node))) {
    if (node == tree_get_left(parent)) {
      sibling = parent->right;
      if (tree_get_red(sibling)) {
        tree_set_red(sibling, 0);
        tree_set_red(parent, 1);
        tree_rotate_left(root, parent);
        sibling = parent->right;
      }
      if ((!tree_get_left(sibling) || !tree_get_red(tree_get_left(sibling))) &&
          (!sibling->right || !tree_get_red(sibling->right))) {
        tree_set_red(sibling, 1);
        node = parent;
        parent = node->parent;
      } else {
        if (!sibling->right || !tree_get_red(sibling->right)) {
          tree_set_red(tree_get_left(sibling), 0);
          tree_set_red(sibling, 1);
          tree_rotate_right(root, sibling);
          sibling = parent->right;
        }
        tree_set_red(sibling, tree_get_red(parent));
        tree_set_red(parent, 0);
        tree_set_red(sibling->right, 0);
        tree_rotate_left(root, parent);
        node = *root;
        break;
      }
    } else {
      sibling = tree_get_left(parent);
      if (tree_get_red(sibling)) {
        tree_set_red(sibling, 0);
        tree_set_red(parent, 1);
        tree_rotate_right(root, parent);
        sibling = tree_get_left(parent);
      }
      if ((!sibling->right || !tree_get_red(sibling->right)) &&
          (!tree_get_left(sibling) || !tree_get_red(tree_get_left(sibling)))) {
        tree_set_red(sibling, 1);
        node = parent;
        parent = node->parent;
      } else {
        if (!tree_get_left(sibling) || !tree_get_red(tree_get_left(sibling))) {
          tree_set_red(sibling->right, 0);
          tree_set_red(sibling, 1);
          tree_rotate_left(root, sibling);
          sibling = tree_get_left(parent);
        }
        tree_set_red(sibling, tree_get_red(parent));
        tree_set_red(parent, 0);
        tree_set_red(tree_get_left(sibling), 0);
        tree_rotate_right(root, parent);
        node = *root;
        break;
      }
    }
  }
  if (node)
    tree_set_red(node, 0);
}

void tree_remove(struct Tree **root, struct Tree *node) {
  struct Tree *x = 0;
  struct Tree *y = node;
  struct Tree *x_parent = 0;
  int y_original_color = tree_get_red(y);
  if (!tree_get_left(node)) {
    x = node->right;
    tree_transplant(root, node, node->right);
    x_parent = node->parent;
  } else if (!node->right) {
    x = tree_get_left(node);
    tree_transplant(root, node, tree_get_left(node));
    x_parent = node->parent;
  } else {
    y = tree_first(node->right);
    y_original_color = tree_get_red(y);
    x = y->right;
    if (y->parent == node) {
      if (x)
        x->parent = y;
      x_parent = y;
    } else {
      tree_transplant(root, y, y->right);
      y->right = node->right;
      y->right->parent = y;
      x_parent = y->parent;
    }
    tree_transplant(root, node, y);
    tree_set_left(y, tree_get_left(node));
    tree_get_left(y)->parent = y;
    tree_set_red(y, tree_get_red(node));
  }
  if (!y_original_color)
    tree_rebalance_remove(root, x, x_parent);
}
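The iteration helpers above are what make the new memory manager's walks cheap: tree_first() descends to the leftmost node and tree_next() follows parent links, so a full in-order pass visits every node in sorted order in O(n) total. A hedged sketch of that idiom (illustrative only; it assumes a root already populated with tree_insert() and records embedded via the TREE_CONTAINER macro defined in tree.h below):

  // illustrative only: walk every node in ascending order, the same
  // pattern WinMainForked() uses further down to restore the mappings
  struct Tree *root;  // assume this was built with tree_insert()
  for (struct Tree *e = tree_first(root); e; e = tree_next(e)) {
    // each e lives inside a caller-defined record; recover it with
    // TREE_CONTAINER(type, member, e) from tree.h
  }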
91 libc/intrin/tree.h Normal file
@@ -0,0 +1,91 @@
#ifndef COSMOPOLITAN_TREE_H_
#define COSMOPOLITAN_TREE_H_
#define tree_first __tree_first
#define tree_insert __tree_insert
#define tree_last __tree_last
#define tree_next __tree_next
#define tree_prev __tree_prev
#define tree_remove __tree_remove
COSMOPOLITAN_C_START_

#define TREE_CONTAINER(t, f, p) ((t *)(((char *)(p)) - offsetof(t, f)))

struct Tree {
  uintptr_t word;
  struct Tree *right;
  struct Tree *parent;
};

typedef int tree_search_f(const void *, const struct Tree *);
typedef int tree_cmp_f(const struct Tree *, const struct Tree *);

forceinline struct Tree *tree_get_left(const struct Tree *node) {
  return (struct Tree *)(node->word & -2);
}

static inline void tree_set_left(struct Tree *node, struct Tree *left) {
  node->word = (uintptr_t)left | (node->word & 1);
}

static inline int tree_get_red(const struct Tree *node) {
  return node->word & 1;
}

static inline void tree_set_red(struct Tree *node, int red) {
  node->word &= -2;
  node->word |= red;
}

forceinline optimizespeed struct Tree *tree_floor(const struct Tree *node,
                                                  const void *key,
                                                  tree_search_f *cmp) {
  struct Tree *left = 0;
  while (node) {
    if (cmp(key, node) >= 0) {
      left = (struct Tree *)node;
      node = tree_get_left(node);
    } else {
      node = node->right;
    }
  }
  return left;
}

static inline struct Tree *tree_ceil(const struct Tree *node, const void *key,
                                     tree_search_f *cmp) {
  struct Tree *right = 0;
  while (node) {
    if (cmp(key, node) < 0) {
      right = (struct Tree *)node;
      node = tree_get_left(node);
    } else {
      node = node->right;
    }
  }
  return right;
}

static inline struct Tree *tree_get(const struct Tree *node, const void *key,
                                    tree_search_f *cmp) {
  while (node) {
    int c = cmp(key, node);
    if (c < 0) {
      node = tree_get_left(node);
    } else if (c > 0) {
      node = node->right;
    } else {
      return (struct Tree *)node;
    }
  }
  return 0;
}

struct Tree *tree_next(struct Tree *) libcesque;
struct Tree *tree_prev(struct Tree *) libcesque;
struct Tree *tree_first(struct Tree *) libcesque;
struct Tree *tree_last(struct Tree *) libcesque;
void tree_remove(struct Tree **, struct Tree *) libcesque;
void tree_insert(struct Tree **, struct Tree *, tree_cmp_f *) libcesque;

COSMOPOLITAN_C_END_
#endif /* COSMOPOLITAN_TREE_H_ */
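To see how the pieces fit together, here is a hedged usage sketch that is not part of the change: struct Thing, thing_cmp, and thing_search are invented names for the example; only struct Tree, TREE_CONTAINER, tree_insert, tree_get, tree_cmp_f, and tree_search_f come from the header above.

  // hypothetical record that embeds an intrusive tree node
  struct Thing {
    long key;
    struct Tree elem;
  };

  // tree_cmp_f: orders two embedded nodes by their record's key
  static int thing_cmp(const struct Tree *a, const struct Tree *b) {
    long x = TREE_CONTAINER(struct Thing, elem, a)->key;
    long y = TREE_CONTAINER(struct Thing, elem, b)->key;
    return (x > y) - (x < y);
  }

  // tree_search_f: compares a bare key against a node during lookup
  static int thing_search(const void *key, const struct Tree *node) {
    long k = *(const long *)key;
    long n = TREE_CONTAINER(struct Thing, elem, node)->key;
    return (k > n) - (k < n);
  }

  // usage, inside some function: the caller owns the storage
  static struct Thing a = {42}, b = {7};
  struct Tree *root = 0;
  tree_insert(&root, &a.elem, thing_cmp);
  tree_insert(&root, &b.elem, thing_cmp);
  long want = 42;
  struct Tree *hit = tree_get(root, &want, thing_search);
  struct Thing *found = hit ? TREE_CONTAINER(struct Thing, elem, hit) : 0;

The design choice worth noting is that the tree itself never allocates: it only relinks nodes the caller embeds in its own structs, and the red/black color bit is packed into the low bit of word so a node stays three pointers wide.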
@@ -241,14 +241,10 @@ static relegated void ShowCrashReport(int err, int sig, siginfo_t *si,
      klog(buf, p - buf);
    }
    kprintf("\n");
    if (!IsWindows()) {
      __print_maps();
    }
    if (__argv) {
      for (i = 0; i < __argc; ++i) {
    __print_maps(15);
    if (__argv)
      for (i = 0; i < __argc; ++i)
        kprintf("%s ", __argv[i]);
      }
    }
    kprintf("\n");
  }
@@ -30,6 +30,7 @@
  #include "libc/intrin/kprintf.h"
  #include "libc/intrin/maps.h"
  #include "libc/intrin/strace.internal.h"
  #include "libc/intrin/tree.h"
  #include "libc/intrin/weaken.h"
  #include "libc/macros.internal.h"
  #include "libc/nt/createfile.h"
@@ -95,7 +96,7 @@ static inline textwindows ssize_t ForkIo(int64_t h, char *p, size_t n,
    size_t i;
    uint32_t x;
    for (i = 0; i < n; i += x) {
      if (!f(h, p + i, n - i, &x, NULL))
      if (!f(h, p + i, n - i, &x, 0))
        return __winerr();
      if (!x)
        break;
@@ -109,10 +110,11 @@ static dontinline textwindows ssize_t ForkIo2(
      const char *sf, bool ischild) {
    ssize_t rc = ForkIo(h, buf, n, fn);
    if (ischild) {
      __tls_enabled_set(false);  // prevent tls crash in kprintf
      // prevent crashes
      __tls_enabled_set(false);
      __pid = __imp_GetCurrentProcessId();
      __klog_handle = 0;
      __maps.used = 0;
      __maps.maps = 0;
    }
    NTTRACE("%s(%ld, %p, %'zu) → %'zd% m", sf, h, buf, n, rc);
    return rc;
@@ -121,9 +123,11 @@ static dontinline textwindows ssize_t ForkIo2(
  static dontinline textwindows bool WriteAll(int64_t h, void *buf, size_t n) {
    bool ok;
    ok = ForkIo2(h, buf, n, (void *)WriteFile, "WriteFile", false) != -1;
    if (!ok)
      AbortFork("WriteAll");
    // Sleep(10);
    if (!ok) {
      STRACE("fork() failed in parent due to WriteAll(%ld, %p, %'zu) → %u", h,
             buf, n, GetLastError());
      __print_maps(0);
    }
    return ok;
  }
@@ -185,30 +189,6 @@ static textwindows void *Malloc(size_t size) {
    return HeapAlloc(GetProcessHeap(), 0, size);
  }

  static textwindows void Free(void *addr) {
    HeapFree(GetProcessHeap(), 0, addr);
  }

  static int CountMaps(struct Dll *maps) {
    int count = 0;
    for (struct Dll *e = dll_first(maps); e; e = dll_next(maps, e))
      ++count;
    return count;
  }

  static struct Map **SortMaps(struct Dll *maps, int count) {
    int j, i = 0;
    struct Map **sorted = Malloc(count * sizeof(struct Map *));
    for (struct Dll *e = dll_first(maps); e; e = dll_next(maps, e)) {
      struct Map *map = MAP_CONTAINER(e);
      for (j = i; j > 0 && sorted[j - 1]->addr > map->addr; --j)
        sorted[j] = sorted[j - 1];
      sorted[j] = map;
      ++i;
    }
    return sorted;
  }

  textwindows void WinMainForked(void) {
    jmp_buf jb;
    int64_t reader;
@@ -233,35 +213,30 @@ textwindows void WinMainForked(void) {
    ReadOrDie(reader, jb, sizeof(jb));

    // read memory mappings from parent process
    int n = 0;
    struct Dll *maps = 0;
    struct Tree *maps = 0;
    for (;;) {
      struct Map *map = Malloc(sizeof(struct Map));
      ReadOrDie(reader, map, sizeof(struct Map));
      if (map->addr == MAP_FAILED) {
        Free(map);
      if (map->addr == MAP_FAILED)
        break;
      }
      dll_init(&map->elem);
      dll_make_first(&maps, &map->elem);
      ++n;
      tree_insert(&maps, &map->tree, __maps_compare);
    }

    // created sorted array of maps
    struct Map **sorted = SortMaps(maps, n);

    // map memory into process
    int granularity = __granularity();
    for (int i = 0; i < n; ++i) {
      struct Map *map = sorted[i];
    for (struct Tree *e = tree_first(maps); e; e = tree_next(e)) {
      struct Map *map = MAP_TREE_CONTAINER(e);
      if ((uintptr_t)map->addr & (granularity - 1))
        continue;
      size_t size = map->size;
      // get true length in case mprotect() chopped up actual win32 map
      for (int j = i + 1;
           j < n && sorted[j]->hand == -1 && map->addr + size == sorted[j]->addr;
           ++j) {
        size += sorted[j]->size;
      size_t size = map->size;
      for (struct Tree *e2 = tree_next(e); e2; e2 = tree_next(e2)) {
        struct Map *map2 = MAP_TREE_CONTAINER(e2);
        if (map2->hand == -1 && map->addr + size == map2->addr) {
          size += map2->size;
        } else {
          break;
        }
      }
      // obtain the most permissive access possible
      unsigned prot, access;
@@ -295,11 +270,11 @@ textwindows void WinMainForked(void) {

    // fixup memory manager
    __maps.free = 0;
    __maps.used = 0;
    __maps.maps = 0;
    __maps.count = 0;
    __maps.pages = 0;
    for (int i = 0; i < n; ++i) {
      struct Map *map = sorted[i];
    for (struct Tree *e = tree_first(maps); e; e = tree_next(e)) {
      struct Map *map = MAP_TREE_CONTAINER(e);
      __maps.count += 1;
      __maps.pages += (map->size + getpagesize() - 1) / getpagesize();
      unsigned old_protect;
@@ -307,8 +282,7 @@ textwindows void WinMainForked(void) {
                         &old_protect))
        AbortFork("VirtualProtect");
    }
    Free(sorted);
    __maps.used = maps;
    __maps.maps = maps;
    __maps_init();

    // mitosis complete
@@ -393,19 +367,13 @@ textwindows int sys_fork_nt(uint32_t dwCreationFlags) {
      if (spawnrc != -1) {
        CloseHandle(procinfo.hThread);
        ok = WriteAll(writer, jb, sizeof(jb));
        int count = 0;
        // this list will be populated with the maps we're transferring
        struct Dll *e2, *maps = 0;
        for (struct Dll *e = dll_first(__maps.used); ok && e; e = e2) {
          e2 = dll_next(__maps.used, e);
          struct Map *map = MAP_CONTAINER(e);
        for (struct Map *map = __maps_first(); ok && map;
             map = __maps_next(map)) {
          if (MAX((char *)__executable_start, map->addr) <
              MIN((char *)_end, map->addr + map->size))
            continue;  // executable image is loaded by windows
          dll_remove(&__maps.used, e);
          dll_make_last(&maps, e);
          ok = WriteAll(writer, map, sizeof(*map));
          ++count;
        }
        // send a terminating Map struct to child
        if (ok) {
@@ -415,40 +383,44 @@ textwindows int sys_fork_nt(uint32_t dwCreationFlags) {
        }
        // now write content of each map to child
        int granularity = __granularity();
        struct Map **sorted = SortMaps(maps, count);
        uint32_t *old_protect = Malloc(count * 4);
        for (int i = 0; ok && i < count; ++i) {
          struct Map *map = sorted[i];
        for (struct Map *map = __maps_first(); ok && map;
             map = __maps_next(map)) {
          // we only need to worry about the base mapping
          if ((uintptr_t)map->addr & (granularity - 1))
            continue;
          if (MAX((char *)__executable_start, map->addr) <
              MIN((char *)_end, map->addr + map->size))
            continue;  // executable image is loaded by windows
          // shared mappings don't need to be copied
          if ((map->flags & MAP_TYPE) == MAP_SHARED)
            continue;
          // get true length in case mprotect() chopped up actual win32 map
          int j;
          size_t size = map->size;
          for (j = i + 1; j < count && sorted[j]->hand == -1 &&
                          map->addr + size == sorted[j]->addr;
               ++j) {
            size += sorted[j]->size;
          for (struct Map *map2 = __maps_next(map); map2;
               map2 = __maps_next(map2)) {
            if (map2->hand == -1 && map->addr + size == map2->addr) {
              size += map2->size;
            } else {
              break;
            }
          }
          for (struct Map *map2 = map; ok && map2; map2 = __maps_next(map2)) {
            if (!(map2->prot & PROT_READ))
              if (map->addr >= map2->addr && map->addr < map->addr + size)
                ok = VirtualProtect(
                    map2->addr, map2->size,
                    __prot2nt(map2->prot | PROT_READ, map2->iscow),
                    &map2->oldprot);
          }
          for (int k = i; ok && k < j; ++k)
            if (!(sorted[k]->prot & PROT_READ))
              ok = VirtualProtect(
                  sorted[k]->addr, sorted[k]->size,
                  __prot2nt(sorted[k]->prot | PROT_READ, map->iscow),
                  &old_protect[k]);
          if (ok)
            ok = WriteAll(writer, map->addr, size);
          for (int k = i; ok && k < j; ++k)
            if (!(sorted[k]->prot & PROT_READ))
              ok = VirtualProtect(sorted[k]->addr, sorted[k]->size,
                                  old_protect[k], &old_protect[k]);
          for (struct Map *map2 = map; ok && map2; map2 = __maps_next(map2)) {
            if (!(map2->prot & PROT_READ))
              if (map->addr >= map2->addr && map->addr < map->addr + size)
                ok = VirtualProtect(map2->addr, map2->size, map2->oldprot,
                                    &map2->oldprot);
          }
        }
        Free(old_protect);
        Free(sorted);
        dll_make_first(&__maps.used, maps);
        if (ok)
          ok = WriteAll(writer, __data_start, __data_end - __data_start);
        if (ok)
@@ -466,6 +438,7 @@ textwindows int sys_fork_nt(uint32_t dwCreationFlags) {
        } else {
          TerminateProcess(procinfo.hProcess, SIGKILL);
          CloseHandle(procinfo.hProcess);
          rc = -1;
        }
      }
    }
@@ -473,9 +446,8 @@ textwindows int sys_fork_nt(uint32_t dwCreationFlags) {
      CloseHandle(reader);
    if (writer != -1)
      CloseHandle(writer);
    if (rc == -1 && errno != ENOMEM) {
    if (rc == -1 && errno != ENOMEM)
      eagain();  // posix fork() only specifies two errors
    }
  } else {
    rc = 0;
    // re-apply code morphing for thread-local storage
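The pattern that replaces the old dll/SortMaps machinery throughout this file is a plain ordered walk of the mapping tree. A rough sketch of that idiom, assuming only the __maps_first()/__maps_next() helpers and the struct Map fields (addr, size, prot, hand) that appear in the hunks above:

  // illustrative only: visit every tracked mapping in address order
  for (struct Map *map = __maps_first(); map; map = __maps_next(map)) {
    // mappings come out sorted by address, so runs that mprotect()
    // split apart (hand == -1) can be coalesced by peeking ahead,
    // exactly as sys_fork_nt() and WinMainForked() do above
    kprintf("%p %'zu prot=%d\n", map->addr, map->size, map->prot);
  }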
@@ -22,6 +22,7 @@
  #include "libc/calls/state.internal.h"
  #include "libc/calls/struct/sigset.h"
  #include "libc/calls/struct/sigset.internal.h"
  #include "libc/calls/struct/timespec.h"
  #include "libc/calls/syscall-nt.internal.h"
  #include "libc/calls/syscall-sysv.internal.h"
  #include "libc/dce.h"
@@ -45,7 +46,6 @@
  #include "libc/thread/posixthread.internal.h"
  #include "libc/thread/tls.h"

  extern pthread_mutex_t nsync_waiters_mu;
  extern pthread_mutex_t _pthread_lock_obj;

  static void _onfork_prepare(void) {
@@ -54,11 +54,10 @@ static void _onfork_prepare(void) {
    _pthread_lock();
    __maps_lock();
    __fds_lock();
    pthread_mutex_lock(&nsync_waiters_mu);
    LOCKTRACE("READY TO ROCK AND ROLL");
  }

  static void _onfork_parent(void) {
    pthread_mutex_unlock(&nsync_waiters_mu);
    __fds_unlock();
    __maps_unlock();
    _pthread_unlock();
@@ -68,7 +67,6 @@ static void _onfork_parent(void) {

  static void _onfork_child(void) {
    __fds_lock_obj = (pthread_mutex_t)PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
    nsync_waiters_mu = (pthread_mutex_t)PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
    _pthread_lock_obj = (pthread_mutex_t)PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
    atomic_store_explicit(&__maps.lock, 0, memory_order_relaxed);
    atomic_store_explicit(&__get_tls()->tib_relock_maps, 0, memory_order_relaxed);
@@ -77,7 +75,9 @@ static void _onfork_child(void) {
  }

  int _fork(uint32_t dwCreationFlags) {
    long micros;
    struct Dll *e;
    struct timespec started;
    int ax, dx, tid, parent;
    parent = __pid;
    BLOCK_SIGNALS;
@@ -85,11 +85,13 @@ int _fork(uint32_t dwCreationFlags) {
      __proc_lock();
    if (__threaded)
      _onfork_prepare();
    started = timespec_real();
    if (!IsWindows()) {
      ax = sys_fork();
    } else {
      ax = sys_fork_nt(dwCreationFlags);
    }
    micros = timespec_tomicros(timespec_sub(timespec_real(), started));
    if (!ax) {

      // get new process id
@@ -136,15 +138,14 @@ int _fork(uint32_t dwCreationFlags) {
      // run user fork callbacks
      if (__threaded)
        _onfork_child();
      STRACE("fork() → 0 (child of %d)", parent);
      STRACE("fork() → 0 (child of %d; took %ld us)", parent, micros);
    } else {
      // this is the parent process
      if (__threaded) {
      if (__threaded)
        _onfork_parent();
      }
      if (IsWindows())
        __proc_unlock();
      STRACE("fork() → %d% m", ax);
      STRACE("fork() → %d% m (took %ld us)", ax, micros);
    }
    ALLOW_SIGNALS;
    return ax;
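The new timing lines in _fork() are just a stopwatch wrapped around the platform fork call. A minimal sketch of the same measurement, using the timespec helpers the hunk above relies on (timespec_real, timespec_sub, timespec_tomicros); the "operation" is a placeholder for whatever is being timed:

  // illustrative only: measure an operation's duration in microseconds
  struct timespec started = timespec_real();
  // ... do the work being measured, e.g. sys_fork() ...
  long micros = timespec_tomicros(timespec_sub(timespec_real(), started));
  STRACE("operation took %ld us", micros);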
@@ -482,9 +482,8 @@ errno_t posix_spawn(int *pid, const char *path,
                      const posix_spawn_file_actions_t *file_actions,
                      const posix_spawnattr_t *attrp, char *const argv[],
                      char *const envp[]) {
    if (IsWindows()) {
    if (IsWindows())
      return posix_spawn_nt(pid, path, file_actions, attrp, argv, envp);
    }
    int pfds[2];
    bool use_pipe;
    volatile int status = 0;
@@ -516,66 +515,55 @@ errno_t posix_spawn(int *pid, const char *path,
          sigaction(sig, &dfl, 0);
        }
      }
      if (flags & POSIX_SPAWN_SETSID) {
      if (flags & POSIX_SPAWN_SETSID)
        setsid();
      }
      if ((flags & POSIX_SPAWN_SETPGROUP) && setpgid(0, (*attrp)->pgroup)) {
      if ((flags & POSIX_SPAWN_SETPGROUP) && setpgid(0, (*attrp)->pgroup))
        goto ChildFailed;
      }
      if ((flags & POSIX_SPAWN_RESETIDS) && setgid(getgid())) {
      if ((flags & POSIX_SPAWN_RESETIDS) && setgid(getgid()))
        goto ChildFailed;
      }
      if ((flags & POSIX_SPAWN_RESETIDS) && setuid(getuid())) {
      if ((flags & POSIX_SPAWN_RESETIDS) && setuid(getuid()))
        goto ChildFailed;
      }
      if (file_actions) {
        struct _posix_faction *a;
        for (a = *file_actions; a; a = a->next) {
          if (use_pipe && pfds[1] == a->fildes) {
            int p2;
            if ((p2 = dup(pfds[1])) == -1) {
            if ((p2 = dup(pfds[1])) == -1)
              goto ChildFailed;
            }
            lost_cloexec = true;
            close(pfds[1]);
            pfds[1] = p2;
          }
          switch (a->action) {
            case _POSIX_SPAWN_CLOSE:
              if (close(a->fildes)) {
              if (close(a->fildes))
                goto ChildFailed;
              }
              break;
            case _POSIX_SPAWN_DUP2:
              if (dup2(a->fildes, a->newfildes) == -1) {
              if (dup2(a->fildes, a->newfildes) == -1)
                goto ChildFailed;
              }
              break;
            case _POSIX_SPAWN_OPEN: {
              int t;
              if ((t = openat(AT_FDCWD, a->path, a->oflag, a->mode)) == -1) {
              if ((t = openat(AT_FDCWD, a->path, a->oflag, a->mode)) == -1)
                goto ChildFailed;
              }
              if (t != a->fildes) {
                if (dup2(t, a->fildes) == -1) {
                  close(t);
                  goto ChildFailed;
                }
                if (close(t)) {
                if (close(t))
                  goto ChildFailed;
                }
              }
              break;
            }
            case _POSIX_SPAWN_CHDIR:
              if (chdir(a->path) == -1) {
              if (chdir(a->path) == -1)
                goto ChildFailed;
              }
              break;
            case _POSIX_SPAWN_FCHDIR:
              if (fchdir(a->fildes) == -1) {
              if (fchdir(a->fildes) == -1)
                goto ChildFailed;
              }
              break;
            default:
              __builtin_unreachable();
@@ -583,17 +571,13 @@ errno_t posix_spawn(int *pid, const char *path,
          }
        }
      if (IsLinux() || IsFreebsd() || IsNetbsd()) {
        if (flags & POSIX_SPAWN_SETSCHEDULER) {
        if (flags & POSIX_SPAWN_SETSCHEDULER)
          if (sched_setscheduler(0, (*attrp)->schedpolicy,
                                 &(*attrp)->schedparam) == -1) {
                                 &(*attrp)->schedparam) == -1)
            goto ChildFailed;
          }
        }
        if (flags & POSIX_SPAWN_SETSCHEDPARAM) {
          if (sched_setparam(0, &(*attrp)->schedparam)) {
        if (flags & POSIX_SPAWN_SETSCHEDPARAM)
          if (sched_setparam(0, &(*attrp)->schedparam))
            goto ChildFailed;
          }
        }
      }
      if (flags & POSIX_SPAWN_SETRLIMIT) {
        int rlimset = (*attrp)->rlimset;
@@ -608,9 +592,8 @@ errno_t posix_spawn(int *pid, const char *path,
          }
        }
      }
      if (lost_cloexec) {
      if (lost_cloexec)
        fcntl(pfds[1], F_SETFD, FD_CLOEXEC);
      }
      if (flags & POSIX_SPAWN_SETSIGMASK) {
        childmask = (*attrp)->sigmask;
      } else {
@@ -636,9 +619,8 @@ errno_t posix_spawn(int *pid, const char *path,
    if (!use_pipe) {
      res = status;
    } else {
      if (can_clobber) {
      if (can_clobber)
        atomic_store_explicit(&has_vfork, true, memory_order_release);
      }
      res = 0;
      read(pfds[0], &res, sizeof(res));
    }
@@ -651,9 +633,8 @@ errno_t posix_spawn(int *pid, const char *path,
    } else {
      res = errno;
    }
    if (use_pipe) {
    if (use_pipe)
      close(pfds[0]);
    }
  ParentFailed:
    sigprocmask(SIG_SETMASK, &oldmask, 0);
    pthread_setcancelstate(cs, 0);
@@ -50,9 +50,10 @@ void *NewCosmoStack(void) {
    if (IsOpenbsd() && __sys_mmap(p, n, PROT_READ | PROT_WRITE,
                                  MAP_PRIVATE | MAP_FIXED | MAP_ANON_OPENBSD |
                                      MAP_STACK_OPENBSD,
                                  -1, 0, 0) != p) {
                                  -1, 0, 0) != p)
      notpossible;
    if (mprotect(p, GetGuardSize(), PROT_NONE | PROT_GUARD))
      notpossible;
    }
    return p;
  } else {
    return 0;
@@ -91,7 +91,7 @@ void ShowCrashReports(void) libcesque;
  int ftrace_install(void) libcesque;
  int ftrace_enabled(int) libcesque;
  int strace_enabled(int) libcesque;
  void __print_maps(void) libcesque;
  void __print_maps(size_t) libcesque;
  void __printargs(const char *) libcesque;
  /* builtin sh-like system/popen dsl */
  int _cocmd(int, char **, char **) libcesque;
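The prototype change above means every caller now passes an argument. Judging only from the call sites in this change (15 in the crash reporter, 0 in __printargs and the fork error path), the parameter appears to cap how many mappings get printed, with 0 meaning no cap; a hedged example of the two styles of call:

  __print_maps(15);  // print a bounded number of mappings, as ShowCrashReport() does
  __print_maps(0);   // appears to mean "print everything", as __printargs() does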
@@ -468,7 +468,7 @@ textstartup void __printargs(const char *prologue) {

    PRINT("");
    PRINT("MEMTRACK");
    __print_maps();
    __print_maps(0);

    PRINT("");
    PRINT("TERMIOS");
@@ -292,6 +292,7 @@ syscon mprot PROT_NONE 0 0 0 0 0 0 0 0 # mmap, mprotect, unix
  syscon mprot PROT_READ 1 1 1 1 1 1 1 1 # mmap, mprotect, unix consensus
  syscon mprot PROT_WRITE 2 2 2 2 2 2 2 2 # mmap, mprotect, unix consensus
  syscon mprot PROT_EXEC 4 4 4 4 4 4 4 4 # mmap, mprotect, unix consensus
  syscon mprot PROT_GUARD 0 0 0 0 0 0 0 0x100 # mmap, mprotect, unix consensus

  # mremap() flags
  # the revolutionary praxis of realloc()

2 libc/sysv/consts/PROT_GUARD.S Normal file
@@ -0,0 +1,2 @@
#include "libc/sysv/consts/syscon.internal.h"
	.syscon	mprot,PROT_GUARD,0,0,0,0,0,0,0,0x100
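Per the syscon table, PROT_GUARD is 0 on every unix and 0x100 (the Windows guard-page bit) on Windows, which is why the stack code in this change can request a guard page with one portable mprotect() call instead of branching on IsWindows(). A hedged sketch of that idiom, assuming a freshly mapped, page-aligned stack whose guard sits at the low end:

  // illustrative only: carve a guard page off the bottom of a stack
  size_t stacksize = 8 * 1024 * 1024;
  size_t guardsize = getpagesize();
  char *stack = mmap(0, stacksize, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (stack != MAP_FAILED)
    // PROT_GUARD adds nothing on unix (it's 0); on Windows it arms the
    // native guard-page protection for the lowest page
    mprotect(stack, guardsize, PROT_NONE | PROT_GUARD);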
@@ -3,10 +3,7 @@
  #if !(__ASSEMBLER__ + __LINKER__ + 0)
  COSMOPOLITAN_C_START_

  extern const int PROT_NONE;
  extern const int PROT_READ;
  extern const int PROT_WRITE;
  extern const int PROT_EXEC;
  extern const int PROT_GUARD;

  COSMOPOLITAN_C_END_
  #endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */
@@ -229,20 +229,10 @@ static errno_t pthread_create_impl(pthread_t *thread,
                        -1, 0, 0) != pt->pt_attr.__stackaddr) {
        notpossible;
      }
      if (pt->pt_attr.__guardsize) {
        if (!IsWindows()) {
          if (mprotect(pt->pt_attr.__stackaddr, pt->pt_attr.__guardsize,
                       PROT_NONE)) {
            notpossible;
          }
        } else {
          uint32_t oldattr;
          if (!VirtualProtect(pt->pt_attr.__stackaddr, pt->pt_attr.__guardsize,
                              kNtPageReadwrite | kNtPageGuard, &oldattr)) {
            notpossible;
          }
        }
      }
      if (pt->pt_attr.__guardsize)
        if (mprotect(pt->pt_attr.__stackaddr, pt->pt_attr.__guardsize,
                     PROT_NONE | PROT_GUARD))
          notpossible;
    }
    if (!pt->pt_attr.__stackaddr || pt->pt_attr.__stackaddr == MAP_FAILED) {
      rc = errno;
@@ -190,6 +190,7 @@ int pthread_spin_trylock(pthread_spinlock_t *) libcesque paramsnonnull();
  int pthread_spin_unlock(pthread_spinlock_t *) libcesque paramsnonnull();
  int pthread_testcancel_np(void) libcesque;
  int pthread_tryjoin_np(pthread_t, void **) libcesque;
  int pthread_delay_np(const void *, int) libcesque;
  int pthread_yield_np(void) libcesque;
  int pthread_yield(void) libcesque;
  pthread_id_np_t pthread_getthreadid_np(void) libcesque;