Mirror of https://github.com/jart/cosmopolitan.git (synced 2025-03-03 07:29:23 +00:00)

Simplify memory manager code

This commit is contained in:
parent 379cd77078
commit aca4214ff6

11 changed files with 442 additions and 325 deletions
@@ -48,6 +48,7 @@
#include "libc/nt/enum/wait.h"
#include "libc/nt/errors.h"
#include "libc/nt/events.h"
#include "libc/nt/memory.h"
#include "libc/nt/runtime.h"
#include "libc/nt/struct/inputrecord.h"
#include "libc/nt/synchronization.h"

@@ -127,38 +128,46 @@ struct Keystrokes {
bool ohno_decckm;
bool bypass_mode;
uint16_t utf16hs;
int16_t freekeys;
size_t free_keys;
int64_t cin, cot;
struct Dll *list;
struct Dll *line;
struct Dll *free;
pthread_mutex_t lock;
struct Keystroke pool[512];
};

static struct Keystrokes __keystroke = {
.lock = PTHREAD_MUTEX_INITIALIZER,
};
static struct Keystrokes __keystroke;
static pthread_mutex_t __keystroke_lock = PTHREAD_MUTEX_INITIALIZER;

textwindows void sys_read_nt_wipe_keystrokes(void) {
pthread_mutex_t lock = __keystroke.lock;
bzero(&__keystroke, sizeof(__keystroke));
__keystroke.lock = lock;
_pthread_mutex_wipe_np(&__keystroke.lock);
_pthread_mutex_wipe_np(&__keystroke_lock);
}

textwindows static void FreeKeystrokeImpl(struct Dll *key) {
dll_make_first(&__keystroke.free, key);
++__keystroke.freekeys;
++__keystroke.free_keys;
}

textwindows static struct Keystroke *AllocKeystroke(void) {
struct Keystroke *k;
if (!(k = HeapAlloc(GetProcessHeap(), 0, sizeof(struct Keystroke))))
return 0;
dll_init(&k->elem);
return k;
}

textwindows static struct Keystroke *NewKeystroke(void) {
struct Dll *e = dll_first(__keystroke.free);
if (!e) // See MIN(freekeys) before ReadConsoleInput()
__builtin_trap();
struct Keystroke *k = KEYSTROKE_CONTAINER(e);
dll_remove(&__keystroke.free, &k->elem);
--__keystroke.freekeys;
struct Dll *e;
struct Keystroke *k;
if ((e = dll_first(__keystroke.free))) {
dll_remove(&__keystroke.free, e);
k = KEYSTROKE_CONTAINER(e);
--__keystroke.free_keys;
} else {
// PopulateKeystrokes() should make this branch impossible
if (!(k = AllocKeystroke()))
return 0;
}
k->buflen = 0;
return k;
}

@@ -174,15 +183,22 @@ textwindows static void FreeKeystrokes(struct Dll **list) {
FreeKeystroke(list, key);
}

textwindows static void PopulateKeystrokes(size_t want) {
struct Keystroke *k;
while (__keystroke.free_keys < want) {
if ((k = AllocKeystroke())) {
FreeKeystrokeImpl(&k->elem);
} else {
break;
}
}
}

textwindows static void OpenConsole(void) {
__keystroke.cin = CreateFile(u"CONIN$", kNtGenericRead | kNtGenericWrite,
kNtFileShareRead, 0, kNtOpenExisting, 0, 0);
__keystroke.cot = CreateFile(u"CONOUT$", kNtGenericRead | kNtGenericWrite,
kNtFileShareWrite, 0, kNtOpenExisting, 0, 0);
for (int i = 0; i < ARRAYLEN(__keystroke.pool); ++i) {
dll_init(&__keystroke.pool[i].elem);
FreeKeystrokeImpl(&__keystroke.pool[i].elem);
}
}

textwindows static int AddSignal(int sig) {

@@ -196,11 +212,11 @@ textwindows static void InitConsole(void) {
}

textwindows static void LockKeystrokes(void) {
_pthread_mutex_lock(&__keystroke.lock);
_pthread_mutex_lock(&__keystroke_lock);
}

textwindows static void UnlockKeystrokes(void) {
_pthread_mutex_unlock(&__keystroke.lock);
_pthread_mutex_unlock(&__keystroke_lock);
}

textwindows int64_t GetConsoleInputHandle(void) {

@@ -523,14 +539,12 @@ textwindows static void IngestConsoleInputRecord(struct NtInputRecord *r) {
!(__ttyconf.magic & kTtyNoIexten)) { // IEXTEN
if (__keystroke.bypass_mode) {
struct Keystroke *k = NewKeystroke();
if (!k)
return;
memcpy(k->buf, buf, sizeof(k->buf));
k->buflen = len;
dll_make_last(&__keystroke.line, &k->elem);
EchoConsoleNt(buf, len, true);
if (!__keystroke.freekeys) {
dll_make_last(&__keystroke.list, __keystroke.line);
__keystroke.line = 0;
}
__keystroke.bypass_mode = false;
return;
} else if (len == 1 && buf[0] && //

@@ -620,6 +634,8 @@ textwindows static void IngestConsoleInputRecord(struct NtInputRecord *r) {

// allocate object to hold keystroke
struct Keystroke *k = NewKeystroke();
if (!k)
return;
memcpy(k->buf, buf, sizeof(k->buf));
k->buflen = len;

@@ -633,12 +649,12 @@ textwindows static void IngestConsoleInputRecord(struct NtInputRecord *r) {
} else {
dll_make_last(&__keystroke.line, &k->elem);

// flush canonical mode line if oom or enter
if (!__keystroke.freekeys || (len == 1 && buf[0] &&
((buf[0] & 255) == '\n' || //
(buf[0] & 255) == __ttyconf.veol || //
((buf[0] & 255) == __ttyconf.veol2 &&
!(__ttyconf.magic & kTtyNoIexten))))) {
// flush canonical mode line on enter
if (len == 1 && buf[0] &&
((buf[0] & 255) == '\n' || //
(buf[0] & 255) == __ttyconf.veol || //
((buf[0] & 255) == __ttyconf.veol2 &&
!(__ttyconf.magic & kTtyNoIexten)))) {
dll_make_last(&__keystroke.list, __keystroke.line);
__keystroke.line = 0;
}

@@ -649,15 +665,17 @@ textwindows static void IngestConsoleInput(void) {
uint32_t i, n;
struct NtInputRecord records[16];
for (;;) {
if (!__keystroke.freekeys)
return;
if (__keystroke.end_of_file)
return;
if (!GetNumberOfConsoleInputEvents(__keystroke.cin, &n))
goto UnexpectedEof;
if (!n || !__keystroke.freekeys)
if (n > ARRAYLEN(records))
n = ARRAYLEN(records);
PopulateKeystrokes(n + 1);
if (n > __keystroke.free_keys)
n = __keystroke.free_keys;
if (!n)
return;
n = MIN(__keystroke.freekeys, MIN(ARRAYLEN(records), n));
if (!ReadConsoleInput(__keystroke.cin, records, n, &n))
goto UnexpectedEof;
for (i = 0; i < n && !__keystroke.end_of_file; ++i)

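Note: the hunks above rework how the NT console reader stores keystrokes. The fixed pool[512] plus a freekeys counter gives way to a free_keys count, an AllocKeystroke() that draws from the process heap, and PopulateKeystrokes(), which tops the free list up before ReadConsoleInput() so NewKeystroke() no longer has to trap when the pool runs dry. Below is a rough, self-contained sketch of that pattern; the names are hypothetical and plain malloc() stands in for HeapAlloc() on the process heap.

#include <stdlib.h>

// Hypothetical node type; the real code links struct Keystroke into a Dll.
struct Node { struct Node *next; int payload; };

static struct Node *free_list;   // stack of spare nodes
static size_t free_count;        // mirrors __keystroke.free_keys

static void put_free(struct Node *n) {  // like FreeKeystrokeImpl()
  n->next = free_list;
  free_list = n;
  ++free_count;
}

// like PopulateKeystrokes(): grow the free list to `want` before use
static void populate(size_t want) {
  while (free_count < want) {
    struct Node *n = malloc(sizeof(*n));  // real code uses HeapAlloc()
    if (!n) break;                        // best effort; caller rechecks
    put_free(n);
  }
}

// like NewKeystroke(): prefer the free list, fall back to allocating
static struct Node *get_node(void) {
  struct Node *n = free_list;
  if (n) {
    free_list = n->next;
    --free_count;
  } else if (!(n = malloc(sizeof(*n)))) {
    return 0;  // caller drops the input instead of trapping
  }
  n->payload = 0;
  return n;
}
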
@@ -32,6 +32,7 @@
#include "libc/runtime/stack.h"
#include "libc/sysv/consts/prot.h"
#include "libc/thread/lock.h"
#include "libc/thread/tls.h"

#ifdef __x86_64__
__static_yoink("_init_maps");

@@ -124,26 +125,33 @@ privileged static void __maps_panic(const char *msg) {
}
#endif

ABI bool __maps_lock(void) {
bool __maps_held(void) {
return __tls_enabled && !(__get_tls()->tib_flags & TIB_FLAG_VFORKED) &&
MUTEX_OWNER(
atomic_load_explicit(&__maps.lock.word, memory_order_relaxed)) ==
atomic_load_explicit(&__get_tls()->tib_tid, memory_order_relaxed);
}

ABI void __maps_lock(void) {
int me;
uint64_t word, lock;
struct CosmoTib *tib;
if (!__tls_enabled)
return false;
return;
if (!(tib = __get_tls_privileged()))
return false;
return;
if (tib->tib_flags & TIB_FLAG_VFORKED)
return false;
me = atomic_load_explicit(&tib->tib_tid, memory_order_acquire);
return;
me = atomic_load_explicit(&tib->tib_tid, memory_order_relaxed);
if (me <= 0)
return false;
return;
word = atomic_load_explicit(&__maps.lock.word, memory_order_relaxed);
for (;;) {
if (MUTEX_OWNER(word) == me) {
if (atomic_compare_exchange_weak_explicit(
&__maps.lock.word, &word, MUTEX_INC_DEPTH(word),
memory_order_relaxed, memory_order_relaxed))
return true;
return;
continue;
}
#if DEBUG_MAPS_LOCK

@@ -162,7 +170,7 @@ ABI bool __maps_lock(void) {
__deadlock_track(&__maps.lock, 0);
__deadlock_record(&__maps.lock, 0);
#endif
return false;
return;
}
for (;;) {
word = atomic_load_explicit(&__maps.lock.word, memory_order_relaxed);

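Note: in the hunks above, __maps_lock() stops returning a bool; a new predicate __maps_held() instead reports whether the calling thread already owns the lock by comparing MUTEX_OWNER() of the lock word with its own tid, and callers such as munmap() check it up front so a signal handler that re-enters the memory manager fails with EDEADLK rather than deadlocking. A minimal sketch of such an owner-encoded recursive lock word follows, assuming owner tid in the low 32 bits and recursion depth above it (not the real MUTEX_* encoding).

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static _Atomic(uint64_t) lock_word;  // 0 means unlocked

static inline uint32_t owner_of(uint64_t w) { return (uint32_t)w; }

// like __maps_held(): does thread `me` already own the lock?
bool maps_held(uint32_t me) {
  return owner_of(atomic_load_explicit(&lock_word, memory_order_relaxed)) == me;
}

// like __maps_lock(): recursive spinlock keyed on the caller's tid
void maps_lock(uint32_t me) {
  uint64_t w = atomic_load_explicit(&lock_word, memory_order_relaxed);
  for (;;) {
    if (owner_of(w) == me) {  // re-entry: bump the depth field
      if (atomic_compare_exchange_weak_explicit(&lock_word, &w,
                                                w + ((uint64_t)1 << 32),
                                                memory_order_relaxed,
                                                memory_order_relaxed))
        return;
      continue;
    }
    w = 0;  // otherwise wait for it to be free and claim it
    if (atomic_compare_exchange_weak_explicit(&lock_word, &w,
                                              (uint64_t)1 << 32 | me,
                                              memory_order_acquire,
                                              memory_order_relaxed))
      return;
  }
}

void maps_unlock(uint32_t me) {
  uint64_t w = atomic_load_explicit(&lock_word, memory_order_relaxed);
  if (owner_of(w) == me)
    atomic_store_explicit(&lock_word,
                          w >> 32 > 1 ? w - ((uint64_t)1 << 32) : 0,
                          memory_order_release);
}
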
@@ -5,17 +5,16 @@
#include "libc/runtime/runtime.h"
COSMOPOLITAN_C_START_

#define MAPS_RETRY ((void *)-1)

#define MAP_TREE_CONTAINER(e) TREE_CONTAINER(struct Map, tree, e)

struct Map {
char *addr; /* granule aligned */
size_t size; /* must be nonzero */
int64_t off; /* ignore for anon */
int prot; /* memory protects */
int flags; /* memory map flag */
char prot; /* memory protects */
bool iscow; /* windows nt only */
bool precious; /* windows nt only */
bool readonlyfile; /* windows nt only */
unsigned visited; /* checks and fork */
intptr_t hand; /* windows nt only */

@@ -39,7 +38,11 @@ struct Maps {
size_t pages;
struct Map stack;
struct Map guard;
struct Map spool[13];
#ifdef MODE_DBG
struct Map spool[1];
#else
struct Map spool[20];
#endif
};

struct AddrSize {

@@ -49,8 +52,9 @@ struct AddrSize {

extern struct Maps __maps;

bool __maps_held(void);
void __maps_init(void);
bool __maps_lock(void);
void __maps_lock(void);
void __maps_check(void);
void __maps_unlock(void);
void *__maps_randaddr(void);

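Note: the header above narrows struct Map::prot to a char, sizes the static spool by MODE_DBG, and swaps the bool __maps_lock() prototype for void __maps_lock() plus bool __maps_held(). The mmap.c hunks that follow also replace open-coded page and granule rounding with the PGUP() and GRUP() macros over the global __pagesize and __gransize; a small hedged illustration of that power-of-two round-up, assuming both sizes are powers of two:

#include <assert.h>
#include <stddef.h>

// Round x up to a multiple of a power-of-two `align`, the same shape as
// #define PGUP(x) (((x) + __pagesize - 1) & -__pagesize).
static size_t round_up(size_t x, size_t align) {
  return (x + align - 1) & -align;
}

int main(void) {
  assert(round_up(1, 4096) == 4096);       // PGUP-style page rounding
  assert(round_up(4096, 4096) == 4096);
  assert(round_up(4097, 65536) == 65536);  // GRUP-style granule rounding
  return 0;
}
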
@@ -43,13 +43,16 @@
#include "libc/sysv/consts/o.h"
#include "libc/sysv/consts/prot.h"
#include "libc/sysv/errfuns.h"
#include "libc/thread/lock.h"
#include "libc/thread/tls.h"

#define MMDEBUG 0
#define MAX_SIZE 0x0ff800000000ul

#define MAP_FIXED_NOREPLACE_linux 0x100000

#define PGUP(x) (((x) + pagesz - 1) & -pagesz)
#define PGUP(x) (((x) + __pagesize - 1) & -__pagesize)
#define GRUP(x) (((x) + __gransize - 1) & -__gransize)

#define MASQUE 0x00fffffffffffff8
#define PTR(x) ((uintptr_t)(x) & MASQUE)

@@ -88,7 +91,6 @@ privileged optimizespeed struct Map *__maps_floor(const char *addr) {
}

static bool __maps_overlaps(const char *addr, size_t size) {
int pagesz = __pagesize;
struct Map *map, *floor = __maps_floor(addr);
for (map = floor; map && map->addr <= addr + size; map = __maps_next(map))
if (MAX(addr, map->addr) <

@@ -101,7 +103,6 @@ void __maps_check(void) {
#if MMDEBUG
size_t maps = 0;
size_t pages = 0;
int pagesz = __pagesize;
static unsigned mono;
unsigned id = ++mono;
for (struct Map *map = __maps_first(); map; map = __maps_next(map)) {

@@ -109,7 +110,7 @@ void __maps_check(void) {
ASSERT(map->visited != id);
ASSERT(map->size);
map->visited = id;
pages += (map->size + pagesz - 1) / pagesz;
pages += (map->size + __pagesize - 1) / __pagesize;
maps += 1;
struct Map *next;
if ((next = __maps_next(map))) {

@@ -123,110 +124,98 @@ void __maps_check(void) {
#endif
}

static int __muntrack(char *addr, size_t size, int pagesz,
struct Map **deleted) {
static int __muntrack(char *addr, size_t size, struct Map **deleted,
struct Map **untracked, struct Map temp[2]) {
int rc = 0;
size_t ti = 0;
struct Map *map;
struct Map *next;
struct Map *floor;
StartOver:
size = PGUP(size);
floor = __maps_floor(addr);
for (map = floor; map && map->addr <= addr + size; map = next) {
next = __maps_next(map);
char *map_addr = map->addr;
size_t map_size = map->size;
if (!(MAX(addr, map_addr) <
MIN(addr + PGUP(size), map_addr + PGUP(map_size))))
if (!(MAX(addr, map_addr) < MIN(addr + size, map_addr + PGUP(map_size))))
continue;
if (addr <= map_addr && addr + PGUP(size) >= map_addr + PGUP(map_size)) {
if (addr <= map_addr && addr + size >= map_addr + PGUP(map_size)) {
if (map->precious)
continue;
// remove mapping completely
tree_remove(&__maps.maps, &map->tree);
map->freed = *deleted;
*deleted = map;
__maps.pages -= (map_size + pagesz - 1) / pagesz;
__maps.pages -= (map_size + __pagesize - 1) / __pagesize;
__maps.count -= 1;
__maps_check();
} else if (IsWindows()) {
STRACE("you can't carve up memory maps on windows ;_;");
rc = einval();
rc = enotsup();
} else if (addr <= map_addr) {
// shave off lefthand side of mapping
ASSERT(addr + PGUP(size) < map_addr + PGUP(map_size));
size_t left = addr + PGUP(size) - map_addr;
ASSERT(addr + size < map_addr + PGUP(map_size));
size_t left = addr + size - map_addr;
size_t right = map_size - left;
ASSERT(right > 0);
ASSERT(left > 0);
struct Map *leftmap;
if ((leftmap = __maps_alloc())) {
if (leftmap == MAPS_RETRY)
goto StartOver;
map->addr += left;
map->size = right;
if (!(map->flags & MAP_ANONYMOUS))
map->off += left;
__maps.pages -= (left + pagesz - 1) / pagesz;
leftmap->addr = map_addr;
leftmap->size = left;
leftmap->freed = *deleted;
*deleted = leftmap;
__maps_check();
} else {
rc = -1;
map->addr += left;
map->size = right;
if (!(map->flags & MAP_ANONYMOUS))
map->off += left;
__maps.pages -= (left + __pagesize - 1) / __pagesize;
if (untracked) {
ASSERT(ti < 2);
temp[ti].addr = map_addr;
temp[ti].size = left;
temp[ti].freed = *untracked;
*untracked = temp;
++ti;
}
} else if (addr + PGUP(size) >= map_addr + PGUP(map_size)) {
__maps_check();
} else if (addr + size >= map_addr + PGUP(map_size)) {
// shave off righthand side of mapping
size_t left = addr - map_addr;
size_t right = map_addr + map_size - addr;
struct Map *rightmap;
if ((rightmap = __maps_alloc())) {
if (rightmap == MAPS_RETRY)
goto StartOver;
map->size = left;
__maps.pages -= (right + pagesz - 1) / pagesz;
rightmap->addr = addr;
rightmap->size = right;
rightmap->freed = *deleted;
*deleted = rightmap;
__maps_check();
} else {
rc = -1;
map->size = left;
__maps.pages -= (right + __pagesize - 1) / __pagesize;
if (untracked) {
ASSERT(ti < 2);
temp[ti].addr = addr;
temp[ti].size = right;
temp[ti].freed = *untracked;
*untracked = temp;
++ti;
}
__maps_check();
} else {
// punch hole in mapping
size_t left = addr - map_addr;
size_t middle = PGUP(size);
size_t middle = size;
size_t right = map_size - middle - left;
struct Map *leftmap;
if ((leftmap = __maps_alloc())) {
if (leftmap == MAPS_RETRY)
goto StartOver;
struct Map *middlemap;
if ((middlemap = __maps_alloc())) {
if (middlemap == MAPS_RETRY) {
__maps_free(leftmap);
goto StartOver;
}
leftmap->addr = map_addr;
leftmap->size = left;
leftmap->off = map->off;
leftmap->prot = map->prot;
leftmap->flags = map->flags;
map->addr += left + middle;
map->size = right;
if (!(map->flags & MAP_ANONYMOUS))
map->off += left + middle;
tree_insert(&__maps.maps, &leftmap->tree, __maps_compare);
__maps.pages -= (middle + pagesz - 1) / pagesz;
__maps.count += 1;
middlemap->addr = addr;
middlemap->size = size;
middlemap->freed = *deleted;
*deleted = middlemap;
__maps_check();
} else {
__maps_free(leftmap);
rc = -1;
leftmap->addr = map_addr;
leftmap->size = left;
leftmap->off = map->off;
leftmap->prot = map->prot;
leftmap->flags = map->flags;
map->addr += left + middle;
map->size = right;
if (!(map->flags & MAP_ANONYMOUS))
map->off += left + middle;
tree_insert(&__maps.maps, &leftmap->tree, __maps_compare);
__maps.pages -= (middle + __pagesize - 1) / __pagesize;
__maps.count += 1;
if (untracked) {
ASSERT(ti < 2);
temp[ti].addr = addr;
temp[ti].size = size;
temp[ti].freed = *untracked;
*untracked = temp;
++ti;
}
__maps_check();
} else {
rc = -1;
}

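Note: as shown above, __muntrack() no longer takes a page size and no longer calls __maps_alloc() for pieces it shaves off; the caller passes a temp[2] scratch array and an untracked list, so carving the left or right remainder of a mapping cannot fail for lack of memory. A simplified sketch of the underlying interval arithmetic, using hypothetical plain structs rather than the real tree nodes:

#include <stddef.h>

struct Piece { size_t addr, size; };

// Remove [addr, addr+size) from the mapping [ma, ma+ms), assuming both are
// page-rounded and that the two ranges overlap. Reports what survives in
// out[0..1] and returns how many pieces were kept: 0 = fully removed,
// 1 = left or right shave, 2 = a hole was punched in the middle.
static int carve(size_t ma, size_t ms, size_t addr, size_t size,
                 struct Piece out[2]) {
  int n = 0;
  if (addr > ma)              // left part of the mapping survives
    out[n++] = (struct Piece){ma, addr - ma};
  if (addr + size < ma + ms)  // right part of the mapping survives
    out[n++] = (struct Piece){addr + size, ma + ms - (addr + size)};
  return n;
}
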
@@ -258,13 +247,33 @@ static void __maps_free_all(struct Map *list) {
}
}

static int __maps_funge_prot(int prot) {
prot &= ~MAP_FIXED;
prot &= ~MAP_FIXED_NOREPLACE;
return prot;
static void __maps_insert_all(struct Map *list) {
struct Map *next;
for (struct Map *map = list; map; map = next) {
next = map->freed;
__maps_insert(map);
}
}

static int __maps_destroy_all(struct Map *list) {
int rc = 0;
for (struct Map *map = list; map; map = map->freed) {
if (!IsWindows()) {
if (sys_munmap(map->addr, map->size))
rc = -1;
} else if (map->hand != -1) {
if (!UnmapViewOfFile(map->addr))
rc = -1;
if (!CloseHandle(map->hand))
rc = -1;
}
}
return rc;
}

static int __maps_funge_flags(int flags) {
flags &= ~MAP_FIXED;
flags &= ~MAP_FIXED_NOREPLACE;
if ((flags & MAP_TYPE) == MAP_SHARED_VALIDATE) {
flags &= ~MAP_TYPE;
flags |= MAP_SHARED;

@@ -280,20 +289,20 @@ static bool __maps_fungible(const struct Map *map) {
}

static bool __maps_adjacent(const struct Map *x, const struct Map *y) {
char *a = x->addr + ((x->size + __pagesize - 1) & -__pagesize);
char *a = x->addr + PGUP(x->size);
char *b = y->addr;
ASSERT(a <= b);
return a == b;
}

static bool __maps_mergeable(const struct Map *x, const struct Map *y) {
if (!__maps_adjacent(x, y))
return false;
if (!__maps_fungible(x))
return false;
if (!__maps_fungible(y))
return false;
if (!__maps_adjacent(x, y))
return false;
if (__maps_funge_prot(x->prot) != __maps_funge_prot(y->prot))
if (x->prot != y->prot)
return false;
if (__maps_funge_flags(x->flags) != __maps_funge_flags(y->flags))
return false;

@@ -304,7 +313,6 @@ void __maps_insert(struct Map *map) {
struct Map *left, *right;
ASSERT(map->size);
ASSERT(!__maps_overlaps(map->addr, map->size));
map->flags &= MAP_TYPE | MAP_ANONYMOUS | MAP_NOFORK;
__maps.pages += (map->size + __pagesize - 1) / __pagesize;

// find adjacent mappings

@@ -317,8 +325,7 @@ void __maps_insert(struct Map *map) {
// avoid insert by making mapping on left bigger
if (left)
if (__maps_mergeable(left, map)) {
left->size += __pagesize - 1;
left->size &= -__pagesize;
left->size = PGUP(left->size);
left->size += map->size;
__maps_free(map);
map = 0;

@@ -327,8 +334,7 @@ void __maps_insert(struct Map *map) {
// avoid insert by making mapping on right bigger
if (map && right)
if (__maps_mergeable(map, right)) {
map->size += __pagesize - 1;
map->size &= -__pagesize;
map->size = PGUP(map->size);
right->addr -= map->size;
right->size += map->size;
__maps_free(map);

@@ -338,14 +344,12 @@ void __maps_insert(struct Map *map) {
// check if we filled a hole
if (!map && left && right)
if (__maps_mergeable(left, right)) {
left->size += __pagesize - 1;
left->size &= -__pagesize;
left->size = PGUP(left->size);
right->addr -= left->size;
right->size += left->size;
tree_remove(&__maps.maps, &left->tree);
__maps.count -= 1;
__maps_free(left);
map = 0;
__maps.count -= 1;
}

// otherwise just insert

@@ -356,26 +360,19 @@ void __maps_insert(struct Map *map) {
__maps_check();
}

static void __maps_track_insert(struct Map *map, char *addr, size_t size,
uintptr_t map_handle, int prot, int flags) {
// adds interval to rbtree (no sys_mmap)
bool __maps_track(char *addr, size_t size, int prot, int flags) {
struct Map *map;
if (!(map = __maps_alloc()))
return false;
map->addr = addr;
map->size = size;
map->prot = prot;
map->flags = flags;
map->hand = map_handle;
map->hand = -1;
__maps_lock();
__maps_insert(map);
__maps_unlock();
}

// adds interval to rbtree (no sys_mmap)
bool __maps_track(char *addr, size_t size, int prot, int flags) {
struct Map *map;
do {
if (!(map = __maps_alloc()))
return false;
} while (map == MAPS_RETRY);
__maps_track_insert(map, addr, size, -1, prot, flags);
return true;
}

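Note: __maps_insert() above coalesces the new mapping with its neighbors when possible; after this change two entries merge only if they are page-adjacent and their prot and flags match exactly (the __maps_funge_prot() normalization is gone). A small self-contained sketch of that adjacency-plus-compatibility test on a toy interval record:

#include <stdbool.h>
#include <stddef.h>

struct Interval {
  size_t addr, size;  // size not yet page-rounded
  int prot, flags;
};

#define PAGE 4096
#define PGUP(x) (((x) + PAGE - 1) & -(size_t)PAGE)

// like __maps_adjacent(): x must end exactly where y begins
static bool adjacent(const struct Interval *x, const struct Interval *y) {
  return x->addr + PGUP(x->size) == y->addr;
}

// like __maps_mergeable(): adjacency plus identical protections and flags
static bool mergeable(const struct Interval *x, const struct Interval *y) {
  return adjacent(x, y) && x->prot == y->prot && x->flags == y->flags;
}
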
@@ -383,7 +380,7 @@ bool __maps_track(char *addr, size_t size, int prot, int flags) {
int __maps_untrack(char *addr, size_t size) {
struct Map *deleted = 0;
__maps_lock();
int rc = __muntrack(addr, size, __pagesize, &deleted);
int rc = __muntrack(addr, size, &deleted, 0, 0);
__maps_unlock();
__maps_free_all(deleted);
return rc;

@@ -399,29 +396,22 @@ struct Map *__maps_alloc(void) {
return map;
pthread_pause_np();
}
void *mark;
int size = 65536;
__maps_lock();
do {
// we're creating sudden surprise memory. the user might be in the
// middle of carefully planning a fixed memory structure. we don't
// want the system allocator to put our surprise memory inside it.
mark = __maps_randaddr();
} while (__maps_overlaps(mark, size));
struct DirectMap sys = sys_mmap(mark, size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (sys.addr == MAP_FAILED) {
__maps_unlock();
// we're creating sudden surprise memory. the user might be in the
// middle of carefully planning a fixed memory structure. we don't
// want the system allocator to put our surprise memory inside it,
// and we also want to avoid the chances of accidentally unmapping
struct DirectMap sys =
sys_mmap(__maps_randaddr(), size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (sys.addr == MAP_FAILED)
return 0;
}
map = sys.addr;
__maps_track_insert(map, sys.addr, size, sys.maphandle,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_NOFORK);
__maps_unlock();
if (IsWindows())
CloseHandle(sys.maphandle);
for (int i = 1; i < size / sizeof(struct Map); ++i)
__maps_free(map + i);
return MAPS_RETRY;
return map;
}

static int __munmap(char *addr, size_t size) {

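Note: __maps_alloc() above drops the MAPS_RETRY protocol; rather than seeding the free list and returning a sentinel that forced callers into do/while retry loops, it now maps a 64 KiB slab, donates every slot but the first to the free list, and returns that first slot directly. A rough sketch of the slab-seeding idea, with malloc() standing in for the anonymous sys_mmap() and illustrative names:

#include <stdlib.h>

struct Slot { struct Slot *next_free; char payload[56]; };

static struct Slot *free_slots;

static void slot_free(struct Slot *s) {
  s->next_free = free_slots;
  free_slots = s;
}

static struct Slot *slot_alloc(void) {
  if (free_slots) {                        // fast path: pop the free list
    struct Slot *s = free_slots;
    free_slots = s->next_free;
    return s;
  }
  enum { SLAB = 65536 / sizeof(struct Slot) };
  struct Slot *slab = malloc(SLAB * sizeof(struct Slot));
  if (!slab) return 0;
  for (size_t i = 1; i < SLAB; ++i)        // donate all but the first slot
    slot_free(slab + i);
  return slab;                             // no RETRY sentinel needed
}
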
@@ -431,41 +421,33 @@ static int __munmap(char *addr, size_t size) {
!size || (uintptr_t)addr + size < size)
return einval();

// test for signal handler tragedy
if (__maps_held())
return edeadlk();

// lock the memory manager
__maps_lock();
__maps_check();

// normalize size
// abort if size doesn't include all pages in granule
size_t pgup_size = (size + __pagesize - 1) & -__pagesize;
size_t grup_size = (size + __gransize - 1) & -__gransize;
if (grup_size > pgup_size)
if (__maps_overlaps(addr + pgup_size, grup_size - pgup_size)) {
if (GRUP(size) > PGUP(size))
if (__maps_overlaps(addr + PGUP(size), GRUP(size) - PGUP(size))) {
__maps_unlock();
return einval();
}

// untrack mappings
int rc;
struct Map temp[2];
struct Map *deleted = 0;
rc = __muntrack(addr, pgup_size, __pagesize, &deleted);
struct Map *untracked = 0;
rc = __muntrack(addr, size, &deleted, &untracked, temp);
__maps_unlock();

// delete mappings
for (struct Map *map = deleted; map; map = map->freed) {
if (!IsWindows()) {
if (sys_munmap(map->addr, map->size))
rc = -1;
} else if (map->hand != -1) {
ASSERT(!((uintptr_t)map->addr & (__gransize - 1)));
if (!UnmapViewOfFile(map->addr))
rc = -1;
if (!CloseHandle(map->hand))
rc = -1;
}
}

// freed mappings
// ask operating system to remove mappings
rc |= __maps_destroy_all(untracked);
rc |= __maps_destroy_all(deleted);
__maps_free_all(deleted);

return rc;

@@ -485,14 +467,13 @@ void *__maps_randaddr(void) {
static void *__maps_pickaddr(size_t size) {
char *addr = 0;
struct Map *map, *prev;
size += __gransize - 1;
size &= -__gransize;
size = GRUP(size);
if ((map = __maps_last())) {
// choose address beneath higher mapping
for (; map; map = prev) {
char *min = (char *)(intptr_t)__gransize;
if ((prev = __maps_prev(map)))
min = prev->addr + ((prev->size + __gransize - 1) & -__gransize);
min = prev->addr + GRUP(prev->size);
if (map->addr > min && //
map->addr - min >= size) {
addr = map->addr - size;

@@ -502,7 +483,7 @@ static void *__maps_pickaddr(size_t size) {
// append if existing maps are too dense
if (!addr) {
map = __maps_last();
addr = map->addr + ((map->size + __gransize - 1) & -__gransize);
addr = map->addr + GRUP(map->size);
intptr_t end = (intptr_t)addr;
if (ckd_add(&end, end, size))
return 0;

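Note: __maps_pickaddr() above, now written with GRUP(), walks the mappings from the top downward looking for a granule-aligned gap big enough for the request, and appends past the last mapping when everything is too dense. A compact sketch of that gap search over a sorted array of reserved intervals (illustrative types, granule fixed at 64 KiB):

#include <stddef.h>

struct Reserved { size_t addr, size; };

#define GRAN 65536
#define GRUP(x) (((x) + GRAN - 1) & -(size_t)GRAN)

// Scan reserved[0..n) (sorted by addr) from the top looking for a gap that
// can hold `size` bytes beneath an existing reservation; otherwise append
// after the last one. Returns 0 if the address space would overflow.
static size_t pick_addr(const struct Reserved *reserved, size_t n,
                        size_t size) {
  size = GRUP(size);
  for (size_t i = n; i-- > 0;) {
    size_t min = i ? reserved[i - 1].addr + GRUP(reserved[i - 1].size) : GRAN;
    if (reserved[i].addr > min && reserved[i].addr - min >= size)
      return reserved[i].addr - size;  // fits beneath this mapping
  }
  if (!n) return GRAN;
  size_t end = reserved[n - 1].addr + GRUP(reserved[n - 1].size);
  return end + size < end ? 0 : end;   // append, checking for wraparound
}
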
@@ -518,7 +499,12 @@ static void *__mmap_impl(char *addr, size_t size, int prot, int flags, int fd,
int64_t off) {

// validate file map args
if (!(flags & MAP_ANONYMOUS)) {
if (flags & MAP_ANONYMOUS) {
// some operating systems will complain unless we do this
fd = -1;
off = 0;
} else {
// validate arguments for file mapping
if (off & (__gransize - 1))
return (void *)einval();
if (IsWindows()) {

@@ -531,10 +517,8 @@ static void *__mmap_impl(char *addr, size_t size, int prot, int flags, int fd,

// allocate Map object
struct Map *map;
do {
if (!(map = __maps_alloc()))
return MAP_FAILED;
} while (map == MAPS_RETRY);
if (!(map = __maps_alloc()))
return MAP_FAILED;

// polyfill nuances of fixed mappings
int sysflags = flags;

@@ -588,18 +572,29 @@ static void *__mmap_impl(char *addr, size_t size, int prot, int flags, int fd,
}
} else {
// remove existing mappings and their tracking objects
if (__munmap(addr, size)) {
struct Map *deleted = 0;
if (__muntrack(addr, size, &deleted, 0, 0)) {
__maps_insert_all(deleted);
__maps_unlock();
__maps_free(map);
return (void *)enomem();
return MAP_FAILED;
}
int rc = __maps_destroy_all(deleted);
__maps_free_all(deleted);
if (rc) {
__maps_unlock();
__maps_free(map);
return (void *)eperm();
}
}
// claims intended interval while still holding the lock
if (!__maps_track(addr, size, 0, 0)) {
__maps_unlock();
__maps_free(map);
return (void *)enomem();
}
map->addr = addr;
map->size = size;
map->prot = 0;
map->flags = 0;
map->hand = -1;
map->precious = true;
__maps_insert(map);
__maps_unlock();
}

@@ -611,15 +606,19 @@ static void *__mmap_impl(char *addr, size_t size, int prot, int flags, int fd,

// handle failure
if (IsWindows()) {
// untrack reservation
__maps_lock();
tree_remove(&__maps.maps, &map->tree);
__maps.pages -= (map->size + __pagesize - 1) / __pagesize;
map->precious = false;
__maps_unlock();
if (errno == EADDRNOTAVAIL) {
// we've encountered mystery memory
if (fixedmode) {
// TODO(jart): Use VirtualQuery() to destroy mystery memory.
__maps_untrack(addr, size);
errno = ENOMEM;
} else if (noreplace) {
// we can't try again with a different address in this case
__maps_untrack(addr, size);
errno = EEXIST;
} else {
// we shall leak the tracking object since it should at least

@@ -629,8 +628,6 @@ static void *__mmap_impl(char *addr, size_t size, int prot, int flags, int fd,
addr = 0;
continue;
}
} else {
__maps_untrack(addr, size);
}
}
__maps_free(map);

@@ -652,6 +649,7 @@ static void *__mmap_impl(char *addr, size_t size, int prot, int flags, int fd,
map->prot = prot;
map->flags = flags;
map->hand = res.maphandle;
map->precious = false;
if (IsWindows()) {
map->iscow = (flags & MAP_TYPE) != MAP_SHARED && fd != -1;
map->readonlyfile = (flags & MAP_TYPE) == MAP_SHARED && fd != -1 &&

@@ -659,11 +657,18 @@ static void *__mmap_impl(char *addr, size_t size, int prot, int flags, int fd,
}

// track map object
__maps_lock();
if (IsWindows() || fixedmode)
__maps_untrack(res.addr, size);
__maps_insert(map);
__maps_unlock();
if (!IsWindows()) {
struct Map *deleted = 0;
__maps_lock();
if (IsWindows() || fixedmode)
if (__muntrack(res.addr, size, &deleted, 0, 0))
STRACE("memtrack compromised by hole punch oom");
__maps_insert(map);
__maps_unlock();
__maps_free_all(deleted);
} else {
atomic_thread_fence(memory_order_release);
}

return res.addr;
}

@@ -686,6 +691,10 @@ static void *__mmap(char *addr, size_t size, int prot, int flags, int fd,
if (__maps.count * __pagesize + size > __virtualmax)
return (void *)enomem();

// test for signal handler reentry
if (__maps_held())
return (void *)edeadlk();

// create memory mappping
if (!__isfdkind(fd, kFdZip)) {
res = __mmap_impl(addr, size, prot, flags, fd, off);

@@ -699,40 +708,32 @@ static void *__mmap(char *addr, size_t size, int prot, int flags, int fd,
}

static void *__mremap_impl(char *old_addr, size_t old_size, size_t new_size,
int flags, char *new_addr, int pagesz, int gransz) {
int flags, char *new_addr) {

// normalize and validate old size
// abort if size doesn't include all pages in granule
size_t pgup_old_size = (old_size + pagesz - 1) & -pagesz;
size_t grup_old_size = (old_size + gransz - 1) & -gransz;
if (grup_old_size > pgup_old_size)
if (__maps_overlaps(old_addr + pgup_old_size,
grup_old_size - pgup_old_size))
if (GRUP(old_size) > PGUP(old_size))
if (__maps_overlaps(old_addr + PGUP(old_size),
GRUP(old_size) - PGUP(old_size)))
return (void *)einval();
old_size = pgup_old_size;

// validate new size
// abort if size doesn't include all pages in granule
if (flags & MREMAP_FIXED) {
size_t pgup_new_size = (new_size + pagesz - 1) & -pagesz;
size_t grup_new_size = (new_size + gransz - 1) & -gransz;
if (grup_new_size > pgup_new_size)
if (__maps_overlaps(new_addr + pgup_new_size,
grup_new_size - pgup_new_size))
if (flags & MREMAP_FIXED)
if (GRUP(new_size) > PGUP(new_size))
if (__maps_overlaps(new_addr + PGUP(new_size),
GRUP(new_size) - PGUP(new_size)))
return (void *)einval();
}

// allocate object for tracking new mapping
struct Map *map;
do {
if (!(map = __maps_alloc()))
return (void *)enomem();
} while (map == MAPS_RETRY);
if (!(map = __maps_alloc()))
return (void *)enomem();

// check old interval is fully contained within one mapping
struct Map *old_map;
if (!(old_map = __maps_floor(old_addr)) ||
old_addr + old_size > old_map->addr + PGUP(old_map->size) ||
old_addr + PGUP(old_size) > old_map->addr + PGUP(old_map->size) ||
old_addr < old_map->addr) {
__maps_free(map);
return (void *)efault();

@@ -777,7 +778,7 @@ static void *__mremap_impl(char *old_addr, size_t old_size, size_t new_size,

// untrack old mapping
struct Map *deleted = 0;
__muntrack(old_addr, old_size, pagesz, &deleted);
__muntrack(old_addr, old_size, &deleted, 0, 0);
__maps_free_all(deleted);

// track map object

@@ -794,9 +795,6 @@ static void *__mremap_impl(char *old_addr, size_t old_size, size_t new_size,
static void *__mremap(char *old_addr, size_t old_size, size_t new_size,
int flags, char *new_addr) {

int pagesz = __pagesize;
int gransz = __gransize;

// kernel support
if (!IsLinux() && !IsNetbsd())
return (void *)enosys();

@@ -810,17 +808,16 @@ static void *__mremap(char *old_addr, size_t old_size, size_t new_size,
// we support these flags
if (flags & ~(MREMAP_MAYMOVE | MREMAP_FIXED))
return (void *)einval();
if (IsNetbsd() && !(flags & MREMAP_MAYMOVE) &&
((new_size + pagesz - 1) & -pagesz) > old_size)
if (IsNetbsd() && !(flags & MREMAP_MAYMOVE) && PGUP(new_size) > old_size)
return (void *)enotsup();
if ((flags & MREMAP_FIXED) && !(flags & MREMAP_MAYMOVE))
return (void *)einval();

// addresses must be granularity aligned
if ((uintptr_t)old_addr & (gransz - 1))
if ((uintptr_t)old_addr & (__gransize - 1))
return (void *)einval();
if (flags & MREMAP_FIXED)
if ((uintptr_t)new_addr & (gransz - 1))
if ((uintptr_t)new_addr & (__gransize - 1))
return (void *)einval();

// sizes must not be zero

@@ -850,20 +847,19 @@ static void *__mremap(char *old_addr, size_t old_size, size_t new_size,

// memory increase must not exceed RLIMIT_AS
if (PGUP(new_size) > old_size)
if (__maps.count * pagesz - old_size + PGUP(new_size) > __virtualmax)
if (__maps.count * __pagesize - old_size + PGUP(new_size) > __virtualmax)
return (void *)enomem();

// lock the memory manager
// abort on reentry due to signal handler
if (__maps_lock()) {
__maps_unlock();
// test for signal handler reentry
if (__maps_held())
return (void *)edeadlk();
}

// lock the memory manager
__maps_lock();
__maps_check();

// perform operation
char *res = __mremap_impl(old_addr, old_size, new_size, flags, new_addr,
pagesz, gransz);
char *res = __mremap_impl(old_addr, old_size, new_size, flags, new_addr);

// return result
__maps_unlock();

@@ -940,6 +936,24 @@ static void *__mremap(char *old_addr, size_t old_size, size_t new_size,
* The `MAP_CONCEAL` flag may be passed to prevent a memory mapping from
* appearing in core dumps. This is currently supported on BSD OSes, and
* is ignored on everything else.
*
* POSIX does not require mmap() to be asynchronous signal safe. But you
* should be able to call this from a signal handler safely, if you know
* that your signal will never interrupt the cosmopolitan memory manager
* and the only way you can ensure that, is by blocking signals whenever
* you call mmap(), munmap(), mprotect(), etc.
*
* @raise ENOMEM if `RUSAGE_AS` or similar limits are exceeded
* @raise EEXIST if `flags` has `MAP_FIXED_NOREPLACE` and `addr` is used
* @raise EPERM if `addr` is null and `flags` has `MAP_FIXED`
* @raise ENOTSUP if memory map is cleaved on windows with `MAP_FIXED`
* @raise EINVAL if `addr` isn't granularity aligned with `MAP_FIXED`
* @raise EINVAL if `size` is zero
* @raise EINVAL if `flags` or `prot` hold invalid values
* @raise EACCESS if `fd` isn't a regular file
* @raise EACCESS if `fd` was opened in write-only mode
* @raise EACCESS if `off` isn't getgransize() aligned
* @raise EDEADLK if called from signal handler interrupting mmap()
*/
void *mmap(void *addr, size_t size, int prot, int flags, int fd, int64_t off) {
void *res = __mmap(addr, size, prot, flags, fd, off);

@@ -985,6 +999,11 @@ void *mremap(void *old_addr, size_t old_size, size_t new_size, int flags, ...) {
* The `size` parameter is implicitly rounded up to the page size.
*
* @return 0 on success, or -1 w/ errno.
* @raise ENOMEM if OOM happened when punching hole in existing mapping
* @raise ENOTSUP if memory map is cleaved on windows with `MAP_FIXED`
* @raise EDEADLK if called from signal handler interrupting mmap()
* @raise EINVAL if `addr` isn't granularity aligned
* @raise EINVAL if `size` didn't include all pages in granule
*/
int munmap(void *addr, size_t size) {
int rc = __munmap(addr, size);

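Note: the new paragraph in the mmap() documentation above spells out the signal-safety contract: these calls are only safe from a signal handler if that signal can never interrupt the memory manager, which in practice means blocking signals around every mmap()/munmap()/mprotect() call. A hedged example of doing that with standard POSIX signal masking (use pthread_sigmask() instead in threaded programs):

#include <signal.h>
#include <stddef.h>
#include <sys/mman.h>

// Map anonymous memory with all blockable signals masked for the duration
// of the call, so a handler that itself calls mmap() can never observe the
// memory manager mid-operation.
void *mmap_signal_safe(size_t size) {
  sigset_t all, old;
  sigfillset(&all);
  sigprocmask(SIG_BLOCK, &all, &old);
  void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  sigprocmask(SIG_SETMASK, &old, NULL);
  return p;  // MAP_FAILED on error, as usual
}
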
@@ -66,15 +66,15 @@ int __mprotect(char *addr, size_t size, int prot) {
// normalize size
size = (size + pagesz - 1) & -pagesz;

// test for signal handler reentry
if (__maps_held())
return edeadlk();

// change mappings
int rc = 0;
bool found = false;
if (__maps_lock()) {
__maps_unlock();
return edeadlk();
}
__maps_lock();
struct Map *map, *floor;
StartOver:
floor = __maps_floor(addr);
for (map = floor; map && map->addr <= addr + size; map = __maps_next(map)) {
char *map_addr = map->addr;

@@ -97,8 +97,6 @@ StartOver:
size_t right = map_size - left;
struct Map *leftmap;
if ((leftmap = __maps_alloc())) {
if (leftmap == MAPS_RETRY)
goto StartOver;
if (!__mprotect_chunk(map_addr, left, prot, false)) {
leftmap->addr = map_addr;
leftmap->size = left;

@@ -129,8 +127,6 @@ StartOver:
size_t right = map_addr + map_size - addr;
struct Map *leftmap;
if ((leftmap = __maps_alloc())) {
if (leftmap == MAPS_RETRY)
goto StartOver;
if (!__mprotect_chunk(map_addr + left, right, prot, false)) {
leftmap->addr = map_addr;
leftmap->size = left;

@@ -163,14 +159,8 @@ StartOver:
size_t right = map_size - middle - left;
struct Map *leftmap;
if ((leftmap = __maps_alloc())) {
if (leftmap == MAPS_RETRY)
goto StartOver;
struct Map *midlmap;
if ((midlmap = __maps_alloc())) {
if (midlmap == MAPS_RETRY) {
__maps_free(leftmap);
goto StartOver;
}
if (!__mprotect_chunk(map_addr + left, middle, prot, false)) {
leftmap->addr = map_addr;
leftmap->size = left;

@@ -221,11 +211,20 @@ StartOver:
/**
* Modifies restrictions on virtual memory address range.
*
* @param addr needs to be 4kb aligned
* @param prot can have PROT_{NONE,READ,WRITE,EXEC}
* POSIX doesn't require mprotect() to be async signal safe. However you
* should be able to call this from a signal handler safely, if you know
* that your signal will never interrupt the cosmopolitan memory manager
* and the only way you can ensure that, is by blocking signals whenever
* you call mmap(), munmap(), mprotect(), etc.
*
* @param addr needs to be page size aligned
* @param size is rounded up to the page size
* @param prot can be PROT_NONE or a combination of PROT_READ,
* PROT_WRITE, and PROT_EXEC
* @return 0 on success, or -1 w/ errno
* @raise EINVAL if `size` is zero
* @raise ENOMEM on tracking memory oom
* @see mmap()
* @raise EDEADLK if called from signal handler interrupting mmap()
*/
int mprotect(void *addr, size_t size, int prot) {
int rc;

@@ -26,27 +26,24 @@
#include "libc/sysv/errfuns.h"

textwindows int sys_msync_nt(char *addr, size_t size, int flags) {
size = (size + __pagesize - 1) & -__pagesize;

int pagesz = __pagesize;
size = (size + pagesz - 1) & -pagesz;

if ((uintptr_t)addr & (pagesz - 1))
if ((uintptr_t)addr & (__pagesize - 1))
return einval();
if (__maps_held())
return edeadlk();

int rc = 0;
if (__maps_lock()) {
rc = edeadlk();
} else {
struct Map *map, *floor;
floor = __maps_floor(addr);
for (map = floor; map && map->addr <= addr + size; map = __maps_next(map)) {
char *beg = MAX(addr, map->addr);
char *end = MIN(addr + size, map->addr + map->size);
if (beg < end)
if (!FlushViewOfFile(beg, end - beg))
rc = -1;
// TODO(jart): FlushFileBuffers too on g_fds handle if MS_SYNC?
}
__maps_lock();
struct Map *map, *floor;
floor = __maps_floor(addr);
for (map = floor; map && map->addr <= addr + size; map = __maps_next(map)) {
char *beg = MAX(addr, map->addr);
char *end = MIN(addr + size, map->addr + map->size);
if (beg < end)
if (!FlushViewOfFile(beg, end - beg))
rc = -1;
// TODO(jart): FlushFileBuffers too on g_fds handle if MS_SYNC?
}
__maps_unlock();

@@ -38,6 +38,7 @@
* @param flags needs MS_ASYNC or MS_SYNC and can have MS_INVALIDATE
* @return 0 on success or -1 w/ errno
* @raise ECANCELED if thread was cancelled in masked mode
* @raise EDEADLK if called from signal handler interrupting mmap()
* @raise EINTR if we needed to block and a signal was delivered instead
* @raise EINVAL if `MS_SYNC` and `MS_ASYNC` were both specified
* @raise EINVAL if unknown `flags` were passed

@@ -588,6 +588,22 @@ textwindows static void __sig_unmaskable(struct SignalFrame *sf) {
DescribeBacktrace(
(struct StackFrame *)sf->ctx.uc_mcontext.gregs[REG_RBP]));

// kills process if the user did not specify a handler for this signal
// we also don't allow unmaskable signals to be ignored by the program
if (sf->rva == (intptr_t)SIG_DFL || //
sf->rva == (intptr_t)SIG_IGN)
__sig_death(sf->si.si_signo, "uncaught ");

// we kill the process if this thread's signal mask blocks this signal
// then we block some extra signals while executing the signal handler
struct CosmoTib *tib = __get_tls();
sigset_t blocksigs = __sighandmask[sf->si.si_signo];
if (!(sf->flags & SA_NODEFER))
blocksigs |= 1ull << (sf->si.si_signo - 1);
sf->ctx.uc_sigmask = atomic_fetch_or(&tib->tib_sigmask, blocksigs);
if (sf->ctx.uc_sigmask & (1ull << (sf->si.si_signo - 1)))
__sig_death(sf->si.si_signo, "masked ");

// this will restore the guard page if the user is using a sigaltstack
if (sf->si.si_errno == kNtStatusGuardPageViolation)
__sig_reguard(sf->si.si_addr);

@@ -620,22 +636,6 @@ __msabi HAIRY static unsigned __sig_crash(struct NtExceptionPointers *ep) {
if (flags & SA_RESETHAND)
__sighandrvas[sig] = (int32_t)(intptr_t)SIG_DFL;

// kills process if the user did not specify a handler for this signal
// we also don't allow unmaskable signals to be ignored by the program
if (rva == (intptr_t)SIG_DFL || //
rva == (intptr_t)SIG_IGN)
__sig_death(sig, "uncaught ");

// we kill the process if this thread's signal mask blocks this signal
// then we block some extra signals while executing the signal handler
struct CosmoTib *tib = __get_tls();
sigset_t blocksigs = __sighandmask[sig];
if (!(flags & SA_NODEFER))
blocksigs |= 1ull << (sig - 1);
sigset_t oldsigmask = atomic_fetch_or(&tib->tib_sigmask, blocksigs);
if (oldsigmask & (1ull << (sig - 1)))
__sig_death(sig, "masked ");

// we don't know if it is safe for signal handlers to longjmp() out of
// win32 vectored exception handlers so let's copy the machine context
// and tell win32 to restore control to __sig_unmaskable() which shall

@@ -643,6 +643,7 @@ __msabi HAIRY static unsigned __sig_crash(struct NtExceptionPointers *ep) {
// was caused by stack overflow, then we're literally executing inside
// the guard page so this code can't use more than 4096 bytes of stack
uintptr_t sp;
struct CosmoTib *tib = __get_tls();
if (__sig_should_use_altstack(flags, tib)) {
sp = (uintptr_t)tib->tib_sigstack_addr + tib->tib_sigstack_size;
} else {

@@ -654,7 +655,6 @@ __msabi HAIRY static unsigned __sig_crash(struct NtExceptionPointers *ep) {
struct SignalFrame *sf = (struct SignalFrame *)sp;
__repstosb(sf, 0, sizeof(*sf));
__sig_translate(&sf->ctx, ep->ContextRecord);
sf->ctx.uc_sigmask = oldsigmask;
sf->rva = rva;
sf->flags = flags;
sf->si.si_code = sic;

@@ -33,11 +33,13 @@
#include "libc/intrin/weaken.h"
#include "libc/mem/leaks.h"
#include "libc/nt/accounting.h"
#include "libc/nt/enum/heap.h"
#include "libc/nt/enum/processaccess.h"
#include "libc/nt/enum/processcreationflags.h"
#include "libc/nt/enum/status.h"
#include "libc/nt/enum/wait.h"
#include "libc/nt/events.h"
#include "libc/nt/memory.h"
#include "libc/nt/process.h"
#include "libc/nt/runtime.h"
#include "libc/nt/struct/filetime.h"

@@ -292,16 +294,9 @@ textwindows struct Proc *__proc_new(void) {
proc = PROC_CONTAINER(e);
dll_remove(&__proc.free, &proc->elem);
}
if (proc) {
bzero(proc, sizeof(*proc));
} else {
proc = mmap(0, sizeof(struct Proc), PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (proc == MAP_FAILED) {
enomem();
return 0;
}
}
if (!proc && !(proc = HeapAlloc(GetProcessHeap(), 0, sizeof(struct Proc))))
return 0;
bzero(proc, sizeof(*proc));
dll_init(&proc->elem);
return proc;
}

@@ -534,35 +534,31 @@ void BenchMmapPrivate(void) {
void *p;
p = mmap(0, (sizes[count] = rand() % (pagesz * 500)), PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
if (p == MAP_FAILED)
__builtin_trap();
ASSERT_NE(MAP_FAILED, p);
ptrs[count] = p;
++count;
}

void BenchUnmap(void) {
--count;
if (munmap(ptrs[count], sizes[count]))
__builtin_trap();
ASSERT_SYS(0, 0, munmap(ptrs[count], sizes[count]));
}

void BenchBigMmap(void) {
void *p;
p = mmap(0, 101 * 1024 * 1024, PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
if (p == MAP_FAILED)
__builtin_trap();
ASSERT_NE(MAP_FAILED, p);
ptrs[count++] = p;
}

void BenchBigMunmap(void) {
if (munmap(ptrs[--count], 101 * 1024 * 1024))
__builtin_trap();
ASSERT_SYS(0, 0, munmap(ptrs[--count], 101 * 1024 * 1024));
}

TEST(mmap, bench) {
BENCHMARK(N, 1, BenchMmapPrivate());
BENCHMARK(N, 1, BenchUnmap());
// BENCHMARK(N, 1, BenchBigMmap());
// BENCHMARK(N, 1, BenchBigMunmap());
/* BENCHMARK(N, 1, BenchBigMmap()); */
/* BENCHMARK(N, 1, BenchBigMunmap()); */
}

@@ -53,26 +53,106 @@ TEST(munmap, test) {
EXPECT_FALSE(testlib_memoryexists(p));
}

TEST(munmap, carveMemory) {
if (IsWindows())
return; // needs carving
char *p;
int count = __maps.count;
ASSERT_NE(MAP_FAILED,
(p = mmap(__maps_randaddr(), gransz * 3, PROT_READ | PROT_WRITE,
MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0)));
EXPECT_EQ(count + 1, __maps.count);
count = __maps.count;
EXPECT_TRUE(testlib_memoryexists(p + gransz * 0));
EXPECT_TRUE(testlib_memoryexists(p + gransz * 1));
EXPECT_TRUE(testlib_memoryexists(p + gransz * 2));
EXPECT_SYS(0, 0, munmap(p + gransz * 0, gransz));
EXPECT_EQ(count + 0, __maps.count);
count = __maps.count;
EXPECT_FALSE(testlib_memoryexists(p + gransz * 0));
EXPECT_TRUE(testlib_memoryexists(p + gransz * 1));
EXPECT_TRUE(testlib_memoryexists(p + gransz * 2));
EXPECT_SYS(0, 0, munmap(p + gransz * 2, gransz));
EXPECT_EQ(count + 0, __maps.count);
count = __maps.count;
EXPECT_FALSE(testlib_memoryexists(p + gransz * 0));
EXPECT_TRUE(testlib_memoryexists(p + gransz * 1));
EXPECT_FALSE(testlib_memoryexists(p + gransz * 2));
EXPECT_SYS(0, 0, munmap(p + gransz * 1, gransz));
EXPECT_EQ(count - 1, __maps.count);
count = __maps.count;
EXPECT_FALSE(testlib_memoryexists(p + gransz * 0));
EXPECT_FALSE(testlib_memoryexists(p + gransz * 1));
EXPECT_FALSE(testlib_memoryexists(p + gransz * 2));
}

TEST(munmap, punchHoleInMemory) {
if (IsWindows())
return; // needs carving
char *p;
ASSERT_NE(MAP_FAILED, (p = mmap(0, gransz * 3, PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0)));
int count = __maps.count;
ASSERT_NE(MAP_FAILED,
(p = mmap(__maps_randaddr(), gransz * 3, PROT_READ | PROT_WRITE,
MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0)));
EXPECT_EQ(count + 1, __maps.count);
count = __maps.count;
EXPECT_TRUE(testlib_memoryexists(p + gransz * 0));
EXPECT_TRUE(testlib_memoryexists(p + gransz * 1));
EXPECT_TRUE(testlib_memoryexists(p + gransz * 2));
EXPECT_SYS(0, 0, munmap(p + gransz, gransz));
EXPECT_EQ(count + 1, __maps.count);
count = __maps.count;
EXPECT_TRUE(testlib_memoryexists(p + gransz * 0));
EXPECT_FALSE(testlib_memoryexists(p + gransz * 1));
EXPECT_TRUE(testlib_memoryexists(p + gransz * 2));
EXPECT_SYS(0, 0, munmap(p, gransz));
EXPECT_EQ(count - 1, __maps.count);
count = __maps.count;
EXPECT_SYS(0, 0, munmap(p + gransz * 2, gransz));
EXPECT_EQ(count - 1, __maps.count);
count = __maps.count;
EXPECT_FALSE(testlib_memoryexists(p + gransz * 0));
EXPECT_FALSE(testlib_memoryexists(p + gransz * 1));
EXPECT_FALSE(testlib_memoryexists(p + gransz * 2));
}

TEST(munmap, fillHoleInMemory) {
if (IsWindows())
return; // needs fungible memory
int count = __maps.count;
char *base = __maps_randaddr();
EXPECT_EQ(base + gransz * 0,
mmap(base + gransz * 0, gransz, PROT_READ | PROT_WRITE,
MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0));
EXPECT_EQ(count + 1, __maps.count);
count = __maps.count;
EXPECT_TRUE(testlib_memoryexists(base + gransz * 0));
EXPECT_FALSE(testlib_memoryexists(base + gransz * 1));
EXPECT_FALSE(testlib_memoryexists(base + gransz * 2));
EXPECT_EQ(base + gransz * 2,
mmap(base + gransz * 2, gransz, PROT_READ | PROT_WRITE,
MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0));
EXPECT_EQ(count + 1, __maps.count);
count = __maps.count;
EXPECT_TRUE(testlib_memoryexists(base + gransz * 0));
EXPECT_FALSE(testlib_memoryexists(base + gransz * 1));
EXPECT_TRUE(testlib_memoryexists(base + gransz * 2));
EXPECT_EQ(base + gransz * 1,
mmap(base + gransz * 1, gransz, PROT_READ | PROT_WRITE,
MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0));
EXPECT_EQ(count - 1, __maps.count);
count = __maps.count;
EXPECT_TRUE(testlib_memoryexists(base + gransz * 0));
EXPECT_TRUE(testlib_memoryexists(base + gransz * 1));
EXPECT_TRUE(testlib_memoryexists(base + gransz * 2));
EXPECT_SYS(0, 0, munmap(base, gransz * 3));
EXPECT_EQ(count - 1, __maps.count);
count = __maps.count;
EXPECT_FALSE(testlib_memoryexists(base + gransz * 0));
EXPECT_FALSE(testlib_memoryexists(base + gransz * 1));
EXPECT_FALSE(testlib_memoryexists(base + gransz * 2));
}

TEST(munmap, memoryHasHole) {
if (IsWindows())
return; // needs carving