Fix bugs with new memory manager

This fixes a regression in mmap(MAP_FIXED) on Windows caused by a recent
revision. This change also fixes ZipOS so it no longer needs a MAP_FIXED
mapping to open files from the PKZIP store. The memory mapping mutex was
implemented incorrectly earlier, which meant that ftrace and strace could
cause crashes. This lock and other recursive mutexes have been rewritten
so that it should be provable that recursive mutexes in cosmopolitan are
asynchronous signal safe.
Justine Tunney 2024-06-29 05:10:15 -07:00
parent 6de12c1032
commit 464858dbb4
34 changed files with 353 additions and 313 deletions
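
Note: the common thread in the mutex changes below is that all mutex state now lives in a single atomic 64-bit word (see the new libc/thread/lock.h), so every lock, unlock, and recursion-depth update is one compare-and-swap, with no separate owner/depth fields that a signal handler could observe half-written. A rough sketch of the encoding, using the macros this commit introduces (illustrative only, example tid):

/* per the MUTEX_* macros in libc/thread/lock.h:          */
/* bits 0-1: type, bit 2: pshared, bit 3: locked,         */
/* bits 4-9: recursion depth, bits 32-63: owner tid       */
uint64_t word = MUTEX_SET_TYPE(0, PTHREAD_MUTEX_RECURSIVE);
word = MUTEX_LOCK(word);          /* set the locked bit   */
word = MUTEX_INC_DEPTH(word);     /* bump recursion depth */
word = MUTEX_SET_OWNER(word, 42); /* owner tid 42         */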

View file

@ -2,17 +2,30 @@
#define COSMOPOLITAN_LIBC_CALLS_STRUCT_SIGSET_INTERNAL_H_
#include "libc/calls/struct/sigset.h"
#include "libc/mem/alloca.h"
#include "libc/sysv/consts/sig.h"
COSMOPOLITAN_C_START_
#ifndef MODE_DBG
/* block sigs because theoretical edge cases */
#define BLOCK_SIGNALS \
do { \
sigset_t _SigMask; \
_SigMask = __sig_block()
#define ALLOW_SIGNALS \
__sig_unblock(_SigMask); \
} \
while (0)
#else
/* doesn't block signals so we can get a crash
report, when a core runtime library crashes */
#define BLOCK_SIGNALS \
do { \
sigset_t _SigMask; \
sigprocmask(SIG_SETMASK, 0, &_SigMask)
#define ALLOW_SIGNALS \
} \
while (0)
#endif
sigset_t __sig_block(void);
void __sig_unblock(sigset_t);
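
A hedged usage sketch (hypothetical function name): the macros must appear as a matched pair in one scope, since BLOCK_SIGNALS opens the do { ... } while (0) statement that ALLOW_SIGNALS closes, and _SigMask is declared inside that block.

/* hypothetical example of the bracket-pair usage */
static void example_critical_section(void) {
  BLOCK_SIGNALS;
  /* work that must not be interrupted by signal handlers */
  ALLOW_SIGNALS;
}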

View file

@ -19,4 +19,4 @@
#include "libc/calls/state.internal.h"
#include "libc/thread/thread.h"
pthread_mutex_t __fds_lock_obj = {._type = PTHREAD_MUTEX_RECURSIVE};
pthread_mutex_t __fds_lock_obj = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;

View file

@ -20,14 +20,10 @@
#include "ape/sections.internal.h"
#include "libc/dce.h"
#include "libc/intrin/dll.h"
#include "libc/intrin/kprintf.h"
#include "libc/intrin/strace.internal.h"
#include "libc/runtime/runtime.h"
#include "libc/runtime/stack.h"
#include "libc/sysv/consts/auxv.h"
#include "libc/sysv/consts/prot.h"
#include "libc/thread/thread.h"
#include "libc/thread/tls.h"
#ifdef __x86_64__
__static_yoink("_init_maps");
@ -73,7 +69,7 @@ privileged void __maps_lock(void) {
if (!__tls_enabled)
return;
tib = __get_tls_privileged();
if (tib->tib_flags & TIB_FLAG_MAPLOCK)
if (tib->tib_relock_maps++)
return;
while (atomic_exchange_explicit(&__maps.lock, 1, memory_order_acquire)) {
#if defined(__GNUC__) && defined(__aarch64__)
@ -82,14 +78,15 @@ privileged void __maps_lock(void) {
__asm__ volatile("pause");
#endif
}
tib->tib_flags |= TIB_FLAG_MAPLOCK;
}
privileged void __maps_unlock(void) {
struct CosmoTib *tib;
atomic_store_explicit(&__maps.lock, 0, memory_order_release);
if (__tls_enabled) {
tib = __get_tls_privileged();
tib->tib_flags &= ~TIB_FLAG_MAPLOCK;
}
if (!__threaded)
return;
if (!__tls_enabled)
return;
tib = __get_tls_privileged();
if (!--tib->tib_relock_maps)
atomic_store_explicit(&__maps.lock, 0, memory_order_release);
}
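
In effect __maps_lock() is now a reentrant spinlock: tib_relock_maps counts per-thread nesting, only the outermost acquire spins on __maps.lock, and only the outermost release stores 0. A condensed sketch of the pattern (illustrative, not the exact code above):

/* reentrant spinlock via a thread-local nesting counter */
void lock(void)   { if (tib->tib_relock_maps++) return; spin_acquire(&mu); }
void unlock(void) { if (!--tib->tib_relock_maps) spin_release(&mu); }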

View file

@ -8,21 +8,23 @@ COSMOPOLITAN_C_START_
#define MAP_CONTAINER(e) DLL_CONTAINER(struct Map, elem, e)
struct Map {
_Atomic(struct Map *) next; /* for __maps.maps */
char *addr; /* granule aligned */
size_t size; /* must be nonzero */
struct Dll elem; /* for __maps.free */
int64_t off; /* -1 if anonymous */
int prot; /* memory protects */
int flags; /* memory map flag */
bool iscow; /* windows nt only */
bool readonlyfile; /* windows nt only */
intptr_t h; /* windows nt only */
struct Map *next; /* for __maps.maps */
char *addr; /* granule aligned */
size_t size; /* must be nonzero */
struct Dll elem; /* for __maps.free */
int64_t off; /* -1 if anonymous */
int prot; /* memory protects */
int flags; /* memory map flag */
bool iscow; /* windows nt only */
bool readonlyfile; /* windows nt only */
unsigned visited; /* used for checks */
intptr_t h; /* windows nt only */
};
struct Maps {
unsigned mono;
atomic_int lock;
_Atomic(struct Map *) maps;
struct Map *maps;
struct Dll *free;
struct Map stack;
struct Dll *used;
@ -37,6 +39,7 @@ struct AddrSize {
extern struct Maps __maps;
void *randaddr(void);
void __maps_init(void);
void __maps_lock(void);
void __maps_check(void);
@ -44,6 +47,7 @@ void __maps_unlock(void);
struct Map *__maps_alloc(void);
void __maps_free(struct Map *);
void __maps_insert(struct Map *);
int __munmap(char *, size_t, bool);
void *__mmap(char *, size_t, int, int, int, int64_t);
struct AddrSize __get_main_stack(void);

View file

@ -18,6 +18,7 @@
*/
#include "ape/sections.internal.h"
#include "libc/atomic.h"
#include "libc/calls/blockcancel.internal.h"
#include "libc/calls/calls.h"
#include "libc/calls/internal.h"
#include "libc/calls/struct/sigset.internal.h"
@ -86,14 +87,23 @@ void __maps_check(void) {
size_t maps = 0;
size_t pages = 0;
int granularity = getauxval(AT_PAGESZ);
unsigned id = __maps.mono++;
for (struct Map *map = __maps.maps; map; map = map->next) {
ASSERT(map->addr != MAP_FAILED);
ASSERT(map->visited != id);
ASSERT(map->size);
map->visited = id;
pages += PGUP(map->size) / granularity;
maps += 1;
}
ASSERT(maps == __maps.count);
ASSERT(pages == __maps.pages);
for (struct Dll *e = dll_first(__maps.used); e;
e = dll_next(__maps.used, e)) {
ASSERT(MAP_CONTAINER(e)->visited == id);
--maps;
}
ASSERT(maps == 0);
for (struct Map *m1 = __maps.maps; m1; m1 = m1->next)
for (struct Map *m2 = m1->next; m2; m2 = m2->next)
ASSERT(MAX(m1->addr, m2->addr) >=
@ -168,11 +178,7 @@ struct Map *__maps_alloc(void) {
return map;
}
static int __munmap_chunk(void *addr, size_t size) {
return sys_munmap(addr, size);
}
static int __munmap(char *addr, size_t size, bool untrack_only) {
int __munmap(char *addr, size_t size, bool untrack_only) {
// validate arguments
int pagesz = getauxval(AT_PAGESZ);
@ -186,7 +192,7 @@ static int __munmap(char *addr, size_t size, bool untrack_only) {
__maps_lock();
StartOver:;
struct Map *map = __maps.maps;
_Atomic(struct Map *) *prev = &__maps.maps;
struct Map **prev = &__maps.maps;
while (map) {
char *map_addr = map->addr;
size_t map_size = map->size;
@ -207,7 +213,7 @@ StartOver:;
if (!IsWindows()) {
ASSERT(addr <= map_addr);
ASSERT(map_addr + PGUP(map_size) <= addr + PGUP(size));
if (__munmap_chunk(map_addr, map_size))
if (sys_munmap(map_addr, map_size))
rc = -1;
} else {
if (!UnmapViewOfFile(map_addr))
@ -236,7 +242,7 @@ StartOver:;
ASSERT(left > 0);
map->addr += left;
map->size = right;
if (map->off != -1)
if (!(map->flags & MAP_ANONYMOUS))
map->off += left;
__maps.pages -= (left + pagesz - 1) / pagesz;
__maps_check();
@ -244,7 +250,7 @@ StartOver:;
__maps_unlock();
ASSERT(addr <= map_addr);
ASSERT(map_addr + PGUP(left) <= addr + PGUP(size));
if (__munmap_chunk(map_addr, left) == -1)
if (sys_munmap(map_addr, left) == -1)
rc = -1;
__maps_lock();
goto StartOver;
@ -259,7 +265,7 @@ StartOver:;
if (!untrack_only) {
__maps_unlock();
ASSERT(PGUP(right) <= PGUP(size));
if (__munmap_chunk(addr, right) == -1)
if (sys_munmap(addr, right) == -1)
rc = -1;
__maps_lock();
goto StartOver;
@ -279,7 +285,7 @@ StartOver:;
leftmap->flags = map->flags;
map->addr += left + middle;
map->size = right;
if (map->off != -1)
if (!(map->flags & MAP_ANONYMOUS))
map->off += left + middle;
dll_make_first(&__maps.used, &leftmap->elem);
*prev = leftmap;
@ -288,7 +294,7 @@ StartOver:;
__maps_check();
if (!untrack_only) {
__maps_unlock();
if (__munmap_chunk(addr, size) == -1)
if (sys_munmap(addr, size) == -1)
rc = -1;
__maps_lock();
goto StartOver;
@ -339,6 +345,11 @@ static void *__mmap_chunk(void *addr, size_t size, int prot, int flags, int fd,
if (!map)
return MAP_FAILED;
// remove mapping we blew away
if (IsWindows() && should_untrack)
if (__munmap(addr, size, false))
return MAP_FAILED;
// obtain mapping from operating system
int olderr = errno;
struct DirectMap res;
@ -349,9 +360,7 @@ TryAgain:
if (noreplace) {
errno = EEXIST;
} else if (should_untrack) {
sys_munmap(res.addr, size);
errno = olderr;
goto TryAgain;
errno = ENOMEM;
} else {
addr += granularity;
errno = olderr;
@ -368,7 +377,12 @@ TryAgain:
// we assume non-linux gives us addr if it's free
// that's what linux (e.g. rhel7) did before noreplace
if (noreplace && res.addr != addr) {
sys_munmap(res.addr, size);
if (!IsWindows()) {
sys_munmap(res.addr, size);
} else {
UnmapViewOfFile(res.addr);
CloseHandle(res.maphandle);
}
__maps_lock();
__maps_free(map);
__maps_unlock();
@ -376,7 +390,7 @@ TryAgain:
}
// untrack mapping we blew away
if (should_untrack)
if (!IsWindows() && should_untrack)
__munmap(res.addr, size, true);
// track Map object
@ -425,7 +439,7 @@ static void *__mmap_impl(char *addr, size_t size, int prot, int flags, int fd,
if (size <= granularity || size > 100 * 1024 * 1024)
return __mmap_chunk(addr, size, prot, flags, fd, off, granularity);
// so we create an separate map for each granule in the mapping
// so we create a separate map for each granule in the mapping
if (!(flags & MAP_FIXED)) {
while (overlaps_existing_map(addr, size)) {
if (flags & MAP_FIXED_NOREPLACE)
@ -486,7 +500,11 @@ void *__mmap(char *addr, size_t size, int prot, int flags, int fd,
void *mmap(void *addr, size_t size, int prot, int flags, int fd, int64_t off) {
void *res;
BLOCK_SIGNALS;
BLOCK_CANCELATION;
res = __mmap(addr, size, prot, flags, fd, off);
ALLOW_CANCELATION;
ALLOW_SIGNALS;
STRACE("mmap(%p, %'zu, %s, %s, %d, %'ld) → %p% m", addr, size,
DescribeProtFlags(prot), DescribeMapFlags(flags), fd, off, res);
return res;
@ -494,7 +512,11 @@ void *mmap(void *addr, size_t size, int prot, int flags, int fd, int64_t off) {
int munmap(void *addr, size_t size) {
int rc;
BLOCK_SIGNALS;
BLOCK_CANCELATION;
rc = __munmap(addr, size, false);
ALLOW_CANCELATION;
ALLOW_SIGNALS;
STRACE("munmap(%p, %'zu) → %d% m", addr, size, rc);
return rc;
}

View file

@ -30,6 +30,7 @@
#include "libc/runtime/runtime.h"
#include "libc/stdio/sysparam.h"
#include "libc/sysv/consts/auxv.h"
#include "libc/sysv/consts/map.h"
#include "libc/sysv/consts/prot.h"
#include "libc/sysv/errfuns.h"
@ -67,7 +68,7 @@ int __mprotect(char *addr, size_t size, int prot) {
__maps_lock();
bool found = false;
struct Map *map = __maps.maps;
_Atomic(struct Map *) *prev = &__maps.maps;
struct Map **prev = &__maps.maps;
while (map) {
char *map_addr = map->addr;
size_t map_size = map->size;
@ -104,7 +105,7 @@ int __mprotect(char *addr, size_t size, int prot) {
leftmap->flags = map->flags;
map->addr += left;
map->size = right;
if (map->off != -1)
if (!(map->flags & MAP_ANONYMOUS))
map->off += left;
dll_make_first(&__maps.used, &leftmap->elem);
*prev = leftmap;
@ -133,7 +134,7 @@ int __mprotect(char *addr, size_t size, int prot) {
map->addr += left;
map->size = right;
map->prot = prot;
if (map->off != -1)
if (!(map->flags & MAP_ANONYMOUS))
map->off += left;
dll_make_first(&__maps.used, &leftmap->elem);
*prev = leftmap;
@ -165,12 +166,12 @@ int __mprotect(char *addr, size_t size, int prot) {
midlmap->next = map;
midlmap->addr = map_addr + left;
midlmap->size = middle;
midlmap->off = map->off == -1 ? -1 : map->off + left;
midlmap->off = (map->flags & MAP_ANONYMOUS) ? 0 : map->off + left;
midlmap->prot = prot;
midlmap->flags = map->flags;
map->addr += left + middle;
map->size = right;
if (map->off != -1)
if (!(map->flags & MAP_ANONYMOUS))
map->off += left + middle;
dll_make_first(&__maps.used, &leftmap->elem);
dll_make_first(&__maps.used, &midlmap->elem);

View file

@ -35,9 +35,6 @@
*/
int pthread_mutex_init(pthread_mutex_t *mutex,
const pthread_mutexattr_t *attr) {
*mutex = (pthread_mutex_t){
._type = attr ? attr->_type : 0,
._pshared = attr ? attr->_pshared : 0,
};
*mutex = (pthread_mutex_t){._word = attr ? attr->_word : 0};
return 0;
}

View file

@ -23,6 +23,7 @@
#include "libc/intrin/strace.internal.h"
#include "libc/intrin/weaken.h"
#include "libc/runtime/internal.h"
#include "libc/thread/lock.h"
#include "libc/thread/thread.h"
#include "libc/thread/tls.h"
#include "third_party/nsync/mu.h"
@ -65,49 +66,65 @@
* @vforksafe
*/
errno_t pthread_mutex_lock(pthread_mutex_t *mutex) {
int t;
int me;
uint64_t word, lock;
LOCKTRACE("pthread_mutex_lock(%t)", mutex);
if (__vforked) {
if (__vforked)
return 0;
}
if (mutex->_type == PTHREAD_MUTEX_NORMAL && //
mutex->_pshared == PTHREAD_PROCESS_PRIVATE && //
// get current state of lock
word = atomic_load_explicit(&mutex->_word, memory_order_relaxed);
// use fancy nsync mutex if possible
if (MUTEX_TYPE(word) == PTHREAD_MUTEX_NORMAL && //
MUTEX_PSHARED(word) == PTHREAD_PROCESS_PRIVATE && //
_weaken(nsync_mu_lock)) {
_weaken(nsync_mu_lock)((nsync_mu *)mutex);
return 0;
}
if (mutex->_type == PTHREAD_MUTEX_NORMAL) {
while (atomic_exchange_explicit(&mutex->_lock, 1, memory_order_acquire)) {
// implement barebones normal mutexes
if (MUTEX_TYPE(word) == PTHREAD_MUTEX_NORMAL) {
for (;;) {
word = MUTEX_UNLOCK(word);
lock = MUTEX_LOCK(word);
if (atomic_compare_exchange_weak_explicit(&mutex->_word, &word, lock,
memory_order_acquire,
memory_order_relaxed))
return 0;
pthread_pause_np();
}
return 0;
}
t = gettid();
if (mutex->_owner == t) {
if (mutex->_type != PTHREAD_MUTEX_ERRORCHECK) {
if (mutex->_depth < 63) {
++mutex->_depth;
return 0;
// implement recursive mutexes
me = gettid();
for (;;) {
if (MUTEX_OWNER(word) == me) {
if (MUTEX_TYPE(word) != PTHREAD_MUTEX_ERRORCHECK) {
if (MUTEX_DEPTH(word) < MUTEX_DEPTH_MAX) {
if (atomic_compare_exchange_weak_explicit(
&mutex->_word, &word, MUTEX_INC_DEPTH(word),
memory_order_relaxed, memory_order_relaxed))
return 0;
continue;
} else {
return EAGAIN;
}
} else {
return EAGAIN;
return EDEADLK;
}
} else {
return EDEADLK;
}
}
while (atomic_exchange_explicit(&mutex->_lock, 1, memory_order_acquire)) {
word = MUTEX_UNLOCK(word);
lock = MUTEX_LOCK(word);
lock = MUTEX_SET_OWNER(lock, me);
if (atomic_compare_exchange_weak_explicit(&mutex->_word, &word, lock,
memory_order_acquire,
memory_order_relaxed)) {
mutex->_pid = __pid;
return 0;
}
pthread_pause_np();
}
mutex->_depth = 0;
mutex->_owner = t;
mutex->_pid = __pid;
return 0;
}
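
For illustration, here is how the word-based state machine above behaves for a recursive mutex (hypothetical snippet, single thread):

pthread_mutex_t m = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
pthread_mutex_lock(&m);   /* CAS installs owner tid, depth 0 */
pthread_mutex_lock(&m);   /* same owner: one CAS bumps depth */
pthread_mutex_unlock(&m); /* one CAS drops depth back to 0   */
pthread_mutex_unlock(&m); /* CAS clears owner, releases lock */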

View file

@ -21,6 +21,7 @@
#include "libc/intrin/atomic.h"
#include "libc/intrin/weaken.h"
#include "libc/runtime/internal.h"
#include "libc/thread/lock.h"
#include "libc/thread/thread.h"
#include "third_party/nsync/mu.h"
@ -38,11 +39,15 @@
* current thread already holds this mutex
*/
errno_t pthread_mutex_trylock(pthread_mutex_t *mutex) {
int t;
int me;
uint64_t word, lock;
// get current state of lock
word = atomic_load_explicit(&mutex->_word, memory_order_relaxed);
// delegate to *NSYNC if possible
if (mutex->_type == PTHREAD_MUTEX_NORMAL &&
mutex->_pshared == PTHREAD_PROCESS_PRIVATE && //
if (MUTEX_TYPE(word) == PTHREAD_MUTEX_NORMAL &&
MUTEX_PSHARED(word) == PTHREAD_PROCESS_PRIVATE && //
_weaken(nsync_mu_trylock)) {
if (_weaken(nsync_mu_trylock)((nsync_mu *)mutex)) {
return 0;
@ -52,36 +57,43 @@ errno_t pthread_mutex_trylock(pthread_mutex_t *mutex) {
}
// handle normal mutexes
if (mutex->_type == PTHREAD_MUTEX_NORMAL) {
if (!atomic_exchange_explicit(&mutex->_lock, 1, memory_order_acquire)) {
if (MUTEX_TYPE(word) == PTHREAD_MUTEX_NORMAL) {
word = MUTEX_UNLOCK(word);
lock = MUTEX_LOCK(word);
if (atomic_compare_exchange_weak_explicit(&mutex->_word, &word, lock,
memory_order_acquire,
memory_order_relaxed))
return 0;
} else {
return EBUSY;
}
}
// handle recursive and error check mutexes
t = gettid();
if (mutex->_owner == t) {
if (mutex->_type != PTHREAD_MUTEX_ERRORCHECK) {
if (mutex->_depth < 63) {
++mutex->_depth;
return 0;
} else {
return EAGAIN;
}
} else {
return EDEADLK;
}
}
if (atomic_exchange_explicit(&mutex->_lock, 1, memory_order_acquire)) {
return EBUSY;
}
mutex->_depth = 0;
mutex->_owner = t;
mutex->_pid = __pid;
return 0;
// handle recursive and error check mutexes
me = gettid();
for (;;) {
if (MUTEX_OWNER(word) == me) {
if (MUTEX_TYPE(word) != PTHREAD_MUTEX_ERRORCHECK) {
if (MUTEX_DEPTH(word) < MUTEX_DEPTH_MAX) {
if (atomic_compare_exchange_weak_explicit(
&mutex->_word, &word, MUTEX_INC_DEPTH(word),
memory_order_relaxed, memory_order_relaxed))
return 0;
continue;
} else {
return EAGAIN;
}
} else {
return EDEADLK;
}
}
word = MUTEX_UNLOCK(word);
lock = MUTEX_LOCK(word);
lock = MUTEX_SET_OWNER(lock, me);
if (atomic_compare_exchange_weak_explicit(&mutex->_word, &word, lock,
memory_order_acquire,
memory_order_relaxed)) {
mutex->_pid = __pid;
return 0;
}
return EBUSY;
}
}

View file

@ -22,6 +22,7 @@
#include "libc/intrin/strace.internal.h"
#include "libc/intrin/weaken.h"
#include "libc/runtime/internal.h"
#include "libc/thread/lock.h"
#include "libc/thread/thread.h"
#include "third_party/nsync/mu.h"
@ -35,38 +36,52 @@
* @vforksafe
*/
errno_t pthread_mutex_unlock(pthread_mutex_t *mutex) {
int t;
int me;
uint64_t word, lock;
LOCKTRACE("pthread_mutex_unlock(%t)", mutex);
if (mutex->_type == PTHREAD_MUTEX_NORMAL && //
mutex->_pshared == PTHREAD_PROCESS_PRIVATE && //
// get current state of lock
word = atomic_load_explicit(&mutex->_word, memory_order_relaxed);
// use fancy nsync mutex if possible
if (MUTEX_TYPE(word) == PTHREAD_MUTEX_NORMAL && //
MUTEX_PSHARED(word) == PTHREAD_PROCESS_PRIVATE && //
_weaken(nsync_mu_unlock)) {
_weaken(nsync_mu_unlock)((nsync_mu *)mutex);
return 0;
}
if (mutex->_type == PTHREAD_MUTEX_NORMAL) {
atomic_store_explicit(&mutex->_lock, 0, memory_order_release);
// implement barebones normal mutexes
if (MUTEX_TYPE(word) == PTHREAD_MUTEX_NORMAL) {
lock = MUTEX_UNLOCK(word);
atomic_store_explicit(&mutex->_word, lock, memory_order_release);
return 0;
}
t = gettid();
// implement recursive mutex unlocking
me = gettid();
for (;;) {
// we allow unlocking an initialized lock that wasn't locked, but we
// don't allow unlocking a lock held by another thread, or unlocking
// recursive locks from a forked child, since it should be re-init'd
if (mutex->_owner && (mutex->_owner != t || mutex->_pid != __pid)) {
return EPERM;
// we allow unlocking an initialized lock that wasn't locked, but we
// don't allow unlocking a lock held by another thread, or unlocking
// recursive locks from a forked child, since it should be re-init'd
if (MUTEX_OWNER(word) && (MUTEX_OWNER(word) != me || mutex->_pid != __pid))
return EPERM;
// check if this is a nested lock with signal safety
if (MUTEX_DEPTH(word)) {
if (atomic_compare_exchange_weak_explicit(
&mutex->_word, &word, MUTEX_DEC_DEPTH(word), memory_order_relaxed,
memory_order_relaxed))
return 0;
continue;
}
// actually unlock the mutex
if (atomic_compare_exchange_weak_explicit(
&mutex->_word, &word, MUTEX_UNLOCK(word), memory_order_release,
memory_order_relaxed))
return 0;
}
if (mutex->_depth) {
--mutex->_depth;
return 0;
}
mutex->_owner = 0;
atomic_store_explicit(&mutex->_lock, 0, memory_order_release);
return 0;
}

View file

@ -16,6 +16,7 @@
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
*/
#include "libc/thread/lock.h"
#include "libc/thread/thread.h"
/**
@ -28,6 +29,6 @@
*/
errno_t pthread_mutexattr_getpshared(const pthread_mutexattr_t *attr,
int *pshared) {
*pshared = attr->_pshared;
*pshared = MUTEX_PSHARED(attr->_word);
return 0;
}

View file

@ -16,6 +16,7 @@
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
*/
#include "libc/thread/lock.h"
#include "libc/thread/thread.h"
/**
@ -28,6 +29,6 @@
* @return 0 on success, or error on failure
*/
errno_t pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type) {
*type = attr->_type;
*type = MUTEX_TYPE(attr->_word);
return 0;
}

View file

@ -17,6 +17,7 @@
PERFORMANCE OF THIS SOFTWARE.
*/
#include "libc/errno.h"
#include "libc/thread/lock.h"
#include "libc/thread/thread.h"
/**
@ -32,7 +33,7 @@ errno_t pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared) {
switch (pshared) {
case PTHREAD_PROCESS_SHARED:
case PTHREAD_PROCESS_PRIVATE:
attr->_pshared = pshared;
attr->_word = MUTEX_SET_PSHARED(attr->_word, pshared);
return 0;
default:
return EINVAL;

View file

@ -17,6 +17,7 @@
PERFORMANCE OF THIS SOFTWARE.
*/
#include "libc/errno.h"
#include "libc/thread/lock.h"
#include "libc/thread/thread.h"
/**
@ -35,7 +36,7 @@ errno_t pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type) {
case PTHREAD_MUTEX_NORMAL:
case PTHREAD_MUTEX_RECURSIVE:
case PTHREAD_MUTEX_ERRORCHECK:
attr->_type = type;
attr->_word = MUTEX_SET_TYPE(attr->_word, type);
return 0;
default:
return EINVAL;

libc/intrin/randaddr.c (new file, 25 lines)
View file

@ -0,0 +1,25 @@
/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
vi: set et ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi
Copyright 2024 Justine Alexandra Roberts Tunney
Permission to use, copy, modify, and/or distribute this software for
any purpose with or without fee is hereby granted, provided that the
above copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
*/
void *randaddr(void) {
static unsigned long lcg = 1;
lcg *= 6364136223846793005;
lcg += 1442695040888963407;
return (void *)(lcg >> 48 << 28);
}
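
This is a linear congruential generator using Knuth's MMIX constants; lcg >> 48 << 28 keeps the top 16 bits of the state and shifts them up to bit 28, so each call yields a 256 MiB aligned address hint below 2**44. The commit uses it, for instance, in the __zipos_alloc() rewrite below:

/* from the zipos change below: randaddr() feeds __mmap() a hint */
h = __mmap(randaddr(), mapsize, PROT_READ | PROT_WRITE,
           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);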

View file

@ -95,11 +95,9 @@ static inline textwindows ssize_t ForkIo(int64_t h, char *p, size_t n,
struct NtOverlapped *)) {
size_t i;
uint32_t x;
for (i = 0; i < n; i += x) {
if (!f(h, p + i, n - i, &x, NULL)) {
for (i = 0; i < n; i += x)
if (!f(h, p + i, n - i, &x, NULL))
return __winerr();
}
}
return i;
}
@ -153,9 +151,8 @@ static textwindows dontinline void ReadOrDie(int64_t h, void *buf, size_t n) {
static textwindows int64_t MapOrDie(uint32_t prot, uint64_t size) {
int64_t h;
for (;;) {
if ((h = CreateFileMapping(-1, 0, prot, size >> 32, size, 0))) {
if ((h = CreateFileMapping(-1, 0, prot, size >> 32, size, 0)))
return h;
}
if (GetLastError() == kNtErrorAccessDenied) {
switch (prot) {
case kNtPageExecuteWritecopy:
@ -273,16 +270,14 @@ textwindows void WinMainForked(void) {
__maps.pages += (map->size + 4095) / 4096;
dll_make_last(&__maps.used, &map->elem);
if (!VirtualProtect(map->addr, map->size, __prot2nt(map->prot, map->iscow),
&oldprot)) {
&oldprot))
AbortFork("VirtualProtect");
}
}
__maps_init();
// mitosis complete
if (!CloseHandle(reader)) {
if (!CloseHandle(reader))
AbortFork("CloseHandle");
}
// rewrap the stdin named pipe hack
// since the handles closed on fork
@ -428,9 +423,8 @@ textwindows int sys_fork_nt(uint32_t dwCreationFlags) {
atomic_store_explicit(&_pthread_static.ptid, GetCurrentThreadId(),
memory_order_release);
}
if (rc == -1) {
if (rc == -1)
dll_make_first(&__proc.free, &proc->elem);
}
ftrace_enabled(+1);
strace_enabled(+1);
return rc;

View file

@ -74,10 +74,10 @@ static void _onfork_child(void) {
atomic_store_explicit(&free_waiters_mu, 0, memory_order_relaxed);
pthread_mutexattr_destroy(&attr);
_pthread_init();
__maps_unlock();
if (_weaken(_pthread_onfork_child)) {
atomic_store_explicit(&__maps.lock, 0, memory_order_relaxed);
atomic_store_explicit(&__get_tls()->tib_relock_maps, 0, memory_order_relaxed);
if (_weaken(_pthread_onfork_child))
_weaken(_pthread_onfork_child)();
}
}
int _fork(uint32_t dwCreationFlags) {

View file

@ -3,19 +3,15 @@
COSMOPOLITAN_C_START_
#ifndef __SANITIZE_ADDRESS__
#define kFixedmapStart 0x300000000
#define kFixedmapSize (0x400000000 - kFixedmapStart)
#define kMemtrackFdsStart 0x6fe000000
#define kMemtrackFdsSize (0x6ff000000 - kMemtrackFdsStart)
#define kMemtrackZiposStart 0x6fd000000
#define kMemtrackZiposSize (0xafe000000 - kMemtrackZiposStart)
#define kFixedmapStart 0x300000000
#define kFixedmapSize (0x400000000 - kFixedmapStart)
#define kMemtrackFdsStart 0x6fe000000
#define kMemtrackFdsSize (0x6ff000000 - kMemtrackFdsStart)
#else
#define kFixedmapStart 0x300000040000
#define kFixedmapSize (0x400000040000 - kFixedmapStart)
#define kMemtrackFdsStart 0x6fe000040000
#define kMemtrackFdsSize (0x6feffffc0000 - kMemtrackFdsStart)
#define kMemtrackZiposStart 0x6fd000040000
#define kMemtrackZiposSize (0x6fdffffc0000 - kMemtrackZiposStart)
#define kFixedmapStart 0x300000040000
#define kFixedmapSize (0x400000040000 - kFixedmapStart)
#define kMemtrackFdsStart 0x6fe000040000
#define kMemtrackFdsSize (0x6feffffc0000 - kMemtrackFdsStart)
#endif
COSMOPOLITAN_C_END_

View file

@ -69,9 +69,8 @@ static void __zipos_dismiss(uint8_t *map, const uint8_t *cdir, long pg) {
// this is supposed to reduce our rss usage but does it really?
lo = ROUNDDOWN(lo, pg);
hi = MIN(ROUNDUP(hi, pg), ROUNDDOWN(c, pg));
if (hi > lo) {
if (hi > lo)
posix_madvise(map + lo, hi - lo, POSIX_MADV_DONTNEED);
}
}
static int __zipos_compare_names(const void *a, const void *b, void *c) {
@ -96,9 +95,8 @@ static void __zipos_generate_index(struct Zipos *zipos) {
zipos->records = GetZipCdirRecords(zipos->cdir);
zipos->index = _mapanon(zipos->records * sizeof(size_t));
for (i = 0, c = GetZipCdirOffset(zipos->cdir); i < zipos->records;
++i, c += ZIP_CFILE_HDRSIZE(zipos->map + c)) {
++i, c += ZIP_CFILE_HDRSIZE(zipos->map + c))
zipos->index[i] = c;
}
// smoothsort() isn't the fastest algorithm, but it guarantees
// o(nlogn) won't smash the stack and doesn't depend on malloc
smoothsort_r(zipos->index, zipos->records, sizeof(size_t),
@ -122,9 +120,8 @@ static void __zipos_init(void) {
}
if (fd != -1 || PLEDGED(RPATH)) {
if (fd == -1) {
if (!progpath) {
if (!progpath)
progpath = GetProgramExecutableName();
}
fd = open(progpath, O_RDONLY);
}
if (fd != -1) {

View file

@ -16,112 +16,50 @@
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
*/
#include "libc/assert.h"
#include "libc/calls/calls.h"
#include "libc/calls/blockcancel.internal.h"
#include "libc/calls/internal.h"
#include "libc/calls/state.internal.h"
#include "libc/calls/struct/sigset.h"
#include "libc/calls/struct/fd.internal.h"
#include "libc/calls/struct/sigset.internal.h"
#include "libc/calls/syscall-sysv.internal.h"
#include "libc/calls/syscall_support-sysv.internal.h"
#include "libc/dce.h"
#include "libc/errno.h"
#include "libc/intrin/atomic.h"
#include "libc/intrin/cmpxchg.h"
#include "libc/intrin/directmap.internal.h"
#include "libc/intrin/extend.internal.h"
#include "libc/intrin/likely.h"
#include "libc/intrin/strace.internal.h"
#include "libc/intrin/maps.h"
#include "libc/intrin/weaken.h"
#include "libc/limits.h"
#include "libc/runtime/internal.h"
#include "libc/runtime/memtrack.internal.h"
#include "libc/runtime/runtime.h"
#include "libc/runtime/zipos.internal.h"
#include "libc/str/str.h"
#include "libc/sysv/consts/f.h"
#include "libc/sysv/consts/fd.h"
#include "libc/sysv/consts/map.h"
#include "libc/sysv/consts/o.h"
#include "libc/sysv/consts/prot.h"
#include "libc/sysv/consts/s.h"
#include "libc/sysv/consts/sig.h"
#include "libc/sysv/errfuns.h"
#include "libc/thread/thread.h"
#include "libc/thread/tls.h"
#include "libc/zip.internal.h"
#define MAX_REFS SSIZE_MAX
static char *__zipos_mapend;
static size_t __zipos_maptotal;
static pthread_mutex_t __zipos_lock_obj;
static void __zipos_wipe(void) {
pthread_mutex_init(&__zipos_lock_obj, 0);
}
static void __zipos_lock(void) {
pthread_mutex_lock(&__zipos_lock_obj);
}
static void __zipos_unlock(void) {
pthread_mutex_unlock(&__zipos_lock_obj);
}
static void *__zipos_mmap_space(size_t mapsize) {
char *start;
size_t offset;
unassert(mapsize);
offset = __zipos_maptotal;
__zipos_maptotal += mapsize;
start = (char *)kMemtrackZiposStart;
if (!__zipos_mapend)
__zipos_mapend = start;
__zipos_mapend = _extend(start, __zipos_maptotal, __zipos_mapend, MAP_PRIVATE,
kMemtrackZiposStart + kMemtrackZiposSize);
return start + offset;
}
struct ZiposHandle *__zipos_keep(struct ZiposHandle *h) {
size_t refs = atomic_fetch_add_explicit(&h->refs, 1, memory_order_relaxed);
unassert(!VERY_UNLIKELY(refs > MAX_REFS));
atomic_fetch_add_explicit(&h->refs, 1, memory_order_relaxed);
return h;
}
void __zipos_drop(struct ZiposHandle *h) {
if (atomic_fetch_sub_explicit(&h->refs, 1, memory_order_release)) {
if (atomic_fetch_sub_explicit(&h->refs, 1, memory_order_release))
return;
}
atomic_thread_fence(memory_order_acquire);
__zipos_lock();
do
h->next = h->zipos->freelist;
while (!_cmpxchg(&h->zipos->freelist, h->next, h));
__zipos_unlock();
__munmap((char *)h, h->mapsize, false);
}
static struct ZiposHandle *__zipos_alloc(struct Zipos *zipos, size_t size) {
size_t mapsize;
struct ZiposHandle *h, **ph;
__zipos_lock();
int granularity;
struct ZiposHandle *h;
granularity = __granularity();
mapsize = sizeof(struct ZiposHandle) + size;
mapsize = ROUNDUP(mapsize, 4096);
StartOver:
ph = &zipos->freelist;
while ((h = *ph)) {
if (h->mapsize >= mapsize) {
if (!_cmpxchg(ph, h, h->next))
goto StartOver;
break;
}
ph = &h->next;
}
if (!h) {
h = __zipos_mmap_space(mapsize);
}
__zipos_unlock();
if (h) {
atomic_store_explicit(&h->refs, 0, memory_order_relaxed);
mapsize = (mapsize + granularity - 1) & -granularity;
if ((h = __mmap(randaddr(), mapsize, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)) != MAP_FAILED) {
h->size = size;
h->zipos = zipos;
h->mapsize = mapsize;
@ -158,6 +96,7 @@ static int __zipos_load(struct Zipos *zipos, size_t cf, int flags,
size_t size;
int fd, minfd;
struct ZiposHandle *h;
if (cf == ZIPOS_SYNTHETIC_DIRECTORY) {
size = name->len;
if (!(h = __zipos_alloc(zipos, size + 1)))
@ -168,7 +107,6 @@ static int __zipos_load(struct Zipos *zipos, size_t cf, int flags,
h->mem = h->data;
} else {
lf = GetZipCfileOffset(zipos->map + cf);
npassert((ZIP_LFILE_MAGIC(zipos->map + lf) == kZipLfileHdrMagic));
size = GetZipLfileUncompressedSize(zipos->map + lf);
switch (ZIP_LFILE_COMPRESSIONMETHOD(zipos->map + lf)) {
case kZipCompressionNone:
@ -191,18 +129,17 @@ static int __zipos_load(struct Zipos *zipos, size_t cf, int flags,
return eio();
}
}
atomic_store_explicit(&h->pos, 0, memory_order_relaxed);
h->cfile = cf;
unassert(size < SIZE_MAX);
h->size = size;
if (h->mem) {
minfd = 3;
__fds_lock();
TryAgain:
if (IsWindows() || IsMetal()) {
if ((fd = __reservefd_unlocked(-1)) != -1) {
if ((fd = __reservefd_unlocked(-1)) != -1)
return __zipos_setfd(fd, h, flags);
}
} else if ((fd = __zipos_mkfd(minfd)) != -1) {
if (__ensurefds_unlocked(fd) != -1) {
if (g_fds.p[fd].kind) {
@ -221,16 +158,15 @@ static int __zipos_load(struct Zipos *zipos, size_t cf, int flags,
}
void __zipos_postdup(int oldfd, int newfd) {
if (oldfd == newfd) {
if (oldfd == newfd)
return;
}
BLOCK_SIGNALS;
BLOCK_CANCELATION;
__fds_lock();
if (__isfdkind(newfd, kFdZip)) {
__zipos_drop((struct ZiposHandle *)(intptr_t)g_fds.p[newfd].handle);
if (!__isfdkind(oldfd, kFdZip)) {
if (!__isfdkind(oldfd, kFdZip))
bzero(g_fds.p + newfd, sizeof(*g_fds.p));
}
}
if (__isfdkind(oldfd, kFdZip)) {
__zipos_keep((struct ZiposHandle *)(intptr_t)g_fds.p[oldfd].handle);
@ -238,6 +174,7 @@ void __zipos_postdup(int oldfd, int newfd) {
g_fds.p[newfd] = g_fds.p[oldfd];
}
__fds_unlock();
ALLOW_CANCELATION;
ALLOW_SIGNALS;
}
@ -260,45 +197,36 @@ int __zipos_open(struct ZiposUri *name, int flags) {
// validate api usage
if ((flags & O_CREAT) || //
(flags & O_TRUNC) || //
(flags & O_ACCMODE) != O_RDONLY) {
(flags & O_ACCMODE) != O_RDONLY)
return erofs();
}
// get the zipos global singleton
struct Zipos *zipos;
if (!(zipos = __zipos_get())) {
if (!(zipos = __zipos_get()))
return enoexec();
}
// most open() calls are due to languages path searching assets. the
// majority of these calls will return ENOENT or ENOTDIR. we need to
// perform two extremely costly sigprocmask() calls below. thanks to
// zipos being a read-only filesystem, we can avoid it in many cases
ssize_t cf;
if ((cf = __zipos_find(zipos, name)) == -1) {
if ((cf = __zipos_find(zipos, name)) == -1)
return -1;
}
if (flags & O_EXCL) {
if (flags & O_EXCL)
return eexist();
}
if (cf != ZIPOS_SYNTHETIC_DIRECTORY) {
int mode = GetZipCfileMode(zipos->map + cf);
if ((flags & O_DIRECTORY) && !S_ISDIR(mode)) {
if ((flags & O_DIRECTORY) && !S_ISDIR(mode))
return enotdir();
}
if (!(mode & 0444)) {
if (!(mode & 0444))
return eacces();
}
}
// now do the heavy lifting
BLOCK_SIGNALS;
BLOCK_CANCELATION;
rc = __zipos_load(zipos, cf, flags, name);
ALLOW_CANCELATION;
ALLOW_SIGNALS;
return rc;
}
__attribute__((__constructor__(60))) static textstartup void zipos_ctor(void) {
__zipos_wipe();
pthread_atfork(__zipos_lock, __zipos_unlock, __zipos_wipe);
}

View file

@ -34,16 +34,14 @@ static ssize_t __zipos_read_impl(struct ZiposHandle *h, const struct iovec *iov,
int i;
int64_t b, x, y, start_pos;
if (h->cfile == ZIPOS_SYNTHETIC_DIRECTORY ||
S_ISDIR(GetZipCfileMode(h->zipos->map + h->cfile))) {
S_ISDIR(GetZipCfileMode(h->zipos->map + h->cfile)))
return eisdir();
}
if (opt_offset == -1) {
Restart:
start_pos = atomic_load_explicit(&h->pos, memory_order_relaxed);
do {
if (UNLIKELY(start_pos == SIZE_MAX)) {
if (UNLIKELY(start_pos == SIZE_MAX))
goto Restart;
}
} while (!LIKELY(atomic_compare_exchange_weak_explicit(
&h->pos, &start_pos, SIZE_MAX, memory_order_acquire,
memory_order_relaxed)));

View file

@ -34,7 +34,6 @@ struct Zipos {
uint64_t dev;
size_t *index;
size_t records;
struct ZiposHandle *freelist;
};
int __zipos_close(int);

View file

@ -41,20 +41,16 @@ void(__fflush_unlock)(void) {
static void __stdio_fork_prepare(void) {
FILE *f;
__fflush_lock();
for (int i = 0; i < __fflush.handles.i; ++i) {
if ((f = __fflush.handles.p[i])) {
for (int i = 0; i < __fflush.handles.i; ++i)
if ((f = __fflush.handles.p[i]))
pthread_mutex_lock(&f->lock);
}
}
}
static void __stdio_fork_parent(void) {
FILE *f;
for (int i = __fflush.handles.i; i--;) {
if ((f = __fflush.handles.p[i])) {
for (int i = __fflush.handles.i; i--;)
if ((f = __fflush.handles.p[i]))
pthread_mutex_unlock(&f->lock);
}
}
__fflush_unlock();
}
@ -63,7 +59,7 @@ static void __stdio_fork_child(void) {
for (int i = __fflush.handles.i; i--;) {
if ((f = __fflush.handles.p[i])) {
bzero(&f->lock, sizeof(f->lock));
f->lock._type = PTHREAD_MUTEX_RECURSIVE;
f->lock._word = PTHREAD_MUTEX_RECURSIVE;
}
}
pthread_mutex_init(&__fflush_lock_obj, 0);

View file

@ -37,6 +37,6 @@ __attribute__((__constructor__(60))) static textstartup void errinit(void) {
stderr->iomode = O_WRONLY;
stderr->buf = stderr->mem;
stderr->size = sizeof(stderr->mem);
stderr->lock._type = PTHREAD_MUTEX_RECURSIVE;
stderr->lock._word = PTHREAD_MUTEX_RECURSIVE;
__fflush_register(stderr);
}

View file

@ -39,7 +39,7 @@ __attribute__((__constructor__(60))) static textstartup void initin(void) {
stdin->iomode = O_RDONLY;
stdin->buf = stdin->mem;
stdin->size = sizeof(stdin->mem);
stdin->lock._type = PTHREAD_MUTEX_RECURSIVE;
stdin->lock._word = PTHREAD_MUTEX_RECURSIVE;
if (fstat(STDIN_FILENO, &st) || !S_ISREG(st.st_mode))
stdin->bufmode = _IONBF;
__fflush_register(stdin);

View file

@ -38,7 +38,7 @@ __attribute__((__constructor__(60))) static textstartup void outinit(void) {
stdout->iomode = O_WRONLY;
stdout->buf = stdout->mem;
stdout->size = sizeof(stdout->mem);
stdout->lock._type = PTHREAD_MUTEX_RECURSIVE;
stdout->lock._word = PTHREAD_MUTEX_RECURSIVE;
/*
* Unlike other C libraries we don't bother calling fstat() to check

libc/thread/lock.h (new file, 23 lines)
View file

@ -0,0 +1,23 @@
#ifndef COSMOPOLITAN_LIBC_THREAD_LOCK_H_
#define COSMOPOLITAN_LIBC_THREAD_LOCK_H_
COSMOPOLITAN_C_START_
#define MUTEX_DEPTH_MIN 0x00000010ull
#define MUTEX_DEPTH_MAX 0x000003f0ull
#define MUTEX_TYPE(word) ((word) & 3)
#define MUTEX_PSHARED(word) ((word) & 4)
#define MUTEX_LOCKED(word) ((word) & 8)
#define MUTEX_DEPTH(word) ((word) & MUTEX_DEPTH_MAX)
#define MUTEX_OWNER(word) ((word) >> 32)
#define MUTEX_LOCK(word) (((word) & 7) | 8)
#define MUTEX_UNLOCK(word) ((word) & 7)
#define MUTEX_SET_TYPE(word, type) (((word) & ~3ull) | (type))
#define MUTEX_SET_PSHARED(word, pshared) (((word) & ~4ull) | (pshared))
#define MUTEX_INC_DEPTH(word) ((word) + MUTEX_DEPTH_MIN)
#define MUTEX_DEC_DEPTH(word) ((word) - MUTEX_DEPTH_MIN)
#define MUTEX_SET_OWNER(word, tid) ((uint64_t)(tid) << 32 | (uint32_t)(word))
COSMOPOLITAN_C_END_
#endif /* COSMOPOLITAN_LIBC_THREAD_LOCK_H_ */
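
Depth arithmetic note: the depth field occupies bits 4-9, so MUTEX_DEPTH_MAX is 63 increments of MUTEX_DEPTH_MIN (0x10), matching the old _depth < 63 limit. For example:

uint64_t w = 0;
w = MUTEX_INC_DEPTH(w);  /* MUTEX_DEPTH(w) == 0x10, i.e. depth 1  */
/* after 63 increments MUTEX_DEPTH(w) == MUTEX_DEPTH_MAX (0x3f0), */
/* at which point pthread_mutex_lock() returns EAGAIN             */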

View file

@ -17,6 +17,7 @@
PERFORMANCE OF THIS SOFTWARE.
*/
#include "libc/errno.h"
#include "libc/thread/lock.h"
#include "libc/thread/thread.h"
#include "libc/thread/thread2.h"
#include "third_party/nsync/common.internal.h"
@ -48,12 +49,10 @@
*/
errno_t pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
const struct timespec *abstime) {
if (abstime && !(0 <= abstime->tv_nsec && abstime->tv_nsec < 1000000000)) {
if (abstime && !(0 <= abstime->tv_nsec && abstime->tv_nsec < 1000000000))
return EINVAL;
}
if (mutex->_type != PTHREAD_MUTEX_NORMAL) {
if (MUTEX_TYPE(mutex->_word) != PTHREAD_MUTEX_NORMAL)
nsync_panic_("pthread cond needs normal mutex\n");
}
return nsync_cv_wait_with_deadline(
(nsync_cv *)cond, (nsync_mu *)mutex,
abstime ? *abstime : nsync_time_no_deadline, 0);

View file

@ -15,7 +15,7 @@
#define PTHREAD_MUTEX_ROBUST 1
#define PTHREAD_PROCESS_PRIVATE 0
#define PTHREAD_PROCESS_SHARED 1
#define PTHREAD_PROCESS_SHARED 4
#define PTHREAD_CREATE_JOINABLE 0
#define PTHREAD_CREATE_DETACHED 1
@ -40,13 +40,12 @@
#if !(__ASSEMBLER__ + __LINKER__ + 0)
COSMOPOLITAN_C_START_
#define PTHREAD_ONCE_INIT _PTHREAD_INIT
#define PTHREAD_COND_INITIALIZER _PTHREAD_INIT
#define PTHREAD_RWLOCK_INITIALIZER _PTHREAD_INIT
#define PTHREAD_MUTEX_INITIALIZER _PTHREAD_INIT
#define PTHREAD_ONCE_INIT {0}
#define PTHREAD_COND_INITIALIZER {0}
#define PTHREAD_RWLOCK_INITIALIZER {0}
#define PTHREAD_MUTEX_INITIALIZER {0}
#define _PTHREAD_INIT \
{ 0 }
#define PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP {0, 0, PTHREAD_MUTEX_RECURSIVE}
typedef uintptr_t pthread_t;
typedef int pthread_id_np_t;
@ -65,17 +64,13 @@ typedef struct pthread_spinlock_s {
} pthread_spinlock_t;
typedef struct pthread_mutex_s {
_Atomic(int32_t) _lock;
unsigned _type : 2;
unsigned _pshared : 1;
unsigned _depth : 6;
unsigned _owner : 23;
long _pid;
uint32_t _nsync;
int32_t _pid;
_Atomic(uint64_t) _word;
} pthread_mutex_t;
typedef struct pthread_mutexattr_s {
char _type;
char _pshared;
unsigned _word;
} pthread_mutexattr_t;
typedef struct pthread_cond_s {

View file

@ -4,7 +4,6 @@
#define TLS_ALIGNMENT 64
#define TIB_FLAG_VFORKED 1
#define TIB_FLAG_MAPLOCK 2
#if !(__ASSEMBLER__ + __LINKER__ + 0)
COSMOPOLITAN_C_START_
@ -38,8 +37,9 @@ struct CosmoTib {
char *tib_sigstack_addr;
uint32_t tib_sigstack_size;
uint32_t tib_sigstack_flags;
_Atomic(int) tib_relock_maps;
void *tib_nsync;
void *tib_keys[48];
void *tib_keys[47];
} __attribute__((__aligned__(64)));
extern int __threaded;

View file

@ -56,6 +56,7 @@
#include "libc/testlib/ezbench.h"
#include "libc/testlib/subprocess.h"
#include "libc/testlib/testlib.h"
#include "libc/thread/lock.h"
#include "libc/thread/posixthread.internal.h"
#include "libc/thread/thread.h"
#include "libc/time.h"
@ -652,7 +653,7 @@ TEST(pledge_openbsd, bigSyscalls) {
void *LockWorker(void *arg) {
flockfile(stdout);
ASSERT_EQ(gettid(), stdout->lock._owner);
ASSERT_EQ(gettid(), MUTEX_OWNER(stdout->lock._word));
funlockfile(stdout);
return 0;
}

View file

@ -98,6 +98,15 @@ TEST(mmap, noreplaceExistingMap) {
EXPECT_SYS(0, 0, munmap(p, granularity));
}
TEST(mmap, fixedTaken) {
char *p;
ASSERT_NE(MAP_FAILED, (p = mmap(0, granularity, PROT_READ,
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0)));
ASSERT_NE(MAP_FAILED, mmap(p, granularity, PROT_READ,
MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0));
EXPECT_SYS(0, 0, munmap(p, granularity));
}
TEST(mmap, hint) {
char *p, *q;

View file

@ -54,12 +54,10 @@ void *Worker(void *arg) {
TEST(zipos, test) {
int i, n = 16;
pthread_t *t = gc(malloc(sizeof(pthread_t) * n));
for (i = 0; i < n; ++i) {
for (i = 0; i < n; ++i)
ASSERT_SYS(0, 0, pthread_create(t + i, 0, Worker, 0));
}
for (i = 0; i < n; ++i) {
for (i = 0; i < n; ++i)
EXPECT_SYS(0, 0, pthread_join(t[i], 0));
}
}
TEST(zipos, erofs) {

View file

@ -114,10 +114,10 @@ for arch in aarch64 x86_64; do
ln -sf $arch-linux-cosmo-objdump bin/$arch-unknown-cosmo-objdump
ln -sf $arch-linux-cosmo-readelf bin/$arch-unknown-cosmo-readelf
ln -sf $arch-linux-cosmo-strip bin/$arch-unknown-cosmo-strip
cmp -s libexec/gcc/$arch-linux-cosmo/$GCCVER/ld.bfd libexec/gcc/$arch-linux-cosmo/$GCCVER/ld
ln -sf ld.bfd libexec/gcc/$arch-linux-cosmo/$GCCVER/ld
cmp -s libexec/gcc/$arch-linux-cosmo/$GCCVER/ld.bfd bin/$arch-linux-cosmo-ld
ln -sf ../libexec/gcc/$arch-linux-cosmo/$GCCVER/ld.bfd bin/$arch-linux-cosmo-ld
# cmp -s libexec/gcc/$arch-linux-cosmo/$GCCVER/ld.bfd libexec/gcc/$arch-linux-cosmo/$GCCVER/ld
# ln -sf ld.bfd libexec/gcc/$arch-linux-cosmo/$GCCVER/ld
# cmp -s libexec/gcc/$arch-linux-cosmo/$GCCVER/ld.bfd bin/$arch-linux-cosmo-ld
# ln -sf ../libexec/gcc/$arch-linux-cosmo/$GCCVER/ld.bfd bin/$arch-linux-cosmo-ld
cmp -s libexec/gcc/$arch-linux-cosmo/$GCCVER/as bin/$arch-linux-cosmo-as
ln -sf ../libexec/gcc/$arch-linux-cosmo/$GCCVER/as bin/$arch-linux-cosmo-as
cmp -s libexec/gcc/$arch-linux-cosmo/$GCCVER/ld.bfd bin/$arch-linux-cosmo-ld.bfd