Fix bugs with new memory manager

This fixes a regression in mmap(MAP_FIXED) on Windows caused by a recent
revision. This change also fixes ZipOS so it no longer needs a MAP_FIXED
mapping to open files from the PKZIP store. The memory mapping mutex was
implemented incorrectly earlier which meant that ftrace and strace could
cause crashes. This lock and other recursive mutexes are rewritten
so that it should be provable that recursive mutexes in cosmopolitan are
asynchronous signal safe.
This commit is contained in:
Justine Tunney 2024-06-29 05:10:15 -07:00
parent 6de12c1032
commit 464858dbb4
No known key found for this signature in database
GPG key ID: BE714B4575D6E328
34 changed files with 353 additions and 313 deletions

View file

@ -19,4 +19,4 @@
#include "libc/calls/state.internal.h"
#include "libc/thread/thread.h"
pthread_mutex_t __fds_lock_obj = {._type = PTHREAD_MUTEX_RECURSIVE};
pthread_mutex_t __fds_lock_obj = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;

View file

@ -20,14 +20,10 @@
#include "ape/sections.internal.h"
#include "libc/dce.h"
#include "libc/intrin/dll.h"
#include "libc/intrin/kprintf.h"
#include "libc/intrin/strace.internal.h"
#include "libc/runtime/runtime.h"
#include "libc/runtime/stack.h"
#include "libc/sysv/consts/auxv.h"
#include "libc/sysv/consts/prot.h"
#include "libc/thread/thread.h"
#include "libc/thread/tls.h"
#ifdef __x86_64__
__static_yoink("_init_maps");
@ -73,7 +69,7 @@ privileged void __maps_lock(void) {
if (!__tls_enabled)
return;
tib = __get_tls_privileged();
if (tib->tib_flags & TIB_FLAG_MAPLOCK)
if (tib->tib_relock_maps++)
return;
while (atomic_exchange_explicit(&__maps.lock, 1, memory_order_acquire)) {
#if defined(__GNUC__) && defined(__aarch64__)
@ -82,14 +78,15 @@ privileged void __maps_lock(void) {
__asm__ volatile("pause");
#endif
}
tib->tib_flags |= TIB_FLAG_MAPLOCK;
}
privileged void __maps_unlock(void) {
struct CosmoTib *tib;
atomic_store_explicit(&__maps.lock, 0, memory_order_release);
if (__tls_enabled) {
tib = __get_tls_privileged();
tib->tib_flags &= ~TIB_FLAG_MAPLOCK;
}
if (!__threaded)
return;
if (!__tls_enabled)
return;
tib = __get_tls_privileged();
if (!--tib->tib_relock_maps)
atomic_store_explicit(&__maps.lock, 0, memory_order_release);
}

View file

@ -8,21 +8,23 @@ COSMOPOLITAN_C_START_
#define MAP_CONTAINER(e) DLL_CONTAINER(struct Map, elem, e)
struct Map {
_Atomic(struct Map *) next; /* for __maps.maps */
char *addr; /* granule aligned */
size_t size; /* must be nonzero */
struct Dll elem; /* for __maps.free */
int64_t off; /* -1 if anonymous */
int prot; /* memory protects */
int flags; /* memory map flag */
bool iscow; /* windows nt only */
bool readonlyfile; /* windows nt only */
intptr_t h; /* windows nt only */
struct Map *next; /* for __maps.maps */
char *addr; /* granule aligned */
size_t size; /* must be nonzero */
struct Dll elem; /* for __maps.free */
int64_t off; /* -1 if anonymous */
int prot; /* memory protects */
int flags; /* memory map flag */
bool iscow; /* windows nt only */
bool readonlyfile; /* windows nt only */
unsigned visited; /* used for checks */
intptr_t h; /* windows nt only */
};
struct Maps {
unsigned mono;
atomic_int lock;
_Atomic(struct Map *) maps;
struct Map *maps;
struct Dll *free;
struct Map stack;
struct Dll *used;
@ -37,6 +39,7 @@ struct AddrSize {
extern struct Maps __maps;
void *randaddr(void);
void __maps_init(void);
void __maps_lock(void);
void __maps_check(void);
@ -44,6 +47,7 @@ void __maps_unlock(void);
struct Map *__maps_alloc(void);
void __maps_free(struct Map *);
void __maps_insert(struct Map *);
int __munmap(char *, size_t, bool);
void *__mmap(char *, size_t, int, int, int, int64_t);
struct AddrSize __get_main_stack(void);

View file

@ -18,6 +18,7 @@
*/
#include "ape/sections.internal.h"
#include "libc/atomic.h"
#include "libc/calls/blockcancel.internal.h"
#include "libc/calls/calls.h"
#include "libc/calls/internal.h"
#include "libc/calls/struct/sigset.internal.h"
@ -86,14 +87,23 @@ void __maps_check(void) {
size_t maps = 0;
size_t pages = 0;
int granularity = getauxval(AT_PAGESZ);
unsigned id = __maps.mono++;
for (struct Map *map = __maps.maps; map; map = map->next) {
ASSERT(map->addr != MAP_FAILED);
ASSERT(map->visited != id);
ASSERT(map->size);
map->visited = id;
pages += PGUP(map->size) / granularity;
maps += 1;
}
ASSERT(maps = __maps.count);
ASSERT(pages == __maps.pages);
for (struct Dll *e = dll_first(__maps.used); e;
e = dll_next(__maps.used, e)) {
ASSERT(MAP_CONTAINER(e)->visited == id);
--maps;
}
ASSERT(maps == 0);
for (struct Map *m1 = __maps.maps; m1; m1 = m1->next)
for (struct Map *m2 = m1->next; m2; m2 = m2->next)
ASSERT(MAX(m1->addr, m2->addr) >=
@ -168,11 +178,7 @@ struct Map *__maps_alloc(void) {
return map;
}
static int __munmap_chunk(void *addr, size_t size) {
return sys_munmap(addr, size);
}
static int __munmap(char *addr, size_t size, bool untrack_only) {
int __munmap(char *addr, size_t size, bool untrack_only) {
// validate arguments
int pagesz = getauxval(AT_PAGESZ);
@ -186,7 +192,7 @@ static int __munmap(char *addr, size_t size, bool untrack_only) {
__maps_lock();
StartOver:;
struct Map *map = __maps.maps;
_Atomic(struct Map *) *prev = &__maps.maps;
struct Map **prev = &__maps.maps;
while (map) {
char *map_addr = map->addr;
size_t map_size = map->size;
@ -207,7 +213,7 @@ StartOver:;
if (!IsWindows()) {
ASSERT(addr <= map_addr);
ASSERT(map_addr + PGUP(map_size) <= addr + PGUP(size));
if (__munmap_chunk(map_addr, map_size))
if (sys_munmap(map_addr, map_size))
rc = -1;
} else {
if (!UnmapViewOfFile(map_addr))
@ -236,7 +242,7 @@ StartOver:;
ASSERT(left > 0);
map->addr += left;
map->size = right;
if (map->off != -1)
if (!(map->flags & MAP_ANONYMOUS))
map->off += left;
__maps.pages -= (left + pagesz - 1) / pagesz;
__maps_check();
@ -244,7 +250,7 @@ StartOver:;
__maps_unlock();
ASSERT(addr <= map_addr);
ASSERT(map_addr + PGUP(left) <= addr + PGUP(size));
if (__munmap_chunk(map_addr, left) == -1)
if (sys_munmap(map_addr, left) == -1)
rc = -1;
__maps_lock();
goto StartOver;
@ -259,7 +265,7 @@ StartOver:;
if (!untrack_only) {
__maps_unlock();
ASSERT(PGUP(right) <= PGUP(size));
if (__munmap_chunk(addr, right) == -1)
if (sys_munmap(addr, right) == -1)
rc = -1;
__maps_lock();
goto StartOver;
@ -279,7 +285,7 @@ StartOver:;
leftmap->flags = map->flags;
map->addr += left + middle;
map->size = right;
if (map->off != -1)
if (!(map->flags & MAP_ANONYMOUS))
map->off += left + middle;
dll_make_first(&__maps.used, &leftmap->elem);
*prev = leftmap;
@ -288,7 +294,7 @@ StartOver:;
__maps_check();
if (!untrack_only) {
__maps_unlock();
if (__munmap_chunk(addr, size) == -1)
if (sys_munmap(addr, size) == -1)
rc = -1;
__maps_lock();
goto StartOver;
@ -339,6 +345,11 @@ static void *__mmap_chunk(void *addr, size_t size, int prot, int flags, int fd,
if (!map)
return MAP_FAILED;
// remove mapping we blew away
if (IsWindows() && should_untrack)
if (__munmap(addr, size, false))
return MAP_FAILED;
// obtain mapping from operating system
int olderr = errno;
struct DirectMap res;
@ -349,9 +360,7 @@ TryAgain:
if (noreplace) {
errno = EEXIST;
} else if (should_untrack) {
sys_munmap(res.addr, size);
errno = olderr;
goto TryAgain;
errno = ENOMEM;
} else {
addr += granularity;
errno = olderr;
@ -368,7 +377,12 @@ TryAgain:
// we assume non-linux gives us addr if it's free
// that's what linux (e.g. rhel7) did before noreplace
if (noreplace && res.addr != addr) {
sys_munmap(res.addr, size);
if (!IsWindows()) {
sys_munmap(res.addr, size);
} else {
UnmapViewOfFile(res.addr);
CloseHandle(res.maphandle);
}
__maps_lock();
__maps_free(map);
__maps_unlock();
@ -376,7 +390,7 @@ TryAgain:
}
// untrack mapping we blew away
if (should_untrack)
if (!IsWindows() && should_untrack)
__munmap(res.addr, size, true);
// track Map object
@ -425,7 +439,7 @@ static void *__mmap_impl(char *addr, size_t size, int prot, int flags, int fd,
if (size <= granularity || size > 100 * 1024 * 1024)
return __mmap_chunk(addr, size, prot, flags, fd, off, granularity);
// so we create an separate map for each granule in the mapping
// so we create a separate map for each granule in the mapping
if (!(flags & MAP_FIXED)) {
while (overlaps_existing_map(addr, size)) {
if (flags & MAP_FIXED_NOREPLACE)
@ -486,7 +500,11 @@ void *__mmap(char *addr, size_t size, int prot, int flags, int fd,
void *mmap(void *addr, size_t size, int prot, int flags, int fd, int64_t off) {
void *res;
BLOCK_SIGNALS;
BLOCK_CANCELATION;
res = __mmap(addr, size, prot, flags, fd, off);
ALLOW_CANCELATION;
ALLOW_SIGNALS;
STRACE("mmap(%p, %'zu, %s, %s, %d, %'ld) → %p% m", addr, size,
DescribeProtFlags(prot), DescribeMapFlags(flags), fd, off, res);
return res;
@ -494,7 +512,11 @@ void *mmap(void *addr, size_t size, int prot, int flags, int fd, int64_t off) {
int munmap(void *addr, size_t size) {
int rc;
BLOCK_SIGNALS;
BLOCK_CANCELATION;
rc = __munmap(addr, size, false);
ALLOW_CANCELATION;
ALLOW_SIGNALS;
STRACE("munmap(%p, %'zu) → %d% m", addr, size, rc);
return rc;
}

View file

@ -30,6 +30,7 @@
#include "libc/runtime/runtime.h"
#include "libc/stdio/sysparam.h"
#include "libc/sysv/consts/auxv.h"
#include "libc/sysv/consts/map.h"
#include "libc/sysv/consts/prot.h"
#include "libc/sysv/errfuns.h"
@ -67,7 +68,7 @@ int __mprotect(char *addr, size_t size, int prot) {
__maps_lock();
bool found = false;
struct Map *map = __maps.maps;
_Atomic(struct Map *) *prev = &__maps.maps;
struct Map **prev = &__maps.maps;
while (map) {
char *map_addr = map->addr;
size_t map_size = map->size;
@ -104,7 +105,7 @@ int __mprotect(char *addr, size_t size, int prot) {
leftmap->flags = map->flags;
map->addr += left;
map->size = right;
if (map->off != -1)
if (!(map->flags & MAP_ANONYMOUS))
map->off += left;
dll_make_first(&__maps.used, &leftmap->elem);
*prev = leftmap;
@ -133,7 +134,7 @@ int __mprotect(char *addr, size_t size, int prot) {
map->addr += left;
map->size = right;
map->prot = prot;
if (map->off != -1)
if (!(map->flags & MAP_ANONYMOUS))
map->off += left;
dll_make_first(&__maps.used, &leftmap->elem);
*prev = leftmap;
@ -165,12 +166,12 @@ int __mprotect(char *addr, size_t size, int prot) {
midlmap->next = map;
midlmap->addr = map_addr + left;
midlmap->size = middle;
midlmap->off = map->off == -1 ? -1 : map->off + left;
midlmap->off = (map->flags & MAP_ANONYMOUS) ? 0 : map->off + left;
midlmap->prot = prot;
midlmap->flags = map->flags;
map->addr += left + middle;
map->size = right;
if (map->off != -1)
if (!(map->flags & MAP_ANONYMOUS))
map->off += left + middle;
dll_make_first(&__maps.used, &leftmap->elem);
dll_make_first(&__maps.used, &midlmap->elem);

View file

@ -35,9 +35,6 @@
*/
int pthread_mutex_init(pthread_mutex_t *mutex,
const pthread_mutexattr_t *attr) {
*mutex = (pthread_mutex_t){
._type = attr ? attr->_type : 0,
._pshared = attr ? attr->_pshared : 0,
};
*mutex = (pthread_mutex_t){._word = attr ? attr->_word : 0};
return 0;
}

View file

@ -23,6 +23,7 @@
#include "libc/intrin/strace.internal.h"
#include "libc/intrin/weaken.h"
#include "libc/runtime/internal.h"
#include "libc/thread/lock.h"
#include "libc/thread/thread.h"
#include "libc/thread/tls.h"
#include "third_party/nsync/mu.h"
@ -65,49 +66,65 @@
* @vforksafe
*/
errno_t pthread_mutex_lock(pthread_mutex_t *mutex) {
int t;
int me;
uint64_t word, lock;
LOCKTRACE("pthread_mutex_lock(%t)", mutex);
if (__vforked) {
if (__vforked)
return 0;
}
if (mutex->_type == PTHREAD_MUTEX_NORMAL && //
mutex->_pshared == PTHREAD_PROCESS_PRIVATE && //
// get current state of lock
word = atomic_load_explicit(&mutex->_word, memory_order_relaxed);
// use fancy nsync mutex if possible
if (MUTEX_TYPE(word) == PTHREAD_MUTEX_NORMAL && //
MUTEX_PSHARED(word) == PTHREAD_PROCESS_PRIVATE && //
_weaken(nsync_mu_lock)) {
_weaken(nsync_mu_lock)((nsync_mu *)mutex);
return 0;
}
if (mutex->_type == PTHREAD_MUTEX_NORMAL) {
while (atomic_exchange_explicit(&mutex->_lock, 1, memory_order_acquire)) {
// implement barebones normal mutexes
if (MUTEX_TYPE(word) == PTHREAD_MUTEX_NORMAL) {
for (;;) {
word = MUTEX_UNLOCK(word);
lock = MUTEX_LOCK(word);
if (atomic_compare_exchange_weak_explicit(&mutex->_word, &word, lock,
memory_order_acquire,
memory_order_relaxed))
return 0;
pthread_pause_np();
}
return 0;
}
t = gettid();
if (mutex->_owner == t) {
if (mutex->_type != PTHREAD_MUTEX_ERRORCHECK) {
if (mutex->_depth < 63) {
++mutex->_depth;
return 0;
// implement recursive mutexes
me = gettid();
for (;;) {
if (MUTEX_OWNER(word) == me) {
if (MUTEX_TYPE(word) != PTHREAD_MUTEX_ERRORCHECK) {
if (MUTEX_DEPTH(word) < MUTEX_DEPTH_MAX) {
if (atomic_compare_exchange_weak_explicit(
&mutex->_word, &word, MUTEX_INC_DEPTH(word),
memory_order_relaxed, memory_order_relaxed))
return 0;
continue;
} else {
return EAGAIN;
}
} else {
return EAGAIN;
return EDEADLK;
}
} else {
return EDEADLK;
}
}
while (atomic_exchange_explicit(&mutex->_lock, 1, memory_order_acquire)) {
word = MUTEX_UNLOCK(word);
lock = MUTEX_LOCK(word);
lock = MUTEX_SET_OWNER(lock, me);
if (atomic_compare_exchange_weak_explicit(&mutex->_word, &word, lock,
memory_order_acquire,
memory_order_relaxed)) {
mutex->_pid = __pid;
return 0;
}
pthread_pause_np();
}
mutex->_depth = 0;
mutex->_owner = t;
mutex->_pid = __pid;
return 0;
}

View file

@ -21,6 +21,7 @@
#include "libc/intrin/atomic.h"
#include "libc/intrin/weaken.h"
#include "libc/runtime/internal.h"
#include "libc/thread/lock.h"
#include "libc/thread/thread.h"
#include "third_party/nsync/mu.h"
@ -38,11 +39,15 @@
* current thread already holds this mutex
*/
errno_t pthread_mutex_trylock(pthread_mutex_t *mutex) {
int t;
int me;
uint64_t word, lock;
// get current state of lock
word = atomic_load_explicit(&mutex->_word, memory_order_relaxed);
// delegate to *NSYNC if possible
if (mutex->_type == PTHREAD_MUTEX_NORMAL &&
mutex->_pshared == PTHREAD_PROCESS_PRIVATE && //
if (MUTEX_TYPE(word) == PTHREAD_MUTEX_NORMAL &&
MUTEX_PSHARED(word) == PTHREAD_PROCESS_PRIVATE && //
_weaken(nsync_mu_trylock)) {
if (_weaken(nsync_mu_trylock)((nsync_mu *)mutex)) {
return 0;
@ -52,36 +57,43 @@ errno_t pthread_mutex_trylock(pthread_mutex_t *mutex) {
}
// handle normal mutexes
if (mutex->_type == PTHREAD_MUTEX_NORMAL) {
if (!atomic_exchange_explicit(&mutex->_lock, 1, memory_order_acquire)) {
if (MUTEX_TYPE(word) == PTHREAD_MUTEX_NORMAL) {
word = MUTEX_UNLOCK(word);
lock = MUTEX_LOCK(word);
if (atomic_compare_exchange_weak_explicit(&mutex->_word, &word, lock,
memory_order_acquire,
memory_order_relaxed))
return 0;
} else {
return EBUSY;
}
}
// handle recursive and error check mutexes
t = gettid();
if (mutex->_owner == t) {
if (mutex->_type != PTHREAD_MUTEX_ERRORCHECK) {
if (mutex->_depth < 63) {
++mutex->_depth;
return 0;
} else {
return EAGAIN;
}
} else {
return EDEADLK;
}
}
if (atomic_exchange_explicit(&mutex->_lock, 1, memory_order_acquire)) {
return EBUSY;
}
mutex->_depth = 0;
mutex->_owner = t;
mutex->_pid = __pid;
return 0;
// handle recursive and error check mutexes
me = gettid();
for (;;) {
if (MUTEX_OWNER(word) == me) {
if (MUTEX_TYPE(word) != PTHREAD_MUTEX_ERRORCHECK) {
if (MUTEX_DEPTH(word) < MUTEX_DEPTH_MAX) {
if (atomic_compare_exchange_weak_explicit(
&mutex->_word, &word, MUTEX_INC_DEPTH(word),
memory_order_relaxed, memory_order_relaxed))
return 0;
continue;
} else {
return EAGAIN;
}
} else {
return EDEADLK;
}
}
word = MUTEX_UNLOCK(word);
lock = MUTEX_LOCK(word);
lock = MUTEX_SET_OWNER(lock, me);
if (atomic_compare_exchange_weak_explicit(&mutex->_word, &word, lock,
memory_order_acquire,
memory_order_relaxed)) {
mutex->_pid = __pid;
return 0;
}
return EBUSY;
}
}

View file

@ -22,6 +22,7 @@
#include "libc/intrin/strace.internal.h"
#include "libc/intrin/weaken.h"
#include "libc/runtime/internal.h"
#include "libc/thread/lock.h"
#include "libc/thread/thread.h"
#include "third_party/nsync/mu.h"
@ -35,38 +36,52 @@
* @vforksafe
*/
errno_t pthread_mutex_unlock(pthread_mutex_t *mutex) {
int t;
int me;
uint64_t word, lock;
LOCKTRACE("pthread_mutex_unlock(%t)", mutex);
if (mutex->_type == PTHREAD_MUTEX_NORMAL && //
mutex->_pshared == PTHREAD_PROCESS_PRIVATE && //
// get current state of lock
word = atomic_load_explicit(&mutex->_word, memory_order_relaxed);
// use fancy nsync mutex if possible
if (MUTEX_TYPE(word) == PTHREAD_MUTEX_NORMAL && //
MUTEX_PSHARED(word) == PTHREAD_PROCESS_PRIVATE && //
_weaken(nsync_mu_unlock)) {
_weaken(nsync_mu_unlock)((nsync_mu *)mutex);
return 0;
}
if (mutex->_type == PTHREAD_MUTEX_NORMAL) {
atomic_store_explicit(&mutex->_lock, 0, memory_order_release);
// implement barebones normal mutexes
if (MUTEX_TYPE(word) == PTHREAD_MUTEX_NORMAL) {
lock = MUTEX_UNLOCK(word);
atomic_store_explicit(&mutex->_word, lock, memory_order_release);
return 0;
}
t = gettid();
// implement recursive mutex unlocking
me = gettid();
for (;;) {
// we allow unlocking an initialized lock that wasn't locked, but we
// don't allow unlocking a lock held by another thread, or unlocking
// recursive locks from a forked child, since it should be re-init'd
if (mutex->_owner && (mutex->_owner != t || mutex->_pid != __pid)) {
return EPERM;
// we allow unlocking an initialized lock that wasn't locked, but we
// don't allow unlocking a lock held by another thread, or unlocking
// recursive locks from a forked child, since it should be re-init'd
if (MUTEX_OWNER(word) && (MUTEX_OWNER(word) != me || mutex->_pid != __pid))
return EPERM;
// check if this is a nested lock with signal safety
if (MUTEX_DEPTH(word)) {
if (atomic_compare_exchange_weak_explicit(
&mutex->_word, &word, MUTEX_DEC_DEPTH(word), memory_order_relaxed,
memory_order_relaxed))
return 0;
continue;
}
// actually unlock the mutex
if (atomic_compare_exchange_weak_explicit(
&mutex->_word, &word, MUTEX_UNLOCK(word), memory_order_release,
memory_order_relaxed))
return 0;
}
if (mutex->_depth) {
--mutex->_depth;
return 0;
}
mutex->_owner = 0;
atomic_store_explicit(&mutex->_lock, 0, memory_order_release);
return 0;
}

View file

@ -16,6 +16,7 @@
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
*/
#include "libc/thread/lock.h"
#include "libc/thread/thread.h"
/**
@ -28,6 +29,6 @@
*/
errno_t pthread_mutexattr_getpshared(const pthread_mutexattr_t *attr,
int *pshared) {
*pshared = attr->_pshared;
*pshared = MUTEX_PSHARED(attr->_word);
return 0;
}

View file

@ -16,6 +16,7 @@
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
*/
#include "libc/thread/lock.h"
#include "libc/thread/thread.h"
/**
@ -28,6 +29,6 @@
* @return 0 on success, or error on failure
*/
errno_t pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type) {
*type = attr->_type;
*type = MUTEX_TYPE(attr->_word);
return 0;
}

View file

@ -17,6 +17,7 @@
PERFORMANCE OF THIS SOFTWARE.
*/
#include "libc/errno.h"
#include "libc/thread/lock.h"
#include "libc/thread/thread.h"
/**
@ -32,7 +33,7 @@ errno_t pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared) {
switch (pshared) {
case PTHREAD_PROCESS_SHARED:
case PTHREAD_PROCESS_PRIVATE:
attr->_pshared = pshared;
attr->_word = MUTEX_SET_PSHARED(attr->_word, pshared);
return 0;
default:
return EINVAL;

View file

@ -17,6 +17,7 @@
PERFORMANCE OF THIS SOFTWARE.
*/
#include "libc/errno.h"
#include "libc/thread/lock.h"
#include "libc/thread/thread.h"
/**
@ -35,7 +36,7 @@ errno_t pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type) {
case PTHREAD_MUTEX_NORMAL:
case PTHREAD_MUTEX_RECURSIVE:
case PTHREAD_MUTEX_ERRORCHECK:
attr->_type = type;
attr->_word = MUTEX_SET_TYPE(attr->_word, type);
return 0;
default:
return EINVAL;

25
libc/intrin/randaddr.c Normal file
View file

@ -0,0 +1,25 @@
/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
vi: set et ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi
Copyright 2024 Justine Alexandra Roberts Tunney
Permission to use, copy, modify, and/or distribute this software for
any purpose with or without fee is hereby granted, provided that the
above copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
*/
/**
 * Returns a pseudorandom memory address candidate.
 *
 * Steps a 64-bit linear congruential generator (Knuth's MMIX
 * multiplier and increment) and maps its high 16 bits to an address
 * aligned on a 1<<28 (256mb) boundary, i.e. addresses in the range
 * [0, 1<<44) on 256mb granules.
 *
 * NOTE(review): the static generator state is unsynchronized, so
 * concurrent callers race on `lcg`; presumably only address variety
 * matters to callers — confirm they hold a lock or tolerate this.
 */
void *randaddr(void) {
  // use `unsigned long long` rather than `unsigned long` so the state
  // is 64 bits on every data model (long is 32 bits under LLP64/ILP32,
  // which would leave nothing in bits 48..63 for the shift below)
  static unsigned long long lcg = 1;
  lcg *= 6364136223846793005ULL;  // MMIX multiplier
  lcg += 1442695040888963407ULL;  // MMIX increment
  return (void *)(lcg >> 48 << 28);
}