Mirror of https://github.com/jart/cosmopolitan.git (synced 2025-07-06 11:18:30 +00:00)
Make more improvements to threads and mappings
- NetBSD should now have faster synchronization
- POSIX barriers may now be shared across processes
- An edge case with memory map tracking has been fixed
- Grand Central Dispatch is no longer used on MacOS ARM64
- POSIX mutexes in normal mode now use futexes across processes
parent 2187d6d2dd
commit e398f3887c
20 changed files with 566 additions and 171 deletions
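As context for the last bullet, here is a small usage sketch that is not part of this commit's diff: once a normal-mode mutex has its pshared attribute set to PTHREAD_PROCESS_SHARED and lives in MAP_SHARED memory, it can be contended by parent and child processes, with contended waits going through futexes rather than userspace spinning. The struct name and counter below are hypothetical and only illustrate standard POSIX usage.

// hypothetical demo, not from the commit: a process-shared normal mutex
// placed in MAP_SHARED memory and locked on both sides of a fork()
#include <pthread.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

struct Shared {
  pthread_mutex_t mu;  // PTHREAD_MUTEX_NORMAL + PTHREAD_PROCESS_SHARED
  long counter;        // hypothetical data protected by mu
};

int main(void) {
  // memory visible to both parent and child
  struct Shared *sh = mmap(0, sizeof(*sh), PROT_READ | PROT_WRITE,
                           MAP_SHARED | MAP_ANONYMOUS, -1, 0);
  if (sh == MAP_FAILED)
    return 1;

  // the pshared attribute is what selects the cross-process path
  pthread_mutexattr_t attr;
  pthread_mutexattr_init(&attr);
  pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
  pthread_mutex_init(&sh->mu, &attr);
  pthread_mutexattr_destroy(&attr);

  if (!fork()) {
    for (int i = 0; i < 100000; ++i) {
      pthread_mutex_lock(&sh->mu);
      ++sh->counter;
      pthread_mutex_unlock(&sh->mu);
    }
    _exit(0);
  }
  for (int i = 0; i < 100000; ++i) {
    pthread_mutex_lock(&sh->mu);
    ++sh->counter;
    pthread_mutex_unlock(&sh->mu);
  }
  wait(0);
  printf("%ld\n", sh->counter);  // expect 200000
  return 0;
}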
@@ -26,11 +26,9 @@
 int begin_cancelation_point(void) {
   int state = 0;
-  struct CosmoTib *tib;
-  struct PosixThread *pt;
-  if (__tls_enabled) {
-    tib = __get_tls();
-    if ((pt = (struct PosixThread *)tib->tib_pthread)) {
+  struct PosixThread *pt;
+  if ((pt = _pthread_self())) {
     state = pt->pt_flags & PT_INCANCEL;
     pt->pt_flags |= PT_INCANCEL;
   }

@@ -39,11 +37,9 @@ int begin_cancelation_point(void) {
 }
 
 void end_cancelation_point(int state) {
-  struct CosmoTib *tib;
-  struct PosixThread *pt;
-  if (__tls_enabled) {
-    tib = __get_tls();
-    if ((pt = (struct PosixThread *)tib->tib_pthread)) {
+  struct PosixThread *pt;
+  if ((pt = _pthread_self())) {
     pt->pt_flags &= ~PT_INCANCEL;
     pt->pt_flags |= state;
   }

@@ -6,6 +6,8 @@
 #include "libc/thread/tls2.internal.h"
 COSMOPOLITAN_C_START_
 
+#define MAPS_RETRY ((void *)-1)
+
 #define MAP_TREE_CONTAINER(e) TREE_CONTAINER(struct Map, tree, e)
 
 struct Map {

@@ -120,6 +120,7 @@ static int __muntrack(char *addr, size_t size, int pagesz,
   struct Map *map;
   struct Map *next;
   struct Map *floor;
+StartOver:
   floor = __maps_floor(addr);
   for (map = floor; map && map->addr <= addr + size; map = next) {
     next = __maps_next(map);

@@ -148,6 +149,8 @@ static int __muntrack(char *addr, size_t size, int pagesz,
       ASSERT(left > 0);
       struct Map *leftmap;
       if ((leftmap = __maps_alloc())) {
+        if (leftmap == MAPS_RETRY)
+          goto StartOver;
         map->addr += left;
         map->size = right;
         if (!(map->flags & MAP_ANONYMOUS))

@@ -167,6 +170,8 @@ static int __muntrack(char *addr, size_t size, int pagesz,
       size_t right = map_addr + map_size - addr;
       struct Map *rightmap;
       if ((rightmap = __maps_alloc())) {
+        if (rightmap == MAPS_RETRY)
+          goto StartOver;
         map->size = left;
         __maps.pages -= (right + pagesz - 1) / pagesz;
         rightmap->addr = addr;

@@ -184,8 +189,14 @@ static int __muntrack(char *addr, size_t size, int pagesz,
       size_t right = map_size - middle - left;
       struct Map *leftmap;
       if ((leftmap = __maps_alloc())) {
+        if (leftmap == MAPS_RETRY)
+          goto StartOver;
         struct Map *middlemap;
         if ((middlemap = __maps_alloc())) {
+          if (middlemap == MAPS_RETRY) {
+            __maps_free(leftmap);
+            goto StartOver;
+          }
           leftmap->addr = map_addr;
           leftmap->size = left;
           leftmap->off = map->off;

@@ -204,6 +215,7 @@ static int __muntrack(char *addr, size_t size, int pagesz,
           *deleted = middlemap;
           __maps_check();
         } else {
+          __maps_free(leftmap);
           rc = -1;
         }
       } else {

@@ -304,12 +316,11 @@ struct Map *__maps_alloc(void) {
   map->flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NOFORK;
   map->hand = sys.maphandle;
   __maps_lock();
-  __maps_insert(map++);
+  __maps_insert(map);
   __maps_unlock();
-  map->addr = MAP_FAILED;
-  for (int i = 1; i < gransz / sizeof(struct Map) - 1; ++i)
+  for (int i = 1; i < gransz / sizeof(struct Map); ++i)
     __maps_free(map + i);
-  return map;
+  return MAPS_RETRY;
 }
 
 static int __munmap(char *addr, size_t size) {

@@ -396,21 +407,32 @@ void *__maps_pickaddr(size_t size) {
 static void *__mmap_chunk(void *addr, size_t size, int prot, int flags, int fd,
                           int64_t off, int pagesz, int gransz) {
 
+  // allocate Map object
+  struct Map *map;
+  do {
+    if (!(map = __maps_alloc()))
+      return MAP_FAILED;
+  } while (map == MAPS_RETRY);
+
   // polyfill nuances of fixed mappings
   int sysflags = flags;
   bool noreplace = false;
   bool should_untrack = false;
   if (flags & MAP_FIXED_NOREPLACE) {
-    if (flags & MAP_FIXED)
+    if (flags & MAP_FIXED) {
+      __maps_free(map);
       return (void *)einval();
+    }
     sysflags &= ~MAP_FIXED_NOREPLACE;
     if (IsLinux()) {
       noreplace = true;
       sysflags |= MAP_FIXED_NOREPLACE_linux;
     } else if (IsFreebsd() || IsNetbsd()) {
       sysflags |= MAP_FIXED;
-      if (__maps_overlaps(addr, size, pagesz))
+      if (__maps_overlaps(addr, size, pagesz)) {
+        __maps_free(map);
         return (void *)eexist();
+      }
     } else {
       noreplace = true;
     }

@@ -418,11 +440,6 @@ static void *__mmap_chunk(void *addr, size_t size, int prot, int flags, int fd,
     should_untrack = true;
   }
 
-  // allocate Map object
-  struct Map *map;
-  if (!(map = __maps_alloc()))
-    return MAP_FAILED;
-
   // remove mapping we blew away
   if (IsWindows() && should_untrack)
     __munmap(addr, size);

@@ -572,23 +589,27 @@ static void *__mremap_impl(char *old_addr, size_t old_size, size_t new_size,
     return (void *)einval();
   }
 
+  // allocate object for tracking new mapping
+  struct Map *map;
+  do {
+    if (!(map = __maps_alloc()))
+      return (void *)enomem();
+  } while (map == MAPS_RETRY);
+
   // check old interval is fully contained within one mapping
   struct Map *old_map;
   if (!(old_map = __maps_floor(old_addr)) ||
       old_addr + old_size > old_map->addr + PGUP(old_map->size) ||
-      old_addr < old_map->addr)
+      old_addr < old_map->addr) {
+    __maps_free(map);
     return (void *)efault();
+  }
 
   // save old properties
   int old_off = old_map->off;
   int old_prot = old_map->prot;
   int old_flags = old_map->flags;
 
-  // allocate object for tracking new mapping
-  struct Map *map;
-  if (!(map = __maps_alloc()))
-    return (void *)enomem();
-
   // netbsd mremap fixed returns enoent rather than unmapping old pages
   if (IsNetbsd() && (flags & MREMAP_FIXED))
     if (__munmap(new_addr, new_size)) {

@@ -75,6 +75,7 @@ int __mprotect(char *addr, size_t size, int prot) {
     return edeadlk();
   }
   struct Map *map, *floor;
+StartOver:
   floor = __maps_floor(addr);
   for (map = floor; map && map->addr <= addr + size; map = __maps_next(map)) {
     char *map_addr = map->addr;

@@ -93,10 +94,12 @@ int __mprotect(char *addr, size_t size, int prot) {
       }
     } else if (addr <= map_addr) {
       // change lefthand side of mapping
-      size_t left = PGUP(addr + size - map_addr);
+      size_t left = addr + size - map_addr;
       size_t right = map_size - left;
       struct Map *leftmap;
       if ((leftmap = __maps_alloc())) {
+        if (leftmap == MAPS_RETRY)
+          goto StartOver;
         if (!__mprotect_chunk(map_addr, left, prot, false)) {
           leftmap->addr = map_addr;
           leftmap->size = left;

@@ -127,6 +130,8 @@ int __mprotect(char *addr, size_t size, int prot) {
       size_t right = map_addr + map_size - addr;
       struct Map *leftmap;
       if ((leftmap = __maps_alloc())) {
+        if (leftmap == MAPS_RETRY)
+          goto StartOver;
         if (!__mprotect_chunk(map_addr + left, right, prot, false)) {
           leftmap->addr = map_addr;
           leftmap->size = left;

@@ -159,8 +164,14 @@ int __mprotect(char *addr, size_t size, int prot) {
       size_t right = map_size - middle - left;
       struct Map *leftmap;
       if ((leftmap = __maps_alloc())) {
+        if (leftmap == MAPS_RETRY)
+          goto StartOver;
         struct Map *midlmap;
         if ((midlmap = __maps_alloc())) {
+          if (midlmap == MAPS_RETRY) {
+            __maps_free(leftmap);
+            goto StartOver;
+          }
           if (!__mprotect_chunk(map_addr + left, middle, prot, false)) {
             leftmap->addr = map_addr;
             leftmap->size = left;

@@ -27,41 +27,47 @@
 #include "libc/runtime/internal.h"
 #include "libc/thread/lock.h"
 #include "libc/thread/thread.h"
+#include "third_party/nsync/futex.internal.h"
 #include "third_party/nsync/mu.h"
 
-static errno_t pthread_mutex_lock_impl(pthread_mutex_t *mutex) {
-  int me;
+static void pthread_mutex_lock_naive(pthread_mutex_t *mutex, uint64_t word) {
   int backoff = 0;
-  uint64_t word, lock;
-
-  // get current state of lock
-  word = atomic_load_explicit(&mutex->_word, memory_order_relaxed);
-
-#if PTHREAD_USE_NSYNC
-  // use fancy nsync mutex if possible
-  if (MUTEX_TYPE(word) == PTHREAD_MUTEX_NORMAL &&        //
-      MUTEX_PSHARED(word) == PTHREAD_PROCESS_PRIVATE &&  //
-      _weaken(nsync_mu_lock)) {
-    _weaken(nsync_mu_lock)((nsync_mu *)mutex);
-    return 0;
+  uint64_t lock;
+  for (;;) {
+    word = MUTEX_UNLOCK(word);
+    lock = MUTEX_LOCK(word);
+    if (atomic_compare_exchange_weak_explicit(&mutex->_word, &word, lock,
+                                              memory_order_acquire,
+                                              memory_order_relaxed))
+      return;
+    backoff = pthread_delay_np(mutex, backoff);
   }
-#endif
+}
 
-  // implement barebones normal mutexes
-  if (MUTEX_TYPE(word) == PTHREAD_MUTEX_NORMAL) {
-    for (;;) {
-      word = MUTEX_UNLOCK(word);
-      lock = MUTEX_LOCK(word);
-      if (atomic_compare_exchange_weak_explicit(&mutex->_word, &word, lock,
-                                                memory_order_acquire,
-                                                memory_order_relaxed))
-        return 0;
-      backoff = pthread_delay_np(mutex, backoff);
-    }
+// see "take 3" algorithm in "futexes are tricky" by ulrich drepper
+// slightly improved to attempt acquiring multiple times b4 syscall
+static void pthread_mutex_lock_drepper(atomic_int *futex, char pshare) {
+  int word;
+  for (int i = 0; i < 4; ++i) {
+    word = 0;
+    if (atomic_compare_exchange_strong_explicit(
+            futex, &word, 1, memory_order_acquire, memory_order_acquire))
+      return;
+    pthread_pause_np();
   }
+  if (word == 1)
+    word = atomic_exchange_explicit(futex, 2, memory_order_acquire);
+  while (word > 0) {
+    _weaken(nsync_futex_wait_)(futex, 2, pshare, 0);
+    word = atomic_exchange_explicit(futex, 2, memory_order_acquire);
+  }
+}
 
-  // implement recursive mutexes
-  me = gettid();
+static errno_t pthread_mutex_lock_recursive(pthread_mutex_t *mutex,
+                                            uint64_t word) {
+  uint64_t lock;
+  int backoff = 0;
+  int me = gettid();
   for (;;) {
     if (MUTEX_OWNER(word) == me) {
       if (MUTEX_TYPE(word) != PTHREAD_MUTEX_ERRORCHECK) {

@@ -91,6 +97,36 @@ static errno_t pthread_mutex_lock_impl(pthread_mutex_t *mutex) {
   }
 }
 
+static errno_t pthread_mutex_lock_impl(pthread_mutex_t *mutex) {
+  uint64_t word;
+
+  // get current state of lock
+  word = atomic_load_explicit(&mutex->_word, memory_order_relaxed);
+
+#if PTHREAD_USE_NSYNC
+  // use superior mutexes if possible
+  if (MUTEX_TYPE(word) == PTHREAD_MUTEX_NORMAL &&        //
+      MUTEX_PSHARED(word) == PTHREAD_PROCESS_PRIVATE &&  //
+      _weaken(nsync_mu_lock)) {
+    _weaken(nsync_mu_lock)((nsync_mu *)mutex);
+    return 0;
+  }
+#endif
+
+  // handle normal mutexes
+  if (MUTEX_TYPE(word) == PTHREAD_MUTEX_NORMAL) {
+    if (_weaken(nsync_futex_wait_)) {
+      pthread_mutex_lock_drepper(&mutex->_futex, MUTEX_PSHARED(word));
+    } else {
+      pthread_mutex_lock_naive(mutex, word);
+    }
+    return 0;
+  }
+
+  // handle recursive and error checking mutexes
+  return pthread_mutex_lock_recursive(mutex, word);
+}
+
 /**
  * Locks mutex.
  *

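A note on the Drepper path added above: the new _futex word follows the three-state protocol from the cited paper, where 0 means unlocked, 1 means locked with no waiters, and 2 means locked with possible waiters. pthread_mutex_lock_drepper() spins a few times trying to move 0 to 1, then repeatedly exchanges 2 into the word, sleeping in nsync_futex_wait_() whenever the previous value was nonzero; the matching pthread_mutex_unlock_drepper() (later in this commit) decrements the word and, if it observed 2, stores 0 and wakes one waiter.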
@@ -24,54 +24,33 @@
 #include "libc/runtime/internal.h"
 #include "libc/thread/lock.h"
 #include "libc/thread/thread.h"
+#include "third_party/nsync/futex.internal.h"
 #include "third_party/nsync/mu.h"
 
-/**
- * Attempts acquiring lock.
- *
- * Unlike pthread_mutex_lock() this function won't block and instead
- * returns an error immediately if the lock couldn't be acquired.
- *
- * @return 0 if lock was acquired, otherwise an errno
- * @raise EAGAIN if maximum number of recursive locks is held
- * @raise EBUSY if lock is currently held in read or write mode
- * @raise EINVAL if `mutex` doesn't refer to an initialized lock
- * @raise EDEADLK if `mutex` is `PTHREAD_MUTEX_ERRORCHECK` and the
- *     current thread already holds this mutex
- */
-errno_t pthread_mutex_trylock(pthread_mutex_t *mutex) {
-  int me;
-  uint64_t word, lock;
+static errno_t pthread_mutex_trylock_naive(pthread_mutex_t *mutex,
+                                           uint64_t word) {
+  uint64_t lock;
+  word = MUTEX_UNLOCK(word);
+  lock = MUTEX_LOCK(word);
+  if (atomic_compare_exchange_weak_explicit(&mutex->_word, &word, lock,
+                                            memory_order_acquire,
+                                            memory_order_relaxed))
+    return 0;
+  return EBUSY;
+}
 
-  // get current state of lock
-  word = atomic_load_explicit(&mutex->_word, memory_order_relaxed);
+static errno_t pthread_mutex_trylock_drepper(atomic_int *futex) {
+  int word = 0;
+  if (atomic_compare_exchange_strong_explicit(
+          futex, &word, 1, memory_order_acquire, memory_order_acquire))
+    return 0;
+  return EBUSY;
+}
 
-#if PTHREAD_USE_NSYNC
-  // delegate to *NSYNC if possible
-  if (MUTEX_TYPE(word) == PTHREAD_MUTEX_NORMAL &&
-      MUTEX_PSHARED(word) == PTHREAD_PROCESS_PRIVATE &&  //
-      _weaken(nsync_mu_trylock)) {
-    if (_weaken(nsync_mu_trylock)((nsync_mu *)mutex)) {
-      return 0;
-    } else {
-      return EBUSY;
-    }
-  }
-#endif
-
-  // handle normal mutexes
-  if (MUTEX_TYPE(word) == PTHREAD_MUTEX_NORMAL) {
-    word = MUTEX_UNLOCK(word);
-    lock = MUTEX_LOCK(word);
-    if (atomic_compare_exchange_weak_explicit(&mutex->_word, &word, lock,
-                                              memory_order_acquire,
-                                              memory_order_relaxed))
-      return 0;
-    return EBUSY;
-  }
-
-  // handle recursive and error check mutexes
-  me = gettid();
+static errno_t pthread_mutex_trylock_recursive(pthread_mutex_t *mutex,
+                                               uint64_t word) {
+  uint64_t lock;
+  int me = gettid();
   for (;;) {
     if (MUTEX_OWNER(word) == me) {
       if (MUTEX_TYPE(word) != PTHREAD_MUTEX_ERRORCHECK) {

@@ -100,3 +79,47 @@ errno_t pthread_mutex_trylock(pthread_mutex_t *mutex) {
     return EBUSY;
   }
 }
+
+/**
+ * Attempts acquiring lock.
+ *
+ * Unlike pthread_mutex_lock() this function won't block and instead
+ * returns an error immediately if the lock couldn't be acquired.
+ *
+ * @return 0 if lock was acquired, otherwise an errno
+ * @raise EAGAIN if maximum number of recursive locks is held
+ * @raise EBUSY if lock is currently held in read or write mode
+ * @raise EINVAL if `mutex` doesn't refer to an initialized lock
+ * @raise EDEADLK if `mutex` is `PTHREAD_MUTEX_ERRORCHECK` and the
+ *     current thread already holds this mutex
+ */
+errno_t pthread_mutex_trylock(pthread_mutex_t *mutex) {
+
+  // get current state of lock
+  uint64_t word = atomic_load_explicit(&mutex->_word, memory_order_relaxed);
+
+#if PTHREAD_USE_NSYNC
+  // use superior mutexes if possible
+  if (MUTEX_TYPE(word) == PTHREAD_MUTEX_NORMAL &&
+      MUTEX_PSHARED(word) == PTHREAD_PROCESS_PRIVATE &&  //
+      _weaken(nsync_mu_trylock)) {
+    if (_weaken(nsync_mu_trylock)((nsync_mu *)mutex)) {
+      return 0;
+    } else {
+      return EBUSY;
+    }
+  }
+#endif
+
+  // handle normal mutexes
+  if (MUTEX_TYPE(word) == PTHREAD_MUTEX_NORMAL) {
+    if (_weaken(nsync_futex_wait_)) {
+      return pthread_mutex_trylock_drepper(&mutex->_futex);
+    } else {
+      return pthread_mutex_trylock_naive(mutex, word);
+    }
+  }
+
+  // handle recursive and error checking mutexes
+  return pthread_mutex_trylock_recursive(mutex, word);
+}

@@ -25,45 +25,26 @@
 #include "libc/runtime/internal.h"
 #include "libc/thread/lock.h"
 #include "libc/thread/thread.h"
+#include "third_party/nsync/futex.internal.h"
 #include "third_party/nsync/mu.h"
 
-/**
- * Releases mutex.
- *
- * This function does nothing in vfork() children.
- *
- * @return 0 on success or error number on failure
- * @raises EPERM if in error check mode and not owned by caller
- * @vforksafe
- */
-errno_t pthread_mutex_unlock(pthread_mutex_t *mutex) {
-  int me;
-  uint64_t word, lock;
-
-  LOCKTRACE("pthread_mutex_unlock(%t)", mutex);
-
-  // get current state of lock
-  word = atomic_load_explicit(&mutex->_word, memory_order_relaxed);
-
-#if PTHREAD_USE_NSYNC
-  // use fancy nsync mutex if possible
-  if (MUTEX_TYPE(word) == PTHREAD_MUTEX_NORMAL &&        //
-      MUTEX_PSHARED(word) == PTHREAD_PROCESS_PRIVATE &&  //
-      _weaken(nsync_mu_unlock)) {
-    _weaken(nsync_mu_unlock)((nsync_mu *)mutex);
-    return 0;
+static void pthread_mutex_unlock_naive(pthread_mutex_t *mutex, uint64_t word) {
+  uint64_t lock = MUTEX_UNLOCK(word);
+  atomic_store_explicit(&mutex->_word, lock, memory_order_release);
+}
+
+// see "take 3" algorithm in "futexes are tricky" by ulrich drepper
+static void pthread_mutex_unlock_drepper(atomic_int *futex, char pshare) {
+  int word = atomic_fetch_sub_explicit(futex, 1, memory_order_release);
+  if (word == 2) {
+    atomic_store_explicit(futex, 0, memory_order_release);
+    _weaken(nsync_futex_wake_)(futex, 1, pshare);
   }
-#endif
 }
 
-  // implement barebones normal mutexes
-  if (MUTEX_TYPE(word) == PTHREAD_MUTEX_NORMAL) {
-    lock = MUTEX_UNLOCK(word);
-    atomic_store_explicit(&mutex->_word, lock, memory_order_release);
-    return 0;
-  }
-
-  // implement recursive mutex unlocking
-  me = gettid();
+static errno_t pthread_mutex_unlock_recursive(pthread_mutex_t *mutex,
+                                              uint64_t word) {
+  int me = gettid();
   for (;;) {
 
     // we allow unlocking an initialized lock that wasn't locked, but we

@@ -88,3 +69,44 @@ errno_t pthread_mutex_unlock(pthread_mutex_t *mutex) {
     return 0;
   }
 }
+
+/**
+ * Releases mutex.
+ *
+ * This function does nothing in vfork() children.
+ *
+ * @return 0 on success or error number on failure
+ * @raises EPERM if in error check mode and not owned by caller
+ * @vforksafe
+ */
+errno_t pthread_mutex_unlock(pthread_mutex_t *mutex) {
+  uint64_t word;
+
+  LOCKTRACE("pthread_mutex_unlock(%t)", mutex);
+
+  // get current state of lock
+  word = atomic_load_explicit(&mutex->_word, memory_order_relaxed);
+
+#if PTHREAD_USE_NSYNC
+  // use superior mutexes if possible
+  if (MUTEX_TYPE(word) == PTHREAD_MUTEX_NORMAL &&        //
+      MUTEX_PSHARED(word) == PTHREAD_PROCESS_PRIVATE &&  //
+      _weaken(nsync_mu_unlock)) {
+    _weaken(nsync_mu_unlock)((nsync_mu *)mutex);
+    return 0;
+  }
+#endif
+
+  // implement barebones normal mutexes
+  if (MUTEX_TYPE(word) == PTHREAD_MUTEX_NORMAL) {
+    if (_weaken(nsync_futex_wake_)) {
+      pthread_mutex_unlock_drepper(&mutex->_futex, MUTEX_PSHARED(word));
+    } else {
+      pthread_mutex_unlock_naive(mutex, word);
+    }
+    return 0;
+  }
+
+  // handle recursive and error checking mutexes
+  return pthread_mutex_unlock_recursive(mutex, word);
+}

@@ -32,7 +32,7 @@ void sys_sched_yield(void);
 int pthread_yield_np(void) {
   if (IsXnuSilicon()) {
     __syslib->__pthread_yield_np();
-  } else if (IsOpenbsd() || IsNetbsd()) {
+  } else if (IsOpenbsd()) {
     // sched_yield() is punishingly slow on OpenBSD
     // it's ruinously slow it'll destroy everything
     pthread_pause_np();