Make recursive mutexes slightly faster
commit c7e3d9f7ff
parent 9ba5b227d9
6 changed files with 12 additions and 8 deletions
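The theme of the diff below: the recursive mutex paths used to call gettid() every time they needed the caller's thread id, and gettid() re-validates TLS state and loads the tid with acquire ordering on each call. A thread reading its own tib_tid slot needs no cross-thread synchronization, so the hot paths now inline a relaxed atomic load instead, and gettid() and __maps_unlock() downgrade acquire to relaxed. A minimal sketch of the pattern; the struct layout and __get_tls() prototype here are stand-ins, not the real definitions from cosmopolitan's TLS headers:

    #include <stdatomic.h>

    // stand-in for the thread information block; the real tib layout
    // and the __get_tls() accessor live in libc/thread/tls.h
    struct Tib {
      atomic_int tib_tid;
    };

    extern struct Tib *__get_tls(void);

    // a thread's tid is written before it begins running user code and
    // is read here only by that same thread, so no acquire ordering is
    // needed to observe a valid value; a relaxed load suffices
    static inline int my_tid_sketch(void) {
      return atomic_load_explicit(&__get_tls()->tib_tid, memory_order_relaxed);
    }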
@@ -18,6 +18,7 @@
 ╚─────────────────────────────────────────────────────────────────────────────*/
 #include "libc/assert.h"
 #include "libc/atomic.h"
 #include "libc/calls/cp.internal.h"
 #include "libc/calls/internal.h"
 #include "libc/calls/sig.internal.h"
 #include "libc/calls/struct/sigset.h"
@@ -232,6 +233,7 @@ static int cosmo_futex_fix_timeout (struct timespec *memory, int clock,
  * @raise EAGAIN if `*w` wasn't `expect`
  * @raise EINTR if a signal handler was called while waiting
  * @raise ECANCELED if calling thread was canceled while waiting
  * @cancelationpoint
  */
 int cosmo_futex_wait (atomic_int *w, int expect, char pshare,
                       int clock, const struct timespec *abstime) {
@@ -240,6 +242,7 @@ int cosmo_futex_wait (atomic_int *w, int expect, char pshare,
   struct PosixThread *pt;
   struct timespec tsmem;
   struct timespec *timeout = 0;
   BEGIN_CANCELATION_POINT;

   cosmo_once (&g_cosmo_futex.once, cosmo_futex_init);
@@ -351,6 +354,7 @@ Finished:
           DescribeTimespec (0, abstime),
           DescribeErrno (rc));

   END_CANCELATION_POINT;
   return rc;
 }
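cosmo_futex_wait() guards its one-time setup with cosmo_once(&g_cosmo_futex.once, cosmo_futex_init), visible in the hunk above. For readers unfamiliar with the pattern, here is a minimal once-init sketch; the state encoding (0 = fresh, 1 = running, 2 = done) and the spin-wait are assumptions of this sketch, not necessarily how the real cosmo_once in libc/cosmo.h is implemented:

    #include <stdatomic.h>

    // run init() exactly once across all threads; callers that lose
    // the race wait until the winner finishes
    static void once_sketch(atomic_uint *once, void init(void)) {
      unsigned expect = 0;
      if (atomic_compare_exchange_strong_explicit(once, &expect, 1,
                                                  memory_order_acquire,
                                                  memory_order_acquire)) {
        init();  // we won the race; run the initializer
        atomic_store_explicit(once, 2, memory_order_release);
      } else {
        while (atomic_load_explicit(once, memory_order_acquire) != 2) {
          // another thread is initializing; wait for it to finish
        }
      }
    }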
@@ -39,7 +39,7 @@
 int gettid(void) {
   int tid;
   if (VERY_LIKELY(__tls_enabled && !__vforked)) {
-    tid = atomic_load_explicit(&__get_tls()->tib_tid, memory_order_acquire);
+    tid = atomic_load_explicit(&__get_tls()->tib_tid, memory_order_relaxed);
     if (VERY_LIKELY(tid > 0))
       return tid;
   }
@@ -192,7 +192,7 @@ ABI void __maps_unlock(void) {
     return;
   if (tib->tib_flags & TIB_FLAG_VFORKED)
     return;
-  me = atomic_load_explicit(&tib->tib_tid, memory_order_acquire);
+  me = atomic_load_explicit(&tib->tib_tid, memory_order_relaxed);
   if (me <= 0)
     return;
   word = atomic_load_explicit(&__maps.lock.word, memory_order_relaxed);
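Both of the hunks above read the calling thread's own tib_tid, which is why the acquire ordering was doing no useful work: there is no companion data being published that the load must observe. For contrast, a sketch of the case where acquire/release actually is required:

    #include <stdatomic.h>

    static int data;
    static atomic_int ready;

    // release/acquire is for consuming data published by ANOTHER thread
    void writer(void) {
      data = 42;
      atomic_store_explicit(&ready, 1, memory_order_release);
    }

    int reader(void) {
      if (atomic_load_explicit(&ready, memory_order_acquire))
        return data;  // the acquire load makes the store to data visible
      return -1;
    }

tib_tid has no such companion data on these paths: each thread reads only its own slot, so a relaxed load is all that is required.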
@@ -17,7 +17,6 @@
 │ PERFORMANCE OF THIS SOFTWARE.                                               │
 ╚─────────────────────────────────────────────────────────────────────────────*/
 #include "libc/calls/blockcancel.internal.h"
 #include "libc/calls/calls.h"
 #include "libc/calls/state.internal.h"
 #include "libc/cosmo.h"
 #include "libc/dce.h"
@@ -70,7 +69,7 @@ static errno_t pthread_mutex_lock_recursive(pthread_mutex_t *mutex,
                                             uint64_t word, bool is_trylock) {
   uint64_t lock;
   int backoff = 0;
-  int me = gettid();
+  int me = atomic_load_explicit(&__get_tls()->tib_tid, memory_order_relaxed);
   bool once = false;
   for (;;) {
     if (MUTEX_OWNER(word) == me) {
@@ -120,7 +119,7 @@ static errno_t pthread_mutex_lock_recursive(pthread_mutex_t *mutex,
 static errno_t pthread_mutex_lock_recursive_nsync(pthread_mutex_t *mutex,
                                                   uint64_t word,
                                                   bool is_trylock) {
-  int me = gettid();
+  int me = atomic_load_explicit(&__get_tls()->tib_tid, memory_order_relaxed);
   for (;;) {
     if (MUTEX_OWNER(word) == me) {
       if (MUTEX_DEPTH(word) < MUTEX_DEPTH_MAX) {
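The relaxed tid load feeds the MUTEX_OWNER(word) == me fast path shown above. A self-contained sketch of that recursion fast path follows; the bit packing here is hypothetical (the real MUTEX_OWNER/MUTEX_DEPTH macros live in libc/thread/lock.h), and the contended slow path is stubbed out:

    #include <errno.h>
    #include <stdatomic.h>
    #include <stdint.h>

    // hypothetical encoding: owner tid in the high 32 bits, recursion
    // depth in the low byte
    #define MUTEX_OWNER(w)  ((int)((w) >> 32))
    #define MUTEX_DEPTH(w)  ((int)((w) & 0xff))
    #define MUTEX_DEPTH_MAX 0xff

    static int lock_recursive_sketch(_Atomic(uint64_t) *lockword, int me) {
      uint64_t word = atomic_load_explicit(lockword, memory_order_relaxed);
      for (;;) {
        if (MUTEX_OWNER(word) == me) {
          // recursive acquire: we already hold the lock, so only this
          // thread can change the owner/depth fields; bump the depth
          if (MUTEX_DEPTH(word) == MUTEX_DEPTH_MAX)
            return EAGAIN;  // recursion limit reached
          if (atomic_compare_exchange_weak_explicit(
                  lockword, &word, word + 1, memory_order_relaxed,
                  memory_order_relaxed))
            return 0;
          // CAS failure (e.g. a waiter bit changed); word was reloaded
        } else {
          return EBUSY;  // stand-in for the contended slow path
        }
      }
    }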
@@ -30,6 +30,7 @@
 #include "libc/thread/lock.h"
 #include "libc/thread/posixthread.internal.h"
 #include "libc/thread/thread.h"
 #include "libc/thread/tls.h"
 #include "third_party/nsync/mu.h"

 // see "take 3" algorithm in "futexes are tricky" by ulrich drepper
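The comment above refers to pthread_mutex_unlock_drepper(), whose structure follows the "take 3" algorithm from Ulrich Drepper's paper "Futexes Are Tricky". One common rendering of that unlock step, with a hypothetical wake primitive standing in for the real futex call:

    #include <stdatomic.h>

    // hypothetical wake primitive standing in for the real futex wake
    extern void futex_wake_one(atomic_int *w);

    // the futex word is 0 = unlocked, 1 = locked, 2 = locked with
    // waiters; the wake syscall happens only when the old value says
    // somebody may actually be sleeping
    static void unlock_drepper_sketch(atomic_int *futex) {
      if (atomic_exchange_explicit(futex, 0, memory_order_release) == 2)
        futex_wake_one(futex);
    }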
@@ -43,7 +44,7 @@ static void pthread_mutex_unlock_drepper(atomic_int *futex, char pshare) {

 static errno_t pthread_mutex_unlock_recursive(pthread_mutex_t *mutex,
                                               uint64_t word) {
-  int me = gettid();
+  int me = atomic_load_explicit(&__get_tls()->tib_tid, memory_order_relaxed);
   for (;;) {

     // we allow unlocking an initialized lock that wasn't locked, but we
@@ -75,7 +76,7 @@ static errno_t pthread_mutex_unlock_recursive(pthread_mutex_t *mutex,
 #if PTHREAD_USE_NSYNC
 static errno_t pthread_mutex_unlock_recursive_nsync(pthread_mutex_t *mutex,
                                                     uint64_t word) {
-  int me = gettid();
+  int me = atomic_load_explicit(&__get_tls()->tib_tid, memory_order_relaxed);
   for (;;) {

     // we allow unlocking an initialized lock that wasn't locked, but we
@@ -81,7 +81,7 @@ errno_t pthread_setcancelstate(int state, int *oldstate) {
     }
     err = 0;
   }
-#if IsModeDbg()
+#if IsModeDbg() && 0
   STRACE("pthread_setcancelstate(%s, [%s]) → %s",
          DescribeCancelState(0, &state), DescribeCancelState(err, oldstate),
          DescribeErrno(err));