Make recursive mutexes faster

Recursive mutexes now go as fast as normal mutexes. The tradeoff is that
they are no longer safe to use in signal handlers. However, you can still
have signal-safe mutexes if you set your mutex to be both recursive and
pshared. You can also make functions that use recursive mutexes signal-safe
by using sigprocmask() to ensure no signal handler can cause recursion.
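
As a minimal sketch of those two escape hatches (the attribute and sigmask
calls are standard POSIX; the helper functions and the guarded counter are
made up for illustration):

#include <pthread.h>
#include <signal.h>

static pthread_mutex_t mu;
static int counter;

// option 1: recursive + pshared keeps the signal-safe code path
static void init_signal_safe_mutex(void) {
  pthread_mutexattr_t attr;
  pthread_mutexattr_init(&attr);
  pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
  pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
  pthread_mutex_init(&mu, &attr);
  pthread_mutexattr_destroy(&attr);
}

// option 2: block signals around the critical section, so no handler
// can run on this thread and re-enter the mutex; pthread_sigmask() is
// the strictly portable spelling in threaded code
static void bump_counter(void) {
  sigset_t all, old;
  sigfillset(&all);
  sigprocmask(SIG_BLOCK, &all, &old);
  pthread_mutex_lock(&mu);
  counter++;
  pthread_mutex_unlock(&mu);
  sigprocmask(SIG_SETMASK, &old, 0);
}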

The impact of this change is that, on Windows, many functions which edit
the file descriptor table, e.g. open(), rely on recursive mutexes. If you
develop your app so it uses pread() and pwrite(), then it should go very
fast when performing a heavily multithreaded and contended workload.
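
For instance, here's a hedged sketch (the file path, thread count, and
chunk size are invented) of threads sharing one descriptor via positional
i/o, which touches neither the fd table nor a shared file offset on the
hot path:

#include <fcntl.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define NTHREADS 4
#define CHUNK 65536

static int fd;  // opened once; never edited again by worker threads

static void *reader(void *arg) {
  char buf[CHUNK];
  long i = (long)arg;
  // pread() takes an explicit offset, so threads needn't serialize
  // around lseek()+read() on a shared file position
  ssize_t got = pread(fd, buf, sizeof(buf), i * CHUNK);
  printf("thread %ld read %zd bytes\n", i, got);
  return 0;
}

int main(void) {
  pthread_t th[NTHREADS];
  fd = open("/etc/hosts", O_RDONLY);  // hypothetical input file
  for (long i = 0; i < NTHREADS; ++i)
    pthread_create(&th[i], 0, reader, (void *)i);
  for (long i = 0; i < NTHREADS; ++i)
    pthread_join(th[i], 0);
  close(fd);
}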

For example, when scaling to 40+ cores, *NSYNC mutexes can go as much as
1000x faster (in CPU time) than the naive recursive lock implementation.
Recursive mutexes will now use *NSYNC under the hood whenever possible.
Justine Tunney 2024-09-09 22:07:03 -07:00
parent 58d252f3db
commit 2f48a02b44
37 changed files with 2684 additions and 2209 deletions

@@ -17,6 +17,7 @@
    PERFORMANCE OF THIS SOFTWARE.
 */
 #include "libc/calls/calls.h"
+#include "libc/calls/state.internal.h"
 #include "libc/dce.h"
 #include "libc/errno.h"
 #include "libc/intrin/atomic.h"
@@ -69,6 +70,35 @@ static errno_t pthread_mutex_unlock_recursive(pthread_mutex_t *mutex,
   }
 }
+#if PTHREAD_USE_NSYNC
+static errno_t pthread_mutex_unlock_recursive_nsync(pthread_mutex_t *mutex,
+                                                    uint64_t word) {
+  int me = gettid();
+  for (;;) {
+    // we allow unlocking an initialized lock that wasn't locked, but we
+    // don't allow unlocking a lock held by another thread, or unlocking
+    // recursive locks from a forked child, since it should be re-init'd
+    if (MUTEX_OWNER(word) && (MUTEX_OWNER(word) != me || mutex->_pid != __pid))
+      return EPERM;
+    // check if this is a nested lock with signal safety
+    if (MUTEX_DEPTH(word)) {
+      if (atomic_compare_exchange_strong_explicit(
+              &mutex->_word, &word, MUTEX_DEC_DEPTH(word), memory_order_relaxed,
+              memory_order_relaxed))
+        return 0;
+      continue;
+    }
+    // actually unlock the mutex
+    mutex->_word = MUTEX_UNLOCK(word);
+    _weaken(nsync_mu_unlock)((nsync_mu *)mutex->_nsyncx);
+    return 0;
+  }
+}
+#endif
 /**
  * Releases mutex.
  *
@@ -81,6 +111,11 @@ static errno_t pthread_mutex_unlock_recursive(pthread_mutex_t *mutex,
 errno_t pthread_mutex_unlock(pthread_mutex_t *mutex) {
   uint64_t word;
+  if (__vforked) {
+    LOCKTRACE("skipping pthread_mutex_unlock(%t) due to vfork", mutex);
+    return 0;
+  }
   LOCKTRACE("pthread_mutex_unlock(%t)", mutex);
   // get current state of lock
@@ -111,5 +146,14 @@ errno_t pthread_mutex_unlock(pthread_mutex_t *mutex) {
   }
   // handle recursive and error checking mutexes
+#if PTHREAD_USE_NSYNC
+  if (_weaken(nsync_mu_unlock) &&
+      MUTEX_PSHARED(word) == PTHREAD_PROCESS_PRIVATE) {
+    return pthread_mutex_unlock_recursive_nsync(mutex, word);
+  } else {
+    return pthread_mutex_unlock_recursive(mutex, word);
+  }
+#else
   return pthread_mutex_unlock_recursive(mutex, word);
+#endif
 }
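
To round things out, here's a hedged usage sketch (standard POSIX calls;
the account functions are invented) of the recursive behavior the depth
counter above implements: nested locks increment the depth, and unlocks
decrement it until the final one actually releases the underlying *NSYNC
mutex.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mu;
static int balance;

static void deposit(int n) {
  pthread_mutex_lock(&mu);  // may be a nested acquisition
  balance += n;
  pthread_mutex_unlock(&mu);  // decrements depth if nested
}

static void transfer_bonus(void) {
  pthread_mutex_lock(&mu);  // outer acquisition: owner set, depth 0
  deposit(100);             // re-enters without deadlocking: depth 1
  pthread_mutex_unlock(&mu);  // final unlock actually releases
}

int main(void) {
  pthread_mutexattr_t attr;
  pthread_mutexattr_init(&attr);
  pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
  pthread_mutex_init(&mu, &attr);
  pthread_mutexattr_destroy(&attr);
  transfer_bonus();
  printf("balance = %d\n", balance);
}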