Introduce pthread_rwlock_try{rd,wr}lock

This also changes recursive mutexes to favor spinning on the CPU over yielding to the scheduler.
Justine Tunney 2023-10-31 21:59:05 -07:00
parent a1e1e821cb
commit fadb64a2bf
13 changed files with 122 additions and 29 deletions
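
The new try-lock functions follow the standard POSIX contract: they return 0 on success and EBUSY when the lock cannot be acquired immediately, so callers can back off instead of blocking. A minimal usage sketch (not taken from this diff; the helper names try_read and try_write are illustrative):

// Hedged usage sketch of the standard POSIX rwlock try-lock API.
#include <errno.h>
#include <pthread.h>

static pthread_rwlock_t g_lock = PTHREAD_RWLOCK_INITIALIZER;
static int g_value;

int try_read(int *out) {
  int rc = pthread_rwlock_tryrdlock(&g_lock);
  if (rc == EBUSY) return -1;          // a writer holds the lock; retry later
  if (rc) { errno = rc; return -1; }   // other error (e.g. EINVAL)
  *out = g_value;
  pthread_rwlock_unlock(&g_lock);
  return 0;
}

int try_write(int v) {
  int rc = pthread_rwlock_trywrlock(&g_lock);
  if (rc == EBUSY) return -1;          // readers or a writer hold the lock
  if (rc) { errno = rc; return -1; }
  g_value = v;
  pthread_rwlock_unlock(&g_lock);
  return 0;
}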


@@ -22,6 +22,7 @@
#include "libc/intrin/atomic.h"
#include "libc/intrin/strace.internal.h"
#include "libc/intrin/weaken.h"
#include "libc/nexgen32e/yield.h"
#include "libc/runtime/internal.h"
#include "libc/thread/thread.h"
#include "libc/thread/tls.h"
@@ -64,7 +65,7 @@
* @see pthread_spin_lock()
* @vforksafe
*/
-int pthread_mutex_lock(pthread_mutex_t *mutex) {
+errno_t pthread_mutex_lock(pthread_mutex_t *mutex) {
int t;
LOCKTRACE("pthread_mutex_lock(%t)", mutex);
@@ -82,7 +83,7 @@ int pthread_mutex_lock(pthread_mutex_t *mutex) {
if (mutex->_type == PTHREAD_MUTEX_NORMAL) {
while (atomic_exchange_explicit(&mutex->_lock, 1, memory_order_acquire)) {
-      pthread_yield();
+      spin_yield();
}
return 0;
}
@@ -102,7 +103,7 @@ int pthread_mutex_lock(pthread_mutex_t *mutex) {
}
while (atomic_exchange_explicit(&mutex->_lock, 1, memory_order_acquire)) {
-    pthread_yield();
+    spin_yield();
}
mutex->_depth = 0;
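
The spin_yield() calls replace pthread_yield() in the spin loops above, so a contended mutex now issues a cheap CPU spin-wait hint instead of a full scheduler yield on every failed exchange. A rough sketch of what such a hint typically looks like (illustrative only; this is an assumption, not the actual contents of libc/nexgen32e/yield.h):

// Common pattern for a cpu-relax hint used inside spin loops.
static inline void cpu_relax(void) {
#if defined(__x86_64__) || defined(__i386__)
  __asm__ volatile("pause");           // tell the core we're in a spin-wait loop
#elif defined(__aarch64__)
  __asm__ volatile("yield");           // arm64 spin-wait hint
#else
  __asm__ volatile("" ::: "memory");   // fallback: plain compiler barrier
#endif
}

Compared with calling into the scheduler, this keeps the waiting thread on the CPU and reduces latency when the lock is released quickly, at the cost of burning cycles while it is held.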