Introduce pthread_rwlock_try{rd,wr}lock

This also changes recursive mutexes to favor spinning on the CPU over yielding to the scheduler.
parent a1e1e821cb
commit fadb64a2bf
13 changed files with 122 additions and 29 deletions
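The headline change adds non-blocking acquisition for read/write locks. A minimal usage sketch, assuming the standard POSIX semantics for these entry points (0 on success, EBUSY when the lock cannot be taken without blocking); the function peek_shared_state is illustrative:

#include <errno.h>
#include <pthread.h>

static pthread_rwlock_t rw = PTHREAD_RWLOCK_INITIALIZER;

int peek_shared_state(void) {
  if (pthread_rwlock_tryrdlock(&rw) == EBUSY)
    return -1;                 /* a writer holds the lock; don't block */
  /* ... read shared state here ... */
  pthread_rwlock_unlock(&rw);
  return 0;
}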
@@ -22,6 +22,7 @@
 #include "libc/intrin/atomic.h"
 #include "libc/intrin/strace.internal.h"
 #include "libc/intrin/weaken.h"
+#include "libc/nexgen32e/yield.h"
 #include "libc/runtime/internal.h"
 #include "libc/thread/thread.h"
 #include "libc/thread/tls.h"
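The new yield.h include supplies spin_yield(), which the hunks below swap in for pthread_yield(). A hedged sketch of what such a CPU-level pause hint typically compiles to; the real macro lives in libc/nexgen32e/yield.h and may differ:

/* Illustrative stand-in for spin_yield(): a hardware pause hint that
 * keeps the thread on-CPU instead of entering the scheduler. */
static inline void spin_yield_sketch(void) {
#if defined(__x86_64__) || defined(__i386__)
  __asm__ volatile("pause");   /* cheapen the spin loop on x86 */
#elif defined(__aarch64__)
  __asm__ volatile("yield");   /* SMT hint on ARM */
#else
  /* no-op elsewhere */
#endif
}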
@@ -64,7 +65,7 @@
  * @see pthread_spin_lock()
  * @vforksafe
  */
-int pthread_mutex_lock(pthread_mutex_t *mutex) {
+errno_t pthread_mutex_lock(pthread_mutex_t *mutex) {
   int t;

   LOCKTRACE("pthread_mutex_lock(%t)", mutex);
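The signature change from int to errno_t is cosmetic at the ABI level (errno_t is an integer alias) but documents the POSIX convention that pthread functions return the error code directly rather than setting errno:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

int main(void) {
  pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
  int rc = pthread_mutex_lock(&m);   /* error code is the return value */
  if (rc) fprintf(stderr, "lock: %s\n", strerror(rc));
  pthread_mutex_unlock(&m);
  return 0;
}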
@@ -82,7 +83,7 @@ int pthread_mutex_lock(pthread_mutex_t *mutex) {

   if (mutex->_type == PTHREAD_MUTEX_NORMAL) {
     while (atomic_exchange_explicit(&mutex->_lock, 1, memory_order_acquire)) {
-      pthread_yield();
+      spin_yield();
     }
     return 0;
   }
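The loop above is a plain test-and-set spinlock: it swaps 1 into the lock word with acquire ordering until the previous value was 0, meaning the caller now owns the lock. A self-contained C11 sketch of the same pattern (names illustrative):

#include <stdatomic.h>

static atomic_int lockword;    /* 0 = free, 1 = held */

static void ts_lock(void) {
  /* keep swapping in 1; when the old value was 0, we own the lock */
  while (atomic_exchange_explicit(&lockword, 1, memory_order_acquire)) {
    /* a spin hint like spin_yield() would go here */
  }
}

static void ts_unlock(void) {
  atomic_store_explicit(&lockword, 0, memory_order_release);
}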
@@ -102,7 +103,7 @@ int pthread_mutex_lock(pthread_mutex_t *mutex) {
   }

   while (atomic_exchange_explicit(&mutex->_lock, 1, memory_order_acquire)) {
-    pthread_yield();
+    spin_yield();
   }

   mutex->_depth = 0;
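For recursive mutexes, _depth counts re-acquisitions by the owning thread, so resetting it to 0 on a fresh acquire lets matched unlocks release the lock word only at the outermost level. A hedged sketch of that bookkeeping; the struct layout and gettid() helper here are assumptions for illustration, not cosmopolitan's actual definitions:

#include <stdatomic.h>

/* Hypothetical recursive mutex; field names echo the diff's _lock/_depth. */
struct RecMutex {
  atomic_int lock;
  int owner;   /* tid of holder, 0 if free */
  int depth;   /* extra acquisitions by the owner */
};

extern int gettid(void);       /* assumed: returns current thread id */

void rec_lock(struct RecMutex *m) {
  int me = gettid();
  if (m->owner == me) {        /* re-entry: just bump the depth */
    m->depth++;
    return;
  }
  while (atomic_exchange_explicit(&m->lock, 1, memory_order_acquire))
    ;                          /* spin (yield hint elided) */
  m->owner = me;
  m->depth = 0;                /* fresh acquire, as in the hunk above */
}

void rec_unlock(struct RecMutex *m) {
  if (m->depth) {              /* inner unlock: only decrement */
    m->depth--;
    return;
  }
  m->owner = 0;                /* outermost unlock releases the word */
  atomic_store_explicit(&m->lock, 0, memory_order_release);
}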