Mirror of https://github.com/jart/cosmopolitan.git (synced 2025-06-02 10:42:27 +00:00)
Introduce pthread_rwlock_try{rd,wr}lock
This also changes recursive mutexes to favor spinning on the CPU over yielding to the scheduler.
parent a1e1e821cb
commit fadb64a2bf
13 changed files with 122 additions and 29 deletions
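
The headline change introduces pthread_rwlock_tryrdlock() and pthread_rwlock_trywrlock(). These follow the usual POSIX contract: they never block, returning 0 when the lock is acquired and EBUSY when it is already held in a conflicting mode. A minimal usage sketch against the standard POSIX interface (illustrative only, not code taken from this commit):

// Hedged sketch: standard POSIX usage of the try-lock entry points this
// commit introduces; the functions below are made up for illustration.
#include <errno.h>
#include <pthread.h>

static pthread_rwlock_t rw = PTHREAD_RWLOCK_INITIALIZER;

void poll_shared_state(void) {
  // Readers may share the lock; a writer holding it makes this fail.
  if (pthread_rwlock_tryrdlock(&rw) == 0) {
    // ... read the protected data ...
    pthread_rwlock_unlock(&rw);
  } else {
    // EBUSY: skip this round instead of blocking.
  }
}

void try_update_shared_state(void) {
  int rc = pthread_rwlock_trywrlock(&rw);
  if (rc == 0) {
    // ... modify the protected data ...
    pthread_rwlock_unlock(&rw);
  } else if (rc == EBUSY) {
    // Someone else holds it in read or write mode; do other work instead.
  }
}
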
@@ -119,8 +119,6 @@ o/$(MODE)/libc/intrin/ksockoptnames.o: libc/intrin/ksockoptnames.S
 	@$(COMPILE) -AOBJECTIFY.S $(OBJECTIFY.S) $(OUTPUT_OPTION) -c $<
 o/$(MODE)/libc/intrin/ktcpoptnames.o: libc/intrin/ktcpoptnames.S
 	@$(COMPILE) -AOBJECTIFY.S $(OBJECTIFY.S) $(OUTPUT_OPTION) -c $<
-o/$(MODE)/libc/intrin/sched_yield.o: libc/intrin/sched_yield.S
-	@$(COMPILE) -AOBJECTIFY.S $(OBJECTIFY.S) $(OUTPUT_OPTION) -c $<
 o/$(MODE)/libc/intrin/stackcall.o: libc/intrin/stackcall.S
 	@$(COMPILE) -AOBJECTIFY.S $(OBJECTIFY.S) $(OUTPUT_OPTION) -c $<
@@ -22,6 +22,7 @@
 #include "libc/intrin/atomic.h"
 #include "libc/intrin/strace.internal.h"
 #include "libc/intrin/weaken.h"
+#include "libc/nexgen32e/yield.h"
 #include "libc/runtime/internal.h"
 #include "libc/thread/thread.h"
 #include "libc/thread/tls.h"
@@ -64,7 +65,7 @@
  * @see pthread_spin_lock()
  * @vforksafe
  */
-int pthread_mutex_lock(pthread_mutex_t *mutex) {
+errno_t pthread_mutex_lock(pthread_mutex_t *mutex) {
   int t;

   LOCKTRACE("pthread_mutex_lock(%t)", mutex);
@@ -82,7 +83,7 @@ int pthread_mutex_lock(pthread_mutex_t *mutex) {

   if (mutex->_type == PTHREAD_MUTEX_NORMAL) {
     while (atomic_exchange_explicit(&mutex->_lock, 1, memory_order_acquire)) {
-      pthread_yield();
+      spin_yield();
     }
     return 0;
   }
@@ -102,7 +103,7 @@ int pthread_mutex_lock(pthread_mutex_t *mutex) {
   }

   while (atomic_exchange_explicit(&mutex->_lock, 1, memory_order_acquire)) {
-    pthread_yield();
+    spin_yield();
   }

   mutex->_depth = 0;
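
The hunks above replace the scheduler yield inside the mutex spin loop with spin_yield(). A rough sketch of the tradeoff, assuming spin_yield() amounts to a CPU pause/yield hint (the macro itself is not shown in this diff):

// Hedged illustration only: `busy_lock` and the inline pause are stand-ins,
// not cosmopolitan's actual spin_yield() implementation.
#include <sched.h>
#include <stdatomic.h>

static atomic_int busy_lock;

// Spin entirely in user space, hinting the CPU that this is a wait loop.
static void lock_favoring_cpu(void) {
  while (atomic_exchange_explicit(&busy_lock, 1, memory_order_acquire)) {
#if defined(__x86_64__)
    __asm__ volatile("pause");  // no syscall; cheap, keeps the core
#elif defined(__aarch64__)
    __asm__ volatile("yield");
#else
    ;                           // plain busy wait on other targets
#endif
  }
}

// Spin, but ask the kernel to reschedule on every failed attempt.
static void lock_favoring_scheduler(void) {
  while (atomic_exchange_explicit(&busy_lock, 1, memory_order_acquire)) {
    sched_yield();              // syscall round trip per iteration
  }
}
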
@@ -1,7 +1,7 @@
 /*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
 │vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│
 ╞══════════════════════════════════════════════════════════════════════════════╡
-│ Copyright 2022 Justine Alexandra Roberts Tunney │
+│ Copyright 2023 Justine Alexandra Roberts Tunney │
 │ │
 │ Permission to use, copy, modify, and/or distribute this software for │
 │ any purpose with or without fee is hereby granted, provided that the │
@@ -19,27 +19,29 @@
 #include "libc/calls/calls.h"
 #include "libc/errno.h"
 #include "libc/intrin/atomic.h"
 #include "libc/intrin/kprintf.h"
 #include "libc/intrin/weaken.h"
 #include "libc/runtime/internal.h"
 #include "libc/thread/thread.h"
 #include "libc/thread/tls.h"
 #include "third_party/nsync/mu.h"

 /**
- * Locks mutex if it isn't locked already.
+ * Attempts acquiring lock.
  *
  * Unlike pthread_mutex_lock() this function won't block and instead
  * returns an error immediately if the lock couldn't be acquired.
  *
- * @return 0 on success, or errno on error
- * @raise EBUSY if lock is already held
- * @raise ENOTRECOVERABLE if `mutex` is corrupted
+ * @return 0 if lock was acquired, otherwise an errno
+ * @raise EAGAIN if maximum number of recursive locks is held
+ * @raise EBUSY if lock is currently held in read or write mode
+ * @raise EINVAL if `mutex` doesn't refer to an initialized lock
+ * @raise EDEADLK if `mutex` is `PTHREAD_MUTEX_ERRORCHECK` and the
+ *     current thread already holds this mutex
  */
 errno_t pthread_mutex_trylock(pthread_mutex_t *mutex) {
   int t;

-  if (__tls_enabled && //
-      mutex->_type == PTHREAD_MUTEX_NORMAL && //
+  // delegate to *NSYNC if possible
+  if (mutex->_type == PTHREAD_MUTEX_NORMAL &&
       mutex->_pshared == PTHREAD_PROCESS_PRIVATE && //
       _weaken(nsync_mu_trylock)) {
     if (_weaken(nsync_mu_trylock)((nsync_mu *)mutex)) {
@@ -49,6 +51,7 @@ errno_t pthread_mutex_trylock(pthread_mutex_t *mutex) {
     }
   }

+  // handle normal mutexes
   if (mutex->_type == PTHREAD_MUTEX_NORMAL) {
     if (!atomic_exchange_explicit(&mutex->_lock, 1, memory_order_acquire)) {
       return 0;
@@ -57,6 +60,7 @@ errno_t pthread_mutex_trylock(pthread_mutex_t *mutex) {
     }
   }

+  // handle recursive and error check mutexes
   t = gettid();
   if (mutex->_owner == t) {
     if (mutex->_type != PTHREAD_MUTEX_ERRORCHECK) {
@@ -67,15 +71,17 @@ errno_t pthread_mutex_trylock(pthread_mutex_t *mutex) {
         return EAGAIN;
       }
     } else {
-      return EBUSY;
+      return EDEADLK;
     }
   }

-  if (!atomic_exchange_explicit(&mutex->_lock, 1, memory_order_acquire)) {
-    mutex->_depth = 0;
-    mutex->_owner = t;
-    return 0;
-  } else {
-    return EBUSY;
-  }
+  if (atomic_exchange_explicit(&mutex->_lock, 1, memory_order_acquire)) {
+    return EBUSY;
+  }
+
+  mutex->_depth = 0;
+  mutex->_owner = t;
+  mutex->_pid = __pid;
+
+  return 0;
 }
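
The rewritten documentation and error paths above let callers branch on the return value to tell the failure modes apart. A hedged usage sketch against the standard POSIX interface (the mutex and helper below are illustrative):

// Hedged sketch of calling pthread_mutex_trylock() and acting on the errno
// values documented above; the mutex and work function are made up.
#include <errno.h>
#include <pthread.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

int do_work_if_idle(void) {
  int rc = pthread_mutex_trylock(&m);
  switch (rc) {
    case 0:
      // ... critical section ...
      pthread_mutex_unlock(&m);
      return 0;
    case EBUSY:    // another thread holds the lock; don't wait
      return -1;
    case EDEADLK:  // error-check mutex already owned by this thread
    case EAGAIN:   // recursion limit reached on a recursive mutex
    default:
      return -1;
  }
}
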
@@ -34,7 +34,7 @@
  * @raises EPERM if in error check mode and not owned by caller
  * @vforksafe
  */
-int pthread_mutex_unlock(pthread_mutex_t *mutex) {
+errno_t pthread_mutex_unlock(pthread_mutex_t *mutex) {
   int t;

   LOCKTRACE("pthread_mutex_unlock(%t)", mutex);
@@ -19,6 +19,7 @@
 #include "libc/assert.h"
 #include "libc/dce.h"
 #include "libc/intrin/atomic.h"
+#include "libc/nexgen32e/yield.h"
 #include "libc/thread/posixthread.internal.h"

 intptr_t _pthread_syshand(struct PosixThread *pt) {
@@ -27,6 +28,6 @@ intptr_t _pthread_syshand(struct PosixThread *pt) {
   for (;;) {
     syshand = atomic_load_explicit(&pt->tib->tib_syshand, memory_order_acquire);
     if (syshand) return syshand;
-    pthread_yield();
+    spin_yield();
   }
 }
@@ -17,12 +17,13 @@
 │ PERFORMANCE OF THIS SOFTWARE. │
 ╚─────────────────────────────────────────────────────────────────────────────*/
 #include "libc/intrin/atomic.h"
+#include "libc/nexgen32e/yield.h"
 #include "libc/thread/posixthread.internal.h"

 int _pthread_tid(struct PosixThread *pt) {
   int tid = 0;
   while (pt && !(tid = atomic_load_explicit(&pt->ptid, memory_order_acquire))) {
-    pthread_yield();
+    spin_yield();
   }
   return tid;
 }
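
Both _pthread_syshand() and _pthread_tid() spin until another thread has published a value, reading it with acquire ordering. A generic sketch of that publish/wait pattern, with made-up names:

// Hedged sketch of the publish-then-spin pattern used above; `published_id`
// and both functions are invented for illustration.
#include <stdatomic.h>

static atomic_int published_id;

// Writer: store with release so the waiter's acquire load observes the value.
void publish_id(int id) {
  atomic_store_explicit(&published_id, id, memory_order_release);
}

// Reader: spin (in practice with a cpu pause hint) until the value is nonzero.
int wait_for_id(void) {
  int id;
  while (!(id = atomic_load_explicit(&published_id, memory_order_acquire))) {
    // spin_yield() would go here; a plain busy wait keeps the sketch portable
  }
  return id;
}
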
@@ -1,40 +0,0 @@
-/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
-│vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│
-╞══════════════════════════════════════════════════════════════════════════════╡
-│ Copyright 2022 Justine Alexandra Roberts Tunney │
-│ │
-│ Permission to use, copy, modify, and/or distribute this software for │
-│ any purpose with or without fee is hereby granted, provided that the │
-│ above copyright notice and this permission notice appear in all copies. │
-│ │
-│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │
-│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │
-│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │
-│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │
-│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │
-│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │
-│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
-│ PERFORMANCE OF THIS SOFTWARE. │
-╚─────────────────────────────────────────────────────────────────────────────*/
-#include "libc/calls/calls.h"
-#include "libc/dce.h"
-#include "libc/nexgen32e/yield.h"
-#include "libc/runtime/runtime.h"
-#include "libc/runtime/syslib.internal.h"
-#include "libc/thread/thread.h"
-
-/**
- * Yields current thread's remaining timeslice to operating system.
- *
- * @return 0 on success, or error number on failure
- */
-int pthread_yield(void) {
-  if (IsXnuSilicon()) {
-    __syslib->__pthread_yield_np();
-  } else if (IsOpenbsd()) {
-    spin_yield();  // sched_yield() is punishingly slow on OpenBSD
-  } else {
-    sched_yield();
-  }
-  return 0;
-}
@@ -1,112 +0,0 @@
-/*-*- mode:unix-assembly; indent-tabs-mode:t; tab-width:8; coding:utf-8 -*-│
-│vi: set et ft=asm ts=8 tw=8 fenc=utf-8 :vi│
-╞══════════════════════════════════════════════════════════════════════════════╡
-│ Copyright 2022 Justine Alexandra Roberts Tunney │
-│ │
-│ Permission to use, copy, modify, and/or distribute this software for │
-│ any purpose with or without fee is hereby granted, provided that the │
-│ above copyright notice and this permission notice appear in all copies. │
-│ │
-│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │
-│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │
-│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │
-│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │
-│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │
-│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │
-│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
-│ PERFORMANCE OF THIS SOFTWARE. │
-╚─────────────────────────────────────────────────────────────────────────────*/
-#include "libc/dce.h"
-#include "libc/sysv/consts/nr.h"
-#include "libc/macros.internal.h"
-
-// Relinquishes scheduled quantum.
-//
-// @return 0 on success, or -1 w/ errno
-	.ftrace1
-sched_yield:
-	.ftrace2
-#ifdef __x86_64__
-	push	%rbp
-	mov	%rsp,%rbp
-	xor	%eax,%eax
-	mov	__hostos(%rip),%dl
-
-#if SupportsMetal()
-	testb	$_HOSTMETAL,%dl
-	jnz	9f
-#endif
-
-#if SupportsWindows()
-// Windows Support
-//
-// A value of zero, together with the bAlertable parameter set to
-// FALSE, causes the thread to relinquish the remainder of its time
-// slice to any other thread that is ready to run, if there are no
-// pending user APCs on the calling thread. If there are no other
-// threads ready to run and no user APCs are queued, the function
-// returns immediately, and the thread continues execution.
-// ──Quoth MSDN
-	testb	$_HOSTWINDOWS,%dl
-	jz	1f
-	xor	%ecx,%ecx
-	xor	%edx,%edx
-	ntcall	__imp_SleepEx
-	xor	%eax,%eax
-	jmp	9f
-1:
-#endif
-
-#if SupportsSystemv()
-// On XNU we polyfill sched_yield() using sleep() which'll
-// be polyfilled using select() with a zero timeout, which
-// means to wait zero microseconds and then returns a zero
-// and this hopefully will give other threads a chance too
-// XNU has a special version we use called select_nocancel
-//
-// "If the readfds, writefds, and errorfds arguments are
-// all null pointers and the timeout argument is not a
-// null pointer, the pselect() or select() function shall
-// block for the time specified, or until interrupted by
-// a signal." ──Quoth IEEE 1003.1-2017 §functions/select
-//
-// On other platforms, sched_yield() takes no arguments.
-	push	$0		// timeout.tv_usec
-	push	$0		// timeout.tv_sec
-	xor	%edi,%edi	// nfds
-	xor	%esi,%esi	// readfds
-	xor	%edx,%edx	// writefds
-	xor	%r10d,%r10d	// exceptfds
-	mov	%rsp,%r8	// timeout
-	mov	__NR_sched_yield,%eax	// ordinal
-	clc			// linux
-	syscall
-// It should not be possible for this to fail so we don't
-// bother going through the errno ritual. If this somehow
-// fails a positive or negative errno might get returned.
-#endif
-
-9:	leave
-	ret
-
-#elif defined(__aarch64__)
-
-	stp	x29,x30,[sp,-32]!
-	mov	x29,sp
-	mov	x3,0
-	mov	x2,0
-	add	x4,sp,16
-	mov	x1,0
-	mov	w0,0
-	stp	xzr,xzr,[sp,16]
-	mov	x8,#0x7c	// sched_yield() for gnu/systemd
-	mov	x16,#0x5d	// select(0,0,0,0,&blah) for xnu
-	svc	0
-	ldp	x29,x30,[sp],32
-	ret
-
-#else
-#error "arch unsupported"
-#endif
-	.endfn	sched_yield,globl
-	.previous
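
The assembly's comments describe polyfilling sched_yield() on XNU with a select() call whose timeout is zero. In C, that fallback amounts to roughly the following (a hedged sketch, not code shipped by this commit):

// Hedged C rendering of the XNU fallback described in the assembly comments:
// select() with no fd sets and a zero timeout returns immediately, giving the
// scheduler a chance to run another thread.
#include <sys/select.h>

int yield_via_select(void) {
  struct timeval zero = {0, 0};
  // nfds = 0 and null fd sets: nothing to poll, just burn the zero timeout.
  return select(0, 0, 0, 0, &zero);
}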