2022-05-27 20:25:46 +00:00
|
|
|
/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
|
|
|
|
│ vi: set et ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi │
|
2020-06-15 14:18:57 +00:00
|
|
|
╞══════════════════════════════════════════════════════════════════════════════╡
|
2022-05-27 20:25:46 +00:00
|
|
|
│ Copyright 2022 Justine Alexandra Roberts Tunney │
|
2020-06-15 14:18:57 +00:00
|
|
|
│ │
|
2020-12-28 01:18:44 +00:00
|
|
|
│ Permission to use, copy, modify, and/or distribute this software for │
|
|
|
|
│ any purpose with or without fee is hereby granted, provided that the │
|
|
|
|
│ above copyright notice and this permission notice appear in all copies. │
|
2020-06-15 14:18:57 +00:00
|
|
|
│ │
|
2020-12-28 01:18:44 +00:00
|
|
|
│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │
|
|
|
|
│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │
|
|
|
|
│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │
|
|
|
|
│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │
|
|
|
|
│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │
|
|
|
|
│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │
|
|
|
|
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
|
|
|
|
│ PERFORMANCE OF THIS SOFTWARE. │
|
2020-06-15 14:18:57 +00:00
|
|
|
╚─────────────────────────────────────────────────────────────────────────────*/
|
2024-07-24 16:48:33 +00:00
|
|
|
#include "libc/calls/blockcancel.internal.h"
|
2022-10-16 19:05:08 +00:00
|
|
|
#include "libc/calls/state.internal.h"
|
2024-11-22 19:08:29 +00:00
|
|
|
#include "libc/cosmo.h"
|
2024-07-06 06:13:20 +00:00
|
|
|
#include "libc/dce.h"
|
2022-06-13 02:33:42 +00:00
|
|
|
#include "libc/errno.h"
|
2022-09-05 15:26:03 +00:00
|
|
|
#include "libc/intrin/atomic.h"
|
2024-07-06 06:13:20 +00:00
|
|
|
#include "libc/intrin/describeflags.h"
|
2024-12-17 04:51:27 +00:00
|
|
|
#include "libc/intrin/kprintf.h"
|
2022-10-16 19:05:08 +00:00
|
|
|
#include "libc/intrin/strace.h"
|
2022-09-11 18:02:07 +00:00
|
|
|
#include "libc/intrin/weaken.h"
|
2024-12-17 04:51:27 +00:00
|
|
|
#include "libc/macros.h"
|
2022-10-16 19:05:08 +00:00
|
|
|
#include "libc/runtime/internal.h"
|
2024-06-29 12:10:15 +00:00
|
|
|
#include "libc/thread/lock.h"
|
2024-12-24 05:57:52 +00:00
|
|
|
#include "libc/thread/posixthread.internal.h"
|
2022-09-10 09:56:25 +00:00
|
|
|
#include "libc/thread/thread.h"
|
2024-12-17 04:51:27 +00:00
|
|
|
#include "libc/thread/tls.h"
|
2022-09-11 18:02:07 +00:00
|
|
|
#include "third_party/nsync/mu.h"
|
2022-06-19 08:13:03 +00:00
|
|
|
|
2024-12-17 04:51:27 +00:00
|
|
|
static errno_t pthread_mutex_lock_normal_success(pthread_mutex_t *mutex,
|
|
|
|
uint64_t word) {
|
|
|
|
if (IsModeDbg() || MUTEX_TYPE(word) == PTHREAD_MUTEX_ERRORCHECK) {
|
|
|
|
__deadlock_track(mutex, MUTEX_TYPE(word) == PTHREAD_MUTEX_ERRORCHECK);
|
|
|
|
__deadlock_record(mutex, MUTEX_TYPE(word) == PTHREAD_MUTEX_ERRORCHECK);
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2024-07-24 08:05:00 +00:00
|
|
|
// see "take 3" algorithm in "futexes are tricky" by ulrich drepper
|
|
|
|
// slightly improved to attempt acquiring multiple times before syscall
|
2024-12-17 04:51:27 +00:00
|
|
|
// Acquires a normal mutex using the futex word in mutex->_futex.
// States: 0 = unlocked, 1 = locked uncontended, 2 = locked contended.
// `word` is the mutex attribute word (type / pshared flags); `is_trylock`
// makes the function return EBUSY instead of blocking.
static int pthread_mutex_lock_drepper(pthread_mutex_t *mutex, uint64_t word,
                                      bool is_trylock) {
  int val = 0;
  // fast path: uncontended 0 → 1 transition acquires the lock outright
  if (atomic_compare_exchange_strong_explicit(
          &mutex->_futex, &val, 1, memory_order_acquire, memory_order_acquire))
    return pthread_mutex_lock_normal_success(mutex, word);
  if (is_trylock)
    return EBUSY;
  LOCKTRACE("acquiring pthread_mutex_lock_drepper(%t)...", mutex);
  // mark the lock contended (1 → 2) so the unlocking thread knows it
  // must issue a futex wake; `val` receives the pre-exchange state
  if (val == 1)
    val = atomic_exchange_explicit(&mutex->_futex, 2, memory_order_acquire);
  // the futex wait below must not be a cancelation point
  BLOCK_CANCELATION;
  while (val > 0) {
    // sleep until the word stops being 2; spurious wakeups are fine
    // because we re-exchange and loop
    cosmo_futex_wait(&mutex->_futex, 2, MUTEX_PSHARED(word), 0, 0);
    // exchange (not cmpxchg) keeps the state at 2 while other waiters
    // may still exist; val == 0 means the lock was free and is now ours
    val = atomic_exchange_explicit(&mutex->_futex, 2, memory_order_acquire);
  }
  ALLOW_CANCELATION;
  return pthread_mutex_lock_normal_success(mutex, word);
}
|
2022-09-10 16:14:40 +00:00
|
|
|
|
2024-07-24 08:05:00 +00:00
|
|
|
// Acquires a recursive mutex using the packed state in mutex->_word,
// which encodes owner tid, recursion depth, and lock bit (see
// libc/thread/lock.h macros). Used when nsync isn't available or the
// mutex is process-shared. `is_trylock` returns EBUSY instead of
// spinning. Returns 0, EAGAIN (depth limit), or EBUSY.
static errno_t pthread_mutex_lock_recursive(pthread_mutex_t *mutex,
                                            uint64_t word, bool is_trylock) {
  uint64_t lock;
  int backoff = 0;
  // identify the calling thread by its ptid from thread-local storage
  int me = atomic_load_explicit(&__get_tls()->tib_ptid, memory_order_relaxed);
  bool once = false;  // ensures the "acquiring..." trace prints only once
  for (;;) {
    if (MUTEX_OWNER(word) == me) {
      // re-entrant acquisition: bump the depth counter
      if (MUTEX_DEPTH(word) < MUTEX_DEPTH_MAX) {
        if (atomic_compare_exchange_weak_explicit(
                &mutex->_word, &word, MUTEX_INC_DEPTH(word),
                memory_order_relaxed, memory_order_relaxed))
          return 0;
        continue;  // weak CAS failed (word refreshed); retry
      } else {
        return EAGAIN;  // recursion depth limit exceeded
      }
    }
    // in debug builds consult the deadlock detector before blocking
    if (IsModeDbg())
      __deadlock_check(mutex, 0);
    // attempt unlocked → locked-by-me transition
    word = MUTEX_UNLOCK(word);
    lock = MUTEX_LOCK(word);
    lock = MUTEX_SET_OWNER(lock, me);
    if (atomic_compare_exchange_weak_explicit(&mutex->_word, &word, lock,
                                              memory_order_acquire,
                                              memory_order_relaxed)) {
      if (IsModeDbg()) {
        __deadlock_track(mutex, 0);
        __deadlock_record(mutex, 0);
      }
      // remember the owning process (used to detect fork() inheritance)
      // NOTE(review): presumably consumed by unlock/consistency logic
      // elsewhere in the file — confirm against pthread_mutex_unlock
      mutex->_pid = __pid;
      return 0;
    }
    if (is_trylock)
      return EBUSY;
    if (!once) {
      LOCKTRACE("acquiring pthread_mutex_lock_recursive(%t)...", mutex);
      once = true;
    }
    // spin (with escalating delay) until the lock looks free or owned
    // by this thread, then loop back to retry the CAS above
    for (;;) {
      word = atomic_load_explicit(&mutex->_word, memory_order_relaxed);
      if (MUTEX_OWNER(word) == me)
        break;
      if (word == MUTEX_UNLOCK(word))
        break;  // lock bit clear: worth attempting acquisition again
      backoff = pthread_delay_np(mutex, backoff);
    }
  }
}
|
2024-07-06 06:13:20 +00:00
|
|
|
|
2024-09-10 05:07:03 +00:00
|
|
|
#if PTHREAD_USE_NSYNC
|
|
|
|
static errno_t pthread_mutex_lock_recursive_nsync(pthread_mutex_t *mutex,
|
2024-12-17 04:51:27 +00:00
|
|
|
uint64_t word,
|
|
|
|
bool is_trylock) {
|
2024-12-31 08:55:15 +00:00
|
|
|
int me = atomic_load_explicit(&__get_tls()->tib_ptid, memory_order_relaxed);
|
2024-09-10 05:07:03 +00:00
|
|
|
for (;;) {
|
|
|
|
if (MUTEX_OWNER(word) == me) {
|
2024-12-17 04:51:27 +00:00
|
|
|
if (MUTEX_DEPTH(word) < MUTEX_DEPTH_MAX) {
|
|
|
|
if (atomic_compare_exchange_weak_explicit(
|
|
|
|
&mutex->_word, &word, MUTEX_INC_DEPTH(word),
|
|
|
|
memory_order_relaxed, memory_order_relaxed))
|
|
|
|
return 0;
|
|
|
|
continue;
|
2024-09-10 05:07:03 +00:00
|
|
|
} else {
|
2024-12-17 04:51:27 +00:00
|
|
|
return EAGAIN;
|
2024-09-10 05:07:03 +00:00
|
|
|
}
|
|
|
|
}
|
2024-12-17 04:51:27 +00:00
|
|
|
if (IsModeDbg())
|
|
|
|
__deadlock_check(mutex, 0);
|
|
|
|
if (!is_trylock) {
|
|
|
|
_weaken(nsync_mu_lock)((nsync_mu *)mutex->_nsync);
|
|
|
|
} else {
|
|
|
|
if (!_weaken(nsync_mu_trylock)((nsync_mu *)mutex->_nsync))
|
|
|
|
return EBUSY;
|
|
|
|
}
|
|
|
|
if (IsModeDbg()) {
|
|
|
|
__deadlock_track(mutex, 0);
|
|
|
|
__deadlock_record(mutex, 0);
|
|
|
|
}
|
2024-09-10 05:07:03 +00:00
|
|
|
word = MUTEX_UNLOCK(word);
|
|
|
|
word = MUTEX_LOCK(word);
|
|
|
|
word = MUTEX_SET_OWNER(word, me);
|
|
|
|
mutex->_word = word;
|
|
|
|
mutex->_pid = __pid;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2024-12-17 04:51:27 +00:00
|
|
|
// Dispatches a lock (or trylock when `is_trylock`) request to the
// appropriate implementation based on the mutex type, pshared mode,
// and which backends are linked into the binary.
static errno_t pthread_mutex_lock_impl(pthread_mutex_t *mutex,
                                       bool is_trylock) {
  uint64_t word = atomic_load_explicit(&mutex->_word, memory_order_relaxed);

  // handle recursive mutexes
  if (MUTEX_TYPE(word) == PTHREAD_MUTEX_RECURSIVE) {
#if PTHREAD_USE_NSYNC
    // nsync variant requires the symbol to be linked (it's weak) and
    // only supports process-private mutexes
    if (_weaken(nsync_mu_lock) &&
        MUTEX_PSHARED(word) == PTHREAD_PROCESS_PRIVATE) {
      return pthread_mutex_lock_recursive_nsync(mutex, word, is_trylock);
    } else {
      return pthread_mutex_lock_recursive(mutex, word, is_trylock);
    }
#else
    return pthread_mutex_lock_recursive(mutex, word, is_trylock);
#endif
  }

  // check if normal mutex is already owned by calling thread
  // (trylock skips this: relocking via trylock reports EBUSY instead)
  if (!is_trylock &&
      (MUTEX_TYPE(word) == PTHREAD_MUTEX_ERRORCHECK ||
       (IsModeDbg() && MUTEX_TYPE(word) == PTHREAD_MUTEX_DEFAULT))) {
    if (__deadlock_tracked(mutex) == 1) {
      // in debug builds a default-type self-deadlock is diagnosed
      // loudly before EDEADLK is returned
      if (IsModeDbg() && MUTEX_TYPE(word) != PTHREAD_MUTEX_ERRORCHECK) {
        kprintf("error: attempted to lock non-recursive mutex that's already "
                "held by the calling thread: %t\n",
                mutex);
        DebugBreak();
      }
      return EDEADLK;
    }
  }

  // check if locking will create cycle in lock graph
  if (IsModeDbg() || MUTEX_TYPE(word) == PTHREAD_MUTEX_ERRORCHECK)
    if (__deadlock_check(mutex, MUTEX_TYPE(word) == PTHREAD_MUTEX_ERRORCHECK))
      return EDEADLK;

#if PTHREAD_USE_NSYNC
  // use superior mutexes if possible
  if (MUTEX_PSHARED(word) == PTHREAD_PROCESS_PRIVATE &&
      _weaken(nsync_mu_lock)) {
    // on apple silicon we should just put our faith in ulock
    // otherwise *nsync gets struck down by the eye of sauron
    if (!IsXnuSilicon()) {
      if (!is_trylock) {
        _weaken(nsync_mu_lock)((nsync_mu *)mutex->_nsync);
        return pthread_mutex_lock_normal_success(mutex, word);
      } else {
        if (_weaken(nsync_mu_trylock)((nsync_mu *)mutex->_nsync))
          return pthread_mutex_lock_normal_success(mutex, word);
        return EBUSY;
      }
    }
  }
#endif

  // isc licensed non-recursive mutex implementation
  return pthread_mutex_lock_drepper(mutex, word, is_trylock);
}
|
|
|
|
|
2024-07-06 06:13:20 +00:00
|
|
|
/**
|
2024-12-17 04:51:27 +00:00
|
|
|
* Locks mutex, e.g.
|
2024-07-06 06:13:20 +00:00
|
|
|
*
|
|
|
|
* pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
|
|
|
|
* pthread_mutex_lock(&lock);
|
|
|
|
* // do work...
|
|
|
|
* pthread_mutex_unlock(&lock);
|
|
|
|
*
|
2024-12-17 04:51:27 +00:00
|
|
|
* The long way to do that is:
|
2024-07-06 06:13:20 +00:00
|
|
|
*
|
|
|
|
* pthread_mutex_t lock;
|
|
|
|
* pthread_mutexattr_t attr;
|
|
|
|
* pthread_mutexattr_init(&attr);
|
2024-12-17 04:51:27 +00:00
|
|
|
* pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_DEFAULT);
|
|
|
|
* pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_PRIVATE);
|
2024-07-06 06:13:20 +00:00
|
|
|
* pthread_mutex_init(&lock, &attr);
|
|
|
|
* pthread_mutexattr_destroy(&attr);
|
|
|
|
* pthread_mutex_lock(&lock);
|
|
|
|
* // do work...
|
|
|
|
* pthread_mutex_unlock(&lock);
|
|
|
|
* pthread_mutex_destroy(&lock);
|
|
|
|
*
|
2024-12-17 04:51:27 +00:00
|
|
|
* The following non-POSIX initializers are also provided by cosmo libc:
|
|
|
|
*
|
|
|
|
* - `PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP`
|
|
|
|
* - `PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP`
|
|
|
|
* - `PTHREAD_NORMAL_MUTEX_INITIALIZER_NP`
|
|
|
|
*
|
|
|
|
* Locking a mutex that's already locked by the calling thread will make
|
|
|
|
* the thread hang indefinitely, i.e. it's a deadlock condition. You can
|
|
|
|
* use `PTHREAD_MUTEX_RECURSIVE` to allow recursive locking, which could
|
|
|
|
* result in somewhat less performance. An alternative solution is using
|
|
|
|
* the `PTHREAD_MUTEX_ERRORCHECK` mode, which raises `EDEADLK` for that.
|
|
|
|
*
|
|
|
|
* If a thread locks a mutex while other mutexes are already locked then
|
|
|
|
* you need to observe a consistent global ordering, otherwise deadlocks
|
|
|
|
* might occur. The Cosmopolitan runtime can detect these cycles quickly
|
|
|
|
* so you can fix your code before it becomes an issue. With error check
|
|
|
|
* mode, an EPERM will be returned. If your app is using `cosmocc -mdbg`
|
|
|
|
* then an error message will be printed including the demangled symbols
|
|
|
|
* of the mutexes in the strongly connected component that was detected.
|
|
|
|
* Please note that, even for debug builds mutexes set to explicitly use
|
|
|
|
* the `PTHREAD_MUTEX_ERRORCHECK` mode will return an error code instead
|
|
|
|
* which means the cosmo debug mode only influences undefined behaviors.
|
|
|
|
*
|
|
|
|
* Cosmopolitan only supports error checking on mutexes stored in static
|
|
|
|
* memory, i.e. your `mutex` pointer must point inside the .data or .bss
|
|
|
|
* sections of your executable. When compiling your programs using -mdbg
|
|
|
|
* all your locks will gain error checking automatically. When deadlocks
|
|
|
|
* are detected an error message will be printed and a SIGTRAP signal is
|
|
|
|
* raised, which may be ignored to force EDEADLK and EPERM to be raised.
|
|
|
|
*
|
|
|
|
* Using `cosmocc -mdbg` also enhances `--strace` with information about
|
|
|
|
* mutexes. First, locks and unlocks will be logged. Since the lock line
|
|
|
|
* only appears after the lock is acquired, that might mean you'll never
|
|
|
|
* get an indication about a lock that takes a very long time to acquire
|
|
|
|
* so, whenever a lock can't immediately be acquired, a second line gets
|
|
|
|
* printed *before* the lock is acquired to let you know that the thread
|
|
|
|
* is waiting for a particular lock. If your mutex object resides within
|
|
|
|
* static memory, then its demangled symbol name will be printed. If you
|
|
|
|
* call ShowCrashReports() at the beginning of your main() function then
|
|
|
|
* you'll also see a backtrace when a locking violation occurs. When the
|
|
|
|
* symbols in the violation error messages show up as numbers, and it is
|
|
|
|
* desirable to see demangled symbols without enabling full crash report
|
|
|
|
* functionality the GetSymbolTable() function may be called for effect.
|
2024-07-06 06:13:20 +00:00
|
|
|
*
|
2024-12-17 04:51:27 +00:00
|
|
|
* If you use `PTHREAD_MUTEX_NORMAL`, instead of `PTHREAD_MUTEX_DEFAULT`
|
|
|
|
* then deadlocking is actually defined behavior according to POSIX.1 so
|
|
|
|
* the helpfulness of `cosmocc -mdbg` will be somewhat weakened.
|
|
|
|
*
|
|
|
|
* If your `mutex` object resides in `MAP_SHARED` memory, then undefined
|
|
|
|
* behavior will happen unless you use `PTHREAD_PROCESS_SHARED` mode, if
|
|
|
|
* the lock is used by multiple processes.
|
|
|
|
*
|
|
|
|
* This function does nothing when the process is in vfork() mode.
|
2024-09-03 01:21:03 +00:00
|
|
|
*
|
2024-07-06 06:13:20 +00:00
|
|
|
* @return 0 on success, or error number on failure
|
2024-12-17 04:51:27 +00:00
|
|
|
* @raise EDEADLK if mutex is recursive and locked by another thread
|
|
|
|
* @raise EDEADLK if mutex is non-recursive and locked by current thread
|
|
|
|
* @raise EDEADLK if cycle is detected in global nested lock graph
|
|
|
|
* @raise EAGAIN if maximum recursive locks is exceeded
|
2024-07-06 06:13:20 +00:00
|
|
|
* @see pthread_spin_lock()
|
|
|
|
* @vforksafe
|
|
|
|
*/
|
2024-12-24 05:57:52 +00:00
|
|
|
errno_t _pthread_mutex_lock(pthread_mutex_t *mutex) {
  // locking is a no-op until TLS is up, and inside vfork() children,
  // where touching shared lock state would corrupt the parent
  if (!__tls_enabled || __vforked) {
    LOCKTRACE("skipping pthread_mutex_lock(%t) due to runtime state", mutex);
    return 0;
  }
  errno_t err = pthread_mutex_lock_impl(mutex, false);
  LOCKTRACE("pthread_mutex_lock(%t) → %s", mutex, DescribeErrno(err));
  return err;
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Attempts acquiring lock.
|
|
|
|
*
|
|
|
|
* Unlike pthread_mutex_lock() this function won't block and instead
|
|
|
|
* returns an error immediately if the lock couldn't be acquired.
|
|
|
|
*
|
|
|
|
* @return 0 if lock was acquired, otherwise an errno
|
|
|
|
* @raise EBUSY if lock is currently held by another thread
|
|
|
|
* @raise EAGAIN if maximum number of recursive locks is held
|
|
|
|
* @raise EDEADLK if `mutex` is `PTHREAD_MUTEX_ERRORCHECK` and the
|
|
|
|
* current thread already holds this mutex
|
|
|
|
*/
|
2024-12-24 05:57:52 +00:00
|
|
|
errno_t _pthread_mutex_trylock(pthread_mutex_t *mutex) {
  // locking is a no-op until TLS is up, and inside vfork() children,
  // where touching shared lock state would corrupt the parent
  if (!__tls_enabled || __vforked) {
    LOCKTRACE("skipping pthread_mutex_trylock(%t) due to runtime state", mutex);
    return 0;
  }
  errno_t err = pthread_mutex_lock_impl(mutex, true);
  LOCKTRACE("pthread_mutex_trylock(%t) → %s", mutex, DescribeErrno(err));
  return err;
}
|
2024-12-24 05:57:52 +00:00
|
|
|
|
|
|
|
// expose the canonical _pthread_* implementations under the standard
// POSIX names as weak symbols, so they may be overridden at link time
__weak_reference(_pthread_mutex_lock, pthread_mutex_lock);
__weak_reference(_pthread_mutex_trylock, pthread_mutex_trylock);
|