/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
│ vi: set et ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi                               │
╞══════════════════════════════════════════════════════════════════════════════╡
│ Copyright 2022 Justine Alexandra Roberts Tunney                              │
│                                                                              │
│ Permission to use, copy, modify, and/or distribute this software for         │
│ any purpose with or without fee is hereby granted, provided that the         │
│ above copyright notice and this permission notice appear in all copies.      │
│                                                                              │
│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL                │
│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED                │
│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE             │
│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL         │
│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR        │
│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER               │
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR             │
│ PERFORMANCE OF THIS SOFTWARE.                                                │
╚─────────────────────────────────────────────────────────────────────────────*/
|
#include "libc/calls/calls.h"
#include "libc/dce.h"
#include "libc/errno.h"
#include "libc/intrin/atomic.h"
#include "libc/intrin/strace.h"
#include "libc/intrin/weaken.h"
#include "libc/runtime/internal.h"
#include "libc/thread/lock.h"
#include "libc/thread/thread.h"
#include "third_party/nsync/futex.internal.h"
#include "third_party/nsync/mu.h"

|
// releases a spin-style normal mutex by clearing its lock word; release
// ordering publishes this thread's critical-section writes to whichever
// thread acquires the lock next
static void pthread_mutex_unlock_spin(atomic_int *lock) {
  atomic_store_explicit(lock, 0, memory_order_release);
}
|
|
|
|
|
|
|
|
// see "take 3" algorithm in "futexes are tricky" by ulrich drepper
|
|
|
|
static void pthread_mutex_unlock_drepper(atomic_int *futex, char pshare) {
|
|
|
|
int word = atomic_fetch_sub_explicit(futex, 1, memory_order_release);
|
|
|
|
if (word == 2) {
|
|
|
|
atomic_store_explicit(futex, 0, memory_order_release);
|
|
|
|
_weaken(nsync_futex_wake_)(futex, 1, pshare);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Unlocks a recursive or error-checking mutex whose entire state (owner
// tid, recursion depth, type/pshared bits) is packed into the 64-bit
// _word, updated with a weak compare-exchange retry loop.
//
// @param mutex is the mutex being unlocked
// @param word is the caller's freshly loaded snapshot of mutex->_word
// @return 0 on success, or EPERM if the lock is held by another thread
//     (or by a pre-fork owner in a forked child)
static errno_t pthread_mutex_unlock_recursive(pthread_mutex_t *mutex,
                                              uint64_t word) {
  int me = gettid();
  for (;;) {

    // we allow unlocking an initialized lock that wasn't locked, but we
    // don't allow unlocking a lock held by another thread, or unlocking
    // recursive locks from a forked child, since it should be re-init'd
    if (MUTEX_OWNER(word) && (MUTEX_OWNER(word) != me || mutex->_pid != __pid))
      return EPERM;

    // check if this is a nested lock with signal safety
    if (MUTEX_DEPTH(word)) {
      // relaxed ordering: only the depth counter changes and this
      // thread still owns the lock afterwards, so nothing needs to be
      // published to other threads yet
      if (atomic_compare_exchange_weak_explicit(
              &mutex->_word, &word, MUTEX_DEC_DEPTH(word), memory_order_relaxed,
              memory_order_relaxed))
        return 0;
      // weak CAS may fail spuriously or because _word changed; the
      // failed CAS reloaded `word`, so just re-run the checks
      continue;
    }

    // actually unlock the mutex
    // release ordering publishes this thread's critical-section writes
    // to the next thread that acquires the lock
    if (atomic_compare_exchange_weak_explicit(
            &mutex->_word, &word, MUTEX_UNLOCK(word), memory_order_release,
            memory_order_relaxed))
      return 0;
  }
}
|
|
|
|
|
2022-06-11 08:59:26 +00:00
|
|
|
/**
|
|
|
|
* Releases mutex.
|
2022-09-05 15:26:03 +00:00
|
|
|
*
|
2022-10-16 19:05:08 +00:00
|
|
|
* This function does nothing in vfork() children.
|
|
|
|
*
|
2022-06-13 02:33:42 +00:00
|
|
|
* @return 0 on success or error number on failure
|
|
|
|
* @raises EPERM if in error check mode and not owned by caller
|
2022-10-16 19:05:08 +00:00
|
|
|
* @vforksafe
|
2022-06-11 08:59:26 +00:00
|
|
|
*/
|
2023-11-01 04:59:05 +00:00
|
|
|
errno_t pthread_mutex_unlock(pthread_mutex_t *mutex) {
|
2024-07-24 08:05:00 +00:00
|
|
|
uint64_t word;
|
2022-09-10 16:14:40 +00:00
|
|
|
|
2022-10-16 19:05:08 +00:00
|
|
|
LOCKTRACE("pthread_mutex_unlock(%t)", mutex);
|
|
|
|
|
2024-06-29 12:10:15 +00:00
|
|
|
// get current state of lock
|
|
|
|
word = atomic_load_explicit(&mutex->_word, memory_order_relaxed);
|
|
|
|
|
2024-07-22 23:33:23 +00:00
|
|
|
#if PTHREAD_USE_NSYNC
|
2024-07-24 08:05:00 +00:00
|
|
|
// use superior mutexes if possible
|
2024-06-29 12:10:15 +00:00
|
|
|
if (MUTEX_TYPE(word) == PTHREAD_MUTEX_NORMAL && //
|
|
|
|
MUTEX_PSHARED(word) == PTHREAD_PROCESS_PRIVATE && //
|
2022-09-13 21:57:38 +00:00
|
|
|
_weaken(nsync_mu_unlock)) {
|
2024-09-03 01:21:03 +00:00
|
|
|
// on apple silicon we should just put our faith in ulock
|
|
|
|
// otherwise *nsync gets struck down by the eye of sauron
|
|
|
|
if (!IsXnuSilicon()) {
|
|
|
|
_weaken(nsync_mu_unlock)((nsync_mu *)mutex);
|
|
|
|
return 0;
|
|
|
|
}
|
2022-09-13 06:10:38 +00:00
|
|
|
}
|
2024-07-22 23:33:23 +00:00
|
|
|
#endif
|
2022-09-13 06:10:38 +00:00
|
|
|
|
2024-06-29 12:10:15 +00:00
|
|
|
// implement barebones normal mutexes
|
|
|
|
if (MUTEX_TYPE(word) == PTHREAD_MUTEX_NORMAL) {
|
2024-07-24 08:05:00 +00:00
|
|
|
if (_weaken(nsync_futex_wake_)) {
|
|
|
|
pthread_mutex_unlock_drepper(&mutex->_futex, MUTEX_PSHARED(word));
|
|
|
|
} else {
|
2024-07-26 07:44:45 +00:00
|
|
|
pthread_mutex_unlock_spin(&mutex->_futex);
|
2024-07-24 08:05:00 +00:00
|
|
|
}
|
2022-09-10 16:14:40 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2024-07-24 08:05:00 +00:00
|
|
|
// handle recursive and error checking mutexes
|
|
|
|
return pthread_mutex_unlock_recursive(mutex, word);
|
2022-05-27 20:25:46 +00:00
|
|
|
}
|