Eliminate cyclic locks in runtime

This change introduces a new deadlock detector for Cosmo's POSIX threads
implementation. Error-check mutexes now track a DAG of nested locks and
report EDEADLK whenever a deadlock is theoretically possible. Such cycles
occur rarely, but catching them matters when hardening code for production.
You don't even need to change your mutexes to use the POSIX error-check
mode, because `cosmocc -mdbg` enables error checking on mutexes globally
by default. When a cycle is found, an error message describing the
strongly connected component (with your symbols demangled) is printed and
SIGTRAP is raised, which means you'll also get a backtrace if you're using
ShowCrashReports(). This new error checker is so low-level and so pure
that it's able to verify the relationships of every libc runtime lock,
including the locks upon which the mutex implementation itself depends.
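
As an illustration of what this buys you, here is a minimal sketch, not
taken from this commit, of the kind of lock-order inversion the detector
is described as catching. It assumes, per the description above, that an
error-check mutex returns EDEADLK on the acquisition that would close a
cycle in the nesting graph:

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    int main(void) {
      pthread_mutex_t a, b;
      pthread_mutexattr_t attr;
      pthread_mutexattr_init(&attr);
      pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
      pthread_mutex_init(&a, &attr);
      pthread_mutex_init(&b, &attr);
      pthread_mutexattr_destroy(&attr);

      // nest a → b once, which records that edge in the lock graph
      pthread_mutex_lock(&a);
      pthread_mutex_lock(&b);
      pthread_mutex_unlock(&b);
      pthread_mutex_unlock(&a);

      // nesting b → a would close a cycle; under the scheme described
      // above, this second acquisition is where EDEADLK gets reported,
      // even though no thread is actually blocked right now
      pthread_mutex_lock(&b);
      int rc = pthread_mutex_lock(&a);
      if (rc == EDEADLK) {
        fprintf(stderr, "lock order cycle: %s\n", strerror(rc));
      } else if (!rc) {
        pthread_mutex_unlock(&a);
      }
      pthread_mutex_unlock(&b);
      return 0;
    }

Per the note about `cosmocc -mdbg` above, the same checking should also
kick in for default-type mutexes when the program is built in debug mode,
without touching the mutex attributes at all.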
Justine Tunney 2024-12-16 20:51:27 -08:00
parent 26c051c297
commit af7bd80430
141 changed files with 2094 additions and 1601 deletions


@@ -22,6 +22,8 @@
#include "libc/dce.h"
#include "libc/errno.h"
#include "libc/intrin/atomic.h"
#include "libc/intrin/describeflags.h"
#include "libc/intrin/kprintf.h"
#include "libc/intrin/strace.h"
#include "libc/intrin/weaken.h"
#include "libc/runtime/internal.h"
@@ -61,8 +63,11 @@ static errno_t pthread_mutex_unlock_recursive(pthread_mutex_t *mutex,
// actually unlock the mutex
if (atomic_compare_exchange_weak_explicit(
&mutex->_word, &word, MUTEX_UNLOCK(word), memory_order_release,
memory_order_relaxed))
memory_order_relaxed)) {
if (IsModeDbg())
__deadlock_untrack(mutex);
return 0;
}
}
}
@@ -89,63 +94,85 @@ static errno_t pthread_mutex_unlock_recursive_nsync(pthread_mutex_t *mutex,
// actually unlock the mutex
mutex->_word = MUTEX_UNLOCK(word);
_weaken(nsync_mu_unlock)((nsync_mu *)mutex->_nsyncx);
_weaken(nsync_mu_unlock)((nsync_mu *)mutex->_nsync);
if (IsModeDbg())
__deadlock_untrack(mutex);
return 0;
}
}
#endif
/**
* Releases mutex.
*
* This function does nothing in vfork() children.
*
* @return 0 on success or error number on failure
* @raises EPERM if in error check mode and not owned by caller
* @vforksafe
*/
errno_t pthread_mutex_unlock(pthread_mutex_t *mutex) {
uint64_t word;
static errno_t pthread_mutex_unlock_impl(pthread_mutex_t *mutex) {
uint64_t word = atomic_load_explicit(&mutex->_word, memory_order_relaxed);
if (__vforked) {
LOCKTRACE("skipping pthread_mutex_lock(%t) due to vfork", mutex);
return 0;
// check if mutex isn't held by calling thread
if (MUTEX_TYPE(word) == PTHREAD_MUTEX_ERRORCHECK || IsModeDbg()) {
if (__deadlock_tracked(mutex) == 0) {
if (IsModeDbg() && MUTEX_TYPE(word) != PTHREAD_MUTEX_ERRORCHECK) {
kprintf("error: unlock mutex not owned by calling thread: %t\n", mutex);
DebugBreak();
}
return EPERM;
}
}
LOCKTRACE("pthread_mutex_unlock(%t)", mutex);
// get current state of lock
word = atomic_load_explicit(&mutex->_word, memory_order_relaxed);
// handle recursive mutexes
if (MUTEX_TYPE(word) == PTHREAD_MUTEX_RECURSIVE) {
#if PTHREAD_USE_NSYNC
if (_weaken(nsync_mu_unlock) &&
MUTEX_PSHARED(word) == PTHREAD_PROCESS_PRIVATE) {
return pthread_mutex_unlock_recursive_nsync(mutex, word);
} else {
return pthread_mutex_unlock_recursive(mutex, word);
}
#else
return pthread_mutex_unlock_recursive(mutex, word);
#endif
}
#if PTHREAD_USE_NSYNC
// use superior mutexes if possible
if (MUTEX_TYPE(word) == PTHREAD_MUTEX_NORMAL && //
MUTEX_PSHARED(word) == PTHREAD_PROCESS_PRIVATE && //
if (MUTEX_PSHARED(word) == PTHREAD_PROCESS_PRIVATE && //
_weaken(nsync_mu_unlock)) {
// on apple silicon we should just put our faith in ulock
// otherwise *nsync gets struck down by the eye of sauron
if (!IsXnuSilicon()) {
_weaken(nsync_mu_unlock)((nsync_mu *)mutex);
_weaken(nsync_mu_unlock)((nsync_mu *)mutex->_nsync);
if (MUTEX_TYPE(word) == PTHREAD_MUTEX_ERRORCHECK || IsModeDbg())
__deadlock_untrack(mutex);
return 0;
}
}
#endif
// implement barebones normal mutexes
if (MUTEX_TYPE(word) == PTHREAD_MUTEX_NORMAL) {
pthread_mutex_unlock_drepper(&mutex->_futex, MUTEX_PSHARED(word));
if (MUTEX_TYPE(word) == PTHREAD_MUTEX_ERRORCHECK || IsModeDbg())
__deadlock_untrack(mutex);
return 0;
}
/**
* Releases mutex.
*
* POSIX.1 says it's undefined behavior to unlock a mutex that wasn't
* locked by the calling thread. Therefore, if `mutex` isn't locked, or
* it is locked and the thing that locked it was a different thread or
* process, then you should expect your program to deadlock or crash.
*
* This function does nothing in vfork() children.
*
* @return 0 on success or error number on failure
* @raises EPERM if mutex ownership isn't acceptable
* @vforksafe
*/
errno_t pthread_mutex_unlock(pthread_mutex_t *mutex) {
if (__tls_enabled && !__vforked) {
errno_t err = pthread_mutex_unlock_impl(mutex);
LOCKTRACE("pthread_mutex_unlock(%t) → %s", mutex, DescribeErrno(err));
return err;
} else {
LOCKTRACE("skipping pthread_mutex_lock(%t) due to runtime state", mutex);
return 0;
}
// handle recursive and error checking mutexes
#if PTHREAD_USE_NSYNC
if (_weaken(nsync_mu_unlock) &&
MUTEX_PSHARED(word) == PTHREAD_PROCESS_PRIVATE) {
return pthread_mutex_unlock_recursive_nsync(mutex, word);
} else {
return pthread_mutex_unlock_recursive(mutex, word);
}
#else
return pthread_mutex_unlock_recursive(mutex, word);
#endif
}
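
And to illustrate the EPERM path in pthread_mutex_unlock_impl, a small
standalone example, again not from the commit: with an error-check mutex,
unlocking a mutex the calling thread doesn't own is expected to fail
cleanly rather than invoke undefined behavior.

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    int main(void) {
      pthread_mutex_t m;
      pthread_mutexattr_t attr;
      pthread_mutexattr_init(&attr);
      pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
      pthread_mutex_init(&m, &attr);
      pthread_mutexattr_destroy(&attr);

      // this thread never locked m, so the ownership check should
      // reject the unlock with EPERM instead of corrupting the lock
      int rc = pthread_mutex_unlock(&m);
      printf("unlock of unowned mutex -> %d (%s)\n", rc,
             rc == EPERM ? "EPERM" : "unexpected");

      pthread_mutex_destroy(&m);
      return 0;
    }

In the non-error-check debug case, the hunk above shows the same
condition being reported via kprintf() and DebugBreak() instead.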