Fix bugs with new memory manager

This fixes a regression in mmap(MAP_FIXED) on Windows caused by a recent
revision. This change also fixes ZipOS so it no longer needs a MAP_FIXED
mapping to open files from the PKZIP store. The memory mapping mutex was
previously implemented incorrectly, which meant that ftrace and strace
could cause crashes. That lock and the other recursive mutexes have been
rewritten so that it should be provable that recursive mutexes in
cosmopolitan are async-signal-safe.
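
To make the signal-safety claim concrete, here is a minimal sketch of
the idea (hypothetical code, not what cosmopolitan ships: the 32/32 bit
layout and the OWNER, DEPTH, MAKE, and demo_unlock names are invented
for illustration). Because the owner tid and the nesting depth live in
one atomic word, every observable state change is a single
compare-and-swap, so a signal handler that interrupts an unlock can
never see a torn owner/depth pair.

// Hypothetical sketch, not the cosmopolitan implementation.
#include <stdatomic.h>
#include <stdint.h>

#define OWNER(w) ((uint32_t)((w) >> 32))  // owner tid in the high bits
#define DEPTH(w) ((uint32_t)(w))          // nesting depth in the low bits
#define MAKE(owner, depth) (((uint64_t)(owner) << 32) | (uint32_t)(depth))

static int demo_unlock(_Atomic(uint64_t) *word, uint32_t me) {
  uint64_t w = atomic_load_explicit(word, memory_order_relaxed);
  for (;;) {
    if (OWNER(w) != me)
      return -1;  // caller doesn't hold the lock (cf. EPERM in the diff below)
    uint64_t next = DEPTH(w) > 1 ? MAKE(me, DEPTH(w) - 1) : 0;
    if (atomic_compare_exchange_weak_explicit(
            word, &w, next, memory_order_release, memory_order_relaxed))
      return 0;  // one atomic transition; never a half-updated owner/depth
  }
}

By contrast, the old code in the diff below cleared _owner, _depth, and
_lock with separate stores, which is the window the single-word design
removes.
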
Justine Tunney 2024-06-29 05:10:15 -07:00
parent 6de12c1032
commit 464858dbb4
34 changed files with 353 additions and 313 deletions

@@ -22,6 +22,7 @@
 #include "libc/intrin/strace.internal.h"
 #include "libc/intrin/weaken.h"
 #include "libc/runtime/internal.h"
+#include "libc/thread/lock.h"
 #include "libc/thread/thread.h"
 #include "third_party/nsync/mu.h"
@@ -35,38 +36,52 @@
  * @vforksafe
  */
 errno_t pthread_mutex_unlock(pthread_mutex_t *mutex) {
-  int t;
+  int me;
+  uint64_t word, lock;
   LOCKTRACE("pthread_mutex_unlock(%t)", mutex);
-  if (mutex->_type == PTHREAD_MUTEX_NORMAL &&        //
-      mutex->_pshared == PTHREAD_PROCESS_PRIVATE &&  //
+  // get current state of lock
+  word = atomic_load_explicit(&mutex->_word, memory_order_relaxed);
+  // use fancy nsync mutex if possible
+  if (MUTEX_TYPE(word) == PTHREAD_MUTEX_NORMAL &&        //
+      MUTEX_PSHARED(word) == PTHREAD_PROCESS_PRIVATE &&  //
       _weaken(nsync_mu_unlock)) {
     _weaken(nsync_mu_unlock)((nsync_mu *)mutex);
     return 0;
   }
-  if (mutex->_type == PTHREAD_MUTEX_NORMAL) {
-    atomic_store_explicit(&mutex->_lock, 0, memory_order_release);
+  // implement barebones normal mutexes
+  if (MUTEX_TYPE(word) == PTHREAD_MUTEX_NORMAL) {
+    lock = MUTEX_UNLOCK(word);
+    atomic_store_explicit(&mutex->_word, lock, memory_order_release);
     return 0;
   }
-  t = gettid();
-  // we allow unlocking an initialized lock that wasn't locked, but we
-  // don't allow unlocking a lock held by another thread, or unlocking
-  // recursive locks from a forked child, since it should be re-init'd
-  if (mutex->_owner && (mutex->_owner != t || mutex->_pid != __pid)) {
-    return EPERM;
+  // implement recursive mutex unlocking
+  me = gettid();
+  for (;;) {
+    // we allow unlocking an initialized lock that wasn't locked, but we
+    // don't allow unlocking a lock held by another thread, or unlocking
+    // recursive locks from a forked child, since it should be re-init'd
+    if (MUTEX_OWNER(word) && (MUTEX_OWNER(word) != me || mutex->_pid != __pid))
+      return EPERM;
+    // check if this is a nested lock with signal safety
+    if (MUTEX_DEPTH(word)) {
+      if (atomic_compare_exchange_weak_explicit(
+              &mutex->_word, &word, MUTEX_DEC_DEPTH(word), memory_order_relaxed,
+              memory_order_relaxed))
+        return 0;
+      continue;
+    }
+    // actually unlock the mutex
+    if (atomic_compare_exchange_weak_explicit(
+            &mutex->_word, &word, MUTEX_UNLOCK(word), memory_order_release,
+            memory_order_relaxed))
+      return 0;
   }
-  if (mutex->_depth) {
-    --mutex->_depth;
-    return 0;
-  }
-  mutex->_owner = 0;
-  atomic_store_explicit(&mutex->_lock, 0, memory_order_release);
-  return 0;
 }
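
As a usage note, the following sketch (portable POSIX API only, names
invented) exercises the two recursive paths handled above: a nested
unlock that only decrements the depth, and an unlock attempted by a
thread that does not own the mutex, which the code above reports as
EPERM.

#include <assert.h>
#include <errno.h>
#include <pthread.h>

static pthread_mutex_t mu;

static void *thief(void *arg) {
  (void)arg;
  // not the owner, so unlock must be refused with EPERM
  assert(pthread_mutex_unlock(&mu) == EPERM);
  return 0;
}

int main(void) {
  pthread_t th;
  pthread_mutexattr_t attr;
  pthread_mutexattr_init(&attr);
  pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
  pthread_mutex_init(&mu, &attr);
  pthread_mutexattr_destroy(&attr);
  pthread_mutex_lock(&mu);   // depth 1
  pthread_mutex_lock(&mu);   // nested: depth 2
  pthread_create(&th, 0, thief, 0);
  pthread_join(th, 0);
  assert(pthread_mutex_unlock(&mu) == 0);  // only drops the depth
  assert(pthread_mutex_unlock(&mu) == 0);  // actually releases the lock
  pthread_mutex_destroy(&mu);
  return 0;
}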