Make locks more reliable

This change switches most of the core locks to be re-entrant, in order
to reduce the chance of deadlocking code that does clever things with
asynchronous signal handlers. It implements these locks in terms of
pthreads, so we're one step closer to having standardized threading
primitives.
Justine Tunney 2022-06-11 01:59:26 -07:00
parent 5ea618f0af
commit c260345e06
35 changed files with 369 additions and 258 deletions
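For context on what "re-entrant" buys here: if an asynchronous signal handler fires while the interrupted thread already holds a lock, and the handler tries to take that same lock, an ordinary mutex or spinlock deadlocks against itself, while a recursive mutex simply bumps an ownership count. Below is a minimal sketch using only standard POSIX pthread calls; it is not code from this commit, and the names (g_lock, handler) are invented for the example.

#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static pthread_mutex_t g_lock;

static void handler(int sig) {
  // With a default (non-recursive) mutex this lock call would deadlock,
  // because the interrupted main thread already owns g_lock.
  pthread_mutex_lock(&g_lock);
  pthread_mutex_unlock(&g_lock);
}

int main(void) {
  pthread_mutexattr_t attr;
  pthread_mutexattr_init(&attr);
  pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
  pthread_mutex_init(&g_lock, &attr);
  signal(SIGINT, handler);
  pthread_mutex_lock(&g_lock);    // enter critical section
  raise(SIGINT);                  // handler re-acquires the held lock
  pthread_mutex_unlock(&g_lock);  // leave critical section
  puts("no deadlock");
  return 0;
}

(Strictly speaking POSIX does not guarantee pthread_mutex_lock is async-signal-safe; the sketch only illustrates the self-deadlock that re-entrancy avoids.)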

View file

@@ -3,6 +3,7 @@
#include "libc/assert.h"
#include "libc/bits/midpoint.h"
#include "libc/dce.h"
#include "libc/intrin/pthread.h"
#include "libc/macros.internal.h"
#include "libc/nt/version.h"
#include "libc/runtime/stack.h"
@@ -30,6 +31,9 @@ COSMOPOLITAN_C_START_
#define _kMem(NORMAL, WIN7) \
(!IsWindows() || IsAtLeastWindows10() ? NORMAL : WIN7)
#define __mmi_lock() pthread_mutex_lock(&_mmi.lock)
#define __mmi_unlock() pthread_mutex_unlock(&_mmi.lock)
struct MemoryInterval {
int x;
int y;
@@ -46,7 +50,7 @@ struct MemoryIntervals {
size_t i, n;
struct MemoryInterval *p;
struct MemoryInterval s[OPEN_MAX];
_Alignas(64) int lock;
pthread_mutex_t lock;
};
extern hidden struct MemoryIntervals _mmi;

View file

@@ -31,6 +31,7 @@
#include "libc/intrin/asancodes.h"
#include "libc/intrin/describeflags.internal.h"
#include "libc/intrin/kprintf.h"
#include "libc/intrin/pthread.h"
#include "libc/intrin/spinlock.h"
#include "libc/limits.h"
#include "libc/log/backtrace.internal.h"
@@ -404,13 +405,11 @@ static noasan inline void *Mmap(void *addr, size_t size, int prot, int flags,
if (p != MAP_FAILED) {
if (needguard) {
if (IsWindows()) _spunlock(&_mmi.lock);
mprotect(p, PAGESIZE, PROT_NONE);
if (IsAsan()) {
__repstosb((void *)(((intptr_t)p >> 3) + 0x7fff8000),
kAsanStackOverflow, PAGESIZE / 8);
}
if (IsWindows()) _spinlock(&_mmi.lock);
}
}
@@ -479,12 +478,12 @@ noasan void *mmap(void *addr, size_t size, int prot, int flags, int fd,
int64_t off) {
void *res;
size_t toto;
_spinlock(&_mmi.lock);
__mmi_lock();
res = Mmap(addr, size, prot, flags, fd, off);
#if SYSDEBUG
toto = __strace > 0 ? GetMemtrackSize(&_mmi) : 0;
#endif
_spunlock(&_mmi.lock);
__mmi_unlock();
STRACE("mmap(%p, %'zu, %s, %s, %d, %'ld) → %p% m (%'zu bytes total)", addr,
size, DescribeProtFlags(prot), DescribeMapFlags(flags), fd, off, res,
toto);

View file

@@ -29,7 +29,7 @@ textwindows int sys_mprotect_nt(void *addr, size_t size, int prot) {
unsigned i;
uint32_t op;
char *a, *b, *x, *y, *p;
_spinlock(&_mmi.lock);
__mmi_lock();
p = addr;
i = FindMemoryInterval(&_mmi, (intptr_t)p >> 16);
if (i == _mmi.i || (!i && p + size <= ADDR(_mmi.p[0].x))) {
@@ -58,6 +58,6 @@ textwindows int sys_mprotect_nt(void *addr, size_t size, int prot) {
}
}
}
_spunlock(&_mmi.lock);
__mmi_unlock();
return rc;
}

View file

@@ -31,7 +31,7 @@
noasan textwindows int sys_msync_nt(char *addr, size_t size, int flags) {
int i, rc = 0;
char *a, *b, *x, *y;
_spinlock(&_mmi.lock);
__mmi_lock();
for (i = FindMemoryInterval(&_mmi, (intptr_t)addr >> 16); i < _mmi.i; ++i) {
x = ADDR(_mmi.p[i].x);
y = x + _mmi.p[i].size;
@@ -48,6 +48,6 @@ noasan textwindows int sys_msync_nt(char *addr, size_t size, int flags) {
break;
}
}
_spunlock(&_mmi.lock);
__mmi_unlock();
return rc;
}

View file

@@ -164,12 +164,12 @@ static noasan int Munmap(char *p, size_t n) {
noasan int munmap(void *p, size_t n) {
int rc;
size_t toto;
_spinlock(&_mmi.lock);
__mmi_lock();
rc = Munmap(p, n);
#if SYSDEBUG
toto = __strace > 0 ? GetMemtrackSize(&_mmi) : 0;
#endif
_spunlock(&_mmi.lock);
__mmi_unlock();
STRACE("munmap(%.12p, %'zu) → %d% m (%'zu bytes total)", p, n, rc, toto);
return rc;
}

View file

@@ -24,7 +24,7 @@
* Prints memory mappings to stderr.
*/
void __print_maps(void) {
_spinlock(&_mmi.lock);
__mmi_lock();
PrintMemoryIntervals(2, &_mmi);
_spunlock(&_mmi.lock);
__mmi_unlock();
}

View file

@@ -1,77 +0,0 @@
/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi
Copyright 2021 Justine Alexandra Roberts Tunney
Permission to use, copy, modify, and/or distribute this software for
any purpose with or without fee is hereby granted, provided that the
above copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
*/
#include "libc/bits/atomic.h"
#include "libc/calls/calls.h"
#include "libc/errno.h"
#include "libc/intrin/lockcmpxchg.h"
#include "libc/intrin/spinlock.h"
#include "libc/runtime/runtime.h"
typedef void *pthread_t;
typedef int pthread_once_t;
typedef int pthread_mutex_t;
int pthread_once(pthread_once_t *once, void init(void)) {
int x;
unsigned tries;
switch ((x = atomic_load(once))) {
case 0:
if (atomic_compare_exchange_strong(once, &x, 1)) {
init();
atomic_store(once, 2);
break;
}
// fallthrough
case 1:
tries = 0;
do {
if (++tries & 7) {
__builtin_ia32_pause();
} else {
sched_yield();
}
} while (atomic_load(once) == 1);
break;
default:
break;
}
return 0;
}
int pthread_mutex_lock(pthread_mutex_t *mutex) {
_spinlock(mutex);
return 0;
}
int pthread_mutex_trylock(pthread_mutex_t *mutex) {
return _trylock(mutex);
}
int pthread_mutex_unlock(pthread_mutex_t *mutex) {
_spunlock(mutex);
return 0;
}
int pthread_cancel(pthread_t thread) {
return ESRCH;
}
void *__tls_get_addr(size_t v[2]) {
return NULL;
}
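For readers curious how a re-entrant lock is typically built, here is a minimal sketch, not the implementation this commit adds: the lock remembers which thread owns it and counts nested acquisitions, so a second lock from the owner (for instance from a signal handler) succeeds instead of waiting on itself. The rmutex_* names are invented for the example, and casting pthread_t to an integer is an assumption (it holds on Linux/glibc but is not guaranteed by POSIX).

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>

typedef struct {
  atomic_uintptr_t owner;  // pthread_t of the holder, 0 when free
  int depth;               // nesting level, only touched by the owner
  pthread_mutex_t inner;   // the underlying non-recursive lock
} rmutex_t;

#define RMUTEX_INIT {0, 0, PTHREAD_MUTEX_INITIALIZER}

void rmutex_lock(rmutex_t *m) {
  uintptr_t self = (uintptr_t)pthread_self();
  if (atomic_load(&m->owner) == self) {
    m->depth++;                    // already ours: just nest deeper
    return;
  }
  pthread_mutex_lock(&m->inner);   // contend like a normal mutex
  atomic_store(&m->owner, self);
  m->depth = 1;
}

void rmutex_unlock(rmutex_t *m) {
  if (--m->depth == 0) {           // only release on the outermost unlock
    atomic_store(&m->owner, 0);
    pthread_mutex_unlock(&m->inner);
  }
}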