Mirror of https://github.com/jart/cosmopolitan.git
Use *NSYNC for POSIX threads locking APIs
Condition variables, barriers, and r/w locks now work very well.
parent 3de35e196c
commit b5cb71ab84
197 changed files with 3734 additions and 3817 deletions
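The primitives the message above calls out are plain POSIX APIs, so the claim is easy to exercise. A minimal sketch (the thread count and the ready flag are illustrative, not part of this commit): workers rendezvous at a pthread barrier, then block on a condition variable until the main thread broadcasts; r/w locks follow the same acquire/release pattern.

#include <pthread.h>

#define NTHREADS 4

static pthread_barrier_t barrier;
static pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static int ready;

static void *worker(void *arg) {
  pthread_barrier_wait(&barrier);   /* all workers line up here */
  pthread_mutex_lock(&mu);
  while (!ready)                    /* guard against spurious wakeups */
    pthread_cond_wait(&cv, &mu);
  pthread_mutex_unlock(&mu);
  return arg;
}

int main(void) {
  pthread_t th[NTHREADS];
  pthread_barrier_init(&barrier, 0, NTHREADS);
  for (int i = 0; i < NTHREADS; ++i)
    pthread_create(&th[i], 0, worker, 0);
  pthread_mutex_lock(&mu);
  ready = 1;
  pthread_cond_broadcast(&cv);      /* release every waiter at once */
  pthread_mutex_unlock(&mu);
  for (int i = 0; i < NTHREADS; ++i)
    pthread_join(th[i], 0);
  pthread_barrier_destroy(&barrier);
  return 0;
}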
third_party/dlmalloc/locks.inc (vendored): 232 changed lines
@@ -1,6 +1,5 @@
// clang-format off
#include "libc/calls/calls.h"
#include "libc/thread/thread.h"
#include "third_party/nsync/mu.h"
#include "libc/thread/tls.h"

/* --------------------------- Lock preliminaries ------------------------ */

@@ -39,232 +38,19 @@
#define DESTROY_LOCK(l) (0)
#define ACQUIRE_MALLOC_GLOBAL_LOCK()
#define RELEASE_MALLOC_GLOBAL_LOCK()

#else
#if USE_LOCKS > 1
/* ----------------------- User-defined locks ------------------------ */
/* Define your own lock implementation here */
/* #define INITIAL_LOCK(lk)  ... */
/* #define DESTROY_LOCK(lk)  ... */
/* #define ACQUIRE_LOCK(lk)  ... */
/* #define RELEASE_LOCK(lk)  ... */
/* #define TRY_LOCK(lk) ... */
/* static MLOCK_T malloc_global_mutex = ... */

#define MLOCK_T pthread_mutex_t
#define ACQUIRE_LOCK(lk) (__threaded && pthread_mutex_lock(lk), 0)
#define RELEASE_LOCK(lk) (__threaded && pthread_mutex_unlock(lk), 0)
#define TRY_LOCK(lk) (__threaded ? !pthread_mutex_trylock(lk) : 1)
#define INITIAL_LOCK(lk) pthread_mutex_init(lk, 0)
#define DESTROY_LOCK(lk) pthread_mutex_destroy(lk)

static MLOCK_T malloc_global_mutex;

#elif USE_SPIN_LOCKS

/* First, define CAS_LOCK and CLEAR_LOCK on ints */
/* Note CAS_LOCK defined to return 0 on success */

#if defined(__GNUC__)&& (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
#define CAS_LOCK(sl)     __sync_lock_test_and_set(sl, 1)
#define CLEAR_LOCK(sl)   __sync_lock_release(sl)

#elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))
/* Custom spin locks for older gcc on x86 */
FORCEINLINE int x86_cas_lock(int *sl) {
  int ret;
  int val = 1;
  int cmp = 0;
  __asm__ __volatile__ ("lock; cmpxchgl %1, %2"
                        : "=a" (ret)
                        : "r" (val), "m" (*(sl)), "0"(cmp)
                        : "memory", "cc");
  return ret;
}

FORCEINLINE void x86_clear_lock(int* sl) {
  assert(*sl != 0);
  int prev = 0;
  int ret;
  __asm__ __volatile__ ("lock; xchgl %0, %1"
                        : "=r" (ret)
                        : "m" (*(sl)), "0"(prev)
                        : "memory");
}

#define CAS_LOCK(sl)     x86_cas_lock(sl)
#define CLEAR_LOCK(sl)   x86_clear_lock(sl)

#else /* Win32 MSC */
#define CAS_LOCK(sl)     interlockedexchange(sl, (LONG)1)
#define CLEAR_LOCK(sl)   interlockedexchange (sl, (LONG)0)

#endif /* ... gcc spins locks ... */

#if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0
/* Plain spin locks use single word (embedded in malloc_states) */
static dontinline int spin_acquire_lock(int *sl) {
  while (*(volatile int *)sl != 0 || CAS_LOCK(sl)) {
    sched_yield();
  }
  return 0;
}

#define MLOCK_T int
#define TRY_LOCK(sl) !CAS_LOCK(sl)
#define RELEASE_LOCK(sl) (__threaded && (CLEAR_LOCK(sl), 0))
#define ACQUIRE_LOCK(sl) (__threaded && CAS_LOCK(sl) ? spin_acquire_lock(sl) : 0)
#define INITIAL_LOCK(sl) (*sl = 0)
#define DESTROY_LOCK(sl) (0)
static MLOCK_T malloc_global_mutex = 0;

#else /* USE_RECURSIVE_LOCKS */
/* types for lock owners */
#ifdef WIN32
#define THREAD_ID_T DWORD
#define CURRENT_THREAD GetCurrentThreadId()
#define EQ_OWNER(X,Y) ((X) == (Y))
#else
/*
  Note: the following assume that pthread_t is a type that can be
  initialized to (casted) zero. If this is not the case, you will need to
  somehow redefine these or not use spin locks.
*/
#define THREAD_ID_T pthread_t
#define CURRENT_THREAD pthread_self()
#define EQ_OWNER(X,Y) pthread_equal(X, Y)
#endif

struct malloc_recursive_lock {
  int sl;
  unsigned int c;
  THREAD_ID_T threadid;
};

#define MLOCK_T struct malloc_recursive_lock
static MLOCK_T malloc_global_mutex = { 0, 0, (THREAD_ID_T)0};

FORCEINLINE void recursive_release_lock(MLOCK_T *lk) {
  assert(lk->sl != 0);
  if (--lk->c == 0) {
    CLEAR_LOCK(&lk->sl);
  }
}

FORCEINLINE int recursive_acquire_lock(MLOCK_T *lk) {
  THREAD_ID_T mythreadid = CURRENT_THREAD;
  for (;;) {
    if (*((volatile int *)(&lk->sl)) == 0) {
      if (!CAS_LOCK(&lk->sl)) {
        lk->threadid = mythreadid;
        lk->c = 1;
        return 0;
      }
    }
    else if (EQ_OWNER(lk->threadid, mythreadid)) {
      ++lk->c;
      return 0;
    }
    sched_yield();
  }
}

FORCEINLINE int recursive_try_lock(MLOCK_T *lk) {
  THREAD_ID_T mythreadid = CURRENT_THREAD;
  if (*((volatile int *)(&lk->sl)) == 0) {
    if (!CAS_LOCK(&lk->sl)) {
      lk->threadid = mythreadid;
      lk->c = 1;
      return 1;
    }
  }
  else if (EQ_OWNER(lk->threadid, mythreadid)) {
    ++lk->c;
    return 1;
  }
  return 0;
}

#define RELEASE_LOCK(lk) recursive_release_lock(lk)
#define TRY_LOCK(lk) recursive_try_lock(lk)
#define ACQUIRE_LOCK(lk) recursive_acquire_lock(lk)
#define INITIAL_LOCK(lk) ((lk)->threadid = (THREAD_ID_T)0, (lk)->sl = 0, (lk)->c = 0)
#define DESTROY_LOCK(lk) (0)
#endif /* USE_RECURSIVE_LOCKS */

#elif defined(WIN32) /* Win32 critical sections */
#define MLOCK_T CRITICAL_SECTION
#define ACQUIRE_LOCK(lk) (EnterCriticalSection(lk), 0)
#define RELEASE_LOCK(lk) LeaveCriticalSection(lk)
#define TRY_LOCK(lk) TryEnterCriticalSection(lk)
#define INITIAL_LOCK(lk) (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000|4000))
#define DESTROY_LOCK(lk) (DeleteCriticalSection(lk), 0)
#define NEED_GLOBAL_LOCK_INIT

static MLOCK_T malloc_global_mutex;
static volatile LONG malloc_global_mutex_status;

/* Use spin loop to initialize global lock */
static void init_malloc_global_mutex() {
  for (;;) {
    long stat = malloc_global_mutex_status;
    if (stat > 0)
      return;
    /* transition to < 0 while initializing, then to > 0) */
    if (stat == 0 &&
        interlockedcompareexchange(&malloc_global_mutex_status, (LONG)-1, (LONG)0) == 0) {
      InitializeCriticalSection(&malloc_global_mutex);
      interlockedexchange(&malloc_global_mutex_status, (LONG)1);
      return;
    }
    SleepEx(0, FALSE);
  }
}

#else /* pthreads-based locks */
#define MLOCK_T pthread_mutex_t
#define ACQUIRE_LOCK(lk) pthread_mutex_lock(lk)
#define RELEASE_LOCK(lk) pthread_mutex_unlock(lk)
#define TRY_LOCK(lk) (!pthread_mutex_trylock(lk))
#define INITIAL_LOCK(lk) pthread_init_lock(lk)
#define DESTROY_LOCK(lk) pthread_mutex_destroy(lk)

#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE)
/* Cope with old-style linux recursive lock initialization by adding */
/* skipped internal declaration from pthread.h */
extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr,
                                              int __kind));
#define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP
#define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y)
#endif /* USE_RECURSIVE_LOCKS ... */

static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER;

static int pthread_init_lock (MLOCK_T *lk) {
  pthread_mutexattr_t attr;
  if (pthread_mutexattr_init(&attr)) return 1;
#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0
  if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1;
#endif
  if (pthread_mutex_init(lk, &attr)) return 1;
  if (pthread_mutexattr_destroy(&attr)) return 1;
  return 0;
}

#endif /* ... lock types ... */

/* Common code for all lock types */
#define USE_LOCK_BIT (2U)

#ifndef ACQUIRE_MALLOC_GLOBAL_LOCK
#define MLOCK_T nsync_mu
#define ACQUIRE_LOCK(lk) (__threaded && (nsync_mu_lock(lk), 0))
#define RELEASE_LOCK(lk) (__threaded && (nsync_mu_unlock(lk), 0))
#define TRY_LOCK(lk) (__threaded ? nsync_mu_trylock(lk) : 1)
#define INITIAL_LOCK(lk) memset(lk, 0, sizeof(*lk))
#define DESTROY_LOCK(lk) memset(lk, -1, sizeof(*lk))
#define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex);
#endif

#ifndef RELEASE_MALLOC_GLOBAL_LOCK
#define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex);
static MLOCK_T malloc_global_mutex;
#endif

#endif /* USE_LOCKS */
#define USE_LOCK_BIT (2U)

struct malloc_chunk {
  size_t prev_foot; /* Size of previous chunk (if free). */
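For context on the new code path above: once ACQUIRE_MALLOC_GLOBAL_LOCK is left undefined, the lock layer reduces to *NSYNC mutex calls on a zero-initialized nsync_mu, guarded by __threaded so single-threaded programs skip locking entirely. A minimal standalone sketch of the same pattern, with an illustrative counter (dlmalloc itself guards its malloc state, and this sketch omits the __threaded fast path):

#include "third_party/nsync/mu.h"

static nsync_mu g_mu;   /* all-zero bytes are a valid unlocked mu, hence INITIAL_LOCK() above */
static long g_count;

void bump(void) {
  nsync_mu_lock(&g_mu);      /* the core of ACQUIRE_LOCK() */
  ++g_count;
  nsync_mu_unlock(&g_mu);    /* the core of RELEASE_LOCK() */
}

int bump_if_uncontended(void) {
  if (!nsync_mu_trylock(&g_mu))  /* TRY_LOCK(): nonzero means acquired */
    return 0;
  ++g_count;
  nsync_mu_unlock(&g_mu);
  return 1;
}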
|