Write some lock contention tests

This commit is contained in:
Justine Tunney 2022-06-16 09:06:09 -07:00
parent 42c38bc3e3
commit c06ffd458c
19 changed files with 205 additions and 76 deletions

View file

@ -29,7 +29,7 @@
static int cpus;
static double load;
_Alignas(64) static int lock;
_Alignas(64) static char lock;
static struct NtFileTime idle1, kern1, user1;
textwindows int sys_getloadavg_nt(double *a, int n) {

View file

@ -2,20 +2,20 @@
#define COSMOPOLITAN_LIBC_INTRIN_ONCE_H_
#include "libc/intrin/spinlock.h"
#define _once(x) \
({ \
typeof(x) oncerc; \
static bool once; \
static typeof(oncerc) onceresult; \
_Alignas(64) static int oncelock; \
_spinlock(&oncelock); \
if (once) { \
oncerc = onceresult; \
} else { \
oncerc = onceresult = x; \
} \
_spunlock(&oncelock); \
oncerc; \
/*
 * Evaluates expression `x` at most once per process, caching its result.
 *
 * Each callsite gets its own static state; the per-callsite byte
 * spinlock is cache-line aligned (64) to avoid false sharing. The
 * first caller evaluates `x` under the lock and stores the result;
 * later callers return the cached value without re-evaluating `x`.
 *
 * BUGFIX: the original never set `once = true`, so every invocation
 * took the else branch and re-evaluated `x`, defeating the macro.
 *
 * @param x expression to evaluate once; its type determines the result
 * @return the value produced by the first evaluation of `x`
 */
#define _once(x)                             \
  ({                                         \
    typeof(x) oncerc;                        \
    static bool once;                        \
    static typeof(oncerc) onceresult;        \
    _Alignas(64) static char oncelock;       \
    _spinlock(&oncelock);                    \
    if (once) {                              \
      oncerc = onceresult;                   \
    } else {                                 \
      oncerc = onceresult = x;               \
      once = true; /* mark done so x is never evaluated again */ \
    }                                        \
    _spunlock(&oncelock);                    \
    oncerc;                                  \
  })
#endif /* COSMOPOLITAN_LIBC_INTRIN_ONCE_H_ */

View file

@ -7,9 +7,9 @@ COSMOPOLITAN_C_START_
#define PTHREAD_ONCE_INIT 0
#define PTHREAD_MUTEX_DEFAULT PTHREAD_MUTEX_RECURSIVE
#define PTHREAD_MUTEX_NORMAL 0
#define PTHREAD_MUTEX_RECURSIVE 1
#define PTHREAD_MUTEX_DEFAULT PTHREAD_MUTEX_NORMAL
#define PTHREAD_MUTEX_RECURSIVE 0
#define PTHREAD_MUTEX_NORMAL 1
#define PTHREAD_MUTEX_ERRORCHECK 2
#define PTHREAD_MUTEX_STALLED 0
#define PTHREAD_MUTEX_ROBUST 1
@ -26,7 +26,7 @@ typedef int pthread_once_t;
typedef struct {
int attr;
int reent;
_Atomic(int) owner;
_Atomic(int) lock;
_Atomic(int) waits;
} pthread_mutex_t;

View file

@ -29,11 +29,12 @@
* @return 0 on success, or error number on failure
*/
int pthread_mutex_lock(pthread_mutex_t *mutex) {
int me, owner;
unsigned tries;
int i, me, owner, tries;
for (tries = 0, me = gettid();;) {
owner = 0;
if (atomic_compare_exchange_strong(&mutex->owner, &owner, me)) {
owner = atomic_load_explicit(&mutex->lock, memory_order_relaxed);
if (!owner && atomic_compare_exchange_weak_explicit(
&mutex->lock, &owner, me, memory_order_acquire,
memory_order_relaxed)) {
break;
} else if (owner == me) {
if (mutex->attr != PTHREAD_MUTEX_ERRORCHECK) {
@ -42,15 +43,17 @@ int pthread_mutex_lock(pthread_mutex_t *mutex) {
return EDEADLK;
}
}
atomic_fetch_add(&mutex->waits, 1);
if (!IsLinux() || LinuxFutexWait((void *)&mutex->owner, owner, 0)) {
if (++tries & 7) {
__builtin_ia32_pause();
} else {
sched_yield();
if (tries < 7) {
for (i = 0; i != 1 << tries; i++) {
}
tries++;
} else if (IsLinux()) {
atomic_fetch_add(&mutex->waits, 1);
LinuxFutexWait(&mutex->lock, owner, 0);
atomic_fetch_sub(&mutex->waits, 1);
} else {
sched_yield();
}
atomic_fetch_sub(&mutex->waits, 1);
}
++mutex->reent;
return 0;

View file

@ -29,7 +29,7 @@ int pthread_mutex_trylock(pthread_mutex_t *mutex) {
int rc, me, owner;
me = gettid();
owner = 0;
if (!atomic_compare_exchange_strong(&mutex->owner, &owner, me) &&
if (!atomic_compare_exchange_strong(&mutex->lock, &owner, me) &&
owner == me) {
rc = 0;
++mutex->reent;

View file

@ -31,14 +31,14 @@
*/
int pthread_mutex_unlock(pthread_mutex_t *mutex) {
int owner;
if (mutex->attr == PTHREAD_MUTEX_ERRORCHECK && mutex->owner != gettid()) {
if (mutex->attr == PTHREAD_MUTEX_ERRORCHECK && mutex->lock != gettid()) {
return EPERM;
}
if (!--mutex->reent) {
atomic_store_explicit(&mutex->owner, 0, memory_order_relaxed);
atomic_store_explicit(&mutex->lock, 0, memory_order_relaxed);
if (IsLinux() &&
atomic_load_explicit(&mutex->waits, memory_order_acquire)) {
LinuxFutexWake(&mutex->owner, 1);
LinuxFutexWake(&mutex->lock, 1);
}
}
return 0;

View file

@ -20,20 +20,19 @@
__atomic_store(__lock, &__x, __ATOMIC_RELEASE); \
})
#define _spinlock_tiny(lock) \
({ \
autotype(lock) __lock = (lock); \
while (_trylock(__lock)) { \
__builtin_ia32_pause(); \
} \
0; \
#define _spinlock_tiny(lock) \
({ \
while (_trylock(lock)) { \
__builtin_ia32_pause(); \
} \
0; \
})
#define _spinlock_cooperative(lock) \
({ \
autotype(lock) __lock = (lock); \
typeof(*__lock) __x; \
char __x; \
unsigned __tries = 0; \
char *__lock = (lock); \
for (;;) { \
__atomic_load(__lock, &__x, __ATOMIC_RELAXED); \
if (!__x && !_trylock(__lock)) { \
@ -47,6 +46,27 @@
0; \
})
/*
 * Acquires a byte spinlock using truncated exponential backoff.
 *
 * Spins on a relaxed atomic load until the lock byte reads clear, then
 * attempts the real test-and-set. On contention it busy-waits for
 * 2**__tries iterations (doubling up to 7 attempts) before falling back
 * to _spinlock_yield() to surrender the CPU. __i is volatile so the
 * empty delay loop cannot be optimized away by the compiler.
 *
 * NOTE(review): the trailing underscore suggests an experimental
 * variant of _spinlock_cooperative above — confirm which one callers
 * are meant to use. Evaluates to 0, matching the other _spinlock forms.
 */
#define _spinlock_cooperative_(lock) \
({ \
char __x; \
volatile int __i; \
unsigned __tries = 0; \
char *__lock = (lock); \
for (;;) { \
__atomic_load(__lock, &__x, __ATOMIC_RELAXED); \
if (!__x && !_trylock(__lock)) { \
break; \
} else if (__tries < 7) { \
for (__i = 0; __i != 1 << __tries; __i++) { \
} \
__tries++; \
} else { \
_spinlock_yield(); \
} \
} \
0; \
})
/* Atomically sets the lock byte; returns its previous value (nonzero = already held). */
#define _trylock(lock) __atomic_test_and_set(lock, __ATOMIC_SEQ_CST)
/* Yields the CPU while spinning; defined elsewhere — presumably wraps sched_yield(). */
void _spinlock_yield(void);

View file

@ -0,0 +1,20 @@
#ifndef COSMOPOLITAN_LIBC_INTRIN_WAIT0_H_
#define COSMOPOLITAN_LIBC_INTRIN_WAIT0_H_
#include "libc/bits/atomic.h"
#include "libc/calls/calls.h"
#include "libc/dce.h"
#include "libc/linux/futex.h"
/*
 * Blocks until the atomic int pointed to by `ptid` becomes zero.
 *
 * On Linux it sleeps in the kernel, waiting on *ptid with the observed
 * value `x` — assuming LinuxFutexWait wraps FUTEX_WAIT, the kernel
 * re-checks that *ptid still equals `x` before sleeping, so a clear
 * that lands between our load and the wait is not missed. On other
 * platforms it polls with sched_yield() between relaxed loads.
 *
 * NOTE(review): presumably used to join a thread whose child-tid word
 * is cleared by the kernel on exit (CLONE_CHILD_CLEARTID) — confirm
 * against callers before relying on that.
 */
#define _wait0(ptid) \
do { \
int x; \
if (!(x = atomic_load_explicit(ptid, memory_order_relaxed))) { \
break; \
} else if (IsLinux()) { \
LinuxFutexWait(ptid, x, 0); \
} else { \
sched_yield(); \
} \
} while (1)
#endif /* COSMOPOLITAN_LIBC_INTRIN_WAIT0_H_ */

View file

@ -63,7 +63,7 @@ struct CloneArgs {
int64_t tid64;
};
union {
int lock;
char lock;
void *pstack;
};
int *ctid;

View file

@ -32,7 +32,7 @@ const char *testlib_showerror_func;
const char *testlib_showerror_isfatal;
const char *testlib_showerror_macro;
const char *testlib_showerror_symbol;
_Alignas(64) static int testlib_showerror_lock;
_Alignas(64) static char testlib_showerror_lock;
testonly void testlib_showerror(const char *file, int line, const char *func,
const char *method, const char *symbol,

View file

@ -22,7 +22,6 @@
#include "libc/dce.h"
#include "libc/errno.h"
#include "libc/intrin/asan.internal.h"
#include "libc/intrin/spinlock.h"
#include "libc/runtime/runtime.h"
#include "libc/str/str.h"
#include "libc/sysv/consts/futex.h"