Use *NSYNC for POSIX threads locking APIs

Condition variables, barriers, and r/w locks now work very well.
Justine Tunney 2022-09-11 11:02:07 -07:00
parent 3de35e196c
commit b5cb71ab84
No known key found for this signature in database
GPG key ID: BE714B4575D6E328
197 changed files with 3734 additions and 3817 deletions
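The locking primitives rerouted through *NSYNC below are the standard POSIX ones. As a minimal sketch (not part of this commit; the thread function and variable names are illustrative) of the mutex, condition-variable, and barrier usage those implementations back:

// Exercises pthread mutex, condition variable, and barrier APIs.
// Build with an ordinary POSIX toolchain: cc demo.c -lpthread
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static pthread_barrier_t barrier;
static int ready;  // guarded by mu

static void *worker(void *arg) {
  pthread_barrier_wait(&barrier);  // rendezvous with main thread
  pthread_mutex_lock(&mu);
  ready = 1;
  pthread_cond_signal(&cv);
  pthread_mutex_unlock(&mu);
  return 0;
}

int main(void) {
  pthread_t th;
  pthread_barrier_init(&barrier, 0, 2);
  pthread_create(&th, 0, worker, 0);
  pthread_barrier_wait(&barrier);
  pthread_mutex_lock(&mu);
  while (!ready) pthread_cond_wait(&cv, &mu);  // wait for the worker
  pthread_mutex_unlock(&mu);
  pthread_join(th, 0);
  pthread_barrier_destroy(&barrier);
  puts("done");
}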

View file

@@ -16,6 +16,7 @@
   TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
   PERFORMANCE OF THIS SOFTWARE.
 */
+#include "libc/atomic.h"
 #include "libc/calls/calls.h"
 #include "libc/calls/struct/timespec.h"
 #include "libc/intrin/atomic.h"
@@ -26,13 +27,14 @@
 #include "libc/thread/posixthread.internal.h"
 #include "libc/thread/spawn.h"
 #include "libc/thread/thread.h"
+#include "third_party/nsync/mu.h"
 int THREADS = 16;
 int ITERATIONS = 100;
 int count;
-_Atomic(int) started;
-_Atomic(int) finished;
+atomic_int started;
+atomic_int finished;
 pthread_mutex_t lock;
 pthread_mutexattr_t attr;
@@ -108,29 +110,50 @@ void BenchLockUnlock(pthread_mutex_t *m) {
   pthread_mutex_unlock(m);
 }
+void BenchLockUnlockNsync(nsync_mu *m) {
+  nsync_mu_lock(m);
+  nsync_mu_unlock(m);
+}
 BENCH(pthread_mutex_lock, bench_uncontended) {
   {
     pthread_spinlock_t s = {0};
     EZBENCH2("spin 1x", donothing, BenchSpinUnspin(&s));
   }
   {
-    pthread_mutex_t m = {PTHREAD_MUTEX_NORMAL};
+    nsync_mu m = {0};
+    EZBENCH2("nsync 1x", donothing, BenchLockUnlockNsync(&m));
+  }
+  {
+    pthread_mutex_t m;
+    pthread_mutexattr_t attr;
+    pthread_mutexattr_init(&attr);
+    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_NORMAL);
+    pthread_mutex_init(&m, &attr);
     EZBENCH2("normal 1x", donothing, BenchLockUnlock(&m));
   }
   {
-    pthread_mutex_t m = {PTHREAD_MUTEX_RECURSIVE};
+    pthread_mutex_t m;
+    pthread_mutexattr_t attr;
+    pthread_mutexattr_init(&attr);
+    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
+    pthread_mutex_init(&m, &attr);
     EZBENCH2("recursive 1x", donothing, BenchLockUnlock(&m));
   }
   {
-    pthread_mutex_t m = {PTHREAD_MUTEX_ERRORCHECK};
+    pthread_mutex_t m;
+    pthread_mutexattr_t attr;
+    pthread_mutexattr_init(&attr);
+    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
+    pthread_mutex_init(&m, &attr);
     EZBENCH2("errorcheck 1x", donothing, BenchLockUnlock(&m));
   }
 }
 struct SpinContentionArgs {
   pthread_spinlock_t *spin;
-  _Atomic(char) done;
-  _Atomic(char) ready;
+  atomic_char done;
+  atomic_char ready;
 };
 int SpinContentionWorker(void *arg, int tid) {
@@ -145,8 +168,8 @@ int SpinContentionWorker(void *arg, int tid) {
 struct MutexContentionArgs {
   pthread_mutex_t *mutex;
-  _Atomic(char) done;
-  _Atomic(char) ready;
+  atomic_char done;
+  atomic_char ready;
 };
 int MutexContentionWorker(void *arg, int tid) {
@@ -159,6 +182,22 @@ int MutexContentionWorker(void *arg, int tid) {
   return 0;
 }
+struct NsyncContentionArgs {
+  nsync_mu *nsync;
+  atomic_char done;
+  atomic_char ready;
+};
+int NsyncContentionWorker(void *arg, int tid) {
+  struct NsyncContentionArgs *a = arg;
+  while (!atomic_load_explicit(&a->done, memory_order_relaxed)) {
+    nsync_mu_lock(a->nsync);
+    atomic_store_explicit(&a->ready, 1, memory_order_relaxed);
+    nsync_mu_unlock(a->nsync);
+  }
+  return 0;
+}
 BENCH(pthread_mutex_lock, bench_contended) {
   struct spawn t;
   {
@@ -171,7 +210,20 @@ BENCH(pthread_mutex_lock, bench_contended) {
     _join(&t);
   }
   {
-    pthread_mutex_t m = {PTHREAD_MUTEX_NORMAL};
+    nsync_mu m = {0};
+    struct NsyncContentionArgs a = {&m};
+    _spawn(NsyncContentionWorker, &a, &t);
+    while (!a.ready) sched_yield();
+    EZBENCH2("nsync 2x", donothing, BenchLockUnlockNsync(&m));
+    a.done = true;
+    _join(&t);
+  }
+  {
+    pthread_mutex_t m;
+    pthread_mutexattr_t attr;
+    pthread_mutexattr_init(&attr);
+    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_NORMAL);
+    pthread_mutex_init(&m, &attr);
     struct MutexContentionArgs a = {&m};
     _spawn(MutexContentionWorker, &a, &t);
     while (!a.ready) sched_yield();
@@ -180,7 +232,11 @@ BENCH(pthread_mutex_lock, bench_contended) {
     _join(&t);
   }
   {
-    pthread_mutex_t m = {PTHREAD_MUTEX_RECURSIVE};
+    pthread_mutex_t m;
+    pthread_mutexattr_t attr;
+    pthread_mutexattr_init(&attr);
+    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
+    pthread_mutex_init(&m, &attr);
     struct MutexContentionArgs a = {&m};
     _spawn(MutexContentionWorker, &a, &t);
     while (!a.ready) sched_yield();
@@ -189,7 +245,11 @@ BENCH(pthread_mutex_lock, bench_contended) {
     _join(&t);
   }
   {
-    pthread_mutex_t m = {PTHREAD_MUTEX_ERRORCHECK};
+    pthread_mutex_t m;
+    pthread_mutexattr_t attr;
+    pthread_mutexattr_init(&attr);
+    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
+    pthread_mutex_init(&m, &attr);
     struct MutexContentionArgs a = {&m};
     _spawn(MutexContentionWorker, &a, &t);
     while (!a.ready) sched_yield();

View file

@@ -17,12 +17,11 @@
   PERFORMANCE OF THIS SOFTWARE.
 */
 #include "libc/assert.h"
+#include "libc/atomic.h"
 #include "libc/calls/calls.h"
 #include "libc/calls/state.internal.h"
 #include "libc/calls/strace.internal.h"
 #include "libc/errno.h"
-#include "libc/intrin/futex.internal.h"
-#include "libc/intrin/wait0.internal.h"
 #include "libc/log/check.h"
 #include "libc/macros.internal.h"
 #include "libc/math.h"
@@ -40,13 +39,14 @@
 #include "libc/thread/spawn.h"
 #include "libc/thread/thread.h"
 #include "libc/thread/tls.h"
+#include "libc/thread/wait0.internal.h"
 #define THREADS 8
 #define ITERATIONS 512
 int count;
-_Atomic(int) started;
-_Atomic(int) finished;
+atomic_int started;
+atomic_int finished;
 pthread_mutex_t mylock;
 pthread_spinlock_t slock;
 struct spawn th[THREADS];

View file

@@ -16,17 +16,19 @@
   TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
   PERFORMANCE OF THIS SOFTWARE.
 */
+#include "libc/atomic.h"
 #include "libc/intrin/atomic.h"
-#include "libc/thread/thread.h"
 #include "libc/mem/mem.h"
 #include "libc/runtime/gc.internal.h"
 #include "libc/testlib/testlib.h"
 #include "libc/thread/spawn.h"
+#include "libc/thread/thread.h"
 int i, n;
 struct spawn *t;
-_Atomic(int) x, y;
+atomic_int x, y;
 pthread_barrier_t b;
+static pthread_once_t once = PTHREAD_ONCE_INIT;
 void InitFactory(void) {
   ASSERT_EQ(0, atomic_load(&x));
@@ -34,7 +36,6 @@ void InitFactory(void) {
 }
 int Worker(void *arg, int tid) {
-  static pthread_once_t once = PTHREAD_ONCE_INIT;
   pthread_barrier_wait(&b);
   ASSERT_EQ(0, pthread_once(&once, InitFactory));
   ASSERT_EQ(1, atomic_load(&y));
@@ -45,10 +46,11 @@ int Worker(void *arg, int tid) {
 TEST(pthread_once, test) {
   n = 32;
   x = y = 0;
-  pthread_barrier_init(&b, 0, n);
+  ASSERT_EQ(0, pthread_barrier_init(&b, 0, n));
   t = gc(malloc(sizeof(struct spawn) * n));
   for (i = 0; i < n; ++i) ASSERT_SYS(0, 0, _spawn(Worker, 0, t + i));
   for (i = 0; i < n; ++i) EXPECT_SYS(0, 0, _join(t + i));
   ASSERT_EQ(n, atomic_load(&x));
   ASSERT_EQ(1, atomic_load(&y));
+  ASSERT_EQ(0, pthread_barrier_destroy(&b));
 }

View file

@@ -43,7 +43,7 @@
 #define ENTRIES 1024
 volatile uint64_t A[THREADS * ENTRIES];
-pthread_barrier_t barrier = PTHREAD_BARRIER_INITIALIZER;
+pthread_barrier_t barrier;
 void SetUpOnce(void) {
   __enable_threads();
@@ -93,7 +93,7 @@ TEST(rand64, testThreadSafety_doesntProduceIdenticalValues) {
   sigemptyset(&ss);
   sigaddset(&ss, SIGCHLD);
   EXPECT_EQ(0, sigprocmask(SIG_BLOCK, &ss, &oldss));
-  pthread_barrier_init(&barrier, 0, THREADS);
+  ASSERT_EQ(0, pthread_barrier_init(&barrier, 0, THREADS));
   for (i = 0; i < THREADS; ++i) {
     ASSERT_SYS(0, 0, _spawn(Thrasher, (void *)(intptr_t)i, th + i));
   }
@@ -109,4 +109,5 @@ TEST(rand64, testThreadSafety_doesntProduceIdenticalValues) {
       EXPECT_NE(A[i], A[j], "i=%d j=%d", i, j);
     }
   }
+  ASSERT_EQ(0, pthread_barrier_destroy(&barrier));
 }

View file

@@ -39,7 +39,8 @@ TEST_LIBC_INTRIN_DIRECTDEPS = \
 	LIBC_TINYMATH \
 	LIBC_X \
 	TOOL_VIZ_LIB \
-	THIRD_PARTY_COMPILER_RT
+	THIRD_PARTY_COMPILER_RT \
+	THIRD_PARTY_NSYNC
 TEST_LIBC_INTRIN_DEPS := \
 	$(call uniq,$(foreach x,$(TEST_LIBC_INTRIN_DIRECTDEPS),$($(x))))