mirror of https://github.com/jart/cosmopolitan.git (synced 2025-01-31 19:43:32 +00:00)
d5312b60f7
This change makes pthread_mutex_lock() as fast as _spinlock() by default. Thread instability issues on NetBSD have been resolved. The gdtoa thread code has been improved, and crash reporting now synchronizes between threads somewhat better.
235 lines
8.6 KiB
C
/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
│vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8                                :vi│
╞══════════════════════════════════════════════════════════════════════════════╡
│ Copyright 2022 Justine Alexandra Roberts Tunney                              │
│                                                                              │
│ Permission to use, copy, modify, and/or distribute this software for        │
│ any purpose with or without fee is hereby granted, provided that the        │
│ above copyright notice and this permission notice appear in all copies.     │
│                                                                              │
│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL               │
│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED               │
│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE            │
│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL        │
│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR       │
│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER              │
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR            │
│ PERFORMANCE OF THIS SOFTWARE.                                                │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/calls/calls.h"
|
|
#include "libc/calls/state.internal.h"
|
|
#include "libc/errno.h"
|
|
#include "libc/intrin/kprintf.h"
|
|
#include "libc/intrin/pthread.h"
|
|
#include "libc/intrin/spinlock.h"
|
|
#include "libc/intrin/wait0.internal.h"
|
|
#include "libc/nexgen32e/threaded.h"
|
|
#include "libc/runtime/internal.h"
|
|
#include "libc/runtime/runtime.h"
|
|
#include "libc/runtime/stack.h"
|
|
#include "libc/sysv/consts/clone.h"
|
|
#include "libc/sysv/consts/map.h"
|
|
#include "libc/sysv/consts/prot.h"
|
|
#include "libc/testlib/ezbench.h"
|
|
#include "libc/testlib/testlib.h"
|
|
#include "libc/thread/thread.h"
|
|
|
|
#define THREADS    8
#define ITERATIONS 512

char *stack[THREADS];
char tls[THREADS][64];

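// Runs before main(): turns on TLS and threading support so the tests
// below can spawn threads with clone().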
__attribute__((__constructor__)) void init(void) {
  __enable_tls();
  __enable_threads();
}

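// PTHREAD_MUTEX_NORMAL: plain lock/unlock round trips must succeed, both
// with an explicit attribute and after re-initializing with the default
// attributes (attr == 0).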
TEST(pthread_mutex_lock, normal) {
  pthread_mutex_t lock;
  pthread_mutexattr_t attr;
  ASSERT_EQ(0, pthread_mutexattr_init(&attr));
  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_NORMAL));
  ASSERT_EQ(0, pthread_mutex_init(&lock, &attr));
  ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
  ASSERT_EQ(0, pthread_mutex_init(&lock, 0));
  ASSERT_EQ(0, pthread_mutex_lock(&lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&lock));
  ASSERT_EQ(0, pthread_mutex_lock(&lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&lock));
  ASSERT_EQ(0, pthread_mutex_destroy(&lock));
}

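// PTHREAD_MUTEX_RECURSIVE: the owning thread may lock the mutex again,
// and every lock must be balanced by an unlock before it is released.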
TEST(pthread_mutex_lock, recursive) {
  pthread_mutex_t lock;
  pthread_mutexattr_t attr;
  ASSERT_EQ(0, pthread_mutexattr_init(&attr));
  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE));
  ASSERT_EQ(0, pthread_mutex_init(&lock, &attr));
  ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
  ASSERT_EQ(0, pthread_mutex_lock(&lock));
  ASSERT_EQ(0, pthread_mutex_lock(&lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&lock));
  ASSERT_EQ(0, pthread_mutex_lock(&lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&lock));
  ASSERT_EQ(0, pthread_mutex_destroy(&lock));
}

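// PTHREAD_MUTEX_ERRORCHECK: unlocking a mutex we don't own fails with
// EPERM, and locking a mutex we already hold fails with EDEADLK.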
TEST(pthread_mutex_lock, errorcheck) {
  pthread_mutex_t lock;
  pthread_mutexattr_t attr;
  ASSERT_EQ(0, pthread_mutexattr_init(&attr));
  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK));
  ASSERT_EQ(0, pthread_mutex_init(&lock, &attr));
  ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
  ASSERT_EQ(EPERM, pthread_mutex_unlock(&lock));
  ASSERT_EQ(0, pthread_mutex_lock(&lock));
  ASSERT_EQ(EDEADLK, pthread_mutex_lock(&lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&lock));
  ASSERT_EQ(EPERM, pthread_mutex_unlock(&lock));
  ASSERT_EQ(0, pthread_mutex_destroy(&lock));
}

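// Shared state for the contention tests and the benchmark below.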
int count;
_Atomic(int) finished;
_Alignas(64) char slock;
pthread_mutex_t lock;

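// Increments the shared counter ITERATIONS times, taking the global
// mutex around each increment.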
int MutexWorker(void *p) {
  int i;
  for (i = 0; i < ITERATIONS; ++i) {
    pthread_mutex_lock(&lock);
    ++count;
    pthread_mutex_unlock(&lock);
  }
  return 0;
}

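// Spawns THREADS workers on mmap()'d stacks via clone(), waits for each
// child's tid word to be cleared (CLONE_CHILD_CLEARTID), then checks that
// no increments were lost under a NORMAL mutex.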
TEST(pthread_mutex_lock, contention) {
  int i;
  pthread_mutexattr_t attr;
  pthread_mutexattr_init(&attr);
  pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_NORMAL);
  pthread_mutex_init(&lock, &attr);
  pthread_mutexattr_destroy(&attr);
  count = 0;
  for (i = 0; i < THREADS; ++i) {
    clone(MutexWorker,
          (stack[i] = mmap(0, GetStackSize(), PROT_READ | PROT_WRITE,
                           MAP_STACK | MAP_ANONYMOUS, -1, 0)),
          GetStackSize(),
          CLONE_THREAD | CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
              CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID | CLONE_SETTLS,
          0, 0, __initialize_tls(tls[i]), sizeof(tls[i]),
          (int *)(tls[i] + 0x38));
  }
  for (i = 0; i < THREADS; ++i) {
    _wait0((int *)(tls[i] + 0x38));
  }
  ASSERT_EQ(THREADS * ITERATIONS, count);
  for (i = 0; i < THREADS; ++i) {
    munmap(stack[i], GetStackSize());
  }
  pthread_mutex_destroy(&lock);
}

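// Same contention test, but with a RECURSIVE mutex.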
TEST(pthread_mutex_lock, rcontention) {
  int i;
  pthread_mutexattr_t attr;
  pthread_mutexattr_init(&attr);
  pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
  pthread_mutex_init(&lock, &attr);
  pthread_mutexattr_destroy(&attr);
  count = 0;
  for (i = 0; i < THREADS; ++i) {
    clone(MutexWorker,
          (stack[i] = mmap(0, GetStackSize(), PROT_READ | PROT_WRITE,
                           MAP_STACK | MAP_ANONYMOUS, -1, 0)),
          GetStackSize(),
          CLONE_THREAD | CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
              CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID | CLONE_SETTLS,
          0, 0, __initialize_tls(tls[i]), sizeof(tls[i]),
          (int *)(tls[i] + 0x38));
  }
  for (i = 0; i < THREADS; ++i) {
    _wait0((int *)(tls[i] + 0x38));
  }
  ASSERT_EQ(THREADS * ITERATIONS, count);
  for (i = 0; i < THREADS; ++i) {
    munmap(stack[i], GetStackSize());
  }
  pthread_mutex_destroy(&lock);
}

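// Same contention test, but with an ERRORCHECK mutex.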
TEST(pthread_mutex_lock, econtention) {
  int i;
  pthread_mutexattr_t attr;
  pthread_mutexattr_init(&attr);
  pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
  pthread_mutex_init(&lock, &attr);
  pthread_mutexattr_destroy(&attr);
  count = 0;
  for (i = 0; i < THREADS; ++i) {
    clone(MutexWorker,
          (stack[i] = mmap(0, GetStackSize(), PROT_READ | PROT_WRITE,
                           MAP_STACK | MAP_ANONYMOUS, -1, 0)),
          GetStackSize(),
          CLONE_THREAD | CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
              CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID | CLONE_SETTLS,
          0, 0, __initialize_tls(tls[i]), sizeof(tls[i]),
          (int *)(tls[i] + 0x38));
  }
  for (i = 0; i < THREADS; ++i) {
    _wait0((int *)(tls[i] + 0x38));
  }
  ASSERT_EQ(THREADS * ITERATIONS, count);
  for (i = 0; i < THREADS; ++i) {
    munmap(stack[i], GetStackSize());
  }
  pthread_mutex_destroy(&lock);
}

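// Baseline worker: guards the increment with _spinlock()/_spunlock()
// instead of a pthread mutex, and records that it finished.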
int SpinlockWorker(void *p) {
  int i;
  for (i = 0; i < ITERATIONS; ++i) {
    _spinlock(&slock);
    ++count;
    _spunlock(&slock);
  }
  ++finished;
  return 0;
}

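// Runs the spinlock workers under the same contention so the benchmark
// below can compare pthread_mutex_lock() against raw _spinlock().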
TEST(_spinlock, contention) {
  int i;
  count = 0;
  finished = 0;
  for (i = 0; i < THREADS; ++i) {
    clone(SpinlockWorker,
          (stack[i] = mmap(0, GetStackSize(), PROT_READ | PROT_WRITE,
                           MAP_STACK | MAP_ANONYMOUS, -1, 0)),
          GetStackSize(),
          CLONE_THREAD | CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
              CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID | CLONE_SETTLS,
          0, 0, __initialize_tls(tls[i]), sizeof(tls[i]),
          (int *)(tls[i] + 0x38));
  }
  for (i = 0; i < THREADS; ++i) {
    _wait0((int *)(tls[i] + 0x38));
  }
  ASSERT_EQ(THREADS, finished);
  ASSERT_EQ(THREADS * ITERATIONS, count);
  for (i = 0; i < THREADS; ++i) {
    munmap(stack[i], GetStackSize());
  }
}

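// Benchmarks each locking flavor by re-running the contention tests
// above, which the TEST() macro appears to expose as ordinary callable
// functions such as pthread_mutex_lock_contention().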
BENCH(pthread_mutex_lock, bench) {
  char schar = 0;
  pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
  EZBENCH2("_spinlock", donothing, _spinlock_contention());
  EZBENCH2("normal", donothing, pthread_mutex_lock_contention());
  EZBENCH2("recursive", donothing, pthread_mutex_lock_rcontention());
  EZBENCH2("errorcheck", donothing, pthread_mutex_lock_econtention());
}