Make threads faster and more reliable

This change doubles the performance of thread spawning. That's thanks to
our new stack manager, which allows us to avoid zeroing stacks. It gives
us 15µs spawns rather than 30µs spawns on Linux. Also, pthread_exit() is
faster now, since it doesn't need to acquire the pthread GIL. On NetBSD,
that helps us avoid allocating too many semaphores. Even if that happens,
we're now able to survive running out of semaphores, and even running out
of memory, when allocating *NSYNC waiter objects. I found a lot more rare
bugs in the POSIX threads runtime that could cause things to crash if
you've got dozens of threads all spawning and joining dozens of threads.
I want cosmo to be world-class and production-worthy for 2025, so happy
holidays to all.
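
The workload described above, dozens of threads all spawning and joining
dozens of threads, is easy to reproduce with plain POSIX threads. Below is
a minimal, hypothetical stress sketch; the thread counts and names are
illustrative and not taken from cosmo's test suite.

// Hypothetical stress sketch: dozens of threads, each spawning and joining
// dozens of short-lived children. Counts and names are illustrative only.
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define PARENTS  32
#define CHILDREN 32

static void *child(void *arg) {
  return arg;  // exit immediately so joins race with thread teardown
}

static void *parent(void *arg) {
  (void)arg;
  pthread_t kids[CHILDREN];
  for (int i = 0; i < CHILDREN; ++i)
    if (pthread_create(&kids[i], NULL, child, NULL))
      abort();
  for (int i = 0; i < CHILDREN; ++i)
    if (pthread_join(kids[i], NULL))
      abort();
  return NULL;
}

int main(void) {
  pthread_t threads[PARENTS];
  for (int i = 0; i < PARENTS; ++i)
    if (pthread_create(&threads[i], NULL, parent, NULL))
      abort();
  for (int i = 0; i < PARENTS; ++i)
    if (pthread_join(threads[i], NULL))
      abort();
  printf("survived %d x %d spawn/join cycles\n", PARENTS, CHILDREN);
}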
Justine Tunney 2024-12-18 04:59:02 -08:00
parent 906bd06a5a
commit 624573207e
51 changed files with 1006 additions and 321 deletions


@@ -696,35 +696,40 @@ textwindows dontinstrument static uint32_t __sig_worker(void *arg) {
     }
 
     // unblock stalled asynchronous signals in threads
-    _pthread_lock();
-    for (struct Dll *e = dll_first(_pthread_list); e;
-         e = dll_next(_pthread_list, e)) {
-      struct PosixThread *pt = POSIXTHREAD_CONTAINER(e);
-      if (atomic_load_explicit(&pt->pt_status, memory_order_acquire) >=
-          kPosixThreadTerminated) {
-        break;
+    struct PosixThread *mark;
+    for (;;) {
+      sigset_t pending, mask;
+      mark = 0;
+      _pthread_lock();
+      for (struct Dll *e = dll_first(_pthread_list); e;
+           e = dll_next(_pthread_list, e)) {
+        struct PosixThread *pt = POSIXTHREAD_CONTAINER(e);
+        if (atomic_load_explicit(&pt->pt_status, memory_order_acquire) >=
+            kPosixThreadTerminated)
+          break;
+        pending = atomic_load_explicit(&pt->tib->tib_sigpending,
+                                       memory_order_acquire);
+        mask =
+            atomic_load_explicit(&pt->tib->tib_sigmask, memory_order_acquire);
+        if (pending & ~mask) {
+          _pthread_ref(pt);
+          mark = pt;
+          break;
+        }
       }
-      sigset_t pending =
-          atomic_load_explicit(&pt->tib->tib_sigpending, memory_order_acquire);
-      sigset_t mask =
-          atomic_load_explicit(&pt->tib->tib_sigmask, memory_order_acquire);
-      if (pending & ~mask) {
-        _pthread_ref(pt);
-        _pthread_unlock();
-        while (!atomic_compare_exchange_weak_explicit(
-            &pt->tib->tib_sigpending, &pending, pending & ~mask,
-            memory_order_acq_rel, memory_order_relaxed)) {
-        }
-        while ((pending = pending & ~mask)) {
-          int sig = bsfl(pending) + 1;
-          pending &= ~(1ull << (sig - 1));
-          __sig_killer(pt, sig, SI_KERNEL);
-        }
-        _pthread_lock();
-        _pthread_unref(pt);
+      _pthread_unlock();
+      if (!mark)
+        break;
+      while (!atomic_compare_exchange_weak_explicit(
+          &mark->tib->tib_sigpending, &pending, pending & ~mask,
+          memory_order_acq_rel, memory_order_relaxed)) {
       }
+      while ((pending = pending & ~mask)) {
+        int sig = bsfl(pending) + 1;
+        pending &= ~(1ull << (sig - 1));
+        __sig_killer(mark, sig, SI_KERNEL);
+      }
     }
-    _pthread_unlock();
 
     // wait until next scheduler quantum
     pthread_mutex_unlock(&__sig_worker_lock);
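
The restructured worker in the hunk above never keeps a list cursor across a
lock release: each pass takes _pthread_lock(), scans _pthread_list from the
head, takes a reference on the first thread with deliverable pending signals
and remembers it in mark, drops the lock, delivers those signals with
__sig_killer(), then rescans from scratch, stopping once a full scan finds
nothing. A minimal sketch of that mark-under-lock, act-outside-lock, rescan
idiom, with hypothetical names rather than the cosmo APIs, looks like this:

// Sketch of the mark-under-lock, act-outside-lock, rescan idiom used by
// __sig_worker above. node, list_lock, dirty, and do_work are hypothetical
// stand-ins, not cosmo APIs. The real code also takes a reference on the
// marked thread before unlocking so it can't be freed mid-delivery; this
// sketch omits lifetime management for brevity.
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct node {
  struct node *next;
  bool dirty;  // stand-in for "has pending unmasked signals"
};

static struct node *list_head;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void do_work(struct node *n) {
  n->dirty = false;  // stand-in for clearing pending bits and delivering
}

void drain(void) {
  for (;;) {
    struct node *mark = NULL;
    pthread_mutex_lock(&list_lock);
    for (struct node *n = list_head; n; n = n->next) {
      if (n->dirty) {
        mark = n;  // remember the node; only iterate while holding the lock
        break;
      }
    }
    pthread_mutex_unlock(&list_lock);
    if (!mark)
      break;        // a full scan found nothing left to do
    do_work(mark);  // act outside the lock, then rescan from the head
  }
}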