Make mmap() scalable

It's now possible to create thousands upon thousands of sparse independent
memory mappings without any slowdown. The memory manager also tracks
memory protection more precisely now, particularly on Windows, where it's
recorded in a way that can be restored during fork(). This is about the
highest quality memory manager possible, although on some OSes like XNU,
where mmap() is implemented as an O(n) operation, things sadly aren't
much improved. With this change, the llamafile HTTP server's /tokenize
endpoint can now handle 2.6 million requests per second with a 50-token
prompt.
Justine Tunney 2024-07-05 23:13:20 -07:00
parent 3756870635
commit 8c645fa1ee
59 changed files with 1238 additions and 1067 deletions
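As a concrete illustration of the claim (a sketch, not part of this commit): the workload below creates thousands of small, independent, anonymous mappings, which is exactly where an O(n) mmap() implementation degrades.

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/mman.h>
    #include <unistd.h>

    #define N 10000  /* thousands of independent mappings */

    int main(void) {
            static void *maps[N];
            long pagesz = sysconf(_SC_PAGESIZE);
            for (int i = 0; i < N; i++) {
                    /* each iteration creates a separate private anonymous mapping */
                    maps[i] = mmap(0, pagesz, PROT_READ | PROT_WRITE,
                                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                    if (maps[i] == MAP_FAILED) {
                            perror("mmap");
                            exit(1);
                    }
                    *(char *)maps[i] = 1;  /* touch the page so it's really backed */
            }
            for (int i = 0; i < N; i++)
                    munmap(maps[i], pagesz);
            return 0;
    }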


@@ -18,6 +18,7 @@
#include "libc/calls/calls.h"
#include "libc/calls/syscall-sysv.internal.h"
#include "libc/dce.h"
#include "libc/intrin/directmap.internal.h"
#include "libc/intrin/dll.h"
#include "libc/intrin/extend.internal.h"
#include "libc/nt/enum/filemapflags.h"
@@ -25,21 +26,17 @@
#include "libc/nt/memory.h"
#include "libc/nt/runtime.h"
#include "libc/runtime/memtrack.internal.h"
#include "libc/runtime/runtime.h"
#include "libc/stdalign.internal.h"
#include "libc/stdalign.internal.h"
#include "libc/sysv/consts/map.h"
#include "libc/sysv/consts/prot.h"
#include "libc/thread/thread.h"
#include "libc/thread/tls.h"
#include "third_party/nsync/atomic.h"
#include "third_party/nsync/atomic.internal.h"
#include "third_party/nsync/common.internal.h"
#include "third_party/nsync/mu_semaphore.h"
#include "third_party/nsync/races.internal.h"
#include "libc/runtime/runtime.h"
#include "libc/runtime/runtime.h"
#include "libc/sysv/consts/map.h"
#include "libc/nt/runtime.h"
#include "libc/intrin/directmap.internal.h"
#include "libc/thread/thread.h"
#include "libc/dce.h"
#include "third_party/nsync/wait_s.internal.h"
__static_yoink("nsync_notice");
@@ -97,33 +94,15 @@ __static_yoink("nsync_notice");
distinct wakeup conditions were high. So clients are advised to resort to
condition variables if they have many distinct wakeup conditions. */
/* Used in spinloops to delay resumption of the loop.
Usage:
unsigned attempts = 0;
while (try_something) {
attempts = nsync_spin_delay_ (attempts);
} */
unsigned nsync_spin_delay_ (unsigned attempts) {
if (attempts < 7) {
volatile int i;
for (i = 0; i != 1 << attempts; i++) {
}
attempts++;
} else {
nsync_yield_ ();
}
return (attempts);
}
/* Spin until (*w & test) == 0, then atomically perform *w = ((*w | set) &
~clear), perform an acquire barrier, and return the previous value of *w.
*/
uint32_t nsync_spin_test_and_set_ (nsync_atomic_uint32_ *w, uint32_t test,
uint32_t set, uint32_t clear) {
uint32_t set, uint32_t clear, void *symbol) {
unsigned attempts = 0; /* CV_SPINLOCK retry count */
uint32_t old = ATM_LOAD (w);
while ((old & test) != 0 || !ATM_CAS_ACQ (w, old, (old | set) & ~clear)) {
attempts = nsync_spin_delay_ (attempts);
attempts = pthread_delay_np (symbol, attempts);
old = ATM_LOAD (w);
}
return (old);
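The removed nsync_spin_delay_ above shows exactly what its replacement must do: busy-wait with exponentially growing spin counts for the first few attempts, then fall back to yielding the CPU. pthread_delay_np is a Cosmopolitan-specific helper; a plausible model of it, assuming it keeps the same escalation strategy and uses its first argument only to identify the contended object for diagnostics, looks like this:

    #include <sched.h>

    /* hypothetical model of pthread_delay_np (symbol, attempts) */
    int delay_np_model(const void *symbol, int attempts) {
            (void)symbol;  /* assumed: names the contended object for tracing */
            if (attempts < 7) {
                    for (volatile int i = 0; i != 1 << attempts; i++) {
                    }  /* spin 2^attempts iterations */
                    return attempts + 1;
            }
            sched_yield();  /* past that, let another thread run */
            return attempts;
    }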
@@ -156,33 +135,69 @@ waiter *nsync_dll_waiter_samecond_ (struct Dll *e) {
/* -------------------------------- */
static void *nsync_malloc (size_t size) {
void *res;
if (!IsWindows ()) {
// too much of a performance hit to track
res = __sys_mmap ((void *)0x7110000000, size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
-1, 0, 0);
static _Atomic(waiter *) free_waiters;
static void free_waiters_push (waiter *w) {
int backoff = 0;
w->next_free = atomic_load_explicit (&free_waiters, memory_order_relaxed);
while (!atomic_compare_exchange_weak_explicit (&free_waiters, &w->next_free, w,
memory_order_acq_rel, memory_order_relaxed))
backoff = pthread_delay_np(free_waiters, backoff);
}
static void free_waiters_populate (void) {
int n;
if (IsNetbsd () || IsXnuSilicon ()) {
// netbsd needs one file descriptor per semaphore (!!)
// tim cook wants us to use his grand central dispatch
n = 1;
} else {
// must be tracked for fork() resurrection
res = mmap (0, size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
-1, 0);
n = getpagesize() / sizeof(waiter);
}
if (res == MAP_FAILED)
waiter *waiters = mmap (0, n * sizeof(waiter),
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
-1, 0);
if (waiters == MAP_FAILED)
nsync_panic_ ("out of memory\n");
return res;
for (size_t i = 0; i < n; ++i) {
waiter *w = &waiters[i];
w->tag = WAITER_TAG;
w->nw.tag = NSYNC_WAITER_TAG;
if (!nsync_mu_semaphore_init (&w->sem)) {
if (!i)
nsync_panic_ ("out of semaphores\n");
break;
}
w->nw.sem = &w->sem;
dll_init (&w->nw.q);
NSYNC_ATOMIC_UINT32_STORE_ (&w->nw.waiting, 0);
w->nw.flags = NSYNC_WAITER_FLAG_MUCV;
ATM_STORE (&w->remove_count, 0);
dll_init (&w->same_condition);
w->flags = 0;
free_waiters_push (w);
}
}
static waiter *free_waiters_pop (void) {
waiter *w;
int backoff = 0;
for (;;) {
if ((w = atomic_load_explicit (&free_waiters, memory_order_relaxed))) {
if (atomic_compare_exchange_weak_explicit (&free_waiters, &w, w->next_free,
memory_order_acq_rel, memory_order_relaxed))
return w;
backoff = pthread_delay_np(free_waiters, backoff);
} else {
free_waiters_populate ();
}
}
return w;
}
/* -------------------------------- */
static struct Dll *free_waiters = NULL;
/* free_waiters points to a doubly-linked list of free waiter structs. */
pthread_mutex_t nsync_waiters_mu = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
#define waiter_for_thread __get_tls()->tib_nsync
void nsync_waiter_destroy (void *v) {
@@ -193,45 +208,20 @@ void nsync_waiter_destroy (void *v) {
of thread-local variables can be arbitrary in some platform e.g.
POSIX. */
waiter_for_thread = NULL;
IGNORE_RACES_START ();
ASSERT ((w->flags & (WAITER_RESERVED|WAITER_IN_USE)) == WAITER_RESERVED);
w->flags &= ~WAITER_RESERVED;
pthread_mutex_lock (&nsync_waiters_mu);
dll_make_first (&free_waiters, &w->nw.q);
pthread_mutex_unlock (&nsync_waiters_mu);
IGNORE_RACES_END ();
free_waiters_push (w);
}
/* Return a pointer to an unused waiter struct.
Ensures that the enclosed timer is stopped and its channel drained. */
waiter *nsync_waiter_new_ (void) {
struct Dll *q;
waiter *tw;
waiter *w;
waiter *tw;
tw = waiter_for_thread;
w = tw;
if (w == NULL || (w->flags & (WAITER_RESERVED|WAITER_IN_USE)) != WAITER_RESERVED) {
w = NULL;
pthread_mutex_lock (&nsync_waiters_mu);
q = dll_first (free_waiters);
if (q != NULL) { /* If free list is non-empty, dequeue an item. */
dll_remove (&free_waiters, q);
w = DLL_WAITER (q);
}
pthread_mutex_unlock (&nsync_waiters_mu);
if (w == NULL) { /* If free list was empty, allocate an item. */
w = (waiter *) nsync_malloc (sizeof (*w));
w->tag = WAITER_TAG;
w->nw.tag = NSYNC_WAITER_TAG;
nsync_mu_semaphore_init (&w->sem);
w->nw.sem = &w->sem;
dll_init (&w->nw.q);
NSYNC_ATOMIC_UINT32_STORE_ (&w->nw.waiting, 0);
w->nw.flags = NSYNC_WAITER_FLAG_MUCV;
ATM_STORE (&w->remove_count, 0);
dll_init (&w->same_condition);
w->flags = 0;
}
w = free_waiters_pop ();
if (tw == NULL) {
w->flags |= WAITER_RESERVED;
waiter_for_thread = w;
@@ -246,9 +236,7 @@ void nsync_waiter_free_ (waiter *w) {
ASSERT ((w->flags & WAITER_IN_USE) != 0);
w->flags &= ~WAITER_IN_USE;
if ((w->flags & WAITER_RESERVED) == 0) {
pthread_mutex_lock (&nsync_waiters_mu);
dll_make_first (&free_waiters, &w->nw.q);
pthread_mutex_unlock (&nsync_waiters_mu);
free_waiters_push (w);
if (w == waiter_for_thread)
waiter_for_thread = 0;
}

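The free_waiters list introduced above replaces a mutex-guarded doubly-linked list with a Treiber stack: a lock-free LIFO where push and pop each retry a single compare-and-swap on the head pointer, backing off under contention. A self-contained sketch of the same structure follows (the real code additionally threads pthread_delay_np into the retry loops, and recycled nodes mean a production version must also think about the ABA problem, which this sketch ignores):

    #include <stdatomic.h>
    #include <stddef.h>

    struct node { struct node *next; };
    static _Atomic(struct node *) top;

    void stack_push(struct node *n) {
            n->next = atomic_load_explicit(&top, memory_order_relaxed);
            /* on CAS failure, n->next is refreshed with the current top */
            while (!atomic_compare_exchange_weak_explicit(
                           &top, &n->next, n,
                           memory_order_release, memory_order_relaxed)) {
            }
    }

    struct node *stack_pop(void) {
            struct node *n = atomic_load_explicit(&top, memory_order_acquire);
            /* on CAS failure, n is refreshed with the current top */
            while (n && !atomic_compare_exchange_weak_explicit(
                                &top, &n, n->next,
                                memory_order_acquire, memory_order_relaxed)) {
            }
            return n;
    }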

@@ -24,19 +24,11 @@ void nsync_yield_(void);
/* Retrieve the per-thread cache of the waiter object. Platform specific. */
void *nsync_per_thread_waiter_(void (*dest)(void *));
/* Used in spinloops to delay resumption of the loop.
Usage:
unsigned attempts = 0;
while (try_something) {
attempts = nsync_spin_delay_ (attempts);
} */
unsigned nsync_spin_delay_(unsigned attempts);
/* Spin until (*w & test) == 0, then atomically perform *w = ((*w | set) &
~clear), perform an acquire barrier, and return the previous value of *w.
*/
uint32_t nsync_spin_test_and_set_(nsync_atomic_uint32_ *w, uint32_t test,
uint32_t set, uint32_t clear);
uint32_t set, uint32_t clear, void *symbol);
/* Abort after printing the nul-terminated string s[]. */
void nsync_panic_(const char *s) wontreturn;
@@ -210,6 +202,7 @@ typedef struct waiter_s {
struct wait_condition_s cond; /* A condition on which to acquire a mu. */
struct Dll same_condition; /* Links neighbours in nw.q with same
non-nil condition. */
struct waiter_s * next_free;
} waiter;
static const uint32_t WAITER_TAG = 0x0590239f;
static const uint32_t NSYNC_WAITER_TAG = 0x726d2ba9;


@@ -193,7 +193,7 @@ static int nsync_cv_wait_with_deadline_impl_ (struct nsync_cv_wait_with_deadline
/* A timeout or cancellation occurred, and no wakeup.
Acquire *pcv's spinlock, and confirm. */
c->old_word = nsync_spin_test_and_set_ (&c->pcv->word, CV_SPINLOCK,
CV_SPINLOCK, 0);
CV_SPINLOCK, 0, c->cv_mu);
/* Check that w wasn't removed from the queue after we
checked above, but before we acquired the spinlock.
The test of remove_count confirms that the waiter *w
@@ -226,7 +226,7 @@ static int nsync_cv_wait_with_deadline_impl_ (struct nsync_cv_wait_with_deadline
has not yet set the waiting field to zero; a
cancellation or timeout may prevent this thread
from blocking above on the semaphore. */
attempts = nsync_spin_delay_ (attempts);
attempts = pthread_delay_np (c->cv_mu, attempts);
}
}
if (c->cv_mu != NULL && c->w->cv_mu == NULL) { /* waiter was moved to *pmu's queue, and woken. */
@@ -323,7 +323,7 @@ int nsync_cv_wait_with_deadline_generic (nsync_cv *pcv, void *pmu,
}
/* acquire spinlock, set non-empty */
c.old_word = nsync_spin_test_and_set_ (&pcv->word, CV_SPINLOCK, CV_SPINLOCK|CV_NON_EMPTY, 0);
c.old_word = nsync_spin_test_and_set_ (&pcv->word, CV_SPINLOCK, CV_SPINLOCK|CV_NON_EMPTY, 0, pmu);
dll_make_last (&pcv->waiters, &c.w->nw.q);
c.remove_count = ATM_LOAD (&c.w->remove_count);
/* Release the spin lock. */
@@ -355,7 +355,7 @@ void nsync_cv_signal (nsync_cv *pcv) {
int all_readers = 0;
/* acquire spinlock */
uint32_t old_word = nsync_spin_test_and_set_ (&pcv->word, CV_SPINLOCK,
CV_SPINLOCK, 0);
CV_SPINLOCK, 0, pcv);
if (!dll_is_empty (pcv->waiters)) {
/* Point to first waiter that enqueued itself, and
detach it from all others. */
@@ -438,7 +438,7 @@ void nsync_cv_broadcast (nsync_cv *pcv) {
int all_readers;
struct Dll *to_wake_list = NULL; /* waiters that we will wake */
/* acquire spinlock */
nsync_spin_test_and_set_ (&pcv->word, CV_SPINLOCK, CV_SPINLOCK, 0);
nsync_spin_test_and_set_ (&pcv->word, CV_SPINLOCK, CV_SPINLOCK, 0, pcv);
p = NULL;
next = NULL;
all_readers = 1;
@@ -497,7 +497,7 @@ static nsync_time cv_ready_time (void *v, struct nsync_waiter_s *nw) {
static int cv_enqueue (void *v, struct nsync_waiter_s *nw) {
nsync_cv *pcv = (nsync_cv *) v;
/* acquire spinlock */
uint32_t old_word = nsync_spin_test_and_set_ (&pcv->word, CV_SPINLOCK, CV_SPINLOCK, 0);
uint32_t old_word = nsync_spin_test_and_set_ (&pcv->word, CV_SPINLOCK, CV_SPINLOCK, 0, pcv);
dll_make_last (&pcv->waiters, &nw->q);
ATM_STORE (&nw->waiting, 1);
/* Release spinlock. */
@@ -509,7 +509,7 @@ static int cv_dequeue (void *v, struct nsync_waiter_s *nw) {
nsync_cv *pcv = (nsync_cv *) v;
int was_queued = 0;
/* acquire spinlock */
uint32_t old_word = nsync_spin_test_and_set_ (&pcv->word, CV_SPINLOCK, CV_SPINLOCK, 0);
uint32_t old_word = nsync_spin_test_and_set_ (&pcv->word, CV_SPINLOCK, CV_SPINLOCK, 0, pcv);
if (ATM_LOAD_ACQ (&nw->waiting) != 0) {
dll_remove (&pcv->waiters, &nw->q);
ATM_STORE (&nw->waiting, 0);

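Every condition-variable entry point in this file follows the same discipline: grab the CV_SPINLOCK bit in pcv->word via nsync_spin_test_and_set_, mutate the waiter queue, then release the bit with an atomic store. A stripped-down sketch of that bit-level spinlock pattern in plain C11 atomics (names hypothetical):

    #include <stdatomic.h>
    #include <stdint.h>

    #define SPINLOCK 1u  /* one bit of the word serves as the lock */

    void lock_bit(_Atomic uint32_t *word) {
            uint32_t old = atomic_load_explicit(word, memory_order_relaxed);
            for (;;) {
                    if (!(old & SPINLOCK) &&
                        atomic_compare_exchange_weak_explicit(
                                word, &old, old | SPINLOCK,
                                memory_order_acquire, memory_order_relaxed))
                            return;  /* we now own the queue guarded by this bit */
                    old = atomic_load_explicit(word, memory_order_relaxed);
            }
    }

    void unlock_bit(_Atomic uint32_t *word) {
            /* clear the lock bit, preserving the word's other flags */
            atomic_fetch_and_explicit(word, ~SPINLOCK, memory_order_release);
    }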

@@ -197,7 +197,7 @@ static char *emit_mu_state (struct emit_buf *b, nsync_mu *mu,
word = ATM_LOAD (&mu->word);
if ((word & MU_WAITING) != 0 && print_waiters && /* can benefit from lock */
(blocking || (word & MU_SPINLOCK) == 0)) { /* willing, or no need to wait */
word = nsync_spin_test_and_set_ (&mu->word, MU_SPINLOCK, MU_SPINLOCK, 0);
word = nsync_spin_test_and_set_ (&mu->word, MU_SPINLOCK, MU_SPINLOCK, 0, mu);
acquired = 1;
}
readers = word / MU_RLOCK;
@@ -234,7 +234,7 @@ static char *emit_cv_state (struct emit_buf *b, nsync_cv *cv,
word = ATM_LOAD (&cv->word);
if ((word & CV_NON_EMPTY) != 0 && print_waiters && /* can benefit from lock */
(blocking || (word & CV_SPINLOCK) == 0)) { /* willing, or no need to wait */
word = nsync_spin_test_and_set_ (&cv->word, CV_SPINLOCK, CV_SPINLOCK, 0);
word = nsync_spin_test_and_set_ (&cv->word, CV_SPINLOCK, CV_SPINLOCK, 0, cv);
acquired = 1;
}
emit_print (b, "cv 0x%i -> 0x%i = {", (uintptr_t) cv, word);


@@ -21,6 +21,7 @@
#include "third_party/nsync/common.internal.h"
#include "third_party/nsync/mu_semaphore.h"
#include "third_party/nsync/races.internal.h"
#include "libc/thread/thread.h"
#include "third_party/nsync/wait_s.internal.h"
__static_yoink("nsync_notice");
@@ -84,7 +85,7 @@ static int mu_try_acquire_after_timeout_or_cancel (nsync_mu *mu, lock_type *l_ty
ATM_CAS_RELACQ (&mu->word, old_word,
old_word|MU_WRITER_WAITING);
}
spin_attempts = nsync_spin_delay_ (spin_attempts);
spin_attempts = pthread_delay_np (mu, spin_attempts);
old_word = ATM_LOAD (&mu->word);
}
/* Check that w wasn't removed from the queue after our caller checked,
@@ -194,7 +195,7 @@ int nsync_mu_wait_with_deadline (nsync_mu *mu,
/* Acquire spinlock. */
old_word = nsync_spin_test_and_set_ (&mu->word, MU_SPINLOCK,
MU_SPINLOCK|MU_WAITING|has_condition, MU_ALL_FALSE);
MU_SPINLOCK|MU_WAITING|has_condition, MU_ALL_FALSE, mu);
had_waiters = ((old_word & (MU_DESIG_WAKER | MU_WAITING)) == MU_WAITING);
/* Queue the waiter. */
if (first_wait) {
@@ -244,7 +245,7 @@
}
if (ATM_LOAD (&w->nw.waiting) != 0) {
attempts = nsync_spin_delay_ (attempts); /* will ultimately yield */
attempts = pthread_delay_np (mu, attempts); /* will ultimately yield */
}
}


@@ -21,6 +21,7 @@
#include "third_party/nsync/mu_semaphore.h"
#include "third_party/nsync/once.h"
#include "third_party/nsync/races.internal.h"
#include "libc/thread/thread.h"
#include "third_party/nsync/wait_s.internal.h"
__static_yoink("nsync_notice");
@@ -92,7 +93,7 @@ static void nsync_run_once_impl (nsync_once *once, struct once_sync_s *s,
deadline = nsync_time_add (nsync_time_now (), nsync_time_ms (attempts));
nsync_cv_wait_with_deadline (&s->once_cv, &s->once_mu, deadline, NULL);
} else {
attempts = nsync_spin_delay_ (attempts);
attempts = pthread_delay_np (once, attempts);
}
}
if (s != NULL) {


@@ -22,6 +22,7 @@
#include "third_party/nsync/common.internal.h"
#include "third_party/nsync/mu_semaphore.h"
#include "third_party/nsync/races.internal.h"
#include "libc/thread/thread.h"
#include "third_party/nsync/wait_s.internal.h"
__static_yoink("nsync_notice");
@@ -120,7 +121,7 @@ void nsync_mu_lock_slow_ (nsync_mu *mu, waiter *w, uint32_t clear, lock_type *l_
about waiting writers or long waiters. */
zero_to_acquire &= ~(MU_WRITER_WAITING | MU_LONG_WAIT);
}
attempts = nsync_spin_delay_ (attempts);
attempts = pthread_delay_np (mu, attempts);
}
ALLOW_CANCELATION;
}
@@ -393,7 +394,7 @@ void nsync_mu_unlock_slow_ (nsync_mu *mu, lock_type *l_type) {
released above. */
if (testing_conditions) {
nsync_spin_test_and_set_ (&mu->word, MU_SPINLOCK,
MU_SPINLOCK, 0);
MU_SPINLOCK, 0, mu);
}
/* add the new_waiters to the last of the waiters. */
@@ -444,7 +445,7 @@ void nsync_mu_unlock_slow_ (nsync_mu *mu, lock_type *l_type) {
}
return;
}
attempts = nsync_spin_delay_ (attempts);
attempts = pthread_delay_np (mu, attempts);
}
}


@@ -27,7 +27,7 @@ __static_yoink("nsync_notice");
#define PREFER_GCD_OVER_ULOCK 1
/* Initialize *s; the initial value is 0. */
void nsync_mu_semaphore_init (nsync_semaphore *s) {
bool nsync_mu_semaphore_init (nsync_semaphore *s) {
if (PREFER_GCD_OVER_ULOCK && IsXnuSilicon ()) {
return nsync_mu_semaphore_init_gcd (s);
} else if (IsNetbsd ()) {


@@ -8,7 +8,7 @@ typedef struct nsync_semaphore_s_ {
} nsync_semaphore;
/* Initialize *s; the initial value is 0. */
void nsync_mu_semaphore_init(nsync_semaphore *s);
bool nsync_mu_semaphore_init(nsync_semaphore *s);
/* Wait until the count of *s exceeds 0, and decrement it. */
errno_t nsync_mu_semaphore_p(nsync_semaphore *s);

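Initialization can now report failure instead of aborting, because some backends can legitimately run out of resources: the sem backend needs a file descriptor per semaphore, and GCD's dispatch_semaphore_create() can return NULL. The populate loop in common.c above already honors this; a minimal caller-side sketch of the new bool contract, with hypothetical names:

    #include <stdbool.h>

    struct sem_model { int id; };

    /* hypothetical stand-in for nsync_mu_semaphore_init */
    static bool sem_init_model(struct sem_model *s) {
            s->id = 3;  /* pretend we acquired a descriptor */
            return s->id >= 0;
    }

    /* initialize up to `want` semaphores; return how many succeeded */
    int pool_fill(struct sem_model *pool, int want) {
            int n = 0;
            while (n < want && sem_init_model(&pool[n]))
                    n++;  /* stop at the first failure and keep the rest */
            return n;
    }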

@@ -4,17 +4,17 @@
#include "third_party/nsync/time.h"
COSMOPOLITAN_C_START_
void nsync_mu_semaphore_init_futex(nsync_semaphore *);
bool nsync_mu_semaphore_init_futex(nsync_semaphore *);
errno_t nsync_mu_semaphore_p_futex(nsync_semaphore *);
errno_t nsync_mu_semaphore_p_with_deadline_futex(nsync_semaphore *, nsync_time);
void nsync_mu_semaphore_v_futex(nsync_semaphore *);
void nsync_mu_semaphore_init_sem(nsync_semaphore *);
bool nsync_mu_semaphore_init_sem(nsync_semaphore *);
errno_t nsync_mu_semaphore_p_sem(nsync_semaphore *);
errno_t nsync_mu_semaphore_p_with_deadline_sem(nsync_semaphore *, nsync_time);
void nsync_mu_semaphore_v_sem(nsync_semaphore *);
void nsync_mu_semaphore_init_gcd(nsync_semaphore *);
bool nsync_mu_semaphore_init_gcd(nsync_semaphore *);
errno_t nsync_mu_semaphore_p_gcd(nsync_semaphore *);
errno_t nsync_mu_semaphore_p_with_deadline_gcd(nsync_semaphore *, nsync_time);
void nsync_mu_semaphore_v_gcd(nsync_semaphore *);


@@ -43,9 +43,10 @@ static nsync_semaphore *sem_big_enough_for_futex = (nsync_semaphore *) (uintptr_
(sizeof (struct futex) <= sizeof (*sem_big_enough_for_futex)));
/* Initialize *s; the initial value is 0. */
void nsync_mu_semaphore_init_futex (nsync_semaphore *s) {
bool nsync_mu_semaphore_init_futex (nsync_semaphore *s) {
struct futex *f = (struct futex *) s;
f->i = 0;
return true;
}
/* Wait until the count of *s exceeds 0, and decrement it. If POSIX cancellations


@@ -85,8 +85,8 @@ static errno_t nsync_dispatch_semaphore_wait (nsync_semaphore *s,
}
/* Initialize *s; the initial value is 0. */
void nsync_mu_semaphore_init_gcd (nsync_semaphore *s) {
*(dispatch_semaphore_t *)s = dispatch_semaphore_create (0);
bool nsync_mu_semaphore_init_gcd (nsync_semaphore *s) {
return !!(*(dispatch_semaphore_t *)s = dispatch_semaphore_create (0));
}
/* Wait until the count of *s exceeds 0, and decrement it. If POSIX cancellations


@@ -54,14 +54,22 @@ static struct {
static nsync_semaphore *sem_big_enough_for_sem = (nsync_semaphore *) (uintptr_t)(1 /
(sizeof (struct sem) <= sizeof (*sem_big_enough_for_sem)));
static void nsync_mu_semaphore_sem_create (struct sem *f) {
static bool nsync_mu_semaphore_sem_create (struct sem *f) {
int rc;
int lol;
f->id = 0;
ASSERT (!sys_sem_init (0, &f->id));
if ((lol = __sys_fcntl (f->id, F_DUPFD_CLOEXEC, 50)) >= 50) {
sys_close (f->id);
rc = sys_sem_init (0, &f->id);
STRACE ("sem_init(0, [%ld]) → %d", f->id, rc);
if (rc != 0)
return false;
lol = __sys_fcntl (f->id, F_DUPFD_CLOEXEC, 50);
STRACE ("fcntl(%ld, F_DUPFD_CLOEXEC, 50) → %d", f->id, lol);
if (lol >= 50) {
rc = sys_close (f->id);
STRACE ("close(%ld) → %d", f->id, rc);
f->id = lol;
}
return true;
}
static void nsync_mu_semaphore_sem_fork_child (void) {
@@ -69,8 +77,12 @@ static void nsync_mu_semaphore_sem_fork_child (void) {
struct sem *f;
for (e = dll_first (g_sems.list); e; e = dll_next (g_sems.list, e)) {
f = SEM_CONTAINER (e);
sys_close (f->id);
nsync_mu_semaphore_sem_create (f);
int rc = sys_close (f->id);
STRACE ("close(%ld) → %d", f->id, rc);
}
for (e = dll_first (g_sems.list); e; e = dll_next (g_sems.list, e)) {
f = SEM_CONTAINER (e);
ASSERT (nsync_mu_semaphore_sem_create (f));
}
(void) pthread_spin_init (&g_sems.lock, 0);
}
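Note that the fork handler now makes two passes where it previously closed and recreated each semaphore in a single loop: the child first closes every inherited descriptor, and only then recreates them all. The ordering matters because a freshly created semaphore could otherwise be handed a descriptor number that a later iteration still intends to close. A minimal sketch of the two-pass pattern, with hypothetical names and open("/dev/null") standing in for semaphore creation:

    #include <fcntl.h>
    #include <unistd.h>

    #define NSEMS 4

    static int ids[NSEMS];

    /* hypothetical post-fork() child handler */
    void recreate_all(void) {
            for (int i = 0; i < NSEMS; i++)
                    close(ids[i]);  /* pass 1: drop every stale descriptor */
            for (int i = 0; i < NSEMS; i++)
                    ids[i] = open("/dev/null", O_RDWR);  /* pass 2: recreate */
    }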
@@ -80,15 +92,16 @@ static void nsync_mu_semaphore_sem_init (void) {
}
/* Initialize *s; the initial value is 0. */
void nsync_mu_semaphore_init_sem (nsync_semaphore *s) {
bool nsync_mu_semaphore_init_sem (nsync_semaphore *s) {
struct sem *f = (struct sem *) s;
nsync_mu_semaphore_sem_create (f);
if (!nsync_mu_semaphore_sem_create (f))
return false;
cosmo_once (&g_sems.once, nsync_mu_semaphore_sem_init);
pthread_spin_lock (&g_sems.lock);
dll_init (&f->list);
dll_make_first (&g_sems.list, &f->list);
pthread_spin_unlock (&g_sems.lock);
STRACE ("sem_init(0, [%ld]) → 0", f->id);
return true;
}
/* Wait until the count of *s exceeds 0, and decrement it. If POSIX cancellations