Add *NSYNC to libc/isystem

This commit is contained in:
Justine Tunney 2022-09-13 01:46:29 -07:00
parent 6f7d0cb1c3
commit 22b63d0b98
No known key found for this signature in database
GPG key ID: BE714B4575D6E328
32 changed files with 286 additions and 119 deletions

11
libc/isystem/nsync.h Normal file
View file

@ -0,0 +1,11 @@
#ifndef COSMOPOLITAN_LIBC_ISYSTEM_NSYNC_H_
#define COSMOPOLITAN_LIBC_ISYSTEM_NSYNC_H_
#include "third_party/nsync/counter.h"
#include "third_party/nsync/cv.h"
#include "third_party/nsync/debug.h"
#include "third_party/nsync/mu.h"
#include "third_party/nsync/mu_wait.h"
#include "third_party/nsync/note.h"
#include "third_party/nsync/once.h"
#include "third_party/nsync/waiter.h"
#endif /* COSMOPOLITAN_LIBC_ISYSTEM_NSYNC_H_ */

View file

@ -0,0 +1,4 @@
#ifndef COSMOPOLITAN_LIBC_ISYSTEM_NSYNC_ATOMIC_H_
#define COSMOPOLITAN_LIBC_ISYSTEM_NSYNC_ATOMIC_H_
#include "third_party/nsync/atomic.h"
#endif /* COSMOPOLITAN_LIBC_ISYSTEM_NSYNC_ATOMIC_H_ */

View file

@ -0,0 +1,4 @@
#ifndef COSMOPOLITAN_LIBC_ISYSTEM_NSYNC_COUNTER_H_
#define COSMOPOLITAN_LIBC_ISYSTEM_NSYNC_COUNTER_H_
#include "third_party/nsync/counter.h"
#endif /* COSMOPOLITAN_LIBC_ISYSTEM_NSYNC_COUNTER_H_ */

4
libc/isystem/nsync_cv.h Normal file
View file

@ -0,0 +1,4 @@
#ifndef COSMOPOLITAN_LIBC_ISYSTEM_NSYNC_CV_H_
#define COSMOPOLITAN_LIBC_ISYSTEM_NSYNC_CV_H_
#include "third_party/nsync/cv.h"
#endif /* COSMOPOLITAN_LIBC_ISYSTEM_NSYNC_CV_H_ */

View file

@ -0,0 +1,4 @@
#ifndef COSMOPOLITAN_LIBC_ISYSTEM_NSYNC_DEBUG_H_
#define COSMOPOLITAN_LIBC_ISYSTEM_NSYNC_DEBUG_H_
#include "third_party/nsync/debug.h"
#endif /* COSMOPOLITAN_LIBC_ISYSTEM_NSYNC_DEBUG_H_ */

4
libc/isystem/nsync_mu.h Normal file
View file

@ -0,0 +1,4 @@
#ifndef COSMOPOLITAN_LIBC_ISYSTEM_NSYNC_MU_H_
#define COSMOPOLITAN_LIBC_ISYSTEM_NSYNC_MU_H_
#include "third_party/nsync/mu.h"
#endif /* COSMOPOLITAN_LIBC_ISYSTEM_NSYNC_MU_H_ */

View file

@ -0,0 +1,4 @@
#ifndef COSMOPOLITAN_LIBC_ISYSTEM_NSYNC_MU_WAIT_H_
#define COSMOPOLITAN_LIBC_ISYSTEM_NSYNC_MU_WAIT_H_
#include "third_party/nsync/mu_wait.h"
#endif /* COSMOPOLITAN_LIBC_ISYSTEM_NSYNC_MU_WAIT_H_ */

View file

@ -0,0 +1,4 @@
#ifndef COSMOPOLITAN_LIBC_ISYSTEM_NSYNC_NOTE_H_
#define COSMOPOLITAN_LIBC_ISYSTEM_NSYNC_NOTE_H_
#include "third_party/nsync/note.h"
#endif /* COSMOPOLITAN_LIBC_ISYSTEM_NSYNC_NOTE_H_ */

View file

@ -0,0 +1,4 @@
#ifndef COSMOPOLITAN_LIBC_ISYSTEM_NSYNC_ONCE_H_
#define COSMOPOLITAN_LIBC_ISYSTEM_NSYNC_ONCE_H_
#include "third_party/nsync/once.h"
#endif /* COSMOPOLITAN_LIBC_ISYSTEM_NSYNC_ONCE_H_ */

View file

@ -0,0 +1,4 @@
#ifndef COSMOPOLITAN_LIBC_ISYSTEM_NSYNC_TIME_H_
#define COSMOPOLITAN_LIBC_ISYSTEM_NSYNC_TIME_H_
#include "third_party/nsync/time.h"
#endif /* COSMOPOLITAN_LIBC_ISYSTEM_NSYNC_TIME_H_ */

View file

@ -0,0 +1,4 @@
#ifndef COSMOPOLITAN_LIBC_ISYSTEM_NSYNC_WAITER_H_
#define COSMOPOLITAN_LIBC_ISYSTEM_NSYNC_WAITER_H_
#include "third_party/nsync/waiter.h"
#endif /* COSMOPOLITAN_LIBC_ISYSTEM_NSYNC_WAITER_H_ */

View file

@ -1,158 +0,0 @@
/*-*- mode:c;indent-tabs-mode:t;c-basic-offset:8;tab-width:8;coding:utf-8 -*-│
vi: set et ft=c ts=8 tw=8 fenc=utf-8 :vi
Copyright 2016 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 │
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "libc/mem/mem.h"
#include "libc/str/str.h"
#include "third_party/nsync/atomic.h"
#include "third_party/nsync/atomic.internal.h"
#include "third_party/nsync/common.internal.h"
#include "third_party/nsync/counter.h"
#include "third_party/nsync/dll.h"
#include "third_party/nsync/mu_semaphore.h"
#include "third_party/nsync/races.internal.h"
#include "third_party/nsync/wait_s.internal.h"
#include "third_party/nsync/waiter.h"
asm(".ident\t\"\\n\\n\
*NSYNC (Apache 2.0)\\n\
Copyright 2016 Google, Inc.\\n\
https://github.com/google/nsync\"");
// clang-format off
/* Internal details of nsync_counter.
   "value" may be read without holding counter_mu (atomically); all
   other fields, and writes to "value", are protected by counter_mu. */
struct nsync_counter_s_ {
        nsync_atomic_uint32_ waited; /* wait has been called */
        nsync_mu counter_mu; /* protects fields below except reads of "value" */
        nsync_atomic_uint32_ value; /* value of counter */
        struct nsync_dll_element_s_ *waiters; /* list of waiters */
};
/* Allocate and return a new counter with initial value "value",
   or NULL if memory is exhausted.  Caller owns the result and must
   release it with nsync_counter_free(). */
nsync_counter nsync_counter_new (uint32_t value) {
        nsync_counter c = (nsync_counter) malloc (sizeof (*c));
        if (c == NULL) {
                return (NULL);
        }
        memset ((void *) c, 0, sizeof (*c));
        ATM_STORE (&c->value, value);
        return (c);
}
/* Free a counter allocated by nsync_counter_new().
   It is a fatal error (ASSERT) to free a counter that still has
   threads queued waiting on it. */
void nsync_counter_free (nsync_counter c) {
        /* Lock briefly so a concurrent enqueue cannot race the check. */
        nsync_mu_lock (&c->counter_mu);
        ASSERT (nsync_dll_is_empty_ (c->waiters));
        nsync_mu_unlock (&c->counter_mu);
        free (c);
}
/* Add "delta" to the count of *c and return the new value.  When the
   count reaches zero, all current waiters are woken.  Crashes rather
   than allowing the count to wrap in either direction, and forbids
   raising the count from zero once a waiter has been seen. */
uint32_t nsync_counter_add (nsync_counter c, int32_t delta) {
        uint32_t value;
        IGNORE_RACES_START ();
        if (delta == 0) {
                /* Pure read: no lock needed, acquire load suffices. */
                value = ATM_LOAD_ACQ (&c->value);
        } else {
                nsync_mu_lock (&c->counter_mu);
                /* CAS loop: the lock orders writers, but readers may load
                   "value" without the lock, so the update is still atomic. */
                do {
                        value = ATM_LOAD (&c->value);
                } while (!ATM_CAS_RELACQ (&c->value, value, value+delta));
                value += delta;
                if (delta > 0) {
                        /* It's illegal to increase the count from zero if
                           there has been a waiter. */
                        ASSERT (value != (uint32_t) delta || !ATM_LOAD (&c->waited));
                        ASSERT (value > value - delta); /* Crash on overflow. */
                } else {
                        ASSERT (value < value - delta); /* Crash on overflow. */
                }
                if (value == 0) {
                        /* Count hit zero: dequeue and wake every waiter. */
                        nsync_dll_element_ *p;
                        while ((p = nsync_dll_first_ (c->waiters)) != NULL) {
                                struct nsync_waiter_s *nw = DLL_NSYNC_WAITER (p);
                                c->waiters = nsync_dll_remove_ (c->waiters, p);
                                ATM_STORE_REL (&nw->waiting, 0);
                                nsync_mu_semaphore_v (nw->sem);
                        }
                }
                nsync_mu_unlock (&c->counter_mu);
        }
        IGNORE_RACES_END ();
        return (value);
}
/* Return the current count of *c (acquire-ordered read; no lock). */
uint32_t nsync_counter_value (nsync_counter c) {
        uint32_t v;
        IGNORE_RACES_START ();
        v = ATM_LOAD_ACQ (&c->value);
        IGNORE_RACES_END ();
        return (v);
}
/* Block until the count of *c is zero or abs_deadline passes.
   Returns the count observed on wakeup: 0 on success, non-zero if
   the deadline expired first. */
uint32_t nsync_counter_wait (nsync_counter c, nsync_time abs_deadline) {
        struct nsync_waitable_s waitable;
        struct nsync_waitable_s *pwaitable = &waitable;
        uint32_t result = 0;
        waitable.v = c;
        waitable.funcs = &nsync_counter_waitable_funcs;
        if (nsync_wait_n (NULL, NULL, NULL, abs_deadline, 1, &pwaitable) != 0) {
                /* Deadline case: re-read the count, which may have hit
                   zero between the timeout and this load. */
                IGNORE_RACES_START ();
                result = ATM_LOAD_ACQ (&c->value);
                IGNORE_RACES_END ();
        }
        return (result);
}
/* nsync_waitable_funcs_s "ready_time" callback: the counter is ready
   (time zero) when its count is zero, otherwise never (no deadline).
   Also records that a waiter has existed, for the increase-from-zero
   check in nsync_counter_add(). */
static nsync_time counter_ready_time (void *v, struct nsync_waiter_s *nw) {
        nsync_counter c = (nsync_counter) v;
        ATM_STORE (&c->waited, 1);
        if (ATM_LOAD_ACQ (&c->value) == 0) {
                return (nsync_time_zero);
        }
        return (nsync_time_no_deadline);
}
/* nsync_waitable_funcs_s "enqueue" callback: queue waiter *nw on
   counter *v if the count is non-zero; otherwise mark it not-waiting
   so the caller need not block.  Returns non-zero iff enqueued. */
static int counter_enqueue (void *v, struct nsync_waiter_s *nw) {
        nsync_counter c = (nsync_counter) v;
        uint32_t value; /* counter word is unsigned (nsync_atomic_uint32_);
                           was int32_t, inviting signed/unsigned confusion */
        nsync_mu_lock (&c->counter_mu);
        value = ATM_LOAD_ACQ (&c->value);
        if (value != 0) {
                c->waiters = nsync_dll_make_last_in_list_ (c->waiters, &nw->q);
                ATM_STORE (&nw->waiting, 1);
        } else {
                ATM_STORE (&nw->waiting, 0);
        }
        nsync_mu_unlock (&c->counter_mu);
        return (value != 0);
}
/* nsync_waitable_funcs_s "dequeue" callback: remove waiter *nw from
   counter *v's queue if it is still queued.  Returns non-zero iff the
   count was non-zero at the time of the call. */
static int counter_dequeue (void *v, struct nsync_waiter_s *nw) {
        nsync_counter c = (nsync_counter) v;
        uint32_t value; /* counter word is unsigned (nsync_atomic_uint32_);
                           was int32_t, inviting signed/unsigned confusion */
        nsync_mu_lock (&c->counter_mu);
        value = ATM_LOAD_ACQ (&c->value);
        if (ATM_LOAD_ACQ (&nw->waiting) != 0) {
                c->waiters = nsync_dll_remove_ (c->waiters, &nw->q);
                ATM_STORE (&nw->waiting, 0);
        }
        nsync_mu_unlock (&c->counter_mu);
        return (value != 0);
}
/* Waitable vtable that lets an nsync_counter participate in
   nsync_wait_n(). */
const struct nsync_waitable_funcs_s nsync_counter_waitable_funcs = {
        &counter_ready_time,
        &counter_enqueue,
        &counter_dequeue
};

View file

@ -1,497 +0,0 @@
/*-*- mode:c;indent-tabs-mode:t;c-basic-offset:8;tab-width:8;coding:utf-8 -*-│
vi: set et ft=c ts=8 tw=8 fenc=utf-8 :vi
Copyright 2016 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 │
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "libc/str/str.h"
#include "third_party/nsync/atomic.internal.h"
#include "third_party/nsync/common.internal.h"
#include "third_party/nsync/cv.h"
#include "third_party/nsync/dll.h"
#include "third_party/nsync/races.internal.h"
#include "third_party/nsync/wait_s.internal.h"
#include "third_party/nsync/waiter.h"
asm(".ident\t\"\\n\\n\
*NSYNC (Apache 2.0)\\n\
Copyright 2016 Google, Inc.\\n\
https://github.com/google/nsync\"");
// clang-format off
/* Initialize *cv to the empty, all-zero state. */
void nsync_cv_init (nsync_cv *cv) {
        memset ((void *) cv, 0, sizeof (*cv));
}
/* Wake the cv waiters in the circular list pointed to by
to_wake_list, which may not be NULL. If the waiter is associated with a
nsync_mu, the "wakeup" may consist of transferring the waiters to the nsync_mu's
queue. Requires that every waiter is associated with the same mutex.
all_readers indicates whether all the waiters on the list are readers. */
static void wake_waiters (nsync_dll_list_ to_wake_list, int all_readers) {
        nsync_dll_element_ *p = NULL;
        nsync_dll_element_ *next = NULL;
        nsync_dll_element_ *first_waiter = nsync_dll_first_ (to_wake_list);
        struct nsync_waiter_s *first_nw = DLL_NSYNC_WAITER (first_waiter);
        waiter *first_w = NULL;
        nsync_mu *pmu = NULL;
        /* Only native mu/cv waiters (NSYNC_WAITER_FLAG_MUCV) carry a full
           waiter struct with a mutex association; others are just woken. */
        if ((first_nw->flags & NSYNC_WAITER_FLAG_MUCV) != 0) {
                first_w = DLL_WAITER (first_waiter);
                pmu = first_w->cv_mu;
        }
        if (pmu != NULL) { /* waiter is associated with the nsync_mu *pmu. */
                /* We will transfer elements of to_wake_list to *pmu if all of:
                    - some thread holds the lock, and
                    - *pmu's spinlock is not held, and
                    - either *pmu cannot be acquired in the mode of the first
                      waiter, or there's more than one thread on to_wake_list
                      and not all are readers, and
                    - we acquire the spinlock on the first try.
                   The spinlock acquisition also marks *pmu as having waiters.
                   The requirement that some thread holds the lock ensures
                   that at least one of the transferred waiters will be woken.
                   */
                uint32_t old_mu_word = ATM_LOAD (&pmu->word);
                int first_cant_acquire = ((old_mu_word & first_w->l_type->zero_to_acquire) != 0);
                next = nsync_dll_next_ (to_wake_list, first_waiter);
                if ((old_mu_word&MU_ANY_LOCK) != 0 &&
                    (old_mu_word&MU_SPINLOCK) == 0 &&
                    (first_cant_acquire || (next != NULL && !all_readers)) &&
                    ATM_CAS_ACQ (&pmu->word, old_mu_word,
                                 (old_mu_word|MU_SPINLOCK|MU_WAITING) &
                                 ~MU_ALL_FALSE)) {
                        uint32_t set_on_release = 0;
                        /* For any waiter that should be transferred, rather
                           than woken, move it from to_wake_list to pmu->waiters. */
                        int first_is_writer = first_w->l_type == nsync_writer_type_;
                        int transferred_a_writer = 0;
                        int woke_areader = 0;
                        /* Transfer the first waiter iff it can't acquire *pmu. */
                        if (first_cant_acquire) {
                                to_wake_list = nsync_dll_remove_ (to_wake_list, first_waiter);
                                pmu->waiters = nsync_dll_make_last_in_list_ (pmu->waiters, first_waiter);
                                /* tell nsync_cv_wait_with_deadline() that we
                                   moved the waiter to *pmu's queue. */
                                first_w->cv_mu = NULL;
                                /* first_nw.waiting is already 1, from being on
                                   cv's waiter queue. */
                                transferred_a_writer = first_is_writer;
                        } else {
                                woke_areader = !first_is_writer;
                        }
                        /* Now process the other waiters. */
                        for (p = next; p != NULL; p = next) {
                                int p_is_writer;
                                struct nsync_waiter_s *p_nw = DLL_NSYNC_WAITER (p);
                                waiter *p_w = NULL;
                                if ((p_nw->flags & NSYNC_WAITER_FLAG_MUCV) != 0) {
                                        p_w = DLL_WAITER (p);
                                }
                                /* Fetch next before any removal invalidates p's links. */
                                next = nsync_dll_next_ (to_wake_list, p);
                                p_is_writer = (p_w != NULL &&
                                               DLL_WAITER (p)->l_type == nsync_writer_type_);
                                /* We transfer this element if any of:
                                    - the first waiter can't acquire *pmu, or
                                    - the first waiter is a writer, or
                                    - this element is a writer. */
                                if (p_w == NULL) {
                                        /* wake non-native waiter */
                                } else if (first_cant_acquire || first_is_writer || p_is_writer) {
                                        to_wake_list = nsync_dll_remove_ (to_wake_list, p);
                                        pmu->waiters = nsync_dll_make_last_in_list_ (pmu->waiters, p);
                                        /* tell nsync_cv_wait_with_deadline()
                                           that we moved the waiter to *pmu's
                                           queue. */
                                        p_w->cv_mu = NULL;
                                        /* p_nw->waiting is already 1, from
                                           being on cv's waiter queue. */
                                        transferred_a_writer = transferred_a_writer || p_is_writer;
                                } else {
                                        woke_areader = woke_areader || !p_is_writer;
                                }
                        }
                        /* Claim a waiting writer if we transferred one, except if we woke readers,
                           in which case we want those readers to be able to acquire immediately. */
                        if (transferred_a_writer && !woke_areader) {
                                set_on_release |= MU_WRITER_WAITING;
                        }
                        /* release *pmu's spinlock (MU_WAITING was set by CAS above) */
                        old_mu_word = ATM_LOAD (&pmu->word);
                        while (!ATM_CAS_REL (&pmu->word, old_mu_word,
                                             (old_mu_word|set_on_release) & ~MU_SPINLOCK)) {
                                old_mu_word = ATM_LOAD (&pmu->word);
                        }
                }
        }
        /* Wake any waiters we didn't manage to enqueue on the mu. */
        for (p = nsync_dll_first_ (to_wake_list); p != NULL; p = next) {
                struct nsync_waiter_s *p_nw = DLL_NSYNC_WAITER (p);
                next = nsync_dll_next_ (to_wake_list, p);
                to_wake_list = nsync_dll_remove_ (to_wake_list, p);
                /* Wake the waiter. */
                ATM_STORE_REL (&p_nw->waiting, 0); /* release store */
                nsync_mu_semaphore_v (p_nw->sem);
        }
}
/* ------------------------------------------ */
/* Versions of nsync_mu_lock() and nsync_mu_unlock() that take "void *"
arguments, to avoid call through a function pointer of a different type,
which is undefined. */
static void void_mu_lock (void *mu) {
nsync_mu_lock ((nsync_mu *) mu);
}
static void void_mu_unlock (void *mu) {
nsync_mu_unlock ((nsync_mu *) mu);
}
/* Atomically release *pmu (which must be held on entry)
and block the calling thread on *pcv. Then wait until awakened by a
call to nsync_cv_signal() or nsync_cv_broadcast() (or a spurious wakeup), or by the time
reaching abs_deadline, or by cancel_note being notified. In all cases,
reacquire *pmu, and return the reason for the call returned (0, ETIMEDOUT,
or ECANCELED). Callers should pass abs_deadline==nsync_time_no_deadline for no
deadline, and cancel_note==NULL for no cancellation. nsync_cv_wait_with_deadline()
should be used in a loop, as with all Mesa-style condition variables. See
examples above.
There are two reasons for using an absolute deadline, rather than a relative
timeout---these are why pthread_cond_timedwait() also uses an absolute
deadline. First, condition variable waits have to be used in a loop; with
an absolute time, the deadline does not have to be recomputed on each
iteration. Second, in most real programmes, for some activity (such as an RPC
to a server, or when guaranteeing response time in a UI), there is a
deadline imposed by the specification or the caller/user; relative delays
can shift arbitrarily with scheduling delays, and so after multiple waits
might extend beyond the expected deadline. Relative delays tend to be more
convenient mostly in tests and trivial examples than they are in real
programmes. */
int nsync_cv_wait_with_deadline_generic (nsync_cv *pcv, void *pmu,
                                         void (*lock) (void *), void (*unlock) (void *),
                                         nsync_time abs_deadline,
                                         nsync_note cancel_note) {
        nsync_mu *cv_mu = NULL;
        int is_reader_mu;
        uint32_t old_word;
        uint32_t remove_count;
        int sem_outcome;
        unsigned attempts;
        int outcome = 0;
        waiter *w;
        IGNORE_RACES_START ();
        w = nsync_waiter_new_ ();
        ATM_STORE (&w->nw.waiting, 1);
        w->cond.f = NULL; /* Not using a conditional critical section. */
        w->cond.v = NULL;
        w->cond.eq = NULL;
        /* Recognize the known nsync_mu lock entry points; any other lock
           function means *pmu is an opaque, client-supplied mutex. */
        if (lock == &void_mu_lock ||
            lock == (void (*) (void *)) &nsync_mu_lock ||
            lock == (void (*) (void *)) &nsync_mu_rlock) {
                cv_mu = (nsync_mu *) pmu;
        }
        w->cv_mu = cv_mu; /* If *pmu is an nsync_mu, record its address, else record NULL. */
        is_reader_mu = 0; /* If true, an nsync_mu in reader mode. */
        if (cv_mu == NULL) {
                w->l_type = NULL;
        } else {
                uint32_t old_mu_word = ATM_LOAD (&cv_mu->word);
                int is_writer = (old_mu_word & MU_WHELD_IF_NON_ZERO) != 0;
                int is_reader = (old_mu_word & MU_RHELD_IF_NON_ZERO) != 0;
                if (is_writer) {
                        if (is_reader) {
                                nsync_panic_ ("mu held in reader and writer mode simultaneously "
                                              "on entry to nsync_cv_wait_with_deadline()\n");
                        }
                        w->l_type = nsync_writer_type_;
                } else if (is_reader) {
                        w->l_type = nsync_reader_type_;
                        is_reader_mu = 1;
                } else {
                        nsync_panic_ ("mu not held on entry to nsync_cv_wait_with_deadline()\n");
                }
        }
        /* acquire spinlock, set non-empty */
        old_word = nsync_spin_test_and_set_ (&pcv->word, CV_SPINLOCK, CV_SPINLOCK|CV_NON_EMPTY, 0);
        pcv->waiters = nsync_dll_make_last_in_list_ (pcv->waiters, &w->nw.q);
        /* Snapshot remove_count while holding the spinlock; it is compared
           below to detect a concurrent dequeue by a waker. */
        remove_count = ATM_LOAD (&w->remove_count);
        /* Release the spin lock. */
        ATM_STORE_REL (&pcv->word, old_word|CV_NON_EMPTY); /* release store */
        /* Release *pmu. */
        if (is_reader_mu) {
                nsync_mu_runlock (cv_mu);
        } else {
                (*unlock) (pmu);
        }
        /* wait until awoken or a timeout. */
        sem_outcome = 0;
        attempts = 0;
        while (ATM_LOAD_ACQ (&w->nw.waiting) != 0) { /* acquire load */
                if (sem_outcome == 0) {
                        sem_outcome = nsync_sem_wait_with_cancel_ (w, abs_deadline, cancel_note);
                }
                if (sem_outcome != 0 && ATM_LOAD (&w->nw.waiting) != 0) {
                        /* A timeout or cancellation occurred, and no wakeup.
                           Acquire *pcv's spinlock, and confirm. */
                        old_word = nsync_spin_test_and_set_ (&pcv->word, CV_SPINLOCK,
                                                             CV_SPINLOCK, 0);
                        /* Check that w wasn't removed from the queue after we
                           checked above, but before we acquired the spinlock.
                           The test of remove_count confirms that the waiter *w
                           is still governed by *pcv's spinlock; otherwise, some
                           other thread is about to set w.waiting==0. */
                        if (ATM_LOAD (&w->nw.waiting) != 0) {
                                if (remove_count == ATM_LOAD (&w->remove_count)) {
                                        uint32_t old_value;
                                        /* still in cv waiter queue */
                                        /* Not woken, so remove *w from cv
                                           queue, and declare a
                                           timeout/cancellation. */
                                        outcome = sem_outcome;
                                        pcv->waiters = nsync_dll_remove_ (pcv->waiters,
                                                                          &w->nw.q);
                                        do {
                                                old_value = ATM_LOAD (&w->remove_count);
                                        } while (!ATM_CAS (&w->remove_count, old_value, old_value+1));
                                        if (nsync_dll_is_empty_ (pcv->waiters)) {
                                                old_word &= ~(CV_NON_EMPTY);
                                        }
                                        ATM_STORE_REL (&w->nw.waiting, 0); /* release store */
                                }
                        }
                        /* Release spinlock. */
                        ATM_STORE_REL (&pcv->word, old_word); /* release store */
                }
                if (ATM_LOAD (&w->nw.waiting) != 0) {
                        /* The delay here causes this thread ultimately to
                           yield to another that has dequeued this thread, but
                           has not yet set the waiting field to zero; a
                           cancellation or timeout may prevent this thread
                           from blocking above on the semaphore. */
                        attempts = nsync_spin_delay_ (attempts);
                }
        }
        if (cv_mu != NULL && w->cv_mu == NULL) { /* waiter was moved to *pmu's queue, and woken. */
                /* Requeue on *pmu using existing waiter struct; current thread
                   is the designated waker. */
                nsync_mu_lock_slow_ (cv_mu, w, MU_DESIG_WAKER, w->l_type);
                nsync_waiter_free_ (w);
        } else {
                /* Traditional case: We've woken from the cv, and need to reacquire *pmu. */
                nsync_waiter_free_ (w);
                if (is_reader_mu) {
                        nsync_mu_rlock (cv_mu);
                } else {
                        (*lock) (pmu);
                }
        }
        IGNORE_RACES_END ();
        return (outcome);
}
/* Wake at least one thread if any are currently blocked on *pcv. If
the chosen thread is a reader on an nsync_mu, wake all readers and, if
possible, a writer. */
void nsync_cv_signal (nsync_cv *pcv) {
        IGNORE_RACES_START ();
        if ((ATM_LOAD_ACQ (&pcv->word) & CV_NON_EMPTY) != 0) { /* acquire load */
                nsync_dll_list_ to_wake_list = NULL; /* waiters that we will wake */
                int all_readers = 0;
                /* acquire spinlock */
                uint32_t old_word = nsync_spin_test_and_set_ (&pcv->word, CV_SPINLOCK,
                                                              CV_SPINLOCK, 0);
                if (!nsync_dll_is_empty_ (pcv->waiters)) {
                        /* Point to first waiter that enqueued itself, and
                           detach it from all others. */
                        struct nsync_waiter_s *first_nw;
                        nsync_dll_element_ *first = nsync_dll_first_ (pcv->waiters);
                        pcv->waiters = nsync_dll_remove_ (pcv->waiters, first);
                        first_nw = DLL_NSYNC_WAITER (first);
                        if ((first_nw->flags & NSYNC_WAITER_FLAG_MUCV) != 0) {
                                /* Bump remove_count so a timing-out waiter can
                                   see it has already been dequeued. */
                                uint32_t old_value;
                                do {
                                        old_value =
                                                ATM_LOAD (&DLL_WAITER (first)->remove_count);
                                } while (!ATM_CAS (&DLL_WAITER (first)->remove_count,
                                                   old_value, old_value+1));
                        }
                        to_wake_list = nsync_dll_make_last_in_list_ (to_wake_list, first);
                        if ((first_nw->flags & NSYNC_WAITER_FLAG_MUCV) != 0 &&
                            DLL_WAITER (first)->l_type == nsync_reader_type_) {
                                int woke_writer;
                                /* If the first waiter is a reader, wake all readers, and
                                   if it's possible, one writer.  This allows reader-regions
                                   to be added to a monitor without invalidating code in which
                                   a client has optimized broadcast calls by converting them to
                                   signal calls.  In particular, we wake a writer when waking
                                   readers because the readers will not invalidate the condition
                                   that motivated the client to call nsync_cv_signal().  But we
                                   wake at most one writer because the first writer may invalidate
                                   the condition; the client is expecting only one writer to be
                                   able make use of the wakeup, or he would have called
                                   nsync_cv_broadcast(). */
                                nsync_dll_element_ *p = NULL;
                                nsync_dll_element_ *next = NULL;
                                all_readers = 1;
                                woke_writer = 0;
                                for (p = nsync_dll_first_ (pcv->waiters); p != NULL; p = next) {
                                        struct nsync_waiter_s *p_nw = DLL_NSYNC_WAITER (p);
                                        int should_wake;
                                        next = nsync_dll_next_ (pcv->waiters, p);
                                        should_wake = 0;
                                        if ((p_nw->flags & NSYNC_WAITER_FLAG_MUCV) != 0 &&
                                            DLL_WAITER (p)->l_type == nsync_reader_type_) {
                                                should_wake = 1;
                                        } else if (!woke_writer) {
                                                woke_writer = 1;
                                                all_readers = 0;
                                                should_wake = 1;
                                        }
                                        if (should_wake) {
                                                pcv->waiters = nsync_dll_remove_ (pcv->waiters, p);
                                                if ((p_nw->flags & NSYNC_WAITER_FLAG_MUCV) != 0) {
                                                        uint32_t old_value;
                                                        do {
                                                                old_value = ATM_LOAD (
                                                                        &DLL_WAITER (p)->remove_count);
                                                        } while (!ATM_CAS (&DLL_WAITER (p)->remove_count,
                                                                           old_value, old_value+1));
                                                }
                                                to_wake_list = nsync_dll_make_last_in_list_ (
                                                        to_wake_list, p);
                                        }
                                }
                        }
                        if (nsync_dll_is_empty_ (pcv->waiters)) {
                                old_word &= ~(CV_NON_EMPTY);
                        }
                }
                /* Release spinlock. */
                ATM_STORE_REL (&pcv->word, old_word); /* release store */
                /* Waking happens outside the spinlock to keep the critical
                   section short. */
                if (!nsync_dll_is_empty_ (to_wake_list)) {
                        wake_waiters (to_wake_list, all_readers);
                }
        }
        IGNORE_RACES_END ();
}
/* Wake all threads currently blocked on *pcv. */
void nsync_cv_broadcast (nsync_cv *pcv) {
        IGNORE_RACES_START ();
        if ((ATM_LOAD_ACQ (&pcv->word) & CV_NON_EMPTY) != 0) { /* acquire load */
                nsync_dll_element_ *p;
                nsync_dll_element_ *next;
                int all_readers;
                nsync_dll_list_ to_wake_list = NULL; /* waiters that we will wake */
                /* acquire spinlock */
                nsync_spin_test_and_set_ (&pcv->word, CV_SPINLOCK, CV_SPINLOCK, 0);
                p = NULL;
                next = NULL;
                all_readers = 1;
                /* Wake entire waiter list, which we leave empty. */
                for (p = nsync_dll_first_ (pcv->waiters); p != NULL; p = next) {
                        struct nsync_waiter_s *p_nw = DLL_NSYNC_WAITER (p);
                        next = nsync_dll_next_ (pcv->waiters, p);
                        all_readers = all_readers && (p_nw->flags & NSYNC_WAITER_FLAG_MUCV) != 0 &&
                                      (DLL_WAITER (p)->l_type == nsync_reader_type_);
                        pcv->waiters = nsync_dll_remove_ (pcv->waiters, p);
                        if ((p_nw->flags & NSYNC_WAITER_FLAG_MUCV) != 0) {
                                /* Bump remove_count so a timing-out waiter can
                                   see it has already been dequeued. */
                                uint32_t old_value;
                                do {
                                        old_value = ATM_LOAD (&DLL_WAITER (p)->remove_count);
                                } while (!ATM_CAS (&DLL_WAITER (p)->remove_count,
                                                   old_value, old_value+1));
                        }
                        to_wake_list = nsync_dll_make_last_in_list_ (to_wake_list, p);
                }
                /* Release spinlock and mark queue empty. */
                ATM_STORE_REL (&pcv->word, 0); /* release store */
                if (!nsync_dll_is_empty_ (to_wake_list)) { /* Wake them. */
                        wake_waiters (to_wake_list, all_readers);
                }
        }
        IGNORE_RACES_END ();
}
/* Wait with deadline, using an nsync_mu. */
/* Wait with deadline, using an nsync_mu; thin wrapper over the
   generic implementation with the void* mu adapters. */
int nsync_cv_wait_with_deadline (nsync_cv *pcv, nsync_mu *pmu,
                                 nsync_time abs_deadline,
                                 nsync_note cancel_note) {
        int outcome = nsync_cv_wait_with_deadline_generic (
                pcv, pmu, &void_mu_lock, &void_mu_unlock,
                abs_deadline, cancel_note);
        return (outcome);
}
/* Atomically release *pmu and block the caller on *pcv. Wait
until awakened by a call to nsync_cv_signal() or nsync_cv_broadcast(), or a spurious
wakeup. Then reacquires *pmu, and return. The call is equivalent to a call
to nsync_cv_wait_with_deadline() with abs_deadline==nsync_time_no_deadline, and a NULL
cancel_note. It should be used in a loop, as with all standard Mesa-style
condition variables. See examples above. */
void nsync_cv_wait (nsync_cv *pcv, nsync_mu *pmu) {
        /* No deadline and no cancel note: return value is always 0,
           so it is ignored. */
        nsync_cv_wait_with_deadline (pcv, pmu, nsync_time_no_deadline, NULL);
}
/* nsync_waitable_funcs_s "ready_time" callback for a cv: ready (time
   zero) once the waiter has been woken (waiting==0); otherwise never. */
static nsync_time cv_ready_time (void *v, struct nsync_waiter_s *nw) {
        if (nw == NULL || ATM_LOAD_ACQ (&nw->waiting) != 0) {
                return (nsync_time_no_deadline);
        }
        return (nsync_time_zero);
}
/* nsync_waitable_funcs_s "enqueue" callback: queue waiter *nw on cv
   *v.  A cv waiter always blocks, so this always returns 1. */
static int cv_enqueue (void *v, struct nsync_waiter_s *nw) {
        nsync_cv *pcv = (nsync_cv *) v;
        /* acquire spinlock */
        uint32_t old_word = nsync_spin_test_and_set_ (&pcv->word, CV_SPINLOCK, CV_SPINLOCK, 0);
        pcv->waiters = nsync_dll_make_last_in_list_ (pcv->waiters, &nw->q);
        ATM_STORE (&nw->waiting, 1);
        /* Release spinlock. */
        ATM_STORE_REL (&pcv->word, old_word | CV_NON_EMPTY); /* release store */
        return (1);
}
/* nsync_waitable_funcs_s "dequeue" callback: remove waiter *nw from
   cv *v's queue if it is still queued; clear CV_NON_EMPTY when the
   queue empties.  Returns non-zero iff *nw was still queued. */
static int cv_dequeue (void *v, struct nsync_waiter_s *nw) {
        nsync_cv *pcv = (nsync_cv *) v;
        int was_queued = 0;
        /* acquire spinlock */
        uint32_t old_word = nsync_spin_test_and_set_ (&pcv->word, CV_SPINLOCK, CV_SPINLOCK, 0);
        if (ATM_LOAD_ACQ (&nw->waiting) != 0) {
                pcv->waiters = nsync_dll_remove_ (pcv->waiters, &nw->q);
                ATM_STORE (&nw->waiting, 0);
                was_queued = 1;
        }
        if (nsync_dll_is_empty_ (pcv->waiters)) {
                old_word &= ~(CV_NON_EMPTY);
        }
        /* Release spinlock. */
        ATM_STORE_REL (&pcv->word, old_word); /* release store */
        return (was_queued);
}
/* Waitable vtable that lets an nsync_cv participate in
   nsync_wait_n(). */
const struct nsync_waitable_funcs_s nsync_cv_waitable_funcs = {
        &cv_ready_time,
        &cv_enqueue,
        &cv_dequeue
};

View file

@ -1,292 +0,0 @@
/*-*- mode:c;indent-tabs-mode:t;c-basic-offset:8;tab-width:8;coding:utf-8 -*-│
vi: set et ft=c ts=8 tw=8 fenc=utf-8 :vi
Copyright 2016 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 │
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "third_party/nsync/atomic.h"
#include "third_party/nsync/common.internal.h"
#include "third_party/nsync/dll.h"
#include "third_party/nsync/mu_semaphore.h"
#include "third_party/nsync/races.internal.h"
#include "third_party/nsync/wait_s.internal.h"
asm(".ident\t\"\\n\\n\
*NSYNC (Apache 2.0)\\n\
Copyright 2016 Google, Inc.\\n\
https://github.com/google/nsync\"");
// clang-format off
/* Routines for debugging. */
/* An emit_buf represents a buffer into which debug information can
   be written. */
struct emit_buf {
        char *start;   /* start of buffer */
        int len;       /* length of buffer */
        int pos;       /* position of next character to be written */
        int overflow;  /* non-zero iff buffer overflow has occurred */
};
/* Initialize *b to point to start[0, ..., len-1], and return b. */
/* Initialize *b to write into start[0 .. len-1], and return b. */
static struct emit_buf *emit_init (struct emit_buf *b, char *start, int len) {
        b->overflow = 0;
        b->pos = 0;
        b->start = start;
        b->len = len;
        return (b);
}
/* Write character c to buffer *b.  On the first write past the end,
   overwrite the tail of the buffer with "..." (including its NUL) to
   mark truncation; subsequent writes are dropped silently. */
static void emit_c (struct emit_buf *b, int c) {
        if (b->pos < b->len) {
                b->start[b->pos++] = c;
        } else if (!b->overflow) {
                static const char suffix[] = "...";
                const char *s = &suffix[sizeof (suffix)]; /* past nul */
                char *p = &b->start[b->len]; /* past end */
                /* Copy suffix backwards so it lands flush at the end,
                   even if the buffer is shorter than the suffix. */
                while (s > suffix && p > b->start) {
                        *--p = *--s;
                }
                b->overflow = 1;
        }
}
/* A printf-like function that writes to an emit_buf.
   It understands only the format specifiers %s (const char *), and %i
   (uintptr_t in hex), with no modifiers.  Any other specifier ASSERTs. */
static void emit_print (struct emit_buf *b, const char *fmt, ...) {
        va_list ap;
        va_start (ap, fmt);
        while (*fmt != 0) {
                int c = *fmt++;
                if (c != '%') {
                        emit_c (b, c);
                } else {
                        c = *fmt++;
                        if (c == 's') {
                                const char *s = va_arg (ap, const char *);
                                while (*s != 0) {
                                        emit_c (b, *s++);
                                }
                        } else if (c == 'i') {
                                uintptr_t n = va_arg (ap, uintptr_t);
                                int i;
                                /* Find the highest non-zero hex digit's shift,
                                   then emit digits from most significant down. */
                                for (i = 0; (n >> i) >= 0x10; i += 4) {
                                }
                                for (; i >= 0; i -= 4) {
                                        emit_c (b, "0123456789abcdef"[(n >> i) & 0xf]);
                                }
                        } else {
                                ASSERT (0);
                        }
                }
        }
        va_end (ap);
}
/* Map a bit in a uint32_t to a human-readable name.
   Tables of these are terminated by an entry with mask==0. */
struct bit_name {
        uint32_t mask;
        const char *name;
};
/* Bit-name tables consumed by emit_word() below. */
/* names for bits in a mu word */
static const struct bit_name mu_bit[] = {
        { MU_WLOCK, "wlock" },
        { MU_SPINLOCK, "spin" },
        { MU_WAITING, "wait" },
        { MU_DESIG_WAKER, "desig" },
        { MU_CONDITION, "cond" },
        { MU_WRITER_WAITING, "writer" },
        { MU_LONG_WAIT, "long" },
        { MU_ALL_FALSE, "false" },
        { 0, "" } /* sentinel */
};
/* names for bits in a cv word */
static const struct bit_name cv_bit[] = {
        { CV_SPINLOCK, "spin" },
        { CV_NON_EMPTY, "wait" },
        { 0, "" } /* sentinel */
};
/* names for bits in a waiter flags word */
static const struct bit_name waiter_flags_bit[] = {
        { WAITER_RESERVED, "rsrvd" },
        { WAITER_IN_USE, "in_use" },
        { 0, "" } /* sentinel */
};
/* Emit the names of bits in word to buffer *b using names[] */
static void emit_word (struct emit_buf *b, const struct bit_name *name, uint32_t word) {
int i;
for (i = 0; name[i].mask != 0; i++) {
if ((word & name[i].mask) != 0) {
emit_print (b, " %s", name[i].name);
}
}
}
/* Emit the waiter queue "list" to *b, one line per waiter, stopping
   early if *b overflows.  Waiters with corrupt tags are reported and
   (for a bad WAITER_TAG) list traversal is abandoned, since the next
   pointer cannot be trusted. */
static void emit_waiters (struct emit_buf *b, nsync_dll_list_ list) {
        nsync_dll_element_ *p = nsync_dll_first_ (list);
        nsync_dll_element_ *next;
        if (p != NULL) {
                emit_print (b, "\nwaiters =\n");
        }
        for (; p != NULL && !b->overflow; p = next) {
                struct nsync_waiter_s *nw = DLL_NSYNC_WAITER (p);
                waiter *w = DLL_WAITER (p);
                next = NULL;
                emit_print (b, " %i", (uintptr_t) w);
                if (w->tag != WAITER_TAG) {
                        /* was "bad WAITER_TAG %i": missing the leading
                           space that separates fields on the line */
                        emit_print (b, " bad WAITER_TAG %i",
                                    (uintptr_t) w->tag);
                } else {
                        next = nsync_dll_next_ (list, p);
                        if (nw->tag != NSYNC_WAITER_TAG) {
                                /* was " bad WAITER_TAG": this check is
                                   against NSYNC_WAITER_TAG, so name it */
                                emit_print (b, " bad NSYNC_WAITER_TAG %i",
                                            (uintptr_t) nw->tag);
                        } else {
                                emit_print (b, " embedded=%i waiting=%i",
                                            (uintptr_t) (w->flags & NSYNC_WAITER_FLAG_MUCV),
                                            (uintptr_t) ATM_LOAD (&nw->waiting));
                        }
                        emit_word (b, waiter_flags_bit, w->flags);
                        emit_print (b, " %s removes=%i cond=(%i %i %i)",
                                    w->l_type == nsync_writer_type_? "writer" :
                                    w->l_type == nsync_reader_type_? "reader" :
                                    "??????",
                                    (uintptr_t) ATM_LOAD (&w->remove_count),
                                    (uintptr_t) w->cond.f,
                                    (uintptr_t) w->cond.v,
                                    (uintptr_t) w->cond.eq);
                        if (w->same_condition.next != &w->same_condition) {
                                emit_print (b, " same_as %i",
                                            (uintptr_t) DLL_WAITER_SAMECOND (
                                                    w->same_condition.next));
                        }
                }
                emit_c (b, '\n');
        }
}
/* Emit to *b the state of *mu, and return a pointer to *b's buffer.
   If blocking!=0, print_waiters!=0, and *mu's waiter list is non-empty, the
   call will block until it can acquire the spinlock.
   If print_waiters!=0, the waiter list is printed.
   The spinlock is released before return if it was acquired.
   blocking==0 && print_waiters!=0 is unsafe and is intended for use within
   interactive debuggers. */
static char *emit_mu_state (struct emit_buf *b, nsync_mu *mu,
                            int blocking, int print_waiters) {
        uintptr_t word;
        uintptr_t readers;
        int acquired = 0;
        IGNORE_RACES_START ();
        word = ATM_LOAD (&mu->word);
        if ((word & MU_WAITING) != 0 && print_waiters && /* can benefit from lock */
            (blocking || (word & MU_SPINLOCK) == 0)) { /* willing, or no need to wait */
                word = nsync_spin_test_and_set_ (&mu->word, MU_SPINLOCK, MU_SPINLOCK, 0);
                acquired = 1;
        }
        /* Reader count occupies the high bits of the word above MU_RLOCK. */
        readers = word / MU_RLOCK;
        emit_print (b, "mu 0x%i -> 0x%i = {", (uintptr_t) mu, word);
        emit_word (b, mu_bit, word);
        if (readers != 0) {
                emit_print (b, " readers=0x%i", readers);
        }
        emit_print (b, " }");
        if (print_waiters) {
                emit_waiters (b, mu->waiters);
        }
        if (acquired) {
                ATM_STORE_REL (&mu->word, word); /* release store */
        }
        emit_c (b, 0); /* NUL-terminate the output */
        IGNORE_RACES_END ();
        return (b->start);
}
/* Emit to *b the state of *cv, and return a pointer to *b's buffer.
   If blocking!=0, print_waiters!=0, and *cv's waiter list is non-empty, the
   call will block until it can acquire the spinlock.
   If print_waiters!=0, the waiter list is printed.
   The spinlock is released before return if it was acquired.
   blocking==0 && print_waiters!=0 is unsafe and is intended for use within
   interactive debuggers. */
static char *emit_cv_state (struct emit_buf *b, nsync_cv *cv,
                            int blocking, int print_waiters) {
        uintptr_t word;
        int acquired = 0;
        IGNORE_RACES_START ();
        word = ATM_LOAD (&cv->word);
        if ((word & CV_NON_EMPTY) != 0 && print_waiters && /* can benefit from lock */
            (blocking || (word & CV_SPINLOCK) == 0)) { /* willing, or no need to wait */
                word = nsync_spin_test_and_set_ (&cv->word, CV_SPINLOCK, CV_SPINLOCK, 0);
                acquired = 1;
        }
        emit_print (b, "cv 0x%i -> 0x%i = {", (uintptr_t) cv, word);
        emit_word (b, cv_bit, word);
        emit_print (b, " }");
        if (print_waiters) {
                emit_waiters (b, cv->waiters);
        }
        if (acquired) {
                ATM_STORE_REL (&cv->word, word); /* release store */
        }
        emit_c (b, 0); /* NUL-terminate the output */
        IGNORE_RACES_END ();
        return (b->start);
}
/* Format the state of *mu (without its waiter list) into buf[0..n-1];
   return a pointer to the formatted text. */
char *nsync_mu_debug_state (nsync_mu *mu, char *buf, int n) {
	struct emit_buf b;
	struct emit_buf *eb = emit_init (&b, buf, n);
	return (emit_mu_state (eb, mu, 0, 0));
}
/* Format the state of *cv (without its waiter list) into buf[0..n-1];
   return a pointer to the formatted text. */
char *nsync_cv_debug_state (nsync_cv *cv, char *buf, int n) {
	struct emit_buf b;
	struct emit_buf *eb = emit_init (&b, buf, n);
	return (emit_cv_state (eb, cv, 0, 0));
}
/* Format the state of *mu, including its waiter list, into buf[0..n-1];
   may block briefly on *mu's internal spinlock.  Return the formatted text. */
char *nsync_mu_debug_state_and_waiters (nsync_mu *mu, char *buf, int n) {
	struct emit_buf b;
	struct emit_buf *eb = emit_init (&b, buf, n);
	return (emit_mu_state (eb, mu, 1, 1));
}
/* Format the state of *cv, including its waiter list, into buf[0..n-1];
   may block briefly on *cv's internal spinlock.  Return the formatted text. */
char *nsync_cv_debug_state_and_waiters (nsync_cv *cv, char *buf, int n) {
	struct emit_buf b;
	struct emit_buf *eb = emit_init (&b, buf, n);
	return (emit_cv_state (eb, cv, 1, 1));
}
static char nsync_debug_buf[1024];
/* Format the state of *mu, including waiters, into the shared static
   buffer without blocking on the spinlock; intended for calling from an
   interactive debugger (not thread-safe: the buffer is shared). */
char *nsync_mu_debugger (nsync_mu *mu) {
	struct emit_buf b;
	struct emit_buf *eb = emit_init (&b, nsync_debug_buf,
					 (int) sizeof (nsync_debug_buf));
	return (emit_mu_state (eb, mu, 0, 1));
}
/* Format the state of *cv, including waiters, into the shared static
   buffer without blocking on the spinlock; intended for calling from an
   interactive debugger (not thread-safe: the buffer is shared). */
char *nsync_cv_debugger (nsync_cv *cv) {
	struct emit_buf b;
	struct emit_buf *eb = emit_init (&b, nsync_debug_buf,
					 (int) sizeof (nsync_debug_buf));
	return (emit_cv_state (eb, cv, 0, 1));
}

View file

@ -1,322 +0,0 @@
/*-*- mode:c;indent-tabs-mode:t;c-basic-offset:8;tab-width:8;coding:utf-8 -*-│
vi: set et ft=c ts=8 tw=8 fenc=utf-8 :vi
Copyright 2016 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 │
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "third_party/nsync/atomic.h"
#include "third_party/nsync/common.internal.h"
#include "third_party/nsync/dll.h"
#include "third_party/nsync/mu_semaphore.h"
#include "third_party/nsync/races.internal.h"
#include "third_party/nsync/wait_s.internal.h"
asm(".ident\t\"\\n\\n\
*NSYNC (Apache 2.0)\\n\
Copyright 2016 Google, Inc.\\n\
https://github.com/google/nsync\"");
// clang-format off
/* Attempt to remove waiter *w from *mu's
waiter queue. If successful, leave the lock held in mode *l_type, and
return non-zero; otherwise return zero. Requires that the current thread
hold neither *mu nor its spinlock, that remove_count be the value of
w.remove_count when *w was inserted into the queue (which it will still be if
it has not been removed).
This is a tricky part of the design. Here is the rationale.
When a condition times out or is cancelled, we must "turn off" the
condition, making it always true, so the lock will be acquired in the normal
way. The naive approach would be to set a field atomically to tell future
waiters to ignore the condition. Unfortunately, that would violate the
same_condition list invariants, and the same_condition optimization is
probably worth keeping.
To fixup the same_condition list, we must have mutual exclusion with the loop
in nsync_mu_unlock_slow_() that is examining waiters, evaluating their conditions, and
removing them from the queue. That loop uses both the spinlock (to allow
queue changes), and the mutex itself (to allow condition evaluation).
Therefore, to "turn off" the condition requires acquiring both the spinlock
and the mutex. This has two consequences:
- If we must acquire *mu to "turn off" the condition, we might as well give
the lock to this waiter and return from nsync_cv_wait_with_deadline() after we've
done so. It would be wasted work to put it back on the waiter queue, and
have it wake up and acquire yet again. (There are possibilities for
starvation here that we ignore, under the assumption that the client
avoids timeouts that are extremely short relative to the durations of
his critical sections.)
- We can't use *w to wait for the lock to be free, because *w is already on
the waiter queue with the wrong condition; we now want to wait with no
condition. So either we must spin to acquire the lock, or we must
allocate _another_ waiter object. The latter option is feasible, but
delicate: the thread would have two waiter objects, and would have to
handle being woken by either one or both, and possibly removing one that
was not awoken. For the moment, we spin, because it's easier, and seems
not to cause problems in practice, since the spinloop backs off
aggressively. */
/* See the block comment above for the rationale behind this routine. */
static int mu_try_acquire_after_timeout_or_cancel (nsync_mu *mu, lock_type *l_type,
						   waiter *w, uint32_t remove_count) {
	int success = 0;
	unsigned spin_attempts = 0;
	uint32_t old_word = ATM_LOAD (&mu->word);
	/* Spin until we can acquire the spinlock and a writer lock on *mu. */
	while ((old_word&(MU_WZERO_TO_ACQUIRE|MU_SPINLOCK)) != 0 ||
	       !ATM_CAS_ACQ (&mu->word, old_word,
			     (old_word+MU_WADD_TO_ACQUIRE+MU_SPINLOCK) &
			     ~MU_WCLEAR_ON_ACQUIRE)) {
		/* Failed to acquire.  If we can, set the MU_WRITER_WAITING bit
		   to avoid being starved by readers. */
		if ((old_word & (MU_WRITER_WAITING | MU_SPINLOCK)) == 0) {
			/* If the following CAS succeeds, it effectively
			   acquires and releases the spinlock atomically, so
			   must be both an acquire and release barrier.
			   MU_WRITER_WAITING will be cleared via
			   MU_WCLEAR_ON_ACQUIRE when this loop succeeds.
			   An optimization; failures are ignored. */
			ATM_CAS_RELACQ (&mu->word, old_word,
					old_word|MU_WRITER_WAITING);
		}
		spin_attempts = nsync_spin_delay_ (spin_attempts); /* backs off aggressively */
		old_word = ATM_LOAD (&mu->word);
	}
	/* Check that w wasn't removed from the queue after our caller checked,
	   but before we acquired the spinlock.
	   The check of remove_count confirms that the waiter *w is still
	   governed by *mu's spinlock.  Otherwise, some other thread may be
	   about to set w.waiting==0. */
	if (ATM_LOAD (&w->nw.waiting) != 0 && remove_count == ATM_LOAD (&w->remove_count)) {
		/* This thread's condition is now irrelevant, and it
		   holds a writer lock.  Remove it from the queue,
		   and possibly convert back to a reader lock. */
		mu->waiters = nsync_remove_from_mu_queue_ (mu->waiters, &w->nw.q);
		ATM_STORE (&w->nw.waiting, 0);
		/* Release spinlock but keep desired lock type. */
		ATM_STORE_REL (&mu->word, old_word+l_type->add_to_acquire); /* release store */
		success = 1;
	} else {
		/* Release spinlock and *mu. */
		ATM_STORE_REL (&mu->word, old_word); /* release store */
	}
	return (success);
}
/* Return when at least one of: the condition is true, the
deadline expires, or cancel_note is notified. It may unlock and relock *mu
while blocked waiting for one of these events, but always returns with *mu
held. It returns 0 iff the condition is true on return, and otherwise
either ETIMEDOUT or ECANCELED, depending on why the call returned early. Use
abs_deadline==nsync_time_no_deadline for no deadline, and cancel_note==NULL for no
cancellation.
Requires that *mu be held on entry.
Requires that condition.eval() neither modify state protected by *mu, nor
return a value dependent on state not protected by *mu. To depend on time,
use the abs_deadline parameter.
(Conventional use of condition variables have the same restrictions on the
conditions tested by the while-loop.)
The implementation calls condition.eval() only with *mu held, though not
always from the calling thread, and may elect to hold only a read lock
during the call, even if the client is attempting to acquire only write
locks.
The nsync_mu_wait() and nsync_mu_wait_with_deadline() calls can be used instead of condition
variables. In many straightforward situations they are of equivalent
performance and are somewhat easier to use, because unlike condition
variables, they do not require that the waits be placed in a loop, and they
do not require explicit wakeup calls. In the current implementation, use of
nsync_mu_wait() and nsync_mu_wait_with_deadline() can take longer if many distinct
wait conditions are used. In such cases, use an explicit condition variable
per wakeup condition for best performance. */
int nsync_mu_wait_with_deadline (nsync_mu *mu,
				 int (*condition) (const void *condition_arg),
				 const void *condition_arg,
				 int (*condition_arg_eq) (const void *a, const void *b),
				 nsync_time abs_deadline, nsync_note cancel_note) {
	lock_type *l_type;
	int first_wait;
	int condition_is_true;
	waiter *w;
	int outcome;
	/* Work out in which mode the lock is held. */
	uint32_t old_word;
	IGNORE_RACES_START ();
	old_word = ATM_LOAD (&mu->word);
	if ((old_word & MU_ANY_LOCK) == 0) {
		nsync_panic_ ("nsync_mu not held in some mode when calling "
			      "nsync_mu_wait_with_deadline()\n");
	}
	l_type = nsync_writer_type_;
	if ((old_word & MU_RHELD_IF_NON_ZERO) != 0) {
		l_type = nsync_reader_type_;
	}
	first_wait = 1; /* first time through the loop below. */
	condition_is_true = (condition == NULL || (*condition) (condition_arg));
	/* Loop until either the condition becomes true, or "outcome" indicates
	   cancellation or timeout. */
	w = NULL;
	outcome = 0;
	while (outcome == 0 && !condition_is_true) {
		uint32_t has_condition;
		uint32_t remove_count;
		uint32_t add_to_acquire;
		int had_waiters;
		int sem_outcome;
		unsigned attempts;
		int have_lock;
		if (w == NULL) {
			w = nsync_waiter_new_ (); /* get a waiter struct if we need one. */
		}
		/* Prepare to wait. */
		w->cv_mu = NULL; /* not a condition variable wait */
		w->l_type = l_type;
		w->cond.f = condition;
		w->cond.v = condition_arg;
		w->cond.eq = condition_arg_eq;
		has_condition = 0; /* set to MU_CONDITION if condition is non-NULL */
		if (condition != NULL) {
			has_condition = MU_CONDITION;
		}
		ATM_STORE (&w->nw.waiting, 1);
		/* Snapshot remove_count; used later to detect whether another
		   thread removed *w from the queue behind our back. */
		remove_count = ATM_LOAD (&w->remove_count);
		/* Acquire spinlock. */
		old_word = nsync_spin_test_and_set_ (&mu->word, MU_SPINLOCK,
			MU_SPINLOCK|MU_WAITING|has_condition, MU_ALL_FALSE);
		had_waiters = ((old_word & (MU_DESIG_WAKER | MU_WAITING)) == MU_WAITING);
		/* Queue the waiter. */
		if (first_wait) {
			nsync_maybe_merge_conditions_ (nsync_dll_last_ (mu->waiters),
						       &w->nw.q);
			/* first wait goes to end of queue */
			mu->waiters = nsync_dll_make_last_in_list_ (mu->waiters,
								    &w->nw.q);
			first_wait = 0;
		} else {
			nsync_maybe_merge_conditions_ (&w->nw.q,
						       nsync_dll_first_ (mu->waiters));
			/* subsequent waits go to front of queue */
			mu->waiters = nsync_dll_make_first_in_list_ (mu->waiters,
								     &w->nw.q);
		}
		/* Release spinlock and *mu. */
		do {
			old_word = ATM_LOAD (&mu->word);
			add_to_acquire = l_type->add_to_acquire;
			if (((old_word-l_type->add_to_acquire)&MU_ANY_LOCK) == 0 && had_waiters) {
				add_to_acquire = 0; /* release happens in nsync_mu_unlock_slow_ */
			}
		} while (!ATM_CAS_REL (&mu->word, old_word,
				       (old_word - add_to_acquire) & ~MU_SPINLOCK));
		if (add_to_acquire == 0) {
			/* The lock will be fully released, there are waiters, and
			   no designated waker, so wake waiters. */
			nsync_mu_unlock_slow_ (mu, l_type);
		}
		/* wait until awoken or a timeout. */
		sem_outcome = 0;
		attempts = 0;
		have_lock = 0;
		while (ATM_LOAD_ACQ (&w->nw.waiting) != 0) { /* acquire load */
			if (sem_outcome == 0) {
				sem_outcome = nsync_sem_wait_with_cancel_ (w, abs_deadline,
									   cancel_note);
				if (sem_outcome != 0 && ATM_LOAD (&w->nw.waiting) != 0) {
					/* A timeout or cancellation occurred, and no wakeup.
					   Acquire the spinlock and mu, and confirm. */
					have_lock = mu_try_acquire_after_timeout_or_cancel (
						mu, l_type, w, remove_count);
					if (have_lock) { /* Successful acquire. */
						outcome = sem_outcome;
					}
				}
			}
			if (ATM_LOAD (&w->nw.waiting) != 0) {
				attempts = nsync_spin_delay_ (attempts); /* will ultimately yield */
			}
		}
		if (!have_lock) {
			/* If we didn't reacquire due to a cancellation/timeout, acquire now. */
			nsync_mu_lock_slow_ (mu, w, MU_DESIG_WAKER, l_type);
		}
		/* Re-evaluate the condition now that *mu is held again. */
		condition_is_true = (condition == NULL || (*condition) (condition_arg));
	}
	if (w != NULL) {
		nsync_waiter_free_ (w); /* free waiter if we allocated one. */
	}
	if (condition_is_true) {
		outcome = 0; /* condition is true trumps other outcomes. */
	}
	IGNORE_RACES_END ();
	return (outcome);
}
/* Return when the condition is true. Perhaps unlock and relock *mu
while blocked waiting for the condition to become true. It is equivalent to
a call to nsync_mu_wait_with_deadline() with abs_deadline==nsync_time_no_deadline, and
cancel_note==NULL.
Requires that *mu be held on entry.
Calls condition.eval() only with *mu held, though not always from the
calling thread.
See wait_with_deadline() for the restrictions on condition and performance
considerations. */
void nsync_mu_wait (nsync_mu *mu, int (*condition) (const void *condition_arg),
		    const void *condition_arg,
		    int (*condition_arg_eq) (const void *a, const void *b)) {
	int outcome = nsync_mu_wait_with_deadline (mu, condition, condition_arg,
						   condition_arg_eq,
						   nsync_time_no_deadline, NULL);
	if (outcome != 0) {
		/* With no deadline and no cancel note, the only legal
		   outcome is a true condition. */
		nsync_panic_ ("nsync_mu_wait woke but condition not true\n");
	}
}
/* Unlock *mu, which must be held in write mode, and wake waiters, if
appropriate. Unlike nsync_mu_unlock(), this call is not required to wake
nsync_mu_wait/nsync_mu_wait_with_deadline calls on conditions that were
false before this thread acquired the lock. This call should be used only
at the end of critical sections for which:
- nsync_mu_wait/nsync_mu_wait_with_deadline are in use on the same mutex,
- this critical section cannot make the condition true for any of those
nsync_mu_wait/nsync_mu_wait_with_deadline waits, and
- when performance is significantly improved by doing so. */
void nsync_mu_unlock_without_wakeup (nsync_mu *mu) {
	IGNORE_RACES_START ();
	/* See comment in nsync_mu_unlock().  Fast path: word is exactly
	   MU_WLOCK, so a single CAS releases the lock. */
	if (!ATM_CAS_REL (&mu->word, MU_WLOCK, 0)) {
		uint32_t old_word = ATM_LOAD (&mu->word);
		uint32_t new_word = old_word - MU_WLOCK;
		if ((new_word & (MU_RLOCK_FIELD | MU_WLOCK)) != 0) {
			/* *mu was not held in (single) write mode: panic. */
			if ((old_word & MU_RLOCK_FIELD) != 0) {
				nsync_panic_ ("attempt to nsync_mu_unlock() an nsync_mu "
					      "held in read mode\n");
			} else {
				nsync_panic_ ("attempt to nsync_mu_unlock() an nsync_mu "
					      "not held in write mode\n");
			}
		} else if ((old_word & (MU_WAITING | MU_DESIG_WAKER | MU_ALL_FALSE)) ==
			   MU_WAITING || !ATM_CAS_REL (&mu->word, old_word, new_word)) {
			/* Waiters exist with no designated waker and MU_ALL_FALSE
			   clear, or the word changed under us: take the slow
			   path, which may wake waiters. */
			nsync_mu_unlock_slow_ (mu, nsync_writer_type_);
		}
	}
	IGNORE_RACES_END ();
}

View file

@ -1,307 +0,0 @@
/*-*- mode:c;indent-tabs-mode:t;c-basic-offset:8;tab-width:8;coding:utf-8 -*-│
vi: set et ft=c ts=8 tw=8 fenc=utf-8 :vi
Copyright 2016 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 │
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "libc/mem/mem.h"
#include "libc/str/str.h"
#include "third_party/nsync/atomic.h"
#include "third_party/nsync/common.internal.h"
#include "third_party/nsync/dll.h"
#include "third_party/nsync/mu_semaphore.h"
#include "third_party/nsync/mu_wait.h"
#include "third_party/nsync/races.internal.h"
#include "third_party/nsync/wait_s.internal.h"
#include "third_party/nsync/waiter.h"
asm(".ident\t\"\\n\\n\
*NSYNC (Apache 2.0)\\n\
Copyright 2016 Google, Inc.\\n\
https://github.com/google/nsync\"");
// clang-format off
/* Locking discipline for the nsync_note implementation:
Each nsync_note has a lock "note_mu" which protects the "parent" pointer,
"waiters" list, and "disconnecting" count. It also protects the "children"
list; thus each node's "parent_child_link", which links together the
children of a single parent, is protected by the parent's "note_mu".
To connect a parent to a child, or to disconnect one, the parent's lock must
be held to manipulate its child list, and the child's lock must be held to
change the parent pointer, so both must be held simultaneously.
The locking order is "parent before child".
Operations like notify and free are given a node pointer n and must
disconnect *n from its parent n->parent. The call must hold n->note_mu to
read n->parent, but need to release n->note_mu to acquire
n->parent->note_mu. The parent could be disconnected and freed while
n->note_mu is not held. The n->disconnecting count handles this; the
operation acquires n->note_mu, increments n->disconnecting, and can then
release n->note_mu, and acquire n->parent->note_mu and n->note_mu is the
correct order. n->disconnecting!=0 indicates that a thread is already in
the processes of disconnecting n from n->parent. A thread freeing or
notifying the parent should not perform the disconnection of that child, but
should instead wait for the "children" list to become empty via
WAIT_FOR_NO_CHILDREN(). WAKEUP_NO_CHILDREN() should be used whenever this
condition could become true. */
/* Record deadline as *n's expiry time, and mark the field valid. */
static void set_expiry_time (nsync_note n, nsync_time deadline) {
	n->expiry_time = deadline;
	n->expiry_time_valid = 1;
}
/* Return a pointer to the note containing nsync_dll_element_ *e. */
#define DLL_NOTE(e) ((nsync_note)((e)->container))
/* Return whether n->children is empty. Assumes n->note_mu held. */
static int no_children (const void *v) {
return (nsync_dll_is_empty_ (((nsync_note)v)->children));
}
#define WAIT_FOR_NO_CHILDREN(pred_, n_) nsync_mu_wait (&(n_)->note_mu, &pred_, (n_), NULL)
#define WAKEUP_NO_CHILDREN(n_) do { } while (0)
/*
// These lines can be used in place of those above if conditional critical
// sections have been removed from the source.
#define WAIT_FOR_NO_CHILDREN(pred_, n_) do { \
while (!pred_ (n_)) { nsync_cv_wait (&(n_)->no_children_cv, &(n_)->note_mu); } \
} while (0)
#define WAKEUP_NO_CHILDREN(n_) nsync_cv_broadcast (&(n_)->no_children_cv)
*/
/* Notify *n and all its descendants that are not already disconnecting.
n->note_mu is held. May release and reacquire n->note_mu.
parent->note_mu is held if parent != NULL. */
static void note_notify_child (nsync_note n, nsync_note parent) {
	nsync_time t;
	t = NOTIFIED_TIME (n);
	if (nsync_time_cmp (t, nsync_time_zero) > 0) {
		/* *n not yet notified: mark it notified and wake its waiters. */
		nsync_dll_element_ *p;
		nsync_dll_element_ *next;
		ATM_STORE_REL (&n->notified, 1);
		while ((p = nsync_dll_first_ (n->waiters)) != NULL) {
			struct nsync_waiter_s *nw = DLL_NSYNC_WAITER (p);
			n->waiters = nsync_dll_remove_ (n->waiters, p);
			ATM_STORE_REL (&nw->waiting, 0);
			nsync_mu_semaphore_v (nw->sem);
		}
		/* Recursively notify children that are not already being
		   disconnected by some other thread. */
		for (p = nsync_dll_first_ (n->children); p != NULL; p = next) {
			nsync_note child = DLL_NOTE (p);
			next = nsync_dll_next_ (n->children, p);
			nsync_mu_lock (&child->note_mu);
			if (child->disconnecting == 0) {
				note_notify_child (child, n);
			}
			nsync_mu_unlock (&child->note_mu);
		}
		/* Wait for concurrently-disconnecting children to finish;
		   see the locking-discipline comment at the top of this file. */
		WAIT_FOR_NO_CHILDREN (no_children, n);
		if (parent != NULL) {
			/* Unlink *n from its parent's child list. */
			parent->children = nsync_dll_remove_ (parent->children,
							      &n->parent_child_link);
			WAKEUP_NO_CHILDREN (parent);
			n->parent = NULL;
		}
	}
}
/* Notify *n and all its descendants that are not already disconnecting.
No locks are held. */
static void notify (nsync_note n) {
	nsync_time t;
	nsync_mu_lock (&n->note_mu);
	t = NOTIFIED_TIME (n);
	if (nsync_time_cmp (t, nsync_time_zero) > 0) {
		nsync_note parent;
		/* Mark *n as disconnecting so no other thread tries to unlink
		   it from its parent while the locks are juggled below. */
		n->disconnecting++;
		parent = n->parent;
		if (parent != NULL && !nsync_mu_trylock (&parent->note_mu)) {
			/* Lock order is parent before child, so drop n's lock
			   and reacquire both in the correct order. */
			nsync_mu_unlock (&n->note_mu);
			nsync_mu_lock (&parent->note_mu);
			nsync_mu_lock (&n->note_mu);
		}
		note_notify_child (n, parent);
		if (parent != NULL) {
			nsync_mu_unlock (&parent->note_mu);
		}
		n->disconnecting--;
	}
	nsync_mu_unlock (&n->note_mu);
}
/* Return the deadline by which *n is certain to be notified,
setting it to zero if it already has passed that time.
Requires n->note_mu not held on entry.
Not static; used in sem_wait.c */
nsync_time nsync_note_notified_deadline_ (nsync_note n) {
	nsync_time ntime;
	if (ATM_LOAD_ACQ (&n->notified) != 0) {
		/* Fast path: already notified. */
		ntime = nsync_time_zero;
	} else {
		nsync_mu_lock (&n->note_mu);
		ntime = NOTIFIED_TIME (n);
		nsync_mu_unlock (&n->note_mu);
		if (nsync_time_cmp (ntime, nsync_time_zero) > 0) {
			/* Deadline already passed: notify now and report zero. */
			if (nsync_time_cmp (ntime, nsync_time_now ()) <= 0) {
				notify (n);
				ntime = nsync_time_zero;
			}
		}
	}
	return (ntime);
}
/* Return non-zero iff *n has been notified (possibly by expiry of
   its deadline, which is checked as a side effect). */
int nsync_note_is_notified (nsync_note n) {
	int notified;
	IGNORE_RACES_START ();
	notified = (nsync_time_cmp (nsync_note_notified_deadline_ (n),
				    nsync_time_zero) <= 0);
	IGNORE_RACES_END ();
	return (notified);
}
/* Return a new note with the given parent and expiry deadline, or NULL
   if memory allocation fails. */
nsync_note nsync_note_new (nsync_note parent,
			   nsync_time abs_deadline) {
	nsync_note n = (nsync_note) malloc (sizeof (*n));
	if (n != NULL) {
		memset ((void *) n, 0, sizeof (*n));
		nsync_dll_init_ (&n->parent_child_link, n);
		set_expiry_time (n, abs_deadline);
		if (!nsync_note_is_notified (n) && parent != NULL) {
			nsync_time parent_time;
			nsync_mu_lock (&parent->note_mu);
			parent_time = NOTIFIED_TIME (parent);
			/* A child expires no later than its parent. */
			if (nsync_time_cmp (parent_time, abs_deadline) < 0) {
				set_expiry_time (n, parent_time);
			}
			/* Attach to the parent only if the parent has not
			   itself been notified. */
			if (nsync_time_cmp (parent_time, nsync_time_zero) > 0) {
				n->parent = parent;
				parent->children = nsync_dll_make_last_in_list_ (parent->children,
										 &n->parent_child_link);
			}
			nsync_mu_unlock (&parent->note_mu);
		}
	}
	return (n);
}
/* Free *n, first reparenting its children to n's parent (if any) and
   disconnecting *n from that parent.  Requires that *n have no waiters. */
void nsync_note_free (nsync_note n) {
	nsync_note parent;
	nsync_dll_element_ *p;
	nsync_dll_element_ *next;
	nsync_mu_lock (&n->note_mu);
	/* Mark *n as disconnecting so no other thread tries to unlink it. */
	n->disconnecting++;
	ASSERT (nsync_dll_is_empty_ (n->waiters));
	parent = n->parent;
	if (parent != NULL && !nsync_mu_trylock (&parent->note_mu)) {
		/* Lock order is parent before child; drop and reacquire
		   in the correct order. */
		nsync_mu_unlock (&n->note_mu);
		nsync_mu_lock (&parent->note_mu);
		nsync_mu_lock (&n->note_mu);
	}
	/* Move each child that is not already disconnecting to *n's parent
	   (or orphan it if there is no parent). */
	for (p = nsync_dll_first_ (n->children); p != NULL; p = next) {
		nsync_note child = DLL_NOTE (p);
		next = nsync_dll_next_ (n->children, p);
		nsync_mu_lock (&child->note_mu);
		if (child->disconnecting == 0) {
			n->children = nsync_dll_remove_ (n->children,
							 &child->parent_child_link);
			if (parent != NULL) {
				child->parent = parent;
				parent->children = nsync_dll_make_last_in_list_ (
					parent->children, &child->parent_child_link);
			} else {
				child->parent = NULL;
			}
		}
		nsync_mu_unlock (&child->note_mu);
	}
	/* Wait for concurrently-disconnecting children to finish. */
	WAIT_FOR_NO_CHILDREN (no_children, n);
	if (parent != NULL) {
		parent->children = nsync_dll_remove_ (parent->children,
						      &n->parent_child_link);
		WAKEUP_NO_CHILDREN (parent);
		n->parent = NULL;
		nsync_mu_unlock (&parent->note_mu);
	}
	n->disconnecting--;
	nsync_mu_unlock (&n->note_mu);
	free (n);
}
/* Notify *n and its descendants, if not already notified. */
void nsync_note_notify (nsync_note n) {
	nsync_time t;
	IGNORE_RACES_START ();
	t = nsync_note_notified_deadline_ (n);
	if (nsync_time_cmp (t, nsync_time_zero) > 0) {
		notify (n);
	}
	IGNORE_RACES_END ();
}
/* Wait until *n is notified or abs_deadline passes; return non-zero
   iff *n was notified. */
int nsync_note_wait (nsync_note n, nsync_time abs_deadline) {
	struct nsync_waitable_s w;
	struct nsync_waitable_s *pw[1];
	w.v = n;
	w.funcs = &nsync_note_waitable_funcs;
	pw[0] = &w;
	return (nsync_wait_n (NULL, NULL, NULL, abs_deadline, 1, pw) == 0);
}
/* Return the expiry time associated with *n. */
nsync_time nsync_note_expiry (nsync_note n) {
	nsync_time expiry = n->expiry_time;
	return (expiry);
}
static nsync_time note_ready_time (void *v, struct nsync_waiter_s *nw) {
return (nsync_note_notified_deadline_ ((nsync_note)v));
}
/* waitable "enqueue" callback: queue *nw on note *v if the note has not
   yet been notified.  Return non-zero iff *nw was queued. */
static int note_enqueue (void *v, struct nsync_waiter_s *nw) {
	nsync_note n = (nsync_note) v;
	int queued;
	nsync_time t;
	nsync_mu_lock (&n->note_mu);
	t = NOTIFIED_TIME (n);
	queued = (nsync_time_cmp (t, nsync_time_zero) > 0);
	if (queued) {
		n->waiters = nsync_dll_make_last_in_list_ (n->waiters, &nw->q);
		ATM_STORE (&nw->waiting, 1);
	} else {
		ATM_STORE (&nw->waiting, 0);
	}
	nsync_mu_unlock (&n->note_mu);
	return (queued);
}
/* waitable "dequeue" callback: remove *nw from note *v's waiter list if
   the note has not been notified (notification empties the list).
   Return non-zero iff *nw was still queued. */
static int note_dequeue (void *v, struct nsync_waiter_s *nw) {
	nsync_note n = (nsync_note) v;
	int removed = 0;
	nsync_time t;
	nsync_note_notified_deadline_ (n); /* may notify *n if its deadline passed */
	nsync_mu_lock (&n->note_mu);
	t = NOTIFIED_TIME (n);
	if (nsync_time_cmp (t, nsync_time_zero) > 0) {
		n->waiters = nsync_dll_remove_ (n->waiters, &nw->q);
		ATM_STORE (&nw->waiting, 0);
		removed = 1;
	}
	nsync_mu_unlock (&n->note_mu);
	return (removed);
}
/* waitable vtable that lets an nsync_note be passed to nsync_wait_n(). */
const struct nsync_waitable_funcs_s nsync_note_waitable_funcs = {
	&note_ready_time,
	&note_enqueue,
	&note_dequeue
};

View file

@ -1,150 +0,0 @@
/*-*- mode:c;indent-tabs-mode:t;c-basic-offset:8;tab-width:8;coding:utf-8 -*-│
vi: set et ft=c ts=8 tw=8 fenc=utf-8 :vi
Copyright 2016 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 │
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "third_party/nsync/atomic.h"
#include "third_party/nsync/atomic.internal.h"
#include "third_party/nsync/common.internal.h"
#include "third_party/nsync/dll.h"
#include "third_party/nsync/mu_semaphore.h"
#include "third_party/nsync/once.h"
#include "third_party/nsync/races.internal.h"
#include "third_party/nsync/wait_s.internal.h"
asm(".ident\t\"\\n\\n\
*NSYNC (Apache 2.0)\\n\
Copyright 2016 Google, Inc.\\n\
https://github.com/google/nsync\"");
// clang-format off
/* An once_sync_s struct contains a lock, and a condition variable on which
threads may wait for an nsync_once to be initialized by another thread.
A separate struct is used only to keep nsync_once small.
A given nsync_once can be associated with any once_sync_s struct, but cannot
be associated with more than one. nsync_once instances are mapped to
once_sync_s instances by a trivial hashing scheme implemented by
NSYNC_ONCE_SYNC_().
The number of once_sync_s structs in the following array is greater than one
only to reduce the probability of contention if a great many distinct
nsync_once variables are initialized concurrently. */
static struct once_sync_s {
	nsync_mu once_mu; /* serializes access for the nsync_once instances hashed here */
	nsync_cv once_cv; /* broadcast when an associated nsync_once completes */
} once_sync[64];
/* Return a pointer to the once_sync_s struct associated with the nsync_once *p. */
#define NSYNC_ONCE_SYNC_(p) &once_sync[(((uintptr_t) (p)) / sizeof (*(p))) % \
(sizeof (once_sync) / sizeof (once_sync[0]))]
/* Implement nsync_run_once, nsync_run_once_arg, nsync_run_once_spin, or
nsync_run_once_arg_spin, chosen as described below.
If s!=NULL, s is required to point to the once_sync_s associated with *once,
and the semantics of nsync_run_once or nsync_run_once_arg are provided.
If s==NULL, the semantics of nsync_run_once_spin, or nsync_run_once_arg_spin
are provided.
If f!=NULL, the semantics of nsync_run_once or nsync_run_once_spin are
provided. Otherwise, farg is required to be non-NULL, and the semantics of
nsync_run_once_arg or nsync_run_once_arg_spin are provided. */
static void nsync_run_once_impl (nsync_once *once, struct once_sync_s *s,
				 void (*f) (void), void (*farg) (void *arg), void *arg) {
	/* once states: 0 == uninitialized, 1 == in progress, 2 == done. */
	uint32_t o = ATM_LOAD_ACQ (once);
	if (o != 2) {
		unsigned attempts = 0;
		if (s != NULL) {
			nsync_mu_lock (&s->once_mu);
		}
		/* Try to transition 0 -> 1, claiming the initialization. */
		while (o == 0 && !ATM_CAS_ACQ (once, 0, 1)) {
			o = ATM_LOAD (once);
		}
		if (o == 0) {
			/* This thread claimed the initialization; run the
			   client function without holding the lock. */
			if (s != NULL) {
				nsync_mu_unlock (&s->once_mu);
			}
			if (f != NULL) {
				(*f) ();
			} else {
				(*farg) (arg);
			}
			if (s != NULL) {
				nsync_mu_lock (&s->once_mu);
				nsync_cv_broadcast (&s->once_cv);
			}
			ATM_STORE_REL (once, 2); /* publish completion */
		}
		/* Wait for initialization to complete. */
		while (ATM_LOAD_ACQ (once) != 2) {
			if (s != NULL) {
				nsync_time deadline;
				/* Bounded cv waits with a growing timeout;
				   NOTE(review): presumably because once_sync
				   entries are shared by hashing, so a wakeup
				   may be consumed by an unrelated once —
				   confirm. */
				if (attempts < 50) {
					attempts += 10;
				}
				deadline = nsync_time_add (nsync_time_now (), nsync_time_ms (attempts));
				nsync_cv_wait_with_deadline (&s->once_cv, &s->once_mu, deadline, NULL);
			} else {
				attempts = nsync_spin_delay_ (attempts);
			}
		}
		if (s != NULL) {
			nsync_mu_unlock (&s->once_mu);
		}
	}
}
/* Run f() exactly once for *once; other callers block (on a shared
   condition variable) until the first call completes. */
void nsync_run_once (nsync_once *once, void (*f) (void)) {
	IGNORE_RACES_START ();
	if (ATM_LOAD_ACQ (once) != 2) { /* 2 == already initialized */
		nsync_run_once_impl (once, NSYNC_ONCE_SYNC_ (once), f, NULL, NULL);
	}
	IGNORE_RACES_END ();
}
/* Run farg(arg) exactly once for *once; other callers block (on a
   shared condition variable) until the first call completes. */
void nsync_run_once_arg (nsync_once *once, void (*farg) (void *arg), void *arg) {
	IGNORE_RACES_START ();
	if (ATM_LOAD_ACQ (once) != 2) { /* 2 == already initialized */
		nsync_run_once_impl (once, NSYNC_ONCE_SYNC_ (once), NULL, farg, arg);
	}
	IGNORE_RACES_END ();
}
/* Run f() exactly once for *once; other callers spin (rather than
   block) until the first call completes. */
void nsync_run_once_spin (nsync_once *once, void (*f) (void)) {
	IGNORE_RACES_START ();
	if (ATM_LOAD_ACQ (once) != 2) { /* 2 == already initialized */
		nsync_run_once_impl (once, NULL, f, NULL, NULL);
	}
	IGNORE_RACES_END ();
}
/* Run farg(arg) exactly once for *once; other callers spin (rather
   than block) until the first call completes. */
void nsync_run_once_arg_spin (nsync_once *once, void (*farg) (void *arg), void *arg) {
	IGNORE_RACES_START ();
	if (ATM_LOAD_ACQ (once) != 2) { /* 2 == already initialized */
		nsync_run_once_impl (once, NULL, NULL, farg, arg);
	}
	IGNORE_RACES_END ();
}

View file

@ -1,85 +0,0 @@
/*-*- mode:c;indent-tabs-mode:t;c-basic-offset:8;tab-width:8;coding:utf-8 -*-│
vi: set et ft=c ts=8 tw=8 fenc=utf-8 :vi
Copyright 2016 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 │
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "libc/errno.h"
#include "third_party/nsync/atomic.h"
#include "third_party/nsync/atomic.internal.h"
#include "third_party/nsync/common.internal.h"
#include "third_party/nsync/dll.h"
#include "third_party/nsync/mu_semaphore.h"
#include "third_party/nsync/wait_s.internal.h"
asm(".ident\t\"\\n\\n\
*NSYNC (Apache 2.0)\\n\
Copyright 2016 Google, Inc.\\n\
https://github.com/google/nsync\"");
// clang-format off
/* Wait until one of:
w->sem is non-zero----decrement it and return 0.
abs_deadline expires---return ETIMEDOUT.
cancel_note is non-NULL and *cancel_note becomes notified---return ECANCELED. */
int nsync_sem_wait_with_cancel_ (waiter *w, nsync_time abs_deadline,
				 nsync_note cancel_note) {
	int sem_outcome;
	if (cancel_note == NULL) {
		/* No cancellation to watch for: plain semaphore wait. */
		sem_outcome = nsync_mu_semaphore_p_with_deadline (&w->sem, abs_deadline);
	} else {
		nsync_time cancel_time;
		cancel_time = nsync_note_notified_deadline_ (cancel_note);
		sem_outcome = ECANCELED;
		if (nsync_time_cmp (cancel_time, nsync_time_zero) > 0) {
			/* cancel_note is not yet notified: wait on the
			   semaphore while also queued on the note. */
			struct nsync_waiter_s nw;
			nw.tag = NSYNC_WAITER_TAG;
			nw.sem = &w->sem;
			nsync_dll_init_ (&nw.q, &nw);
			ATM_STORE (&nw.waiting, 1);
			nw.flags = 0;
			nsync_mu_lock (&cancel_note->note_mu);
			cancel_time = NOTIFIED_TIME (cancel_note);
			if (nsync_time_cmp (cancel_time, nsync_time_zero) > 0) {
				nsync_time local_abs_deadline;
				int deadline_is_nearer = 0;
				cancel_note->waiters = nsync_dll_make_last_in_list_ (
					cancel_note->waiters, &nw.q);
				/* Wait until the earlier of the client deadline
				   and the note's certain-notification time. */
				local_abs_deadline = cancel_time;
				if (nsync_time_cmp (abs_deadline, cancel_time) < 0) {
					local_abs_deadline = abs_deadline;
					deadline_is_nearer = 1;
				}
				nsync_mu_unlock (&cancel_note->note_mu);
				sem_outcome = nsync_mu_semaphore_p_with_deadline (&w->sem,
					local_abs_deadline);
				if (sem_outcome == ETIMEDOUT && !deadline_is_nearer) {
					/* Timed out at the note's deadline, not
					   the client's: report cancellation. */
					sem_outcome = ECANCELED;
					nsync_note_notify (cancel_note);
				}
				/* Dequeue from the note unless it has been
				   notified (notification empties the list). */
				nsync_mu_lock (&cancel_note->note_mu);
				cancel_time = NOTIFIED_TIME (cancel_note);
				if (nsync_time_cmp (cancel_time,
						    nsync_time_zero) > 0) {
					cancel_note->waiters = nsync_dll_remove_ (
						cancel_note->waiters, &nw.q);
				}
			}
			nsync_mu_unlock (&cancel_note->note_mu);
		}
	}
	return (sem_outcome);
}

View file

@ -1,33 +0,0 @@
/*-*- mode:c;indent-tabs-mode:t;c-basic-offset:8;tab-width:8;coding:utf-8 -*-│
vi: set et ft=c ts=8 tw=8 fenc=utf-8 :vi
Copyright 2016 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 │
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "third_party/nsync/common.internal.h"
#include "third_party/nsync/mu_semaphore.h"
asm(".ident\t\"\\n\\n\
*NSYNC (Apache 2.0)\\n\
Copyright 2016 Google, Inc.\\n\
https://github.com/google/nsync\"");
// clang-format off
/* Block until either w->sem becomes non-zero (decrement it and
   return 0) or abs_deadline passes (return ETIMEDOUT).  The
   cancel_note argument is accepted only for interface compatibility
   with the cancelable variant; this implementation ignores it. */
int nsync_sem_wait_with_cancel_ (waiter *w, nsync_time abs_deadline, nsync_note cancel_note) {
	int outcome;
	(void) cancel_note;	/* deliberately unused in this variant */
	outcome = nsync_mu_semaphore_p_with_deadline (&w->sem, abs_deadline);
	return (outcome);
}

View file

@ -1,109 +0,0 @@
/*-*- mode:c;indent-tabs-mode:t;c-basic-offset:8;tab-width:8;coding:utf-8 -*-│
vi: set et ft=c ts=8 tw=8 fenc=utf-8 :vi
Copyright 2016 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 │
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "libc/mem/mem.h"
#include "third_party/nsync/atomic.h"
#include "third_party/nsync/atomic.internal.h"
#include "third_party/nsync/common.internal.h"
#include "third_party/nsync/dll.h"
#include "third_party/nsync/mu_semaphore.h"
#include "third_party/nsync/races.internal.h"
#include "third_party/nsync/wait_s.internal.h"
#include "third_party/nsync/waiter.h"
asm(".ident\t\"\\n\\n\
*NSYNC (Apache 2.0)\\n\
Copyright 2016 Google, Inc.\\n\
https://github.com/google/nsync\"");
// clang-format off
/* Wait until at least one of waitable[0..count-1] is ready, or until
   abs_deadline expires.  Returns the index of the first waitable found
   ready, or count if the deadline expired (or was non-positive) before
   any became ready.  If mu != NULL, *unlock is called on mu before
   blocking and *lock is called on it again before returning. */
int nsync_wait_n (void *mu, void (*lock) (void *), void (*unlock) (void *),
		  nsync_time abs_deadline,
		  int count, struct nsync_waitable_s *waitable[]) {
	int ready;
	IGNORE_RACES_START ();
	/* Fast path: scan without blocking.  ready_time > 0 means "not
	   yet ready", so the loop stops at the first ready waitable (its
	   index lands in "ready") or at count if none is ready. */
	for (ready = 0; ready != count &&
			nsync_time_cmp ((*waitable[ready]->funcs->ready_time) (
						waitable[ready]->v, NULL),
					nsync_time_zero) > 0;
	     ready++) {
	}
	/* Block only if nothing was ready and the deadline is in the future. */
	if (ready == count && nsync_time_cmp (abs_deadline, nsync_time_zero) > 0) {
		int i;
		int unlocked = 0;
		int j;
		int enqueued = 1;
		waiter *w = nsync_waiter_new_ ();
		struct nsync_waiter_s nw_set[4];
		struct nsync_waiter_s *nw = nw_set;
		/* Use the small on-stack array when possible; spill to the
		   heap for larger counts.
		   NOTE(review): this malloc result is not checked; if it
		   returns NULL the initialization loop below dereferences
		   nw[i].  Presumably callers never pass large counts under
		   memory pressure — worth confirming/fixing upstream. */
		if (count > (int) (sizeof (nw_set) / sizeof (nw_set[0]))) {
			nw = (struct nsync_waiter_s *) malloc (count * sizeof (nw[0]));
		}
		/* Enqueue a waiter node (all sharing w->sem) on each waitable;
		   stop early if any enqueue fails, leaving i == number of
		   nodes actually enqueued. */
		for (i = 0; i != count && enqueued; i++) {
			nw[i].tag = NSYNC_WAITER_TAG;
			nw[i].sem = &w->sem;
			nsync_dll_init_ (&nw[i].q, &nw[i]);
			ATM_STORE (&nw[i].waiting, 0);
			nw[i].flags = 0;
			enqueued = (*waitable[i]->funcs->enqueue) (waitable[i]->v, &nw[i]);
		}
		/* Block only if every waitable was enqueued successfully. */
		if (i == count) {
			nsync_time min_ntime;
			if (mu != NULL) {
				(*unlock) (mu);
				unlocked = 1;
			}
			/* Wait on the shared semaphore until the nearest of
			   abs_deadline and the waitables' ready times; loop
			   while everything still reports "not ready" and the
			   semaphore wait did not fail/time out. */
			do {
				min_ntime = abs_deadline;
				for (j = 0; j != count; j++) {
					nsync_time ntime;
					ntime = (*waitable[j]->funcs->ready_time) (
						waitable[j]->v, &nw[j]);
					if (nsync_time_cmp (ntime, min_ntime) < 0) {
						min_ntime = ntime;
					}
				}
			} while (nsync_time_cmp (min_ntime, nsync_time_zero) > 0 &&
				 nsync_mu_semaphore_p_with_deadline (&w->sem,
					min_ntime) == 0);
		}
		/* An attempt was made above to enqueue waitable[0..i-1].
		   Dequeue any that are still enqueued, and remember the index
		   of the first ready (i.e., not still enqueued) object, if any. */
		for (j = 0; j != i; j++) {
			int was_still_enqueued =
				(*waitable[j]->funcs->dequeue) (waitable[j]->v, &nw[j]);
			if (!was_still_enqueued && ready == count) {
				ready = j;
			}
		}
		/* Release the heap spill buffer, if one was used. */
		if (nw != nw_set) {
			free (nw);
		}
		nsync_waiter_free_ (w);
		/* Restore the caller's lock state before returning. */
		if (unlocked) {
			(*lock) (mu);
		}
	}
	IGNORE_RACES_END ();
	return (ready);
}

View file

@ -34,7 +34,8 @@ LIBC_THREAD_A_DIRECTDEPS = \
LIBC_SYSV \
LIBC_SYSV_CALLS \
LIBC_NEXGEN32E \
THIRD_PARTY_NSYNC
THIRD_PARTY_NSYNC \
THIRD_PARTY_NSYNC_MEM
LIBC_THREAD_A_DEPS := \
$(call uniq,$(foreach x,$(LIBC_THREAD_A_DIRECTDEPS),$($(x))))