Make futexes cancellable by pthreads

Author: Justine Tunney
Date:   2022-11-04 18:19:05 -07:00
Commit: 022536cab6 (parent 2278327eba)
GPG key ID: BE714B4575D6E328
101 changed files with 627 additions and 391 deletions

View file

@ -17,3 +17,4 @@ LOCAL CHANGES
- nsync_malloc_() is implemented as kmalloc()
- nsync_mu_semaphore uses Cosmopolitan Futexes
- block pthread cancellations in nsync_mu_lock_slow_
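
For orientation before the diffs: the user-visible contract this commit targets is that a thread blocked in a futex-backed wait that is a POSIX cancellation point wakes on pthread_cancel(), runs its cleanup handlers, and terminates with PTHREAD_CANCELED (pthread_cond_wait is the canonical example of such a wait). A minimal sketch of that contract using only the portable pthread API, with none of the nsync internals changed below:

/* Sketch: cancelling a thread blocked in pthread_cond_wait().  Portable
   POSIX; compile with -pthread.  Illustrative only, not nsync code. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;

static void unlock_on_cancel (void *arg) {
	/* pthread_cond_wait() re-acquires the mutex before cancellation
	   cleanup runs, so the handler has to release it. */
	pthread_mutex_unlock (arg);
}

static void *waiter (void *arg) {
	(void)arg;
	pthread_mutex_lock (&mu);
	pthread_cleanup_push (unlock_on_cancel, &mu);
	for (;;) {
		pthread_cond_wait (&cv, &mu);  /* cancellation point */
	}
	pthread_cleanup_pop (1);
	return 0;
}

int main (void) {
	void *rc;
	pthread_t th;
	pthread_create (&th, 0, waiter, 0);
	pthread_cancel (th);               /* wakes the blocked wait */
	pthread_join (th, &rc);
	printf ("cancelled: %d\n", rc == PTHREAD_CANCELED);
	return 0;
}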

View file

@ -23,17 +23,20 @@
#include "libc/calls/state.internal.h"
#include "libc/calls/struct/timespec.h"
#include "libc/calls/struct/timespec.internal.h"
#include "libc/calls/syscall_support-nt.internal.h"
#include "libc/dce.h"
#include "libc/errno.h"
#include "libc/intrin/atomic.h"
#include "libc/intrin/describeflags.internal.h"
#include "libc/intrin/strace.internal.h"
#include "libc/intrin/weaken.h"
#include "libc/limits.h"
#include "libc/nt/runtime.h"
#include "libc/nt/synchronization.h"
#include "libc/sysv/consts/clock.h"
#include "libc/sysv/consts/futex.h"
#include "libc/sysv/consts/timer.h"
#include "libc/sysv/errfuns.h"
#include "libc/thread/freebsd.internal.h"
#include "libc/thread/thread.h"
#include "libc/thread/tls.h"
@ -45,7 +48,9 @@
#define FUTEX_WAIT_BITS_ FUTEX_BITSET_MATCH_ANY
int _futex (atomic_int *, int, int, const struct timespec *, int *, int);
errno_t _futex (atomic_int *, int, int, const struct timespec *, int *, int);
errno_t _futex_wake (atomic_int *, int, int) asm ("_futex");
int sys_futex_cp (atomic_int *, int, int, const struct timespec *, int *, int);
static int FUTEX_WAIT_;
static int FUTEX_PRIVATE_FLAG_;
@ -59,6 +64,7 @@ __attribute__((__constructor__)) static void nsync_futex_init_ (void) {
if (IsWindows ()) {
FUTEX_IS_SUPPORTED = true;
FUTEX_TIMEOUT_IS_ABSOLUTE = true;
return;
}
@ -70,8 +76,6 @@ __attribute__((__constructor__)) static void nsync_futex_init_ (void) {
}
if (!(FUTEX_IS_SUPPORTED = IsLinux () || IsOpenbsd ())) {
// we're using sched_yield() so let's
// avoid needless clock_gettime calls
FUTEX_TIMEOUT_IS_ABSOLUTE = true;
return;
}
@ -101,7 +105,7 @@ __attribute__((__constructor__)) static void nsync_futex_init_ (void) {
FUTEX_TIMEOUT_IS_ABSOLUTE = true;
} else if (IsOpenbsd () ||
(!IsTiny () && IsLinux () &&
!_futex (&x, FUTEX_WAKE_PRIVATE, 1, 0, 0, 0))) {
!_futex_wake (&x, FUTEX_WAKE_PRIVATE, 1))) {
FUTEX_WAIT_ = FUTEX_WAIT;
FUTEX_PRIVATE_FLAG_ = FUTEX_PRIVATE_FLAG;
} else {
@ -110,13 +114,10 @@ __attribute__((__constructor__)) static void nsync_futex_init_ (void) {
}
static int nsync_futex_polyfill_ (atomic_int *w, int expect, struct timespec *timeout) {
int rc;
int64_t nanos, maxnanos;
struct timespec ts, deadline;
if (atomic_load_explicit (w, memory_order_relaxed) != expect) {
return -EAGAIN;
}
ts = nsync_time_now ();
if (!timeout) {
deadline = nsync_time_no_deadline;
@ -129,15 +130,15 @@ static int nsync_futex_polyfill_ (atomic_int *w, int expect, struct timespec *ti
nanos = 100;
maxnanos = __SIG_POLLING_INTERVAL_MS * 1000L * 1000;
while (nsync_time_cmp (deadline, ts) > 0) {
if (atomic_load_explicit (w, memory_order_relaxed) != expect) {
return 0;
}
ts = nsync_time_add (ts, _timespec_fromnanos (nanos));
if (nsync_time_cmp (ts, deadline) > 0) {
ts = deadline;
}
if (nsync_time_sleep_until (ts)) {
return -EINTR;
if (atomic_load_explicit (w, memory_order_acquire) != expect) {
return 0;
}
if ((rc = nsync_time_sleep_until (ts))) {
return -rc;
}
if (nanos < maxnanos) {
nanos <<= 1;
@ -150,10 +151,46 @@ static int nsync_futex_polyfill_ (atomic_int *w, int expect, struct timespec *ti
return -ETIMEDOUT;
}
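
The polyfill above sleeps in exponentially growing increments, starting near 100ns, doubling each round, and capping at the __SIG_POLLING_INTERVAL_MS-derived maximum, so short waits stay responsive while long ones degrade into cheap periodic polling. A self-contained sketch of the same backoff shape in portable C; the 10ms cap and the helper name are illustrative, not the nsync values:

/* Sketch: poll an atomic word with exponentially growing sleeps, capped
   at a fixed polling interval, until a deadline passes. */
#include <errno.h>
#include <stdatomic.h>
#include <stdint.h>
#include <time.h>

#define POLL_CAP_NANOS (10 * 1000L * 1000)  /* assumed cap, ~10ms */

static int backoff_wait (atomic_int *w, int expect,
			 const struct timespec *deadline) {
	int64_t nanos = 100;
	struct timespec now, nap;
	for (;;) {
		if (atomic_load_explicit (w, memory_order_acquire) != expect) {
			return 0;          /* the word changed: we're done */
		}
		clock_gettime (CLOCK_REALTIME, &now);
		if (deadline &&
		    (now.tv_sec > deadline->tv_sec ||
		     (now.tv_sec == deadline->tv_sec &&
		      now.tv_nsec >= deadline->tv_nsec))) {
			return -ETIMEDOUT; /* deadline passed while polling */
		}
		nap.tv_sec = 0;
		nap.tv_nsec = nanos;
		if (nanosleep (&nap, 0) == -1) {
			return -errno;     /* e.g. -EINTR on signal delivery */
		}
		if (nanos < POLL_CAP_NANOS) {
			nanos <<= 1;       /* double the nap, up to the cap */
		}
	}
}
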
int nsync_futex_wait_ (atomic_int *w, int expect, char pshare, struct timespec *timeout) {
static int nsync_futex_wait_win32_ (atomic_int *w, int expect, char pshare, struct timespec *timeout) {
int rc;
uint32_t ms;
struct timespec deadline, interval, remain, wait, now;
if (timeout) {
deadline = *timeout;
} else {
deadline = nsync_time_no_deadline;
}
while (!(rc = _check_interrupts (false, 0))) {
now = nsync_time_now ();
if (nsync_time_cmp (now, deadline) > 0) {
rc = etimedout();
break;
}
remain = nsync_time_sub (deadline, now);
interval = _timespec_frommillis (__SIG_POLLING_INTERVAL_MS);
wait = nsync_time_cmp (remain, interval) > 0 ? interval : remain;
if (atomic_load_explicit (w, memory_order_acquire) != expect) {
break;
}
if (WaitOnAddress (w, &expect, sizeof(int), _timespec_tomillis (wait))) {
break;
} else {
ASSERT (GetLastError () == ETIMEDOUT);
}
}
return rc;
}
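
nsync_futex_wait_win32_ above never parks for longer than one polling interval: each pass recomputes the time left until the absolute deadline, clamps it to __SIG_POLLING_INTERVAL_MS, and only then waits, so _check_interrupts() runs between slices. A small sketch of just the clamping step; the 20ms interval and the helper name are assumptions:

/* Sketch: clamp the time remaining until an absolute deadline to one
   polling interval, yielding the next bounded wait in milliseconds. */
#include <stdint.h>
#include <time.h>

#define POLL_INTERVAL_MS 20  /* assumed polling interval */

static int64_t next_wait_ms (struct timespec deadline) {
	struct timespec now;
	int64_t remain_ms;
	clock_gettime (CLOCK_REALTIME, &now);
	remain_ms = (deadline.tv_sec - now.tv_sec) * 1000 +
		    (deadline.tv_nsec - now.tv_nsec) / 1000000;
	if (remain_ms <= 0) return 0;               /* deadline already passed */
	if (remain_ms > POLL_INTERVAL_MS) return POLL_INTERVAL_MS;
	return remain_ms;                           /* final, shorter slice */
}

/* Shape of the loop above: while not interrupted and *w still equals
   expect, wait on the address for next_wait_ms(deadline) milliseconds,
   then re-check; a zero result means the deadline has expired. */
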
int nsync_futex_wait_ (atomic_int *w, int expect, char pshare, struct timespec *timeout) {
int e, rc, op, fop;
if (atomic_load_explicit (w, memory_order_acquire) != expect) {
return -EAGAIN;
}
op = FUTEX_WAIT_;
if (pshare == PTHREAD_PROCESS_PRIVATE) {
op |= FUTEX_PRIVATE_FLAG_;
@ -165,37 +202,35 @@ int nsync_futex_wait_ (atomic_int *w, int expect, char pshare, struct timespec *
DescribeTimespec (0, timeout));
if (FUTEX_IS_SUPPORTED) {
e = errno;
if (IsWindows ()) {
// Windows 8 futexes don't support multiple processes :(
if (pshare) {
goto Polyfill;
}
e = errno;
if (_check_interrupts (false, 0)) {
rc = -errno;
errno = e;
} else {
if (timeout) {
ms = _timespec_tomillis (*timeout);
} else {
ms = -1;
}
if (WaitOnAddress (w, &expect, sizeof(int), ms)) {
rc = 0;
} else {
rc = -GetLastError ();
}
}
if (pshare) goto Polyfill;
rc = nsync_futex_wait_win32_ (w, expect, pshare, timeout);
} else if (IsFreebsd ()) {
rc = sys_umtx_timedwait_uint (
w, expect, pshare, timeout);
rc = sys_umtx_timedwait_uint (w, expect, pshare, timeout);
} else {
rc = _futex (w, op, expect, timeout, 0,
FUTEX_WAIT_BITS_);
rc = sys_futex_cp (w, op, expect, timeout, 0, FUTEX_WAIT_BITS_);
if (IsOpenbsd() && rc > 0) {
rc = -rc;
// OpenBSD futex() returns errors as
// positive numbers without setting the
// carry flag. It's an irregular miracle
// because OpenBSD returns ECANCELED if
// futex() is interrupted w/ SA_RESTART
// so we're able to tell it apart from
// PT_MASKED which causes the wrapper
// to put ECANCELED into errno.
if (rc == ECANCELED) {
rc = EINTR;
}
errno = rc;
rc = -1;
}
}
if (rc == -1) {
rc = -errno;
errno = e;
}
} else {
Polyfill:
__get_tls()->tib_flags |= TIB_FLAG_TIME_CRITICAL;
@ -214,7 +249,6 @@ int nsync_futex_wait_ (atomic_int *w, int expect, char pshare, struct timespec *
int nsync_futex_wake_ (atomic_int *w, int count, char pshare) {
int e, rc, op, fop;
int wake (atomic_int *, int, int) asm ("_futex");
ASSERT (count == 1 || count == INT_MAX);
@ -240,9 +274,9 @@ int nsync_futex_wake_ (atomic_int *w, int count, char pshare) {
} else {
fop = UMTX_OP_WAKE_PRIVATE;
}
rc = sys_umtx_op (w, fop, count, 0, 0);
rc = _futex_wake (w, fop, count);
} else {
rc = wake (w, op, count);
rc = _futex_wake (w, op, count);
}
} else {
Polyfill:
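
Three result conventions meet in nsync_futex_wait_ above: the wrapped system calls report failure as -1 with the error in errno, OpenBSD's futex() hands back a positive errno value directly, and the function's callers expect 0 or a negative errno. A compact sketch of that normalization with an illustrative helper name (the real code also restores the caller's saved errno afterwards):

/* Sketch: fold the result conventions described above into the single
   "0 on success, negative errno on failure" form the callers expect. */
#include <errno.h>

static int normalize_futex_result (int rc, int call_errno) {
	if (rc > 0) {
		/* OpenBSD style: a positive errno came straight back.  Per
		   the comment above, ECANCELED on this path means the wait
		   was interrupted under SA_RESTART, so map it to EINTR. */
		if (rc == ECANCELED) rc = EINTR;
		return -rc;
	}
	if (rc == -1) {
		/* wrapper style: -1 with the error left in errno */
		return -call_errno;
	}
	return rc;  /* 0, or already a negative errno */
}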

View file

@ -16,6 +16,7 @@
limitations under the License.
*/
#include "libc/str/str.h"
#include "libc/thread/thread.h"
#include "third_party/nsync/atomic.internal.h"
#include "third_party/nsync/common.internal.h"
#include "third_party/nsync/cv.h"
@ -202,6 +203,7 @@ int nsync_cv_wait_with_deadline_generic (nsync_cv *pcv, void *pmu,
waiter *w;
IGNORE_RACES_START ();
w = nsync_waiter_new_ ();
pthread_cleanup_push((void *)nsync_waiter_free_, w);
ATM_STORE (&w->nw.waiting, 1);
w->cond.f = NULL; /* Not using a conditional critical section. */
w->cond.v = NULL;
@ -312,6 +314,7 @@ int nsync_cv_wait_with_deadline_generic (nsync_cv *pcv, void *pmu,
(*lock) (pmu);
}
}
pthread_cleanup_pop(0);
IGNORE_RACES_END ();
return (outcome);
}
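
The pthread_cleanup_push / pthread_cleanup_pop(0) pair added above ensures the waiter record is released even if the thread is cancelled while parked in the wait; on the normal path pop(0) merely unregisters the handler. A standalone sketch of that discipline, with a plain malloc'd record standing in for nsync's waiter:

/* Sketch: protect a heap-allocated per-wait record with a cancellation
   cleanup handler so cancelling the thread mid-wait can't leak it.
   Plain POSIX; compile with -pthread. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void *start (void *arg) {
	int *rec = malloc (sizeof (*rec));  /* stand-in for nsync's waiter */
	(void)arg;
	pthread_cleanup_push (free, rec);   /* runs only if we get cancelled */
	*rec = 1;
	sleep (3600);                       /* stand-in for the blocking wait */
	pthread_cleanup_pop (0);            /* normal path: unregister only */
	free (rec);                         /* normal path frees it itself */
	return 0;
}

int main (void) {
	pthread_t th;
	pthread_create (&th, 0, start, 0);
	sleep (1);                          /* let the thread block */
	pthread_cancel (th);                /* cleanup handler frees rec */
	pthread_join (th, 0);
	puts ("no leak");
	return 0;
}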

View file

@ -15,6 +15,7 @@
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "libc/calls/blockcancel.internal.h"
#include "third_party/nsync/atomic.h"
#include "third_party/nsync/common.internal.h"
#include "third_party/nsync/dll.h"
@ -153,6 +154,7 @@ int nsync_mu_wait_with_deadline (nsync_mu *mu,
/* Work out in which mode the lock is held. */
uint32_t old_word;
IGNORE_RACES_START ();
BLOCK_CANCELLATIONS; /* not supported yet */
old_word = ATM_LOAD (&mu->word);
if ((old_word & MU_ANY_LOCK) == 0) {
nsync_panic_ ("nsync_mu not held in some mode when calling "
@ -265,6 +267,7 @@ int nsync_mu_wait_with_deadline (nsync_mu *mu,
if (condition_is_true) {
outcome = 0; /* condition is true trumps other outcomes. */
}
ALLOW_CANCELLATIONS;
IGNORE_RACES_END ();
return (outcome);
}

View file

@ -15,6 +15,7 @@
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "libc/calls/blockcancel.internal.h"
#include "libc/mem/mem.h"
#include "third_party/nsync/atomic.h"
#include "third_party/nsync/atomic.internal.h"
@ -36,6 +37,7 @@ int nsync_wait_n (void *mu, void (*lock) (void *), void (*unlock) (void *),
int count, struct nsync_waitable_s *waitable[]) {
int ready;
IGNORE_RACES_START ();
BLOCK_CANCELLATIONS; /* TODO(jart): Does this need pthread cancellations? */
for (ready = 0; ready != count &&
nsync_time_cmp ((*waitable[ready]->funcs->ready_time) (
waitable[ready]->v, NULL),
@ -102,6 +104,7 @@ int nsync_wait_n (void *mu, void (*lock) (void *), void (*unlock) (void *),
(*lock) (mu);
}
}
ALLOW_CANCELLATIONS;
IGNORE_RACES_END ();
return (ready);
}

View file

@ -15,6 +15,7 @@
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "libc/calls/blockcancel.internal.h"
#include "libc/str/str.h"
#include "third_party/nsync/atomic.h"
#include "third_party/nsync/common.internal.h"
@ -52,6 +53,7 @@ void nsync_mu_lock_slow_ (nsync_mu *mu, waiter *w, uint32_t clear, lock_type *l_
uint32_t wait_count;
uint32_t long_wait;
unsigned attempts = 0; /* attempt count; used for spinloop backoff */
BLOCK_CANCELLATIONS;
w->cv_mu = NULL; /* not a cv wait */
w->cond.f = NULL; /* Not using a conditional critical section. */
w->cond.v = NULL;
@ -72,7 +74,7 @@ void nsync_mu_lock_slow_ (nsync_mu *mu, waiter *w, uint32_t clear, lock_type *l_
if (ATM_CAS_ACQ (&mu->word, old_word,
(old_word+l_type->add_to_acquire) &
~(clear|long_wait|l_type->clear_on_acquire))) {
return;
break;
}
} else if ((old_word&MU_SPINLOCK) == 0 &&
ATM_CAS_ACQ (&mu->word, old_word,
@ -126,6 +128,7 @@ void nsync_mu_lock_slow_ (nsync_mu *mu, waiter *w, uint32_t clear, lock_type *l_
}
attempts = nsync_spin_delay_ (attempts);
}
ALLOW_CANCELLATIONS;
}
/* Attempt to acquire *mu in writer mode without blocking, and return non-zero
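
Two details above work together: cancellation stays masked for the whole slow path, and the early `return` on a successful acquire became `break`, so every exit now falls through to the ALLOW_CANCELLATIONS at the bottom. BLOCK_CANCELLATIONS / ALLOW_CANCELLATIONS are Cosmopolitan internals; the sketch below approximates the same shape with pthread_setcancelstate, which may or may not be how those macros are implemented:

/* Sketch: mask cancellation across a retry loop that has a single exit,
   so the restore step can't be skipped by an early return. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int word;

static void lock_slow (void) {
	int oldstate;
	pthread_setcancelstate (PTHREAD_CANCEL_DISABLE, &oldstate);
	for (;;) {
		int expect = 0;
		if (atomic_compare_exchange_weak (&word, &expect, 1)) {
			break;  /* a `return` here would skip the restore below */
		}
		/* spin, back off, or park here in the real slow path */
	}
	pthread_setcancelstate (oldstate, NULL);  /* cancellation re-enabled */
}

int main (void) {
	lock_slow ();
	printf ("word=%d\n", atomic_load (&word));
	return 0;
}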

View file

@ -17,6 +17,8 @@
*/
#include "libc/assert.h"
#include "libc/errno.h"
#include "libc/intrin/kprintf.h"
#include "libc/intrin/weaken.h"
#include "libc/str/str.h"
#include "libc/thread/thread.h"
#include "third_party/nsync/atomic.h"
@ -51,25 +53,32 @@ void nsync_mu_semaphore_init (nsync_semaphore *s) {
}
/* Wait until the count of *s exceeds 0, and decrement it. */
void nsync_mu_semaphore_p (nsync_semaphore *s) {
errno_t nsync_mu_semaphore_p (nsync_semaphore *s) {
struct futex *f = (struct futex *) s;
int i;
int result = 0;
do {
i = ATM_LOAD ((nsync_atomic_uint32_ *) &f->i);
if (i == 0) {
int futex_result = nsync_futex_wait_ ((atomic_int *)&f->i, i, PTHREAD_PROCESS_PRIVATE, NULL);
int futex_result = nsync_futex_wait_ (
(atomic_int *)&f->i, i, PTHREAD_PROCESS_PRIVATE, NULL);
ASSERT (futex_result == 0 ||
futex_result == -EINTR ||
futex_result == -EAGAIN ||
futex_result == -ECANCELED ||
futex_result == -EWOULDBLOCK);
if (futex_result == -ECANCELED) {
result = ECANCELED;
}
}
} while (i == 0 || !ATM_CAS_ACQ ((nsync_atomic_uint32_ *) &f->i, i, i-1));
} while (result == 0 && (i == 0 || !ATM_CAS_ACQ ((nsync_atomic_uint32_ *) &f->i, i, i-1)));
return result;
}
/* Wait until one of:
the count of *s is non-zero, in which case decrement *s and return 0;
or abs_deadline expires, in which case return ETIMEDOUT. */
int nsync_mu_semaphore_p_with_deadline (nsync_semaphore *s, nsync_time abs_deadline) {
errno_t nsync_mu_semaphore_p_with_deadline (nsync_semaphore *s, nsync_time abs_deadline) {
struct futex *f = (struct futex *)s;
int i;
int result = 0;
@ -103,6 +112,7 @@ int nsync_mu_semaphore_p_with_deadline (nsync_semaphore *s, nsync_time abs_deadl
ASSERT (futex_result == 0 ||
futex_result == -EINTR ||
futex_result == -EAGAIN ||
futex_result == -ECANCELED ||
futex_result == -ETIMEDOUT ||
futex_result == -EWOULDBLOCK);
/* Some systems don't wait as long as they are told. */
@ -110,6 +120,9 @@ int nsync_mu_semaphore_p_with_deadline (nsync_semaphore *s, nsync_time abs_deadl
nsync_time_cmp (abs_deadline, nsync_time_now ()) <= 0) {
result = ETIMEDOUT;
}
if (futex_result == -ECANCELED) {
result = ECANCELED;
}
}
} while (result == 0 && (i == 0 || !ATM_CAS_ACQ ((nsync_atomic_uint32_ *) &f->i, i, i - 1)));
return (result);
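
The P operation above now lets -ECANCELED from the futex wait end the retry loop and reports it to the caller, while -EINTR and -EAGAIN still just retry. For readers unfamiliar with the underlying shape, a stripped-down futex-backed P operation in plain Linux C (raw syscall(SYS_futex), no nsync or Cosmopolitan code), showing where such an error surfaces:

/* Sketch: a minimal futex-backed P operation on Linux using the raw
   futex syscall.  Stock Linux futex() won't return ECANCELED; it is
   Cosmopolitan's cancellable wrapper that produces the -ECANCELED the
   code above checks for. */
#include <errno.h>
#include <linux/futex.h>
#include <stdatomic.h>
#include <sys/syscall.h>
#include <unistd.h>

static int sem_p (atomic_int *count) {
	for (;;) {
		int c = atomic_load_explicit (count, memory_order_acquire);
		if (c == 0) {
			/* Sleep while the count is zero.  EAGAIN means the
			   value changed under us; EINTR means a signal; both
			   just mean "go around again".  Anything else is
			   reported to the caller, like ECANCELED above. */
			if (syscall (SYS_futex, count, FUTEX_WAIT_PRIVATE, 0,
				     (void *)0, (void *)0, 0) == -1 &&
			    errno != EAGAIN && errno != EINTR) {
				return errno;
			}
			continue;
		}
		if (atomic_compare_exchange_weak_explicit (
			    count, &c, c - 1,
			    memory_order_acquire, memory_order_relaxed)) {
			return 0;   /* decremented: semaphore acquired */
		}
	}
}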

View file

@ -12,13 +12,13 @@ typedef struct nsync_semaphore_s_ {
void nsync_mu_semaphore_init(nsync_semaphore *s);
/* Wait until the count of *s exceeds 0, and decrement it. */
void nsync_mu_semaphore_p(nsync_semaphore *s);
errno_t nsync_mu_semaphore_p(nsync_semaphore *s);
/* Wait until one of: the count of *s is non-zero, in which case
decrement *s and return 0; or abs_deadline expires, in which case
return ETIMEDOUT. */
int nsync_mu_semaphore_p_with_deadline(nsync_semaphore *s,
nsync_time abs_deadline);
errno_t nsync_mu_semaphore_p_with_deadline(nsync_semaphore *s,
nsync_time abs_deadline);
/* Ensure that the count of *s is at least 1. */
void nsync_mu_semaphore_v(nsync_semaphore *s);