Make improvements

- We now serialize the file descriptor table when spawning / executing
  processes on Windows. This means you can now inherit more stuff than
  just standard i/o. It's needed by bash, which duplicates the console
  to file descriptor #255. We also now do a better job serializing the
  environment variables, so you're less likely to encounter E2BIG when
  using your bash shell. We also no longer coerce environ to uppercase

- execve() on Windows now remotely controls its parent process to make
  it spawn a replacement for itself. Then it'll be able to terminate
  immediately once the spawn succeeds, without having to linger around
  for its lifetime as a shell process proxying the exit code. When the
  process worker thread running in the parent sees the child die, it's
  given a handle to the new child, to replace it in the process table.

- execve() and posix_spawn() on Windows will now provide CreateProcess
  an explicit handle list. This allows us to remove handle locks which
  enables better fork/spawn concurrency, with seriously correct thread
  safety. Other codebases like Go use the same technique. On the other
  hand fork() still favors the conventional WIN32 inheritance approach
  which can be a little bit messy, but is *controlled* by guaranteeing
  perfectly clean slates at both the spawning and execution boundaries

- sigset_t is now 64 bits. Having it be 128 bits was a mistake because
  there's no reason to use that and it's only supported by FreeBSD. By
  using the system word size, signal mask manipulation on Windows goes
  very fast. Furthermore @asyncsignalsafe funcs have been rewritten on
  Windows to take advantage of signal masking, now that it's much more
  pleasant to use.

- All the overlapped i/o code on Windows has been rewritten for pretty
  good signal and cancelation safety. We're now able to ensure overlap
  data structures are cleaned up so long as you don't longjmp() out of
  a signal handler that interrupted an i/o operation. Latencies
  are also improved thanks to the removal of lots of "busy wait" code.
  Waits should be optimal for everything except poll(), which shall be
  the last and final demon we slay in the win32 i/o horror show.

- getrusage() on Windows is now able to report RUSAGE_CHILDREN as well
  as RUSAGE_SELF, thanks to aggregation in the process manager thread.
This commit is contained in:
Justine Tunney 2023-10-08 05:36:18 -07:00
parent af7cb3c82f
commit 791f79fcb3
No known key found for this signature in database
GPG key ID: BE714B4575D6E328
382 changed files with 4008 additions and 4511 deletions

View file

@ -18,10 +18,13 @@
*/
#include "libc/sysv/consts/itimer.h"
#include "libc/calls/sig.internal.h"
#include "libc/calls/state.internal.h"
#include "libc/calls/struct/itimerval.h"
#include "libc/calls/struct/itimerval.internal.h"
#include "libc/calls/struct/sigset.internal.h"
#include "libc/calls/struct/timeval.h"
#include "libc/cosmo.h"
#include "libc/intrin/strace.internal.h"
#include "libc/nt/enum/processcreationflags.h"
#include "libc/nt/thread.h"
#include "libc/str/str.h"
@ -31,7 +34,6 @@
#include "libc/thread/itimer.internal.h"
#include "libc/thread/tls.h"
#include "third_party/nsync/mu.h"
#ifdef __x86_64__
struct IntervalTimer __itimer;
@ -80,7 +82,7 @@ static textwindows void __itimer_setup(void) {
kNtStackSizeParamIsAReservation, 0);
}
textwindows void __itimer_reset(void) {
textwindows void __itimer_wipe(void) {
// this function is called by fork(), because
// timers aren't inherited by forked subprocesses
bzero(&__itimer, sizeof(__itimer));
@ -99,6 +101,7 @@ textwindows int sys_setitimer_nt(int which, const struct itimerval *neu,
// accommodate the usage setitimer(ITIMER_REAL, &it, &it) anyway
config = *neu;
}
BLOCK_SIGNALS;
nsync_mu_lock(&__itimer.lock);
if (old) {
old->it_interval = __itimer.it.it_interval;
@ -112,6 +115,7 @@ textwindows int sys_setitimer_nt(int which, const struct itimerval *neu,
nsync_cv_signal(&__itimer.cond);
}
nsync_mu_unlock(&__itimer.lock);
ALLOW_SIGNALS;
return 0;
}

View file

@ -17,7 +17,7 @@ struct IntervalTimer {
extern struct IntervalTimer __itimer;
void __itimer_reset(void);
void __itimer_wipe(void);
COSMOPOLITAN_C_END_
#endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */

View file

@ -13,9 +13,12 @@
#define PT_NOCANCEL 8
#define PT_MASKED 16
#define PT_INCANCEL 32
#define PT_POLLING 64 // windows only
#define PT_INSEMAPHORE 128 // windows only
#define PT_OPENBSD_KLUDGE 128 // openbsd only
#define PT_RESTARTABLE 64
#define PT_OPENBSD_KLUDGE 128
#define PT_BLOCKER_CPU ((_Atomic(int) *)-0)
#define PT_BLOCKER_SEM ((_Atomic(int) *)-1)
#define PT_BLOCKER_IO ((_Atomic(int) *)-2)
#if !(__ASSEMBLER__ + __LINKER__ + 0)
COSMOPOLITAN_C_START_
@ -76,48 +79,49 @@ enum PosixThreadStatus {
#define POSIXTHREAD_CONTAINER(e) DLL_CONTAINER(struct PosixThread, list, e)
struct PosixThread {
int pt_flags; // 0x00: see PT_* constants
_Atomic(int) cancelled; // 0x04: thread has bad beliefs
_Atomic(enum PosixThreadStatus) status;
_Atomic(int) ptid; // transitions 0 → tid
void *(*start)(void *); // creation callback
void *arg; // start's parameter
void *rc; // start's return value
char *tls; // bottom of tls allocation
struct CosmoTib *tib; // middle of tls allocation
struct Dll list; // list of threads
_Atomic(_Atomic(int) *) pt_futex;
intptr_t semaphore;
intptr_t iohandle;
void *ioverlap;
jmp_buf exiter;
pthread_attr_t attr;
int abort_errno;
struct _pthread_cleanup_buffer *cleanup;
int pt_flags; // 0x00: see PT_* constants
_Atomic(int) pt_canceled; // 0x04: thread has bad beliefs
_Atomic(enum PosixThreadStatus) pt_status;
_Atomic(int) ptid; // transitions 0 → tid
void *(*pt_start)(void *); // creation callback
void *pt_arg; // start's parameter
void *pt_rc; // start's return value
char *pt_tls; // bottom of tls allocation
struct CosmoTib *tib; // middle of tls allocation
struct Dll list; // list of threads
struct _pthread_cleanup_buffer *pt_cleanup;
_Atomic(_Atomic(int) *) pt_blocker;
_Atomic(int) pt_futex;
int64_t pt_semaphore;
intptr_t pt_iohandle;
void *pt_ioverlap;
jmp_buf pt_exiter;
pthread_attr_t pt_attr;
};
typedef void (*atfork_f)(void);
extern struct Dll *_pthread_list;
extern pthread_spinlock_t _pthread_lock;
extern struct PosixThread _pthread_static;
extern _Atomic(pthread_key_dtor) _pthread_key_dtor[PTHREAD_KEYS_MAX];
void _pthread_decimate(void);
int _pthread_tid(struct PosixThread *);
void _pthread_unkey(struct CosmoTib *);
void _pthread_unwind(struct PosixThread *);
int _pthread_reschedule(struct PosixThread *);
intptr_t _pthread_syshand(struct PosixThread *);
int _pthread_atfork(atfork_f, atfork_f, atfork_f);
int _pthread_reschedule(struct PosixThread *);
int _pthread_setschedparam_freebsd(int, int, const struct sched_param *);
void _pthread_free(struct PosixThread *, bool);
void _pthread_zombify(struct PosixThread *);
void _pthread_onfork_prepare(void);
void _pthread_onfork_parent(void);
void _pthread_onfork_child(void);
int _pthread_tid(struct PosixThread *);
intptr_t _pthread_syshand(struct PosixThread *);
long _pthread_cancel_ack(void);
void _pthread_decimate(void);
void _pthread_free(struct PosixThread *, bool);
void _pthread_lock(void);
void _pthread_onfork_child(void);
void _pthread_onfork_parent(void);
void _pthread_onfork_prepare(void);
void _pthread_ungarbage(void);
void _pthread_unkey(struct CosmoTib *);
void _pthread_unlock(void);
void _pthread_unwind(struct PosixThread *);
void _pthread_zombify(struct PosixThread *);
__funline pureconst struct PosixThread *_pthread_self(void) {
return (struct PosixThread *)__get_tls()->tib_pthread;

View file

@ -24,7 +24,6 @@
#include "libc/errno.h"
#include "libc/intrin/atomic.h"
#include "libc/intrin/dll.h"
#include "libc/intrin/handlock.internal.h"
#include "libc/intrin/leaky.internal.h"
#include "libc/macros.internal.h"
#include "libc/mem/mem.h"
@ -48,6 +47,8 @@ static struct AtForks {
atomic_int allocated;
} _atforks;
extern pthread_spinlock_t _pthread_lock_obj;
static void _pthread_onfork(int i) {
struct AtFork *a;
unassert(0 <= i && i <= 2);
@ -61,33 +62,27 @@ static void _pthread_onfork(int i) {
void _pthread_onfork_prepare(void) {
_pthread_onfork(0);
pthread_spin_lock(&_pthread_lock);
_pthread_lock();
__fds_lock();
if (IsWindows()) {
__hand_lock();
}
__mmi_lock();
}
void _pthread_onfork_parent(void) {
__mmi_unlock();
if (IsWindows()) {
__hand_unlock();
}
__fds_unlock();
pthread_spin_unlock(&_pthread_lock);
_pthread_unlock();
_pthread_onfork(1);
}
void _pthread_onfork_child(void) {
if (IsWindows()) __hand_wipe();
pthread_mutexattr_t attr;
pthread_mutexattr_init(&attr);
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
extern pthread_mutex_t __mmi_lock_obj;
pthread_mutex_init(&__mmi_lock_obj, &attr);
pthread_mutex_init(&__fds_lock_obj, &attr);
(void)pthread_spin_init(&_pthread_lock, 0);
pthread_mutexattr_destroy(&attr);
(void)pthread_spin_init(&_pthread_lock_obj, 0);
_pthread_onfork(2);
}

View file

@ -32,6 +32,6 @@ errno_t pthread_attr_getsigmask_np(const pthread_attr_t *attr,
sigset_t *sigmask) {
_Static_assert(sizeof(attr->__sigmask) == sizeof(*sigmask), "");
if (!attr->__havesigmask) return PTHREAD_ATTR_NO_SIGMASK_NP;
if (sigmask) memcpy(sigmask, attr->__sigmask, sizeof(*sigmask));
if (sigmask) *sigmask = attr->__sigmask;
return 0;
}

View file

@ -44,7 +44,7 @@ errno_t pthread_attr_setsigmask_np(pthread_attr_t *attr,
_Static_assert(sizeof(attr->__sigmask) == sizeof(*sigmask), "");
if (sigmask) {
attr->__havesigmask = true;
memcpy(attr->__sigmask, sigmask, sizeof(*sigmask));
attr->__sigmask = *sigmask;
} else {
attr->__havesigmask = false;
}

View file

@ -17,30 +17,23 @@
PERFORMANCE OF THIS SOFTWARE.
*/
#include "libc/assert.h"
#include "libc/atomic.h"
#include "libc/calls/calls.h"
#include "libc/calls/sig.internal.h"
#include "libc/calls/struct/sigaction.h"
#include "libc/calls/struct/siginfo.h"
#include "libc/calls/struct/sigset.h"
#include "libc/calls/struct/ucontext-freebsd.internal.h"
#include "libc/calls/struct/ucontext.internal.h"
#include "libc/calls/syscall-sysv.internal.h"
#include "libc/calls/syscall_support-sysv.internal.h"
#include "libc/calls/ucontext.h"
#include "libc/cosmo.h"
#include "libc/dce.h"
#include "libc/errno.h"
#include "libc/intrin/atomic.h"
#include "libc/intrin/describeflags.internal.h"
#include "libc/intrin/kprintf.h"
#include "libc/intrin/strace.internal.h"
#include "libc/nt/enum/context.h"
#include "libc/nt/enum/threadaccess.h"
#include "libc/nt/runtime.h"
#include "libc/nt/struct/context.h"
#include "libc/nt/thread.h"
#include "libc/runtime/runtime.h"
#include "libc/runtime/syslib.internal.h"
#include "libc/str/str.h"
#include "libc/sysv/consts/sa.h"
#include "libc/sysv/consts/sicode.h"
#include "libc/sysv/consts/sig.h"
#include "libc/sysv/errfuns.h"
#include "libc/thread/posixthread.internal.h"
@ -58,7 +51,10 @@ long _pthread_cancel_ack(void) {
(pt->pt_flags & PT_ASYNC)) {
pthread_exit(PTHREAD_CANCELED);
}
pt->pt_flags |= PT_NOCANCEL | PT_OPENBSD_KLUDGE;
pt->pt_flags |= PT_NOCANCEL;
if (IsOpenbsd()) {
pt->pt_flags |= PT_OPENBSD_KLUDGE;
}
return ecanceled();
}
@ -70,7 +66,7 @@ static void _pthread_cancel_sig(int sig, siginfo_t *si, void *arg) {
if (!__tls_enabled) return;
if (!(pt = _pthread_self())) return;
if (pt->pt_flags & PT_NOCANCEL) return;
if (!atomic_load_explicit(&pt->cancelled, memory_order_acquire)) return;
if (!atomic_load_explicit(&pt->pt_canceled, memory_order_acquire)) return;
  // in asynchronous mode we'll just exit asynchronously
if (pt->pt_flags & PT_ASYNC) {
@ -84,61 +80,38 @@ static void _pthread_cancel_sig(int sig, siginfo_t *si, void *arg) {
// check for race condition between pre-check and syscall
// rewrite the thread's execution state to acknowledge it
if (systemfive_cancellable <= (char *)ctx->uc_mcontext.PC &&
(char *)ctx->uc_mcontext.PC < systemfive_cancellable_end) {
ctx->uc_mcontext.PC = (intptr_t)systemfive_cancel;
return;
// sadly windows isn't able to be sophisticated like this
if (!IsWindows()) {
if (systemfive_cancellable <= (char *)ctx->uc_mcontext.PC &&
(char *)ctx->uc_mcontext.PC < systemfive_cancellable_end) {
ctx->uc_mcontext.PC = (intptr_t)systemfive_cancel;
return;
}
}
// punts cancellation to start of next cancellation point
// punts cancelation to start of next cancellation point
// we ensure sigthr is a pending signal in case unblocked
raise(sig);
}
static void _pthread_cancel_listen(void) {
struct sigaction sa;
if (!IsWindows()) {
sa.sa_sigaction = _pthread_cancel_sig;
sa.sa_flags = SA_SIGINFO | SA_RESTART;
memset(&sa.sa_mask, -1, sizeof(sa.sa_mask));
npassert(!sigaction(SIGTHR, &sa, 0));
}
struct sigaction sa = {
.sa_mask = -1,
.sa_flags = SA_SIGINFO,
.sa_sigaction = _pthread_cancel_sig,
};
sigaction(SIGTHR, &sa, 0);
}
// Cancels a thread on Windows, where no signal-based delivery exists.
// For PT_ASYNC threads, the target is suspended and its register state
// is rewritten so that execution resumes inside pthread_exit(); in all
// cases any pending overlapped i/o is interrupted via __sig_cancel().
// NOTE(review): x86-64 only — this pokes Rip/Rdi/Rsp directly.
static void pthread_cancel_nt(struct PosixThread *pt, intptr_t hThread) {
  uint32_t old_suspend_count;
  if (!(pt->pt_flags & PT_NOCANCEL)) {
    // -1u means SuspendThread() failed, in which case we skip the
    // context-rewriting fast path and fall through to i/o cancelation
    if ((pt->pt_flags & PT_ASYNC) &&
        (old_suspend_count = SuspendThread(hThread)) != -1u) {
      // only rewrite the context if the thread was actually running
      // (suspend count of zero before our suspend); if someone else
      // already suspended it, mutating its registers would be unsafe
      if (!old_suspend_count) {
        struct NtContext cpu;
        cpu.ContextFlags = kNtContextControl | kNtContextInteger;
        if (GetThreadContext(hThread, &cpu)) {
          // redirect the thread to call pthread_exit(PTHREAD_CANCELED)
          // as though it had been invoked at the interrupted location
          cpu.Rip = (uintptr_t)pthread_exit;
          cpu.Rdi = (uintptr_t)PTHREAD_CANCELED;
          // align the stack, then push a fake return address so the
          // stack looks like a normal call frame on function entry
          // (presumably matching the SysV rsp%16==8 entry invariant —
          // TODO(review): confirm pthread_exit never returns anyway)
          cpu.Rsp &= -16;
          *(uintptr_t *)(cpu.Rsp -= sizeof(uintptr_t)) = cpu.Rip;
          unassert(SetThreadContext(hThread, &cpu));
        }
      }
      ResumeThread(hThread);
    }
    // record why the thread is dying and kick it out of blocking i/o
    pt->abort_errno = ECANCELED;
    __sig_cancel(pt, 0);
  }
}
static errno_t _pthread_cancel_impl(struct PosixThread *pt) {
static errno_t _pthread_cancel_single(struct PosixThread *pt) {
// install our special signal handler
static bool once;
if (!once) {
_pthread_cancel_listen();
once = true;
}
static atomic_uint once;
cosmo_once(&once, _pthread_cancel_listen);
// check if thread is already dead
switch (atomic_load_explicit(&pt->status, memory_order_acquire)) {
// we don't care about any further esrch checks upstream
switch (atomic_load_explicit(&pt->pt_status, memory_order_acquire)) {
case kPosixThreadZombie:
case kPosixThreadTerminated:
return ESRCH;
@ -146,12 +119,11 @@ static errno_t _pthread_cancel_impl(struct PosixThread *pt) {
break;
}
// flip the bit indicating that this thread is cancelled
atomic_store_explicit(&pt->cancelled, 1, memory_order_release);
// erase this thread from the book of life
atomic_store_explicit(&pt->pt_canceled, 1, memory_order_release);
// does this thread want to cancel itself?
// does this thread want to cancel itself? just exit
if (pt == _pthread_self()) {
unassert(!(pt->pt_flags & PT_NOCANCEL));
if (!(pt->pt_flags & (PT_NOCANCEL | PT_MASKED)) &&
(pt->pt_flags & PT_ASYNC)) {
pthread_exit(PTHREAD_CANCELED);
@ -159,24 +131,29 @@ static errno_t _pthread_cancel_impl(struct PosixThread *pt) {
return 0;
}
// send the cancelation signal
errno_t err;
if (IsWindows()) {
pthread_cancel_nt(pt, _pthread_syshand(pt));
err = 0;
} else if (IsXnuSilicon()) {
err = __syslib->__pthread_kill(_pthread_syshand(pt), SIGTHR);
} else {
int e = errno;
if (!sys_tkill(_pthread_tid(pt), SIGTHR, pt->tib)) {
err = pthread_kill((pthread_t)pt, SIGTHR);
if (err == ESRCH) err = 0;
return err;
}
static errno_t _pthread_cancel_everyone(void) {
errno_t err;
struct Dll *e;
struct PosixThread *other;
err = ESRCH;
_pthread_lock();
for (e = dll_first(_pthread_list); e; e = dll_next(_pthread_list, e)) {
other = POSIXTHREAD_CONTAINER(e);
if (other != _pthread_self() &&
atomic_load_explicit(&other->pt_status, memory_order_acquire) <
kPosixThreadTerminated) {
_pthread_cancel_single(other);
err = 0;
} else {
err = errno;
errno = e;
}
}
if (err == ESRCH) {
err = 0; // we already reported this
}
_pthread_unlock();
return err;
}
@ -185,18 +162,18 @@ static errno_t _pthread_cancel_impl(struct PosixThread *pt) {
*
* When a thread is cancelled, it'll interrupt blocking i/o calls,
* invoke any cleanup handlers that were pushed on the thread's stack
* before the cancellation occurred, in addition to destructing pthread
* before the cancelation occurred, in addition to destructing pthread
* keys, before finally, the thread shall abruptly exit.
*
* By default, pthread_cancel() can only take effect when a thread
* reaches a cancellation point. Such functions are documented with
* `@cancellationpoint`. They check the cancellation state before the
* reaches a cancelation point. Such functions are documented with
* `@cancelationpoint`. They check the cancellation state before the
* underlying system call is issued. If the system call is issued and
* blocks, then pthread_cancel() will interrupt the operation in which
* case the syscall wrapper will check the cancelled state a second
* time, only if the raw system call returned EINTR.
*
* The following system calls are implemented as cancellation points.
* The following system calls are implemented as cancelation points.
*
* - `accept4`
* - `accept`
@ -245,7 +222,7 @@ static errno_t _pthread_cancel_impl(struct PosixThread *pt) {
* - `write`
* - `writev`
*
* The following library calls are implemented as cancellation points.
* The following library calls are implemented as cancelation points.
*
* - `fopen`
* - `gzopen`, `gzread`, `gzwrite`, etc.
@ -269,8 +246,8 @@ static errno_t _pthread_cancel_impl(struct PosixThread *pt) {
* - `usleep`
*
* Other userspace libraries provided by Cosmopolitan Libc that call the
* cancellation points above will block cancellations while running. The
* following are examples of functions that *aren't* cancellation points
* cancelation points above will block cancellations while running. The
* following are examples of functions that *aren't* cancelation points
*
* - `INFOF()`, `WARNF()`, etc.
* - `getentropy`
@ -290,14 +267,14 @@ static errno_t _pthread_cancel_impl(struct PosixThread *pt) {
* - `timespec_sleep`
* - `touch`
*
* The way to block cancellations temporarily is:
* The way to block cancelations temporarily is:
*
* int cs;
* pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cs);
* // ...
* pthread_setcancelstate(cs, 0);
*
* In order to support cancellations all your code needs to be rewritten
* In order to support cancelations all your code needs to be rewritten
* so that when resources such as file descriptors are managed they must
* have a cleanup crew pushed to the stack. For example even malloc() is
* technically unsafe w.r.t. leaks without doing something like this:
@ -308,12 +285,12 @@ static errno_t _pthread_cancel_impl(struct PosixThread *pt) {
* pthread_cleanup_pop(1);
*
* Consider using Cosmopolitan Libc's garbage collector since it will be
* executed when a thread exits due to a cancellation.
* executed when a thread exits due to a cancelation.
*
* void *p = _gc(malloc(123));
* read(0, p, 123);
*
* It's possible to put a thread in asynchronous cancellation mode with
* It's possible to put a thread in asynchronous cancelation mode with
*
* pthread_setcancelstate(PTHREAD_CANCEL_ASYNCHRONOUS, 0);
* for (;;) donothing;
@ -321,15 +298,15 @@ static errno_t _pthread_cancel_impl(struct PosixThread *pt) {
* In which case a thread may be cancelled at any assembly opcode. This
* is useful for immediately halting threads that consume cpu and don't
* use any system calls. It shouldn't be used on threads that will call
* cancellation points since in that case asynchronous mode could cause
* cancelation points since in that case asynchronous mode could cause
* resource leaks to happen, in such a way that can't be worked around.
*
* If none of the above options seem savory to you, then a third way is
* offered for doing cancellations. Cosmopolitan Libc supports the Musl
* offered for doing cancelations. Cosmopolitan Libc supports the Musl
* Libc `PTHREAD_CANCEL_MASKED` non-POSIX extension. Any thread may pass
* this setting to pthread_setcancelstate(), in which case threads won't
* be abruptly destroyed upon cancellation and have their stack unwound;
* instead, cancellation points will simply raise an `ECANCELED` error,
* be abruptly destroyed upon cancelation and have their stack unwound;
* instead, cancelation points will simply raise an `ECANCELED` error,
* which can be more safely and intuitively handled for many use cases.
* For example:
*
@ -341,8 +318,8 @@ static errno_t _pthread_cancel_impl(struct PosixThread *pt) {
* pthread_exit(0);
* }
*
* Shows how the masked cancellations paradigm can be safely used. Note
* that it's so important that cancellation point error return codes be
* Shows how the masked cancelations paradigm can be safely used. Note
* that it's so important that cancelation point error return codes be
* checked. Code such as the following:
*
* pthread_setcancelstate(PTHREAD_CANCEL_MASKED, 0);
@ -354,10 +331,10 @@ static errno_t _pthread_cancel_impl(struct PosixThread *pt) {
* pthread_exit(0); // XXX: not run if write() was cancelled
* }
*
* Isn't safe to use in masked mode. That's because if a cancellation
* occurs during the write() operation then cancellations are blocked
* Isn't safe to use in masked mode. That's because if a cancelation
* occurs during the write() operation then cancelations are blocked
* while running read(). MASKED MODE DOESN'T HAVE SECOND CHANCES. You
* must rigorously check the results of each cancellation point call.
* must rigorously check the results of each cancelation point call.
*
* Unit tests should be able to safely ignore the return value, or at
* the very least be programmed to consider ESRCH a successful status
@ -368,30 +345,18 @@ static errno_t _pthread_cancel_impl(struct PosixThread *pt) {
*/
errno_t pthread_cancel(pthread_t thread) {
errno_t err;
struct Dll *e;
struct PosixThread *arg, *other;
struct PosixThread *arg;
if ((arg = (struct PosixThread *)thread)) {
err = _pthread_cancel_impl(arg);
err = _pthread_cancel_single(arg);
} else {
err = ESRCH;
pthread_spin_lock(&_pthread_lock);
for (e = dll_first(_pthread_list); e; e = dll_next(_pthread_list, e)) {
other = POSIXTHREAD_CONTAINER(e);
if (other != _pthread_self() &&
atomic_load_explicit(&other->status, memory_order_acquire) <
kPosixThreadTerminated) {
_pthread_cancel_impl(other);
err = 0;
}
}
pthread_spin_unlock(&_pthread_lock);
err = _pthread_cancel_everyone();
}
STRACE("pthread_cancel(%d) → %s", _pthread_tid(arg), DescribeErrno(err));
return err;
}
/**
* Creates cancellation point in calling thread.
* Creates cancelation point in calling thread.
*
* This function can be used to force `PTHREAD_CANCEL_DEFERRED` threads
* to cancel without needing to invoke an interruptible system call. If
@ -407,25 +372,25 @@ void pthread_testcancel(void) {
if (!(pt = _pthread_self())) return;
if (pt->pt_flags & PT_NOCANCEL) return;
if ((!(pt->pt_flags & PT_MASKED) || (pt->pt_flags & PT_ASYNC)) &&
atomic_load_explicit(&pt->cancelled, memory_order_acquire)) {
atomic_load_explicit(&pt->pt_canceled, memory_order_acquire)) {
pthread_exit(PTHREAD_CANCELED);
}
}
/**
* Creates cancellation point in calling thread.
* Creates cancelation point in calling thread.
*
* This function can be used to force `PTHREAD_CANCEL_DEFERRED` threads
* to cancel without needing to invoke an interruptible system call. If
* the calling thread is in the `PTHREAD_CANCEL_DISABLE` then this will
* do nothing. If the calling thread hasn't yet been cancelled, this'll
* do nothing. If the calling thread uses `PTHREAD_CANCEL_MASKED`, then
* this function returns `ECANCELED` if a cancellation occurred, rather
* this function returns `ECANCELED` if a cancelation occurred, rather
* than the normal behavior which is to destroy and cleanup the thread.
* Any `ECANCELED` result must not be ignored, because the thread shall
* have cancellations disabled once it occurs.
* have cancelations disabled once it occurs.
*
* @return 0 if not cancelled or cancellation is blocked or `ECANCELED`
* @return 0 if not cancelled or cancelation is blocked or `ECANCELED`
* in masked mode when the calling thread has been cancelled
*/
errno_t pthread_testcancel_np(void) {
@ -433,7 +398,7 @@ errno_t pthread_testcancel_np(void) {
if (!__tls_enabled) return 0;
if (!(pt = _pthread_self())) return 0;
if (pt->pt_flags & PT_NOCANCEL) return 0;
if (!atomic_load_explicit(&pt->cancelled, memory_order_acquire)) return 0;
if (!atomic_load_explicit(&pt->pt_canceled, memory_order_acquire)) return 0;
if (!(pt->pt_flags & PT_MASKED) || (pt->pt_flags & PT_ASYNC)) {
pthread_exit(PTHREAD_CANCELED);
} else {

View file

@ -44,7 +44,7 @@
* @raise ECANCELED if calling thread was cancelled in masked mode
* @see pthread_cond_broadcast()
* @see pthread_cond_signal()
* @cancellationpoint
* @cancelationpoint
*/
errno_t pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
const struct timespec *abstime) {

View file

@ -37,7 +37,7 @@
* @see pthread_cond_timedwait
* @see pthread_cond_broadcast
* @see pthread_cond_signal
* @cancellationpoint
* @cancelationpoint
*/
errno_t pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex) {
return pthread_cond_timedwait(cond, mutex, 0);

View file

@ -18,9 +18,9 @@
*/
#include "libc/assert.h"
#include "libc/atomic.h"
#include "libc/calls/blocksigs.internal.h"
#include "libc/calls/calls.h"
#include "libc/calls/struct/sigset.h"
#include "libc/calls/struct/sigset.internal.h"
#include "libc/calls/syscall-sysv.internal.h"
#include "libc/dce.h"
#include "libc/errno.h"
@ -31,7 +31,6 @@
#include "libc/intrin/bsr.h"
#include "libc/intrin/describeflags.internal.h"
#include "libc/intrin/dll.h"
#include "libc/intrin/kprintf.h"
#include "libc/intrin/strace.internal.h"
#include "libc/intrin/weaken.h"
#include "libc/log/internal.h"
@ -70,7 +69,7 @@ __static_yoink("_pthread_atfork");
void _pthread_free(struct PosixThread *pt, bool isfork) {
if (pt->pt_flags & PT_STATIC) return;
if (pt->pt_flags & PT_OWNSTACK) {
unassert(!munmap(pt->attr.__stackaddr, pt->attr.__stacksize));
unassert(!munmap(pt->pt_attr.__stackaddr, pt->pt_attr.__stacksize));
}
if (!isfork) {
if (IsWindows()) {
@ -83,28 +82,28 @@ void _pthread_free(struct PosixThread *pt, bool isfork) {
}
}
}
free(pt->tls);
free(pt->pt_tls);
free(pt);
}
static int PosixThread(void *arg, int tid) {
void *rc;
struct PosixThread *pt = arg;
if (pt->attr.__inheritsched == PTHREAD_EXPLICIT_SCHED) {
if (pt->pt_attr.__inheritsched == PTHREAD_EXPLICIT_SCHED) {
unassert(_weaken(_pthread_reschedule));
_weaken(_pthread_reschedule)(pt); // yoinked by attribute builder
}
// set long jump handler so pthread_exit can bring control back here
if (!setjmp(pt->exiter)) {
pthread_sigmask(SIG_SETMASK, (sigset_t *)pt->attr.__sigmask, 0);
rc = pt->start(pt->arg);
if (!setjmp(pt->pt_exiter)) {
pthread_sigmask(SIG_SETMASK, &pt->pt_attr.__sigmask, 0);
rc = pt->pt_start(pt->pt_arg);
// ensure pthread_cleanup_pop(), and pthread_exit() popped cleanup
unassert(!pt->cleanup);
unassert(!pt->pt_cleanup);
// calling pthread_exit() will either jump back here, or call exit
pthread_exit(rc);
}
// avoid signal handler being triggered after we trash our own stack
_sigblockall();
__sig_block();
// return to clone polyfill which clears tid, wakes futex, and exits
return 0;
}
@ -152,16 +151,11 @@ static errno_t pthread_create_impl(pthread_t *thread,
errno = e;
return EAGAIN;
}
pt->start = start_routine;
pt->arg = arg;
if (IsWindows()) {
if (!(pt->semaphore = CreateSemaphore(0, 0, 1, 0))) {
notpossible;
}
}
pt->pt_start = start_routine;
pt->pt_arg = arg;
// create thread local storage memory
if (!(pt->tls = _mktls(&pt->tib))) {
if (!(pt->pt_tls = _mktls(&pt->tib))) {
free(pt);
errno = e;
return EAGAIN;
@ -169,18 +163,18 @@ static errno_t pthread_create_impl(pthread_t *thread,
// setup attributes
if (attr) {
pt->attr = *attr;
pt->pt_attr = *attr;
attr = 0;
} else {
pthread_attr_init(&pt->attr);
pthread_attr_init(&pt->pt_attr);
}
// setup stack
if (pt->attr.__stackaddr) {
if (pt->pt_attr.__stackaddr) {
// caller supplied their own stack
// assume they know what they're doing as much as possible
if (IsOpenbsd()) {
if ((rc = FixupCustomStackOnOpenbsd(&pt->attr))) {
if ((rc = FixupCustomStackOnOpenbsd(&pt->pt_attr))) {
_pthread_free(pt, false);
return rc;
}
@ -191,38 +185,39 @@ static errno_t pthread_create_impl(pthread_t *thread,
// 2. in public world optimize to *work* regardless of memory
int granularity = FRAMESIZE;
int pagesize = getauxval(AT_PAGESZ);
pt->attr.__guardsize = ROUNDUP(pt->attr.__guardsize, pagesize);
pt->attr.__stacksize = ROUNDUP(pt->attr.__stacksize, granularity);
if (pt->attr.__guardsize + pagesize > pt->attr.__stacksize) {
pt->pt_attr.__guardsize = ROUNDUP(pt->pt_attr.__guardsize, pagesize);
pt->pt_attr.__stacksize = ROUNDUP(pt->pt_attr.__stacksize, granularity);
if (pt->pt_attr.__guardsize + pagesize > pt->pt_attr.__stacksize) {
_pthread_free(pt, false);
return EINVAL;
}
if (pt->attr.__guardsize == pagesize) {
pt->attr.__stackaddr =
mmap(0, pt->attr.__stacksize, PROT_READ | PROT_WRITE,
if (pt->pt_attr.__guardsize == pagesize) {
pt->pt_attr.__stackaddr =
mmap(0, pt->pt_attr.__stacksize, PROT_READ | PROT_WRITE,
MAP_STACK | MAP_ANONYMOUS, -1, 0);
} else {
pt->attr.__stackaddr =
mmap(0, pt->attr.__stacksize, PROT_READ | PROT_WRITE,
pt->pt_attr.__stackaddr =
mmap(0, pt->pt_attr.__stacksize, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (pt->attr.__stackaddr != MAP_FAILED) {
if (pt->pt_attr.__stackaddr != MAP_FAILED) {
if (IsOpenbsd() &&
__sys_mmap(
pt->attr.__stackaddr, pt->attr.__stacksize,
pt->pt_attr.__stackaddr, pt->pt_attr.__stacksize,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_FIXED | MAP_ANON_OPENBSD | MAP_STACK_OPENBSD,
-1, 0, 0) != pt->attr.__stackaddr) {
-1, 0, 0) != pt->pt_attr.__stackaddr) {
notpossible;
}
if (pt->attr.__guardsize) {
if (pt->pt_attr.__guardsize) {
if (!IsWindows()) {
if (mprotect(pt->attr.__stackaddr, pt->attr.__guardsize,
if (mprotect(pt->pt_attr.__stackaddr, pt->pt_attr.__guardsize,
PROT_NONE)) {
notpossible;
}
} else {
uint32_t oldattr;
if (!VirtualProtect(pt->attr.__stackaddr, pt->attr.__guardsize,
if (!VirtualProtect(pt->pt_attr.__stackaddr,
pt->pt_attr.__guardsize,
kNtPageReadwrite | kNtPageGuard, &oldattr)) {
notpossible;
}
@ -230,7 +225,7 @@ static errno_t pthread_create_impl(pthread_t *thread,
}
}
}
if (!pt->attr.__stackaddr || pt->attr.__stackaddr == MAP_FAILED) {
if (!pt->pt_attr.__stackaddr || pt->pt_attr.__stackaddr == MAP_FAILED) {
rc = errno;
_pthread_free(pt, false);
errno = e;
@ -241,8 +236,8 @@ static errno_t pthread_create_impl(pthread_t *thread,
}
}
pt->pt_flags |= PT_OWNSTACK;
if (IsAsan() && !IsWindows() && pt->attr.__guardsize) {
__asan_poison(pt->attr.__stackaddr, pt->attr.__guardsize,
if (IsAsan() && !IsWindows() && pt->pt_attr.__guardsize) {
__asan_poison(pt->pt_attr.__stackaddr, pt->pt_attr.__guardsize,
kAsanStackOverflow);
}
}
@ -250,17 +245,17 @@ static errno_t pthread_create_impl(pthread_t *thread,
// set initial status
pt->tib->tib_pthread = (pthread_t)pt;
atomic_store_explicit(&pt->tib->tib_sigmask, -1, memory_order_relaxed);
if (!pt->attr.__havesigmask) {
pt->attr.__havesigmask = true;
memcpy(pt->attr.__sigmask, &oldsigs, sizeof(oldsigs));
if (!pt->pt_attr.__havesigmask) {
pt->pt_attr.__havesigmask = true;
pt->pt_attr.__sigmask = oldsigs;
}
switch (pt->attr.__detachstate) {
switch (pt->pt_attr.__detachstate) {
case PTHREAD_CREATE_JOINABLE:
atomic_store_explicit(&pt->status, kPosixThreadJoinable,
atomic_store_explicit(&pt->pt_status, kPosixThreadJoinable,
memory_order_relaxed);
break;
case PTHREAD_CREATE_DETACHED:
atomic_store_explicit(&pt->status, kPosixThreadDetached,
atomic_store_explicit(&pt->pt_status, kPosixThreadDetached,
memory_order_relaxed);
break;
default:
@ -271,21 +266,21 @@ static errno_t pthread_create_impl(pthread_t *thread,
// add thread to global list
// we add it to the beginning since zombies go at the end
dll_init(&pt->list);
pthread_spin_lock(&_pthread_lock);
_pthread_lock();
dll_make_first(&_pthread_list, &pt->list);
pthread_spin_unlock(&_pthread_lock);
_pthread_unlock();
// launch PosixThread(pt) in new thread
if ((rc = clone(PosixThread, pt->attr.__stackaddr,
pt->attr.__stacksize - (IsOpenbsd() ? 16 : 0),
if ((rc = clone(PosixThread, pt->pt_attr.__stackaddr,
pt->pt_attr.__stacksize - (IsOpenbsd() ? 16 : 0),
CLONE_VM | CLONE_THREAD | CLONE_FS | CLONE_FILES |
CLONE_SIGHAND | CLONE_SYSVSEM | CLONE_SETTLS |
CLONE_PARENT_SETTID | CLONE_CHILD_SETTID |
CLONE_CHILD_CLEARTID,
pt, &pt->ptid, __adj_tls(pt->tib), &pt->tib->tib_tid))) {
pthread_spin_lock(&_pthread_lock);
_pthread_lock();
dll_remove(&_pthread_list, &pt->list);
pthread_spin_unlock(&_pthread_lock);
_pthread_unlock();
_pthread_free(pt, false);
return rc;
}

View file

@ -19,6 +19,7 @@
#include "libc/atomic.h"
#include "libc/intrin/atomic.h"
#include "libc/intrin/dll.h"
#include "libc/intrin/strace.internal.h"
#include "libc/runtime/runtime.h"
#include "libc/thread/posixthread.internal.h"
#include "libc/thread/thread.h"
@ -32,18 +33,18 @@ void _pthread_decimate(void) {
struct PosixThread *pt;
enum PosixThreadStatus status;
StartOver:
pthread_spin_lock(&_pthread_lock);
_pthread_lock();
for (e = dll_last(_pthread_list); e; e = dll_prev(_pthread_list, e)) {
pt = POSIXTHREAD_CONTAINER(e);
if (pt->tib == __get_tls()) continue;
status = atomic_load_explicit(&pt->status, memory_order_acquire);
status = atomic_load_explicit(&pt->pt_status, memory_order_acquire);
if (status != kPosixThreadZombie) break;
if (!atomic_load_explicit(&pt->tib->tib_tid, memory_order_acquire)) {
dll_remove(&_pthread_list, e);
pthread_spin_unlock(&_pthread_lock);
_pthread_unlock();
_pthread_free(pt, false);
goto StartOver;
}
}
pthread_spin_unlock(&_pthread_lock);
_pthread_unlock();
}

View file

@ -29,7 +29,7 @@
static errno_t pthread_detach_impl(struct PosixThread *pt) {
enum PosixThreadStatus status, transition;
for (;;) {
status = atomic_load_explicit(&pt->status, memory_order_acquire);
status = atomic_load_explicit(&pt->pt_status, memory_order_acquire);
if (status == kPosixThreadJoinable) {
transition = kPosixThreadDetached;
} else if (status == kPosixThreadTerminated) {
@ -37,8 +37,8 @@ static errno_t pthread_detach_impl(struct PosixThread *pt) {
} else {
return EINVAL;
}
if (atomic_compare_exchange_weak_explicit(&pt->status, &status, transition,
memory_order_release,
if (atomic_compare_exchange_weak_explicit(&pt->pt_status, &status,
transition, memory_order_release,
memory_order_relaxed)) {
if (transition == kPosixThreadZombie) {
_pthread_zombify(pt);

View file

@ -34,8 +34,8 @@
void _pthread_unwind(struct PosixThread *pt) {
struct _pthread_cleanup_buffer *cb;
while ((cb = pt->cleanup)) {
pt->cleanup = cb->__prev;
while ((cb = pt->pt_cleanup)) {
pt->pt_cleanup = cb->__prev;
cb->__routine(cb->__arg);
}
}
@ -104,7 +104,7 @@ wontreturn void pthread_exit(void *rc) {
tib = __get_tls();
pt = (struct PosixThread *)tib->tib_pthread;
pt->pt_flags |= PT_NOCANCEL;
pt->rc = rc;
pt->pt_rc = rc;
STRACE("pthread_exit(%p)", rc);
@ -122,7 +122,7 @@ wontreturn void pthread_exit(void *rc) {
}
// transition the thread to a terminated state
status = atomic_load_explicit(&pt->status, memory_order_acquire);
status = atomic_load_explicit(&pt->pt_status, memory_order_acquire);
do {
switch (status) {
case kPosixThreadJoinable:
@ -135,7 +135,7 @@ wontreturn void pthread_exit(void *rc) {
__builtin_unreachable();
}
} while (!atomic_compare_exchange_weak_explicit(
&pt->status, &status, transition, memory_order_release,
&pt->pt_status, &status, transition, memory_order_release,
memory_order_relaxed));
// make this thread a zombie if it was detached
@ -160,5 +160,5 @@ wontreturn void pthread_exit(void *rc) {
}
// this is a child thread
longjmp(pt->exiter, 1);
longjmp(pt->pt_exiter, 1);
}

View file

@ -59,8 +59,8 @@
*/
errno_t pthread_getattr_np(pthread_t thread, pthread_attr_t *attr) {
struct PosixThread *pt = (struct PosixThread *)thread;
memcpy(attr, &pt->attr, sizeof(pt->attr));
switch (atomic_load_explicit(&pt->status, memory_order_relaxed)) {
memcpy(attr, &pt->pt_attr, sizeof(pt->pt_attr));
switch (atomic_load_explicit(&pt->pt_status, memory_order_relaxed)) {
case kPosixThreadJoinable:
case kPosixThreadTerminated:
attr->__detachstate = PTHREAD_CREATE_JOINABLE;

View file

@ -130,8 +130,8 @@ errno_t pthread_getname_np(pthread_t thread, char *name, size_t size) {
errno_t rc;
struct PosixThread *pt;
pt = (struct PosixThread *)thread;
BLOCK_CANCELLATIONS;
BLOCK_CANCELATION;
rc = pthread_getname_impl(pt, name, size);
ALLOW_CANCELLATIONS;
ALLOW_CANCELATION;
return rc;
}

View file

@ -25,7 +25,7 @@
errno_t pthread_getschedparam(pthread_t thread, int *policy,
struct sched_param *param) {
struct PosixThread *pt = (struct PosixThread *)thread;
*policy = pt->attr.__schedpolicy;
*param = (struct sched_param){pt->attr.__schedparam};
*policy = pt->pt_attr.__schedpolicy;
*param = (struct sched_param){pt->pt_attr.__schedparam};
return 0;
}

View file

@ -22,7 +22,7 @@
* Waits for thread to terminate.
*
* Multiple threads joining the same thread is undefined behavior. If a
* deferred or masked cancellation happens to the calling thread either
* deferred or masked cancelation happens to the calling thread either
* before or during the waiting process then the target thread will not
* be joined. Calling pthread_join() on a non-joinable thread, e.g. one
* that's been detached, is undefined behavior. If a thread attempts to
@ -33,7 +33,7 @@
* pthread_cancel() destroyed the thread instead
* @return 0 on success, or errno on error
* @raise ECANCELED if calling thread was cancelled in masked mode
* @cancellationpoint
* @cancelationpoint
* @returnserrno
*/
errno_t pthread_join(pthread_t thread, void **value_ptr) {

View file

@ -47,7 +47,7 @@ errno_t pthread_kill(pthread_t thread, int sig) {
err = EINVAL;
} else if (thread == __get_tls()->tib_pthread) {
err = raise(sig); // XNU will EDEADLK it otherwise
} else if (atomic_load_explicit(&pt->status, memory_order_acquire) >=
} else if (atomic_load_explicit(&pt->pt_status, memory_order_acquire) >=
kPosixThreadTerminated) {
err = ESRCH;
} else if (IsWindows()) {

View file

@ -24,9 +24,9 @@
*/
int pthread_orphan_np(void) {
bool res;
pthread_spin_lock(&_pthread_lock);
_pthread_lock();
res = _pthread_list == _pthread_list->prev &&
_pthread_list == _pthread_list->next;
pthread_spin_unlock(&_pthread_lock);
_pthread_unlock();
return res;
}

View file

@ -25,9 +25,9 @@
#include "libc/thread/posixthread.internal.h"
errno_t _pthread_reschedule(struct PosixThread *pt) {
int policy = pt->attr.__schedpolicy;
int policy = pt->pt_attr.__schedpolicy;
int e, rc, tid = _pthread_tid(pt);
struct sched_param param = {pt->attr.__schedparam};
struct sched_param param = {pt->pt_attr.__schedparam};
e = errno;
if (IsNetbsd()) {
rc = sys_sched_setparam_netbsd(0, tid, policy, &param);

View file

@ -36,7 +36,7 @@ static const char *DescribeCancelType(char buf[12], int err, int *t) {
}
/**
* Sets cancellation strategy.
* Sets cancelation strategy.
*
* @param type may be one of:
* - `PTHREAD_CANCEL_DEFERRED` (default)

View file

@ -130,9 +130,9 @@ errno_t pthread_setname_np(pthread_t thread, const char *name) {
errno_t err;
struct PosixThread *pt;
pt = (struct PosixThread *)thread;
BLOCK_CANCELLATIONS;
BLOCK_CANCELATION;
err = pthread_setname_impl(pt, name);
ALLOW_CANCELLATIONS;
ALLOW_CANCELATION;
STRACE("pthread_setname_np(%d, %s) → %s", _pthread_tid(pt), name,
DescribeErrno(err));
return err;

View file

@ -44,7 +44,7 @@
errno_t pthread_setschedparam(pthread_t thread, int policy,
const struct sched_param *param) {
struct PosixThread *pt = (struct PosixThread *)thread;
pt->attr.__schedpolicy = policy;
pt->attr.__schedparam = param->sched_priority;
pt->pt_attr.__schedpolicy = policy;
pt->pt_attr.__schedparam = param->sched_priority;
return _pthread_reschedule(pt);
}

View file

@ -24,6 +24,6 @@
*/
errno_t pthread_setschedprio(pthread_t thread, int prio) {
struct PosixThread *pt = (struct PosixThread *)thread;
pt->attr.__schedparam = prio;
pt->pt_attr.__schedparam = prio;
return _pthread_reschedule(pt);
}

View file

@ -31,8 +31,6 @@
#include "libc/thread/tls.h"
#include "third_party/nsync/futex.internal.h"
// TODO(jart): Use condition variable for thread waiting.
static const char *DescribeReturnValue(char buf[30], int err, void **value) {
char *p = buf;
if (!value) return "NULL";
@ -55,7 +53,7 @@ static const char *DescribeReturnValue(char buf[30], int err, void **value) {
* @return 0 on success, or errno on error
* @raise ECANCELED if calling thread was cancelled in masked mode
* @raise EBUSY if `abstime` was specified and deadline expired
* @cancellationpoint
* @cancelationpoint
*/
static errno_t _pthread_wait(atomic_int *ctid, struct timespec *abstime) {
int x, e, rc = 0;
@ -63,7 +61,7 @@ static errno_t _pthread_wait(atomic_int *ctid, struct timespec *abstime) {
// "If the thread calling pthread_join() is canceled, then the target
// thread shall not be detached." ──Quoth POSIX.1-2017
if (!(rc = pthread_testcancel_np())) {
BEGIN_CANCELLATION_POINT;
BEGIN_CANCELATION_POINT;
while ((x = atomic_load_explicit(ctid, memory_order_acquire))) {
e = nsync_futex_wait_(ctid, x, !IsWindows() && !IsXnu(), abstime);
if (e == -ECANCELED) {
@ -74,7 +72,7 @@ static errno_t _pthread_wait(atomic_int *ctid, struct timespec *abstime) {
break;
}
}
END_CANCELLATION_POINT;
END_CANCELATION_POINT;
}
return rc;
}
@ -83,7 +81,7 @@ static errno_t _pthread_wait(atomic_int *ctid, struct timespec *abstime) {
* Waits for thread to terminate.
*
* Multiple threads joining the same thread is undefined behavior. If a
* deferred or masked cancellation happens to the calling thread either
* deferred or masked cancelation happens to the calling thread either
* before or during the waiting process then the target thread will not
* be joined. Calling pthread_join() on a non-joinable thread, e.g. one
* that's been detached, is undefined behavior. If a thread attempts to
@ -97,7 +95,7 @@ static errno_t _pthread_wait(atomic_int *ctid, struct timespec *abstime) {
* @return 0 on success, or errno on error
* @raise ECANCELED if calling thread was cancelled in masked mode
* @raise EBUSY if `abstime` deadline elapsed
* @cancellationpoint
* @cancelationpoint
* @returnserrno
*/
errno_t pthread_timedjoin_np(pthread_t thread, void **value_ptr,
@ -106,17 +104,17 @@ errno_t pthread_timedjoin_np(pthread_t thread, void **value_ptr,
struct PosixThread *pt;
enum PosixThreadStatus status;
pt = (struct PosixThread *)thread;
status = atomic_load_explicit(&pt->status, memory_order_acquire);
status = atomic_load_explicit(&pt->pt_status, memory_order_acquire);
// "The behavior is undefined if the value specified by the thread
// argument to pthread_join() does not refer to a joinable thread."
// ──Quoth POSIX.1-2017
unassert(status == kPosixThreadJoinable || status == kPosixThreadTerminated);
if (!(err = _pthread_wait(&pt->tib->tib_tid, abstime))) {
pthread_spin_lock(&_pthread_lock);
_pthread_lock();
dll_remove(&_pthread_list, &pt->list);
pthread_spin_unlock(&_pthread_lock);
_pthread_unlock();
if (value_ptr) {
*value_ptr = pt->rc;
*value_ptr = pt->pt_rc;
}
_pthread_free(pt, false);
_pthread_decimate();

View file

@ -22,7 +22,7 @@
* Joins thread if it's already terminated.
*
* Multiple threads joining the same thread is undefined behavior. If a
* deferred or masked cancellation happens to the calling thread either
* deferred or masked cancelation happens to the calling thread either
* before or during the waiting process then the target thread will not
* be joined. Calling pthread_join() on a non-joinable thread, e.g. one
* that's been detached, is undefined behavior. If a thread attempts to
@ -33,7 +33,7 @@
* pthread_cancel() destroyed the thread instead
* @return 0 on success, or errno on error
* @raise ECANCELED if calling thread was cancelled in masked mode
* @cancellationpoint
* @cancelationpoint
* @returnserrno
*/
errno_t pthread_tryjoin_np(pthread_t thread, void **value_ptr) {

View file

@ -21,8 +21,8 @@
#include "libc/thread/thread.h"
void _pthread_zombify(struct PosixThread *pt) {
pthread_spin_lock(&_pthread_lock);
_pthread_lock();
dll_remove(&_pthread_list, &pt->list);
dll_make_last(&_pthread_list, &pt->list);
pthread_spin_unlock(&_pthread_lock);
_pthread_unlock();
}

View file

@ -190,7 +190,7 @@ sem_t *sem_open(const char *name, int oflag, ...) {
if (!(path = sem_path_np(name, pathbuf, sizeof(pathbuf)))) {
return SEM_FAILED;
}
BLOCK_CANCELLATIONS;
BLOCK_CANCELATION;
sem_open_init();
sem_open_lock();
if ((s = sem_open_reopen(path))) {
@ -229,7 +229,7 @@ sem_t *sem_open(const char *name, int oflag, ...) {
sem = SEM_FAILED;
}
sem_open_unlock();
ALLOW_CANCELLATIONS;
ALLOW_CANCELATION;
return sem;
}

View file

@ -49,7 +49,7 @@ static void sem_timedwait_cleanup(void *arg) {
* @raise EDEADLK if deadlock was detected
* @raise ETIMEDOUT if deadline expired
* @raise EINVAL if `sem` is invalid
* @cancellationpoint
* @cancelationpoint
*/
int sem_timedwait(sem_t *sem, const struct timespec *abstime) {
int e, i, v, rc;
@ -67,7 +67,7 @@ int sem_timedwait(sem_t *sem, const struct timespec *abstime) {
}
}
BEGIN_CANCELLATION_POINT;
BEGIN_CANCELATION_POINT;
unassert(atomic_fetch_add_explicit(&sem->sem_waiters, +1,
memory_order_acq_rel) >= 0);
pthread_cleanup_push(sem_timedwait_cleanup, sem);
@ -102,7 +102,7 @@ int sem_timedwait(sem_t *sem, const struct timespec *abstime) {
memory_order_relaxed)));
pthread_cleanup_pop(1);
END_CANCELLATION_POINT;
END_CANCELATION_POINT;
return rc;
}

View file

@ -26,7 +26,7 @@
* @raise EINTR if signal was delivered instead
* @raise EDEADLK if deadlock was detected
* @raise EINVAL if `sem` is invalid
* @cancellationpoint
* @cancelationpoint
*/
int sem_wait(sem_t *sem) {
return sem_timedwait(sem, 0);

View file

@ -66,6 +66,7 @@
* if this parameter is NULL, we'll polyfill getitimer() behavior
* @param out_opt_old may receive remainder of previous op (if any)
* @return 0 on success or -1 w/ errno
* @asyncsignalsafe
*/
int setitimer(int which, const struct itimerval *newvalue,
struct itimerval *oldvalue) {

View file

@ -99,7 +99,7 @@ typedef struct pthread_attr_s {
int __contentionscope;
int __guardsize;
size_t __stacksize;
uint32_t __sigmask[4];
uint64_t __sigmask;
void *__stackaddr;
} pthread_attr_t;