Mirror of https://github.com/jart/cosmopolitan.git (synced 2025-01-31 03:27:39 +00:00)

Commit d7917ea076 (parent 585c86e2a4)
Make win32 i/o signals atomic and longjmp() safe

20 changed files with 381 additions and 263 deletions

Makefile (1 line changed)
@@ -374,6 +374,7 @@ COSMOPOLITAN_OBJECTS = \
	THIRD_PARTY_NSYNC_MEM \
	LIBC_MEM \
	THIRD_PARTY_DLMALLOC \
	LIBC_DLOPEN \
	LIBC_RUNTIME \
	THIRD_PARTY_NSYNC \
	LIBC_ELF \
@@ -17,10 +17,8 @@
│ PERFORMANCE OF THIS SOFTWARE. │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/calls/internal.h"
#include "libc/calls/sig.internal.h"
#include "libc/intrin/atomic.h"
#include "libc/intrin/weaken.h"
#include "libc/sysv/errfuns.h"
#include "libc/thread/posixthread.internal.h"
#ifdef __x86_64__

@@ -34,14 +32,4 @@ textwindows int _check_cancel(void) {
  return 0;
}

textwindows int _check_signal(bool restartable) {
  int status;
  if (_check_cancel() == -1) return -1;
  if (!_weaken(__sig_check)) return 0;
  if (!(status = _weaken(__sig_check)())) return 0;
  if (_check_cancel() == -1) return -1;
  if (status == 2 && restartable) return 0;
  return eintr();
}

#endif /* __x86_64__ */
libc/calls/checksignal.c (new file, 35 lines)

@@ -0,0 +1,35 @@
/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
│vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│
╞══════════════════════════════════════════════════════════════════════════════╡
│ Copyright 2023 Justine Alexandra Roberts Tunney │
│ │
│ Permission to use, copy, modify, and/or distribute this software for │
│ any purpose with or without fee is hereby granted, provided that the │
│ above copyright notice and this permission notice appear in all copies. │
│ │
│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │
│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │
│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │
│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │
│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │
│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
│ PERFORMANCE OF THIS SOFTWARE. │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/calls/internal.h"
#include "libc/calls/sig.internal.h"
#include "libc/intrin/weaken.h"
#include "libc/sysv/errfuns.h"
#ifdef __x86_64__

textwindows int _check_signal(bool restartable) {
  int status;
  if (_check_cancel() == -1) return -1;
  if (!_weaken(__sig_check)) return 0;
  if (!(status = _weaken(__sig_check)())) return 0;
  if (_check_cancel() == -1) return -1;
  if (status == 2 && restartable) return 0;
  return eintr();
}

#endif /* __x86_64__ */
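Note: a minimal usage sketch (not part of this commit) of the convention _check_signal() encodes. It returns -1 with EINTR or ECANCELED when the caller must bail out, and 0 either when nothing was pending or when every handler that ran was installed with SA_RESTART, in which case a blocking wrapper may simply retry. TryOperation() below is a hypothetical stand-in for the actual non-blocking win32 attempt.

// sketch only; TryOperation() is hypothetical
static ssize_t blocking_wrapper(bool restartable) {
  for (;;) {
    ssize_t rc = TryOperation();
    if (rc != -1 || errno != EAGAIN) return rc;        // finished, or a real error
    if (_check_signal(restartable) == -1) return -1;   // EINTR or ECANCELED raised
    // otherwise any handlers that ran were SA_RESTART, so loop and retry
  }
}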
@@ -16,50 +16,40 @@
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
│ PERFORMANCE OF THIS SOFTWARE. │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/assert.h"
#include "libc/calls/calls.h"
#include "libc/calls/internal.h"
#include "libc/calls/struct/sigset.internal.h"
#include "libc/calls/syscall_support-nt.internal.h"
#include "libc/errno.h"
#include "libc/calls/sig.internal.h"
#include "libc/intrin/atomic.h"
#include "libc/nt/enum/wait.h"
#include "libc/nt/runtime.h"
#include "libc/nt/synchronization.h"
#include "libc/sysv/consts/sicode.h"
#include "libc/sysv/errfuns.h"
#include "libc/thread/posixthread.internal.h"
#include "libc/thread/thread.h"
#include "libc/thread/tls.h"
#ifdef __x86_64__

// returns 0 on timeout or spurious wakeup
// raises EINTR if a signal delivery interrupted wait operation
// raises ECANCELED if this POSIX thread was canceled in masked mode
static textwindows int _park_thread(uint32_t msdelay, sigset_t waitmask,
                                    bool restartable) {
  int rc;
  int64_t sem;
  sigset_t om;
  uint32_t wi;
  struct PosixThread *pt;
  pt = _pthread_self();
  pt->pt_flags &= ~PT_RESTARTABLE;
  if (restartable) pt->pt_flags |= PT_RESTARTABLE;
  pt->pt_semaphore = sem = CreateSemaphore(0, 0, 1, 0);
  pthread_cleanup_push((void *)CloseHandle, (void *)sem);
  atomic_store_explicit(&pt->pt_blocker, PT_BLOCKER_SEM, memory_order_release);
  om = __sig_beginwait(waitmask);
  if ((rc = _check_signal(restartable)) != -1) {
    if ((wi = WaitForSingleObject(sem, msdelay)) != -1u) {
      if (restartable && !(pt->pt_flags & PT_RESTARTABLE)) rc = eintr();
      rc |= _check_signal(restartable);
    } else {
      rc = __winerr();
    }
  int sig;
  if (_check_cancel() == -1) return -1;
  if ((sig = __sig_get(waitmask))) {
    int handler_was_called = __sig_relay(sig, SI_KERNEL, waitmask);
    if (_check_cancel() == -1) return -1;
    if (!restartable || handler_was_called == 1) return eintr();
  }
  __sig_finishwait(om);
  atomic_store_explicit(&pt->pt_blocker, PT_BLOCKER_CPU, memory_order_release);
  pt->pt_flags &= ~PT_RESTARTABLE;
  pthread_cleanup_pop(true);
  pt->pt_semaphore = 0;
  return rc;
  int expect = 0;
  atomic_int futex = 0;
  struct PosixThread *pt = _pthread_self();
  pt->pt_blkmask = waitmask;
  atomic_store_explicit(&pt->pt_blocker, &futex, memory_order_release);
  bool32 ok = WaitOnAddress(&futex, &expect, sizeof(int), msdelay);
  atomic_store_explicit(&pt->pt_blocker, 0, memory_order_release);
  if (ok && (sig = __sig_get(waitmask))) {
    int handler_was_called = __sig_relay(sig, SI_KERNEL, waitmask);
    if (_check_cancel() == -1) return -1;
    if (!restartable || handler_was_called == 1) return eintr();
  }
  return 0;
}

textwindows int _park_norestart(uint32_t msdelay, sigset_t waitmask) {
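Distilled from the hunk above, the ordering that makes the futex-based park longjmp() safe is: publish the wait mask and blocker address, wait, unpublish, and only then dequeue and relay a pending signal, so a handler that longjmp()s can no longer strand a semaphore handle or the PT_RESTARTABLE flag (a sketch of the ordering only, using the names shown above):

  pt->pt_blkmask = waitmask;                             // what __sig_cancel() may deliver
  atomic_store_explicit(&pt->pt_blocker, &futex, memory_order_release);
  WaitOnAddress(&futex, &expect, sizeof(int), msdelay);  // may also wake spuriously
  atomic_store_explicit(&pt->pt_blocker, 0, memory_order_release);
  if ((sig = __sig_get(waitmask)))                       // handler may longjmp() from here on
    __sig_relay(sig, SI_KERNEL, waitmask);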
@@ -44,6 +44,7 @@
#include "libc/nt/enum/creationdisposition.h"
#include "libc/nt/enum/filesharemode.h"
#include "libc/nt/enum/vk.h"
#include "libc/nt/enum/wait.h"
#include "libc/nt/errors.h"
#include "libc/nt/runtime.h"
#include "libc/nt/struct/inputrecord.h"

@@ -51,6 +52,7 @@
#include "libc/str/str.h"
#include "libc/str/utf16.h"
#include "libc/sysv/consts/o.h"
#include "libc/sysv/consts/sicode.h"
#include "libc/sysv/consts/sig.h"
#include "libc/sysv/errfuns.h"
#include "libc/thread/posixthread.internal.h"

@@ -718,10 +720,10 @@ static textwindows bool DigestConsoleInput(char *data, size_t size, int *rc) {
}

static textwindows int WaitForConsole(struct Fd *f, sigset_t waitmask) {
  int rc;
  sigset_t m;
  int sig;
  int64_t sem;
  uint32_t ms = -1u;
  uint32_t wi, ms = -1;
  int handler_was_called;
  struct PosixThread *pt;
  if (!__ttyconf.vmin) {
    if (!__ttyconf.vtime) {

@@ -733,39 +735,42 @@ static textwindows int WaitForConsole(struct Fd *f, sigset_t waitmask) {
  if (f->flags & _O_NONBLOCK) {
    return eagain(); // standard unix non-blocking
  }
  if (_check_cancel() == -1) return -1;
  if ((sig = __sig_get(waitmask))) {
    handler_was_called = __sig_relay(sig, SI_KERNEL, waitmask);
    if (_check_cancel() == -1) return -1;
    if (handler_was_called != 1) return -2;
    return eintr();
  }
  pt = _pthread_self();
  pt->pt_flags |= PT_RESTARTABLE;
  pt->pt_semaphore = sem = CreateSemaphore(0, 0, 1, 0);
  pthread_cleanup_push((void *)CloseHandle, (void *)sem);
  pt->pt_blkmask = waitmask;
  atomic_store_explicit(&pt->pt_blocker, PT_BLOCKER_SEM, memory_order_release);
  m = __sig_beginwait(waitmask);
  if ((rc = _check_signal(true)) != -1) {
    int64_t hands[2] = {sem, __keystroke.cin};
    if (WaitForMultipleObjects(2, hands, 0, ms) != -1u) {
      if (!(pt->pt_flags & PT_RESTARTABLE)) rc = eintr();
      rc |= _check_signal(true);
    } else {
      rc = __winerr();
    }
  }
  __sig_finishwait(m);
  atomic_store_explicit(&pt->pt_blocker, PT_BLOCKER_CPU, memory_order_release);
  pt->pt_flags &= ~PT_RESTARTABLE;
  wi = WaitForMultipleObjects(2, (int64_t[2]){__keystroke.cin, sem}, 0, ms);
  atomic_store_explicit(&pt->pt_blocker, 0, memory_order_release);
  pthread_cleanup_pop(true);
  return rc;
  if (wi == kNtWaitTimeout) return 0; // vtime elapsed
  if (wi == 0) return -2; // console data
  if (wi != 1) return __winerr(); // wait failed
  if (!(sig = __sig_get(waitmask))) return eintr();
  handler_was_called = __sig_relay(sig, SI_KERNEL, waitmask);
  if (_check_cancel() == -1) return -1;
  if (handler_was_called != 1) return -2;
  return eintr();
}

static textwindows ssize_t ReadFromConsole(struct Fd *f, void *data,
                                           size_t size, sigset_t waitmask) {
  int rc;
  bool done = false;
  InitConsole();
  do {
    LockKeystrokes();
    IngestConsoleInput();
    done = DigestConsoleInput(data, size, &rc);
    bool done = DigestConsoleInput(data, size, &rc);
    UnlockKeystrokes();
  } while (!done && !(rc = WaitForConsole(f, waitmask)));
  if (done) return rc;
  } while ((rc = WaitForConsole(f, waitmask)) == -2);
  return rc;
}

@@ -823,7 +828,6 @@ static textwindows ssize_t sys_read_nt2(int fd, const struct iovec *iov,
      total += rc;
      if (opt_offset != -1) opt_offset += rc;
      if (rc < iov[i].iov_len) break;
      waitmask = -1; // disable eintr/ecanceled for remaining iovecs
    }
    return total;
  } else {
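A caller-side summary of the new WaitForConsole() return contract that ReadFromConsole()'s do/while loop relies on (an illustrative sketch, not code from this commit):

// hypothetical caller illustrating the return convention shown above
for (;;) {
  int rc = WaitForConsole(f, waitmask);
  if (rc == -2) continue;   // keystrokes arrived, or only SA_RESTART handlers ran: digest again
  if (rc == 0) break;       // the termios VTIME timer elapsed: return what was already read
  return rc;                // -1 with errno set (EINTR, ECANCELED, or a win32 wait failure)
}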
@@ -36,6 +36,7 @@
#include "libc/stdio/sysparam.h"
#include "libc/str/str.h"
#include "libc/sysv/consts/o.h"
#include "libc/sysv/consts/sicode.h"
#include "libc/sysv/errfuns.h"
#include "libc/thread/posixthread.internal.h"
#include "libc/thread/thread.h"

@@ -67,12 +68,12 @@ sys_readwrite_nt(int fd, void *data, size_t size, ssize_t offset,
                 bool32 ReadOrWriteFile(int64_t, void *, uint32_t, uint32_t *,
                                        struct NtOverlapped *)) {
  bool32 ok;
  uint64_t m;
  int sig = 0;
  uint32_t exchanged;
  int olderror = errno;
  bool eagained = false;
  bool eintered = false;
  bool canceled = false;
  int handler_was_called;
  struct PosixThread *pt;
  struct Fd *f = g_fds.p + fd;

@@ -104,73 +105,50 @@ sys_readwrite_nt(int fd, void *data, size_t size, ssize_t offset,
    }
  }

  // managing an overlapped i/o operation is tricky to do using just
  // imperative procedural logic. its design lends itself more to be
  // something that's abstracted in an object-oriented design, which
  // easily manages the unusual lifecycles requirements of the thing
  // the game here is to not return until win32 is done w/ `overlap`
  // next we need to allow signal handlers to re-enter this function
  // while we're performing a read in the same thread. this needs to
  // be thread safe too of course. read() / write() are cancelation
  // points so pthread_cancel() might teleport the execution here to
  // pthread_exit(), so we need cleanup handlers that pthread_exit()
  // can call, pushed onto the stack, so we don't leak win32 handles
  // or worse trash the thread stack containing `overlap` that win32
  // temporarily owns while the overlapped i/o op is being performed
  // we implement a non-blocking iop by optimistically performing io
  // and then aborting the operation if win32 says it needs to block
  // with cancelation points, we need to be able to raise eintr when
  // this thread is pre-empted to run a signal handler but only when
  // that signal handler wasn't installed using this SA_RESTART flag
  // in which case read() and write() will keep going afterwards. we
  // support a second kind of eintr in cosmo/musl which is ecanceled
  // and it's mission critical that it be relayed properly, since it
  // can only be returned by a single system call in a thread's life
  // another thing we do is check if any pending signals exist, then
  // running as many of them as possible before entering a wait call
RestartOperation:
  // signals have already been fully blocked by caller
  // perform i/o operation with atomic signal/cancel checking
  struct NtOverlapped overlap = {.hEvent = CreateEvent(0, 1, 0, 0),
                                 .Pointer = offset};
  struct ReadwriteResources rwc = {handle, &overlap};
  pthread_cleanup_push(UnwindReadwrite, &rwc);
  ok = ReadOrWriteFile(handle, data, size, 0, &overlap);
  if (!ok && GetLastError() == kNtErrorIoPending) {
  BlockingOperation:
    pt = _pthread_self();
    pt->pt_iohandle = handle;
    pt->pt_ioverlap = &overlap;
    pt->pt_flags |= PT_RESTARTABLE;
    atomic_store_explicit(&pt->pt_blocker, PT_BLOCKER_IO, memory_order_release);
    m = __sig_beginwait(waitmask);
    // win32 says this i/o operation needs to block
    if (f->flags & _O_NONBLOCK) {
      // abort the i/o operation if file descriptor is in non-blocking mode
      CancelIoEx(handle, &overlap);
      eagained = true;
    } else if (_check_signal(true)) {
    } else if (_check_cancel()) {
      // _check_cancel() can go three ways:
      // 1. it'll return 0 if we're fine and no thread cancelation happened
      // 2. it'll pthread_exit() and cleanup, when cancelation was deferred
      // 3. it'll return -1 and raise ECANCELED if a cancelation was masked
      CancelIoEx(handle, &overlap);
      canceled = true;
    } else if ((sig = __sig_get(waitmask))) {
      // we've dequeued a signal that was pending per caller's old sigmask
      // we can't call the signal handler until we release win32 resources
      CancelIoEx(handle, &overlap);
      if (errno == ECANCELED) {
        canceled = true;
      } else {
        eintered = true;
      }
    } else {
      // wait until i/o either completes or is canceled by another thread
      // we avoid a race condition by having a second mask for unblocking
      pt = _pthread_self();
      pt->pt_blkmask = waitmask;
      pt->pt_iohandle = handle;
      pt->pt_ioverlap = &overlap;
      atomic_store_explicit(&pt->pt_blocker, PT_BLOCKER_IO,
                            memory_order_release);
      WaitForSingleObject(overlap.hEvent, -1u);
      atomic_store_explicit(&pt->pt_blocker, 0, memory_order_release);
    }
    __sig_finishwait(m);
    atomic_store_explicit(&pt->pt_blocker, PT_BLOCKER_CPU,
                          memory_order_release);
    pt->pt_flags &= ~PT_RESTARTABLE;
    pt->pt_ioverlap = 0;
    pt->pt_iohandle = 0;
    ok = true;
  }
  if (ok) {
    bool32 should_wait = canceled || eagained;
    ok = GetOverlappedResult(handle, &overlap, &exchanged, should_wait);
    if (!ok && GetLastError() == kNtErrorIoIncomplete) {
      goto BlockingOperation;
    }
    ok = GetOverlappedResult(handle, &overlap, &exchanged, true);
  }
  CloseHandle(overlap.hEvent);
  pthread_cleanup_pop(false);
  CloseHandle(overlap.hEvent);

  // if we acknowledged a pending masked mode cancelation request then
  // we must pass it to the caller immediately now that cleanup's done

@@ -178,8 +156,16 @@ sys_readwrite_nt(int fd, void *data, size_t size, ssize_t offset,
    return ecanceled();
  }

  // sudden success trumps interrupts and/or failed i/o abort attempts
  // plenty of code above might clobber errno, so we always restore it
  // if we removed a pending signal then we must raise it
  // it's now safe to call a signal handler that longjmps
  if (sig) {
    handler_was_called = __sig_relay(sig, SI_KERNEL, waitmask);
    if (_check_cancel() == -1) return -1;
  } else {
    handler_was_called = 0;
  }

  // if i/o succeeded then return its result
  if (ok) {
    if (!pwriting && seekable) {
      f->pointer = offset + exchanged;

@@ -188,23 +174,24 @@ sys_readwrite_nt(int fd, void *data, size_t size, ssize_t offset,
    return exchanged;
  }

  // if we backed out of the i/o operation intentionally ignore errors
  if (eagained) {
    return eagain();
  }

  // if another thread canceled our win32 i/o operation then we should
  // check and see if it was pthread_cancel() which committed the deed
  // in which case _check_cancel() can acknowledge the cancelation now
  // it's also fine to do nothing here; punt to next cancelation point
  // only raise EINTR or EAGAIN if I/O got canceled
  if (GetLastError() == kNtErrorOperationAborted) {
    if (_check_cancel() == -1) return ecanceled();
    if (!eintered && _check_signal(false)) return -1;
  }

  // if we chose to process a pending signal earlier then we preserve
  // that original error explicitly here even though aborted == eintr
  if (eintered) {
    // raise EAGAIN if it's due to O_NONBLOCK mmode
    if (eagained) {
      return eagain();
    }
    // at this point the i/o must have been canceled due to a signal.
    // this could be because we found the signal earlier and canceled
    // ourself. otherwise it's due to a kill from another thread that
    // added something to our mask and canceled our i/o, so we check.
    if (!handler_was_called && (sig = __sig_get(waitmask))) {
      handler_was_called = __sig_relay(sig, SI_KERNEL, waitmask);
      if (_check_cancel() == -1) return -1;
    }
    // read() is @restartable unless non-SA_RESTART hands were called
    if (handler_was_called != 1) {
      goto RestartOperation;
    }
    return eintr();
  }
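For readers less familiar with win32, the overlapped-I/O bookkeeping above reduces to the sequence sketched below. This is an illustrative, self-contained example using documented win32 calls (CreateEventW, ReadFile, CancelIoEx, GetOverlappedResult); it is not the cosmopolitan implementation, and interrupted() is a stand-in for the signal/cancel checks that sys_readwrite_nt() performs.

#include <stdbool.h>
#include <windows.h>

extern bool interrupted(void);  // stand-in for the _check_cancel()/__sig_get() logic

// Start an overlapped read, optionally abort it, then always harvest the result
// so the OVERLAPPED and buffer stay valid until the kernel is done with them.
static bool OverlappedReadSketch(HANDLE h, void *buf, DWORD len, DWORD *got) {
  OVERLAPPED o = {0};
  o.hEvent = CreateEventW(NULL, TRUE, FALSE, NULL);  // manual-reset event
  BOOL ok = ReadFile(h, buf, len, NULL, &o);
  if (!ok && GetLastError() == ERROR_IO_PENDING) {
    if (interrupted()) {
      CancelIoEx(h, &o);  // ask the kernel to abort this specific operation
    } else {
      WaitForSingleObject(o.hEvent, INFINITE);
    }
    // bWait=TRUE guarantees the i/o has fully completed or aborted before we
    // let `o` go out of scope, which is what makes later cleanup/longjmp safe
    ok = GetOverlappedResult(h, &o, got, TRUE);
  } else if (ok) {
    ok = GetOverlappedResult(h, &o, got, TRUE);
  }
  CloseHandle(o.hEvent);
  return !!ok;  // on failure, GetLastError() may be ERROR_OPERATION_ABORTED
}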
libc/calls/sig.c (137 lines changed)

@@ -85,7 +85,8 @@ textwindows bool __sig_ignored(int sig) {

textwindows void __sig_delete(int sig) {
  struct Dll *e;
  __sig.pending &= ~(1ull << (sig - 1));
  atomic_fetch_and_explicit(&__sig.pending, ~(1ull << (sig - 1)),
                            memory_order_relaxed);
  BLOCK_SIGNALS;
  _pthread_lock();
  for (e = dll_last(_pthread_list); e; e = dll_prev(_pthread_list, e)) {

@@ -96,12 +97,11 @@ textwindows void __sig_delete(int sig) {
  ALLOW_SIGNALS;
}

static textwindows int __sig_getter(struct CosmoTib *tib, atomic_ulong *sigs) {
static textwindows int __sig_getter(atomic_ulong *sigs, sigset_t masked) {
  int sig;
  sigset_t bit, pending, masked, deliverable;
  sigset_t bit, pending, deliverable;
  for (;;) {
    pending = atomic_load_explicit(sigs, memory_order_acquire);
    masked = atomic_load_explicit(&tib->tib_sigmask, memory_order_acquire);
    if ((deliverable = pending & ~masked)) {
      sig = _bsfl(deliverable) + 1;
      bit = 1ull << (sig - 1);

@@ -114,10 +114,10 @@ static textwindows int __sig_getter(struct CosmoTib *tib, atomic_ulong *sigs) {
  }
}

static textwindows int __sig_get(struct CosmoTib *tib) {
textwindows int __sig_get(sigset_t masked) {
  int sig;
  if (!(sig = __sig_getter(tib, &tib->tib_sigpending))) {
    sig = __sig_getter(tib, &__sig.pending);
  if (!(sig = __sig_getter(&__get_tls()->tib_sigpending, masked))) {
    sig = __sig_getter(&__sig.pending, masked);
  }
  return sig;
}

@@ -154,7 +154,8 @@ static textwindows bool __sig_start(struct PosixThread *pt, int sig,
    STRACE("ignoring %G", sig);
    return false;
  }
  if (pt->tib->tib_sigmask & (1ull << (sig - 1))) {
  if (atomic_load_explicit(&pt->tib->tib_sigmask, memory_order_acquire) &
      (1ull << (sig - 1))) {
    STRACE("enqueing %G on %d", sig, _pthread_tid(pt));
    atomic_fetch_or_explicit(&pt->tib->tib_sigpending, 1ull << (sig - 1),
                             memory_order_relaxed);

@@ -196,7 +197,7 @@ textwindows int __sig_raise(int sig, int sic) {
    sigset_t blocksigs = __sighandmask[sig];
    if (!(flags & SA_NODEFER)) blocksigs |= 1ull << (sig - 1);
    ctx.uc_sigmask = atomic_fetch_or_explicit(
        &pt->tib->tib_sigmask, blocksigs, memory_order_acq_rel);
        &pt->tib->tib_sigmask, blocksigs, memory_order_acquire);

    // call the user's signal handler
    char ssbuf[2][128];

@@ -213,51 +214,41 @@ textwindows int __sig_raise(int sig, int sic) {
      handler_was_called |= (flags & SA_RESTART) ? 2 : 1;
    }
    sic = SI_KERNEL;
  } while ((sig = __sig_get(pt->tib)));
  } while ((sig = __sig_get(ctx.uc_sigmask)));
  return handler_was_called;
}

textwindows int __sig_relay(int sig, int sic, sigset_t waitmask) {
  int handler_was_called;
  sigset_t m = __sig_begin(waitmask);
  handler_was_called = __sig_raise(sig, SI_KERNEL);
  __sig_finish(m);
  return handler_was_called;
}

// cancels blocking operations being performed by signaled thread
textwindows void __sig_cancel(struct PosixThread *pt, int sig, unsigned flags) {
  bool should_restart;
  atomic_int *blocker;
  // cancelation points need to set pt_blocker before entering a wait op
  blocker = atomic_load_explicit(&pt->pt_blocker, memory_order_acquire);
  // cancelation points should set it back to this after blocking
  // however, code that longjmps might mess it up a tolerable bit
  if (blocker == PT_BLOCKER_CPU) {
    STRACE("%G delivered to %d asynchronously", sig, _pthread_tid(pt));
  if (!blocker) {
    STRACE("%G sent to %d asynchronously", sig, _pthread_tid(pt));
    return;
  }
  // most cancelation points can be safely restarted w/o raising eintr
  should_restart = (flags & SA_RESTART) && (pt->pt_flags & PT_RESTARTABLE);
  // we can cancel another thread's overlapped i/o op after the freeze
  if (blocker == PT_BLOCKER_IO) {
    if (should_restart) {
      STRACE("%G restarting %d's i/o op", sig, _pthread_tid(pt));
    } else {
      STRACE("%G interupting %d's i/o op", sig, _pthread_tid(pt));
      CancelIoEx(pt->pt_iohandle, pt->pt_ioverlap);
    }
    STRACE("%G canceling %d's i/o", sig, _pthread_tid(pt));
    CancelIoEx(pt->pt_iohandle, pt->pt_ioverlap);
    return;
  }
  // threads can create semaphores on an as-needed basis
  if (blocker == PT_BLOCKER_SEM) {
    if (should_restart) {
      STRACE("%G restarting %d's semaphore", sig, _pthread_tid(pt));
    } else {
      STRACE("%G releasing %d's semaphore", sig, _pthread_tid(pt));
      pt->pt_flags &= ~PT_RESTARTABLE;
      ReleaseSemaphore(pt->pt_semaphore, 1, 0);
    }
    STRACE("%G releasing %d's semaphore", sig, _pthread_tid(pt));
    ReleaseSemaphore(pt->pt_semaphore, 1, 0);
    return;
  }
  // all other blocking ops that aren't overlap should use futexes
  // we force restartable futexes to churn by waking w/o releasing
  STRACE("%G waking %d's futex", sig, _pthread_tid(pt));
  if (!should_restart) {
    atomic_store_explicit(blocker, 1, memory_order_release);
  }
  WakeByAddressSingle(blocker);
}

@@ -273,7 +264,7 @@ static textwindows wontreturn void __sig_tramp(struct SignalFrame *sf) {
    sigset_t blocksigs = __sighandmask[sig];
    if (!(sf->flags & SA_NODEFER)) blocksigs |= 1ull << (sig - 1);
    sf->ctx.uc_sigmask = atomic_fetch_or_explicit(&tib->tib_sigmask, blocksigs,
                                                  memory_order_acq_rel);
                                                  memory_order_acquire);

    // call the user's signal handler
    char ssbuf[2][128];

@@ -290,7 +281,7 @@ static textwindows wontreturn void __sig_tramp(struct SignalFrame *sf) {

    // jump back into original code if there aren't any pending signals
    do {
      if (!(sig = __sig_get(tib))) {
      if (!(sig = __sig_get(sf->ctx.uc_sigmask))) {
        __sig_restore(&sf->ctx);
      }
    } while (!__sig_start(pt, sig, &sf->rva, &sf->flags));

@@ -305,14 +296,24 @@ static textwindows wontreturn void __sig_tramp(struct SignalFrame *sf) {
  }
}

// sends signal to another specific thread which is ref'd
static int __sig_killer(struct PosixThread *pt, int sig, int sic) {
  unsigned rva = __sighandrvas[sig];
  unsigned flags = __sighandflags[sig];

  // prepare for signal
  unsigned rva, flags;
  if (!__sig_start(pt, sig, &rva, &flags)) {
  // do nothing if signal is ignored
  if (rva == (intptr_t)SIG_IGN ||
      (rva == (intptr_t)SIG_DFL && __sig_ignored_by_default(sig))) {
    STRACE("ignoring %G", sig);
    return 0;
  }

  // if there's no handler then killing a thread kills the process
  if (rva == (intptr_t)SIG_DFL) {
    STRACE("terminating on %G due to no handler", sig);
    __sig_terminate(sig);
  }

  // take control of thread
  // suspending the thread happens asynchronously
  // however getting the context blocks until it's frozen

@@ -334,11 +335,13 @@ static int __sig_killer(struct PosixThread *pt, int sig, int sic) {
  }
  pthread_spin_unlock(&killer_lock);

  // we can't preempt threads that masked sig or are blocked
  // we can't preempt threads that are running in win32 code
  if ((pt->tib->tib_sigmask & (1ull << (sig - 1))) ||
  // so we shall unblock the thread and let it signal itself
  if ((atomic_load_explicit(&pt->tib->tib_sigmask, memory_order_acquire) &
       (1ull << (sig - 1))) ||
      !((uintptr_t)__executable_start <= nc.Rip &&
        nc.Rip < (uintptr_t)__privileged_start)) {
    STRACE("enqueing %G on %d rip %p", sig, _pthread_tid(pt), nc.Rip);
    atomic_fetch_or_explicit(&pt->tib->tib_sigpending, 1ull << (sig - 1),
                             memory_order_relaxed);
    ResumeThread(th);

@@ -346,13 +349,14 @@ static int __sig_killer(struct PosixThread *pt, int sig, int sic) {
    return 0;
  }

  // we're committed to delivering this signal now
  // preferring to live dangerously
  // the thread will be signaled asynchronously
  if (flags & SA_RESETHAND) {
    STRACE("resetting %G handler", sig);
    __sighandrvas[sig] = (int32_t)(intptr_t)SIG_DFL;
  }

  // inject trampoline function call into thread
  // inject call to trampoline function into thread
  uintptr_t sp;
  if (__sig_should_use_altstack(flags, pt->tib)) {
    sp = (uintptr_t)pt->tib->tib_sigstack_addr + pt->tib->tib_sigstack_size;

@@ -381,6 +385,7 @@ static int __sig_killer(struct PosixThread *pt, int sig, int sic) {
  return 0;
}

// sends signal to another specific thread
textwindows int __sig_kill(struct PosixThread *pt, int sig, int sic) {
  int rc;
  BLOCK_SIGNALS;

@@ -391,6 +396,7 @@ textwindows int __sig_kill(struct PosixThread *pt, int sig, int sic) {
  return rc;
}

// sends signal to any other thread
textwindows void __sig_generate(int sig, int sic) {
  struct Dll *e;
  struct PosixThread *pt, *mark = 0;

@@ -406,22 +412,41 @@ textwindows void __sig_generate(int sig, int sic) {
  _pthread_lock();
  for (e = dll_first(_pthread_list); e; e = dll_next(_pthread_list, e)) {
    pt = POSIXTHREAD_CONTAINER(e);
    if (pt != _pthread_self() &&
        atomic_load_explicit(&pt->pt_status, memory_order_acquire) <
            kPosixThreadTerminated &&
        !(pt->tib->tib_sigmask & (1ull << (sig - 1)))) {
      _pthread_ref((mark = pt));
    // we don't want to signal ourself
    if (pt == _pthread_self()) continue;
    // we don't want to signal a thread that isn't running
    if (atomic_load_explicit(&pt->pt_status, memory_order_acquire) >=
        kPosixThreadTerminated) {
      continue;
    }
    // choose this thread if it isn't masking sig
    if (!(atomic_load_explicit(&pt->tib->tib_sigmask, memory_order_acquire) &
          (1ull << (sig - 1)))) {
      STRACE("generating %G by killing %d", sig, _pthread_tid(pt));
      _pthread_ref(pt);
      mark = pt;
      break;
    }
    // if a thread is blocking then we check to see if it's planning
    // to unblock our sig once the wait operation is completed; when
    // that's the case we can cancel the thread's i/o to deliver sig
    if (atomic_load_explicit(&pt->pt_blocker, memory_order_acquire) &&
        !(atomic_load_explicit(&pt->pt_blkmask, memory_order_relaxed) &
          (1ull << (sig - 1)))) {
      STRACE("generating %G by unblocking %d", sig, _pthread_tid(pt));
      _pthread_ref(pt);
      mark = pt;
      break;
    }
  }
  _pthread_unlock();
  if (mark) {
    STRACE("generating %G by killing %d", sig, _pthread_tid(mark));
    __sig_killer(mark, sig, sic);
    _pthread_unref(mark);
  } else {
    STRACE("all threads block %G so adding to pending signals of process", sig);
    __sig.pending |= 1ull << (sig - 1);
    atomic_fetch_or_explicit(&__sig.pending, 1ull << (sig - 1),
                             memory_order_relaxed);
  }
  ALLOW_SIGNALS;
}

@@ -541,11 +566,10 @@ static void __sig_unmaskable(struct NtExceptionPointers *ep, int code, int sig,
  ucontext_t ctx = {0};
  siginfo_t si = {.si_signo = sig, .si_code = code, .si_addr = si_addr};
  _ntcontext2linux(&ctx, ep->ContextRecord);
  ctx.uc_sigmask = tib->tib_sigmask;
  tib->tib_sigmask |= __sighandmask[sig];
  if (!(flags & SA_NODEFER)) {
    tib->tib_sigmask |= 1ull << (sig - 1);
  }
  sigset_t blocksigs = __sighandmask[sig];
  if (!(flags & SA_NODEFER)) blocksigs |= 1ull << (sig - 1);
  ctx.uc_sigmask = atomic_fetch_or_explicit(&tib->tib_sigmask, blocksigs,
                                            memory_order_acquire);
  __sig_handler(rva)(sig, &si, &ctx);
  atomic_store_explicit(&tib->tib_sigmask, ctx.uc_sigmask,
                        memory_order_release);

@@ -618,7 +642,8 @@ __msabi textwindows dontinstrument bool32 __sig_console(uint32_t dwCtrlType) {
// handlers were called (or `3` if both were the case).
textwindows int __sig_check(void) {
  int sig;
  if ((sig = __sig_get(__get_tls()))) {
  if ((sig = __sig_get(atomic_load_explicit(&__get_tls()->tib_sigmask,
                                            memory_order_acquire)))) {
    return __sig_raise(sig, SI_KERNEL);
  } else {
    return 0;
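The key atomicity change above is that both the per-thread and process-wide pending sets are now consumed with an atomic fetch-and-clear of the lowest deliverable bit. A standalone sketch of that pattern with C11 atomics (illustrative only; __builtin_ctzll stands in for _bsfl, and this is not the library's exact code):

#include <stdatomic.h>
#include <stdint.h>

// Dequeue the lowest-numbered deliverable signal from `*pending`, or return 0.
// The compare-exchange retries if another thread raced us for the same bit.
static int DequeueSignal(_Atomic(uint64_t) *pending, uint64_t masked) {
  uint64_t old = atomic_load_explicit(pending, memory_order_acquire);
  for (;;) {
    uint64_t deliverable = old & ~masked;
    if (!deliverable) return 0;
    int sig = __builtin_ctzll(deliverable) + 1;
    uint64_t bit = 1ull << (sig - 1);
    if (atomic_compare_exchange_weak_explicit(pending, &old, old & ~bit,
                                              memory_order_acq_rel,
                                              memory_order_acquire))
      return sig;
    // CAS failed: `old` now holds the refreshed value, so loop and pick again
  }
}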
@@ -25,7 +25,9 @@ bool __sig_ignored(int);
int __sig_check(void);
int __sig_kill(struct PosixThread *, int, int);
int __sig_mask(int, const sigset_t *, sigset_t *);
int __sig_relay(int, int, sigset_t);
int __sig_raise(int, int);
int __sig_get(sigset_t);
void __sig_delete(int);
void __sig_generate(int, int);
void __sig_init(void);
@@ -16,9 +16,9 @@ COSMOPOLITAN_C_START_
  while (0)

sigset_t __sig_block(void);
void __sig_finish(sigset_t);
void __sig_unblock(sigset_t);
void __sig_finishwait(sigset_t);
sigset_t __sig_beginwait(sigset_t);
sigset_t __sig_begin(sigset_t);
int __sys_sigprocmask(int, const uint64_t *, uint64_t *, uint64_t);
int sys_sigprocmask(int, const sigset_t *, sigset_t *);
int sys_sigsuspend(const uint64_t *, uint64_t);
@@ -49,7 +49,6 @@ static inline void RaiseSignal(int sig) {
static textwindows ssize_t sys_write_nt_impl(int fd, void *data, size_t size,
                                             ssize_t offset,
                                             uint64_t waitmask) {
  uint64_t m;
  struct Fd *f = g_fds.p + fd;
  bool isconsole = f->kind == kFdConsole;

@@ -78,9 +77,7 @@ static textwindows ssize_t sys_write_nt_impl(int fd, void *data, size_t size,
      // return edquot(); /* handled by consts.sh */
      case kNtErrorBrokenPipe:  // broken pipe
      case kNtErrorNoData:      // closing named pipe
        m = __sig_beginwait(waitmask);
        RaiseSignal(SIGPIPE);
        __sig_finishwait(m);
        return epipe();
      case kNtErrorAccessDenied:  // write doesn't return EACCESS
        return ebadf();

@@ -111,7 +108,6 @@ static textwindows ssize_t sys_write_nt2(int fd, const struct iovec *iov,
      total += rc;
      if (opt_offset != -1) opt_offset += rc;
      if (rc < iov[i].iov_len) break;
      waitmask = -1; // disable eintr/ecanceled for remaining iovecs
    }
    return total;
  } else {
@@ -39,7 +39,6 @@
#include "libc/nt/errors.h"
#include "libc/nt/memory.h"
#include "libc/nt/runtime.h"
#include "libc/runtime/elf_loader.h"
#include "libc/runtime/runtime.h"
#include "libc/runtime/syslib.internal.h"
#include "libc/stdio/stdio.h"
@@ -48,11 +48,11 @@ void __sig_unblock(sigset_t m) {
  }
}

textwindows sigset_t __sig_beginwait(sigset_t waitmask) {
textwindows sigset_t __sig_begin(sigset_t waitmask) {
  return atomic_exchange_explicit(&__get_tls()->tib_sigmask, waitmask,
                                  memory_order_acquire);
}

textwindows void __sig_finishwait(sigset_t m) {
textwindows void __sig_finish(sigset_t m) {
  atomic_store_explicit(&__get_tls()->tib_sigmask, m, memory_order_release);
}
@@ -130,11 +130,11 @@
SpuriousWakeup:
  ++*wv;
  pthread_cleanup_push(UnwindWaiterCount, wv);
  m = __sig_beginwait(waitmask);
  m = __sig_begin(waitmask);
  if ((rc = _check_signal(true)) != -1) {
    rc = nsync_cv_wait_with_deadline(cv, &__proc.lock, deadline, 0);
  }
  __sig_finishwait(m);
  __sig_finish(m);
  pthread_cleanup_pop(true);
  if (rc == -1) return -1;
  if (rc == ETIMEDOUT) goto WaitMore;
@@ -18,6 +18,7 @@
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/assert.h"
#include "libc/calls/internal.h"
#include "libc/calls/sig.internal.h"
#include "libc/calls/struct/fd.internal.h"
#include "libc/calls/struct/sigset.internal.h"
#include "libc/errno.h"

@@ -35,6 +36,7 @@
#include "libc/sock/internal.h"
#include "libc/sock/syscall_fd.internal.h"
#include "libc/sysv/consts/o.h"
#include "libc/sysv/consts/sicode.h"
#include "libc/sysv/errfuns.h"
#include "libc/thread/posixthread.internal.h"
#ifdef __x86_64__

@@ -60,92 +62,83 @@ static void CancelWinsockBlock(int64_t handle, struct NtOverlapped *overlap) {

textwindows ssize_t
__winsock_block(int64_t handle, uint32_t flags, bool nonblock,
                uint32_t srwtimeout, sigset_t wait_signal_mask,
                uint32_t srwtimeout, sigset_t waitmask,
                int StartSocketOp(int64_t handle, struct NtOverlapped *overlap,
                                  uint32_t *flags, void *arg),
                void *arg) {

  int rc;
  uint64_t m;
  int sig = 0;
  uint32_t status;
  uint32_t exchanged;
  int olderror = errno;
  bool eagained = false;
  bool eintered = false;
  bool canceled = false;
  int handler_was_called;
  struct PosixThread *pt;

RestartOperation:
  struct NtOverlapped overlap = {.hEvent = WSACreateEvent()};
  struct WinsockBlockResources wbr = {handle, &overlap};

  pthread_cleanup_push(UnwindWinsockBlock, &wbr);
  rc = StartSocketOp(handle, &overlap, &flags, arg);
  if (rc && WSAGetLastError() == kNtErrorIoPending) {
  BlockingOperation:
    pt = _pthread_self();
    pt->pt_iohandle = handle;
    pt->pt_ioverlap = &overlap;
    pt->pt_flags |= PT_RESTARTABLE;
    atomic_store_explicit(&pt->pt_blocker, PT_BLOCKER_IO, memory_order_release);
    m = __sig_beginwait(wait_signal_mask);
    if (nonblock) {
      CancelWinsockBlock(handle, &overlap);
      eagained = true;
    } else if (_check_signal(true)) {
    } else if (_check_cancel()) {
      CancelWinsockBlock(handle, &overlap);
      canceled = true;
    } else if ((sig = __sig_get(waitmask))) {
      CancelWinsockBlock(handle, &overlap);
      if (errno == ECANCELED) {
        canceled = true;
      } else {
        eintered = true;
      }
    } else {
      pt = _pthread_self();
      pt->pt_blkmask = waitmask;
      pt->pt_iohandle = handle;
      pt->pt_ioverlap = &overlap;
      atomic_store_explicit(&pt->pt_blocker, PT_BLOCKER_IO,
                            memory_order_release);
      status = WSAWaitForMultipleEvents(1, &overlap.hEvent, 0,
                                        srwtimeout ? srwtimeout : -1u, 0);
      atomic_store_explicit(&pt->pt_blocker, 0, memory_order_release);
      if (status == kNtWaitTimeout) {
        // rcvtimeo or sndtimeo elapsed
        // SO_RCVTIMEO or SO_SNDTIMEO elapsed
        CancelWinsockBlock(handle, &overlap);
        eagained = true;
      } else if (status == kNtWaitFailed) {
        // Failure should be an impossible condition, but MSDN lists
        // WSAENETDOWN and WSA_NOT_ENOUGH_MEMORY as possible errors.
        CancelWinsockBlock(handle, &overlap);
        eintered = true;
      }
    }
    __sig_finishwait(m);
    atomic_store_explicit(&pt->pt_blocker, PT_BLOCKER_CPU,
                          memory_order_release);
    pt->pt_flags &= ~PT_RESTARTABLE;
    pt->pt_ioverlap = 0;
    pt->pt_iohandle = 0;
    rc = 0;
  }
  if (!rc) {
    bool32 should_wait = canceled || eagained;
    bool32 ok = WSAGetOverlappedResult(handle, &overlap, &exchanged,
                                       should_wait, &flags);
    if (!ok && WSAGetLastError() == kNtErrorIoIncomplete) {
      goto BlockingOperation;
    }
    rc = ok ? 0 : -1;
    rc = WSAGetOverlappedResult(handle, &overlap, &exchanged, true, &flags)
             ? 0
             : -1;
  }
  WSACloseEvent(overlap.hEvent);
  pthread_cleanup_pop(false);
  WSACloseEvent(overlap.hEvent);

  if (canceled) {
    return ecanceled();
  }
  if (sig) {
    handler_was_called = __sig_relay(sig, SI_KERNEL, waitmask);
    if (_check_cancel() == -1) return -1;
  } else {
    handler_was_called = 0;
  }
  if (!rc) {
    errno = olderror;
    return exchanged;
  }
  if (eagained) {
    return eagain();
  }
  if (GetLastError() == kNtErrorOperationAborted) {
    if (_check_cancel() == -1) return ecanceled();
    if (!eintered && _check_signal(false)) return -1;
  }
  if (eintered) {
  if (WSAGetLastError() == kNtErrorOperationAborted) {
    if (eagained) return eagain();
    if (!handler_was_called && (sig = __sig_get(waitmask))) {
      handler_was_called = __sig_relay(sig, SI_KERNEL, waitmask);
      if (_check_cancel() == -1) return -1;
    }
    if (handler_was_called != 1) {
      goto RestartOperation;
    }
    return eintr();
  }
  return __winsockerr();
@@ -124,7 +124,7 @@ syscon errno ECANCELED 125 125 89 89 85 88 87 1223 # kNtError
syscon errno EOWNERDEAD 130 130 105 105 96 94 97 105 # kNtErrorSemOwnerDied; raised by pthread_cond_timedwait(3), pthread_mutex_consistent(3), pthread_mutex_getprioceiling(3), pthread_mutex_lock(3), pthread_mutex_timedlock(3), pthread_mutexattr_getrobust(3), pthread_mutexattr_setrobust(3)
syscon errno ENOTRECOVERABLE 131 131 104 104 95 93 98 0 # raised by pthread_cond_timedwait(3), pthread_mutex_consistent(3), pthread_mutex_getprioceiling(3), pthread_mutex_lock(3), pthread_mutex_timedlock(3), pthread_mutexattr_getrobust(3), pthread_mutexattr_setrobust(3)
syscon errno ENONET 64 64 317 317 317 317 317 0 # made up on BSDs; raised by accept(2)
syscon errno ERESTART 85 85 -1 -1 -1 -1 -3 0 # should only be seen in ptrace()
syscon errno ERESTART 85 85 318 318 318 318 -3 20000 # should only be seen in ptrace()
syscon errno ENODATA 61 61 96 96 0 0 89 232 # no message is available in xsi stream or named pipe is being closed; no data available; barely in posix; returned by ioctl; very close in spirit to EPIPE?
syscon errno ENOSR 63 63 98 98 0 90 90 0 # out of streams resources; something like EAGAIN; it's in POSIX; maybe some commercial UNIX returns it with openat, putmsg, putpmsg, posix_openpt, ioctl, open
syscon errno ENOSTR 60 60 99 99 0 0 91 0 # not a stream; returned by getmsg, putmsg, putpmsg, getpmsg
@@ -1,2 +1,2 @@
#include "libc/sysv/consts/syscon.internal.h"
.syscon errno,ERESTART,85,85,-1,-1,-1,-1,-3,0
.syscon errno,ERESTART,85,85,318,318,318,318,-3,20000
@@ -14,10 +14,8 @@
#define PT_NOCANCEL 8
#define PT_MASKED 16
#define PT_INCANCEL 32
#define PT_RESTARTABLE 64
#define PT_OPENBSD_KLUDGE 128
#define PT_OPENBSD_KLUDGE 64

#define PT_BLOCKER_CPU ((atomic_int *)-0)
#define PT_BLOCKER_SEM ((atomic_int *)-1)
#define PT_BLOCKER_IO ((atomic_int *)-2)

@@ -93,6 +91,7 @@ struct PosixThread {
  struct Dll list; // list of threads
  struct _pthread_cleanup_buffer *pt_cleanup;
  _Atomic(atomic_int *) pt_blocker;
  uint64_t pt_blkmask;
  int64_t pt_semaphore;
  intptr_t pt_iohandle;
  void *pt_ioverlap;
test/libc/calls/readwrite_test.c (new file, 72 lines)

@@ -0,0 +1,72 @@
/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
│vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│
╞══════════════════════════════════════════════════════════════════════════════╡
│ Copyright 2023 Justine Alexandra Roberts Tunney │
│ │
│ Permission to use, copy, modify, and/or distribute this software for │
│ any purpose with or without fee is hereby granted, provided that the │
│ above copyright notice and this permission notice appear in all copies. │
│ │
│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │
│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │
│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │
│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │
│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │
│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
│ PERFORMANCE OF THIS SOFTWARE. │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/atomic.h"
#include "libc/calls/calls.h"
#include "libc/calls/struct/sigaction.h"
#include "libc/runtime/clktck.h"
#include "libc/runtime/runtime.h"
#include "libc/sysv/consts/sa.h"
#include "libc/sysv/consts/sig.h"
#include "libc/testlib/testlib.h"
#include "libc/thread/thread.h"

jmp_buf jb;
int pfds[2];
atomic_bool isdone;
volatile bool canjmp;

void OnSignal(int sig) {
  if (canjmp) {
    canjmp = false;
    longjmp(jb, 1);
  }
}

void *ReadWorker(void *arg) {
  int got;
  char buf[8];
  while (!isdone) {
    if (!(got = setjmp(jb))) {
      canjmp = true;
      read(pfds[0], buf, sizeof(buf));
      abort();
    }
  }
  return 0;
}

TEST(eintr, longjmp) {
  pthread_t th;
  struct sigaction sa = {
      .sa_handler = OnSignal,
      .sa_flags = SA_SIGINFO | SA_NODEFER,
  };
  sigaction(SIGUSR1, &sa, 0);
  ASSERT_SYS(0, 0, pipe(pfds));
  ASSERT_EQ(0, pthread_create(&th, 0, ReadWorker, 0));
  for (int i = 0; i < 10; ++i) {
    pthread_kill(th, SIGUSR1);
    usleep(1. / CLK_TCK * 1e6);
  }
  isdone = true;
  pthread_kill(th, SIGUSR1);
  ASSERT_EQ(0, pthread_join(th, 0));
  ASSERT_SYS(0, 0, close(pfds[1]));
  ASSERT_SYS(0, 0, close(pfds[0]));
}
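One side note on the test above (an observation, not part of the commit): longjmp() out of a signal handler does not restore the signal mask, which works here because SA_NODEFER keeps SIGUSR1 unblocked while the handler runs; a mask-restoring variant of the same pattern would use sigsetjmp()/siglongjmp():

#include <setjmp.h>

static sigjmp_buf sjb;

void OnSignalAlt(int sig) {
  siglongjmp(sjb, 1);  // also restores the mask saved by sigsetjmp()
}

// in the worker:
//   if (!sigsetjmp(sjb, 1)) {  // savemask=1 records the current signal mask
//     read(pfds[0], buf, sizeof(buf));
//   }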
third_party/nsync/futex.c (vendored, 45 lines changed)

@@ -23,6 +23,8 @@
#include "libc/calls/internal.h"
#include "libc/calls/sig.internal.h"
#include "libc/calls/state.internal.h"
#include "libc/calls/struct/sigset.h"
#include "libc/calls/struct/sigset.internal.h"
#include "libc/calls/struct/timespec.h"
#include "libc/calls/struct/timespec.internal.h"
#include "libc/calls/syscall_support-nt.internal.h"

@@ -40,6 +42,7 @@
#include "libc/nt/synchronization.h"
#include "libc/runtime/clktck.h"
#include "libc/sysv/consts/clock.h"
#include "libc/sysv/consts/sicode.h"
#include "libc/sysv/consts/timer.h"
#include "libc/sysv/errfuns.h"
#include "libc/thread/freebsd.internal.h"

@@ -147,9 +150,11 @@ static int nsync_futex_polyfill_ (atomic_int *w, int expect, struct timespec *ab

static int nsync_futex_wait_win32_ (atomic_int *w, int expect, char pshare,
                                    const struct timespec *timeout,
                                    struct PosixThread *pt) {
                                    struct PosixThread *pt,
                                    sigset_t waitmask) {
  int sig;
  bool32 ok;
  struct timespec deadline, interval, remain, wait, now;
  struct timespec deadline, wait, now;

  if (timeout) {
    deadline = *timeout;

@@ -162,16 +167,36 @@ static int nsync_futex_wait_win32_ (atomic_int *w, int expect, char pshare,
    if (timespec_cmp (now, deadline) > 0) {
      return etimedout();
    }
    remain = timespec_sub (deadline, now);
    interval = timespec_frommillis (5000);
    wait = timespec_cmp (remain, interval) > 0 ? interval : remain;
    wait = timespec_sub (deadline, now);
    if (atomic_load_explicit (w, memory_order_acquire) != expect) {
      return 0;
    }
    if (pt) atomic_store_explicit (&pt->pt_blocker, w, memory_order_release);
    if (_check_signal (false) == -1) return -1;
    if (pt) {
      if (_check_cancel () == -1) {
        return -1; /* ECANCELED */
      }
      if ((sig = __sig_get (waitmask))) {
        __sig_relay (sig, SI_KERNEL, waitmask);
        if (_check_cancel () == -1) {
          return -1; /* ECANCELED */
        }
        return eintr ();
      }
      pt->pt_blkmask = waitmask;
      atomic_store_explicit (&pt->pt_blocker, w, memory_order_release);
    }
    ok = WaitOnAddress (w, &expect, sizeof(int), timespec_tomillis (wait));
    if (_check_signal (false) == -1) return -1;
    if (pt) {
      /* __sig_cancel wakes our futex without changing `w` after enqueing signals */
      atomic_store_explicit (&pt->pt_blocker, 0, memory_order_release);
      if (ok && atomic_load_explicit (w, memory_order_acquire) == expect && (sig = __sig_get (waitmask))) {
        __sig_relay (sig, SI_KERNEL, waitmask);
        if (_check_cancel () == -1) {
          return -1; /* ECANCELED */
        }
        return eintr ();
      }
    }
    if (ok) {
      return 0;
    } else {

@@ -233,7 +258,9 @@ int nsync_futex_wait_ (atomic_int *w, int expect, char pshare, const struct time
  if (IsWindows ()) {
    // Windows 8 futexes don't support multiple processes :(
    if (pshare) goto Polyfill;
    rc = nsync_futex_wait_win32_ (w, expect, pshare, timeout, pt);
    sigset_t m = __sig_block ();
    rc = nsync_futex_wait_win32_ (w, expect, pshare, timeout, pt, m);
    __sig_unblock (m);
  } else if (IsXnu ()) {
    uint32_t op, us;
    if (pshare) {
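For reference, the primitive the new futex path leans on is the Windows 8+ address-wait API: WaitOnAddress() blocks only while the watched word still holds the expected value, and WakeByAddressSingle() wakes one waiter. A minimal standalone sketch (not cosmopolitan code; link against Synchronization.lib):

#include <windows.h>

static volatile LONG word = 0;

void waiter(void) {
  LONG expect = 0;
  while (word == expect) {
    // returns when `word` no longer equals `expect`, on an explicit wake,
    // or after the timeout; wakeups may be spurious, hence the loop
    WaitOnAddress(&word, &expect, sizeof(word), 1000 /* ms */);
  }
}

void waker(void) {
  InterlockedExchange(&word, 1);
  WakeByAddressSingle((PVOID)&word);
}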