mirror of https://github.com/jart/cosmopolitan.git
synced 2025-02-07 06:53:33 +00:00
791f79fcb3
- We now serialize the file descriptor table when spawning / executing processes on Windows. This means you can now inherit more stuff than just standard i/o. It's needed by bash, which duplicates the console to file descriptor #255. We also now do a better job serializing the environment variables, so you're less likely to encounter E2BIG when using your bash shell. We also no longer coerce environ to uppercase.
- execve() on Windows now remotely controls its parent process to make it spawn a replacement for itself. It can then terminate immediately once the spawn succeeds, without having to linger for the child's lifetime as a shell process that proxies the exit code. When the process worker thread running in the parent sees the child die, it's given a handle to the new child, which replaces the old one in the process table.
- execve() and posix_spawn() on Windows will now provide CreateProcess an explicit handle list (a minimal sketch of this technique follows after this list). This allows us to remove handle locks, which enables better fork/spawn concurrency with seriously correct thread safety. Other codebases like Go use the same technique. On the other hand, fork() still favors the conventional WIN32 inheritance approach, which can be a little bit messy but is *controlled* by guaranteeing perfectly clean slates at both the spawning and execution boundaries.
- sigset_t is now 64 bits. Having it be 128 bits was a mistake, because there's no reason to use that and it's only supported by FreeBSD. By using the system word size, signal mask manipulation on Windows goes very fast. Furthermore, @asyncsignalsafe functions have been rewritten on Windows to take advantage of signal masking, now that it's much more pleasant to use.
- All the overlapped i/o code on Windows has been rewritten for pretty good signal and cancelation safety. We're now able to ensure overlap data structures are cleaned up so long as you don't longjmp() out of a signal handler that interrupted an i/o operation. Latencies are also improved thanks to the removal of lots of "busy wait" code. Waits should be optimal for everything except poll(), which shall be the last and final demon we slay in the win32 i/o horror show.
- getrusage() on Windows is now able to report RUSAGE_CHILDREN as well as RUSAGE_SELF, thanks to aggregation in the process manager thread.
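The explicit handle list mentioned above is a standard Win32 facility (PROC_THREAD_ATTRIBUTE_HANDLE_LIST). The sketch below is a generic illustration, not code from this repository; the helper name spawn_with_handles and its error handling are invented for the example. It shows spawning a child that inherits only the handles the parent names, which is the property that makes it possible to drop handle locks around spawn.

#include <windows.h>

// Spawn `cmdline`, letting the child inherit only the `n` handles in
// `handles`. Each handle must itself be marked inheritable (e.g. via
// SetHandleInformation with HANDLE_FLAG_INHERIT) for this to work.
static BOOL spawn_with_handles(wchar_t *cmdline, HANDLE *handles, DWORD n) {
        SIZE_T size = 0;
        BOOL ok = FALSE;
        PROCESS_INFORMATION pi;
        STARTUPINFOEXW si = {0};
        // First call just reports how big the attribute list must be.
        InitializeProcThreadAttributeList(NULL, 1, 0, &size);
        si.lpAttributeList = (LPPROC_THREAD_ATTRIBUTE_LIST)
                HeapAlloc(GetProcessHeap(), 0, size);
        if (!si.lpAttributeList) return FALSE;
        if (!InitializeProcThreadAttributeList(si.lpAttributeList, 1, 0, &size)) {
                HeapFree(GetProcessHeap(), 0, si.lpAttributeList);
                return FALSE;
        }
        if (UpdateProcThreadAttribute(si.lpAttributeList, 0,
                                      PROC_THREAD_ATTRIBUTE_HANDLE_LIST,
                                      handles, n * sizeof(HANDLE), NULL, NULL)) {
                si.StartupInfo.cb = sizeof(si);
                // bInheritHandles must still be TRUE; the attribute merely
                // narrows inheritance down to the listed handles.
                ok = CreateProcessW(NULL, cmdline, NULL, NULL, TRUE,
                                    EXTENDED_STARTUPINFO_PRESENT, NULL, NULL,
                                    &si.StartupInfo, &pi);
                if (ok) {
                        CloseHandle(pi.hThread);
                        CloseHandle(pi.hProcess);
                }
        }
        DeleteProcThreadAttributeList(si.lpAttributeList);
        HeapFree(GetProcessHeap(), 0, si.lpAttributeList);
        return ok;
}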
385 lines
11 KiB
C
/*-*- mode:c;indent-tabs-mode:t;c-basic-offset:8;tab-width:8;coding:utf-8 -*-│
│vi: set et ft=c ts=8 tw=8 fenc=utf-8                                     :vi│
╞══════════════════════════════════════════════════════════════════════════╡
│ Copyright 2022 Justine Alexandra Roberts Tunney                           │
│                                                                            │
│ Permission to use, copy, modify, and/or distribute this software for      │
│ any purpose with or without fee is hereby granted, provided that the      │
│ above copyright notice and this permission notice appear in all copies.   │
│                                                                            │
│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL             │
│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED             │
│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE          │
│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL      │
│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR     │
│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER            │
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR          │
│ PERFORMANCE OF THIS SOFTWARE.                                             │
╚───────────────────────────────────────────────────────────────────────────*/
#include "libc/sysv/consts/futex.h"
#include "libc/assert.h"
#include "libc/atomic.h"
#include "libc/calls/calls.h"
#include "libc/calls/internal.h"
#include "libc/calls/sig.internal.h"
#include "libc/calls/state.internal.h"
#include "libc/calls/struct/timespec.h"
#include "libc/calls/struct/timespec.internal.h"
#include "libc/calls/syscall_support-nt.internal.h"
#include "libc/cosmo.h"
#include "libc/dce.h"
#include "libc/errno.h"
#include "libc/intrin/atomic.h"
#include "libc/intrin/describeflags.internal.h"
#include "libc/intrin/strace.internal.h"
#include "libc/intrin/ulock.h"
#include "libc/intrin/weaken.h"
#include "libc/limits.h"
#include "libc/nexgen32e/vendor.internal.h"
#include "libc/nt/runtime.h"
#include "libc/nt/synchronization.h"
#include "libc/sysv/consts/clock.h"
#include "libc/sysv/consts/timer.h"
#include "libc/sysv/errfuns.h"
#include "libc/thread/freebsd.internal.h"
#include "libc/thread/posixthread.internal.h"
#include "libc/thread/thread.h"
#include "libc/thread/tls.h"
#include "third_party/nsync/atomic.h"
#include "third_party/nsync/common.internal.h"
#include "third_party/nsync/futex.internal.h"
#include "third_party/nsync/time.h"
// clang-format off

#define FUTEX_WAIT_BITS_ FUTEX_BITSET_MATCH_ANY

errno_t _futex (atomic_int *, int, int, const struct timespec *, int *, int);
errno_t _futex_wake (atomic_int *, int, int) asm ("_futex");
int sys_futex_cp (atomic_int *, int, int, const struct timespec *, int *, int);

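// Futex backend configuration. It's populated at most once per process
// by nsync_futex_init_() below, via cosmo_once(), then consulted by the
// wait and wake paths to pick the right primitive for the host system.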
static struct NsyncFutex {
        atomic_uint once;
        int FUTEX_WAIT_;
        int FUTEX_PRIVATE_FLAG_;
        bool is_supported;
        bool timeout_is_relative;
} nsync_futex_;

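// Detects which wait primitive the host supports: WaitOnAddress on
// Windows, ulock on XNU, umtx on FreeBSD, futex(2) on Linux/OpenBSD.
// On Linux it also probes which futex operations the kernel accepts,
// and it records whether the backend wants relative rather than
// absolute timeouts. When nothing is supported, the polyfill is used.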
static void nsync_futex_init_ (void) {
        int e;
        atomic_int x;

        nsync_futex_.FUTEX_WAIT_ = FUTEX_WAIT;

        if (IsWindows ()) {
                nsync_futex_.is_supported = true;
                return;
        }

        if (IsXnu ()) {
                nsync_futex_.is_supported = true;
                nsync_futex_.timeout_is_relative = true;
                return;
        }

        if (IsFreebsd ()) {
                nsync_futex_.is_supported = true;
                nsync_futex_.FUTEX_PRIVATE_FLAG_ = FUTEX_PRIVATE_FLAG;
                return;
        }

        if (!(nsync_futex_.is_supported = IsLinux () || IsOpenbsd ())) {
                return;
        }

        // In our testing, we found that the monotonic clock on various
        // popular systems (such as Linux, and some BSD variants) was no
        // better behaved than the realtime clock, and routinely took
        // large steps backwards, especially on multiprocessors. Given
        // that "monotonic" doesn't seem to mean what it says,
        // implementers of nsync_time might consider retaining the
        // simplicity of a single epoch within an address space, by
        // configuring any time synchronization mechanism (like ntp) to
        // adjust for leap seconds by adjusting the rate, rather than
        // with a backwards step.
        e = errno;
        atomic_store_explicit (&x, 0, memory_order_relaxed);
        if (IsLinux () &&
            _futex (&x, FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME,
                    1, 0, 0, FUTEX_BITSET_MATCH_ANY) == -EAGAIN) {
                nsync_futex_.FUTEX_WAIT_ =
                        FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME;
                nsync_futex_.FUTEX_PRIVATE_FLAG_ = FUTEX_PRIVATE_FLAG;
        } else if (!IsTiny () && IsLinux () &&
                   _futex (&x, FUTEX_WAIT_BITSET, 1, 0, 0,
                           FUTEX_BITSET_MATCH_ANY) == -EAGAIN) {
                nsync_futex_.FUTEX_WAIT_ = FUTEX_WAIT_BITSET;
                nsync_futex_.FUTEX_PRIVATE_FLAG_ = FUTEX_PRIVATE_FLAG;
        } else if (IsOpenbsd () ||
                   (!IsTiny () && IsLinux () &&
                    !_futex_wake (&x, FUTEX_WAKE_PRIVATE, 1))) {
                nsync_futex_.FUTEX_WAIT_ = FUTEX_WAIT;
                nsync_futex_.FUTEX_PRIVATE_FLAG_ = FUTEX_PRIVATE_FLAG;
                nsync_futex_.timeout_is_relative = true;
        } else {
                nsync_futex_.FUTEX_WAIT_ = FUTEX_WAIT;
                nsync_futex_.timeout_is_relative = true;
        }
        errno = e;
}

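// Fallback used when no kernel wait primitive is available (or when a
// process-shared wait is requested on Windows): spin on the word with
// an exponentially growing sleep, from 100ns up to __SIG_LOCK_INTERVAL_MS,
// checking the deadline against the realtime clock on every lap.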
static int nsync_futex_polyfill_ (atomic_int *w, int expect, struct timespec *abstime) {
        int rc;
        int64_t nanos, maxnanos;
        struct timespec now, wait, remain, deadline;

        if (!abstime) {
                deadline = timespec_max;
        } else {
                deadline = *abstime;
        }

        nanos = 100;
        maxnanos = __SIG_LOCK_INTERVAL_MS * 1000L * 1000;
        for (;;) {
                if (atomic_load_explicit (w, memory_order_acquire) != expect) {
                        return 0;
                }
                now = timespec_real ();
                if (atomic_load_explicit (w, memory_order_acquire) != expect) {
                        return 0;
                }
                if (timespec_cmp (now, deadline) >= 0) {
                        break;
                }
                wait = timespec_fromnanos (nanos);
                remain = timespec_sub (deadline, now);
                if (timespec_cmp (wait, remain) > 0) {
                        wait = remain;
                }
                if ((rc = clock_nanosleep (CLOCK_REALTIME, 0, &wait, 0))) {
                        return -rc;
                }
                if (nanos < maxnanos) {
                        nanos <<= 1;
                        if (nanos > maxnanos) {
                                nanos = maxnanos;
                        }
                }
        }

        return -ETIMEDOUT;
}

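// Windows wait path. The wait is chopped into __SIG_LOCK_INTERVAL_MS
// slices so that pending signals and thread cancelation can be polled
// between WaitOnAddress() calls, keeping blocked threads interruptible.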
static int nsync_futex_wait_win32_ (atomic_int *w, int expect, char pshare,
                                    const struct timespec *timeout,
                                    struct PosixThread *pt) {
        bool32 ok;
        struct timespec deadline, interval, remain, wait, now;

        if (timeout) {
                deadline = *timeout;
        } else {
                deadline = timespec_max;
        }

        for (;;) {
                now = timespec_real ();
                if (timespec_cmp (now, deadline) > 0) {
                        return etimedout();
                }
                remain = timespec_sub (deadline, now);
                interval = timespec_frommillis (__SIG_LOCK_INTERVAL_MS);
                wait = timespec_cmp (remain, interval) > 0 ? interval : remain;
                if (atomic_load_explicit (w, memory_order_acquire) != expect) {
                        return 0;
                }
                if (pt) atomic_store_explicit (&pt->pt_blocker, w, memory_order_release);
                if (_check_cancel() == -1) return -1;
                if (_check_signal(false) == -1) return -1;
                ok = WaitOnAddress (w, &expect, sizeof(int), timespec_tomillis (wait));
                if (_check_cancel() == -1) return -1;
                if (ok) {
                        return 0;
                } else {
                        ASSERT (GetLastError () == ETIMEDOUT);
                }
        }
}

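// Normalizes the caller's absolute deadline for the chosen backend:
// NULL when there is no deadline, a copy for absolute-time backends,
// or the delta from the current time when timeout_is_relative is set.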
static struct timespec *nsync_futex_timeout_ (struct timespec *memory,
                                              const struct timespec *abstime) {
        struct timespec now;
        if (!abstime) {
                return 0;
        } else if (!nsync_futex_.timeout_is_relative) {
                *memory = *abstime;
                return memory;
        } else {
                now = timespec_real ();
                *memory = timespec_subz (*abstime, now);
                return memory;
        }
}

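// Blocks until *w no longer holds the value `expect`, the absolute
// deadline `abstime` expires, a signal arrives, or the thread gets
// canceled. Returns 0 on wakeup, or a negated errno such as -EAGAIN
// (value already changed), -ETIMEDOUT, -EINTR, or -ECANCELED. The
// `pshare` argument (PTHREAD_PROCESS_SHARED or PTHREAD_PROCESS_PRIVATE)
// selects the process-shared or process-private flavor of the wait.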
int nsync_futex_wait_ (atomic_int *w, int expect, char pshare, const struct timespec *abstime) {
        int e, rc, op;
        struct CosmoTib *tib;
        struct PosixThread *pt;
        struct timespec tsmem, *timeout;

        cosmo_once (&nsync_futex_.once, nsync_futex_init_);

        op = nsync_futex_.FUTEX_WAIT_;
        if (pshare == PTHREAD_PROCESS_PRIVATE) {
                op |= nsync_futex_.FUTEX_PRIVATE_FLAG_;
        }

        if (abstime && timespec_cmp (*abstime, timespec_zero) <= 0) {
                rc = -ETIMEDOUT;
                goto Finished;
        }

        if (atomic_load_explicit (w, memory_order_acquire) != expect) {
                rc = -EAGAIN;
                goto Finished;
        }

        timeout = nsync_futex_timeout_ (&tsmem, abstime);

        LOCKTRACE ("futex(%t [%d], %s, %#x, %s) → ...",
                   w, atomic_load_explicit (w, memory_order_relaxed),
                   DescribeFutexOp (op), expect,
                   DescribeTimespec (0, timeout));

        tib = __get_tls();
        pt = (struct PosixThread *)tib->tib_pthread;

        if (nsync_futex_.is_supported) {
                e = errno;
                if (IsWindows ()) {
                        // Windows 8 futexes don't support multiple processes :(
                        if (pshare) goto Polyfill;
                        rc = nsync_futex_wait_win32_ (w, expect, pshare, timeout, pt);
                } else if (IsXnu ()) {
                        uint32_t op, us;
                        if (pshare) {
                                op = UL_COMPARE_AND_WAIT_SHARED;
                        } else {
                                op = UL_COMPARE_AND_WAIT;
                        }
                        if (timeout) {
                                us = timespec_tomicros (*timeout);
                        } else {
                                us = -1u;
                        }
                        rc = ulock_wait (op, w, expect, us);
                        if (rc > 0) rc = 0; // TODO(jart): What does it mean?
                } else if (IsFreebsd ()) {
                        rc = sys_umtx_timedwait_uint (w, expect, pshare, timeout);
                } else {
                        if (IsOpenbsd()) {
                                // OpenBSD 6.8 futex() returns errors as
                                // positive numbers, without setting CF.
                                // This irregularity is fixed in 7.2 but
                                // unfortunately OpenBSD futex() defines
                                // its own ECANCELED condition, and that
                                // overlaps with our system call wrapper
                                if (pt) pt->pt_flags &= ~PT_OPENBSD_KLUDGE;
                        }
                        rc = sys_futex_cp (w, op, expect, timeout, 0, FUTEX_WAIT_BITS_);
                        if (IsOpenbsd()) {
                                // Handle the OpenBSD 6.x irregularity.
                                if (rc > 0) {
                                        errno = rc;
                                        rc = -1;
                                }
                                // Check if ECANCELED came from the kernel
                                // because a SA_RESTART signal handler was
                                // invoked, such as our SIGTHR callback.
                                if (rc == -1 && errno == ECANCELED &&
                                    pt && (~pt->pt_flags & PT_OPENBSD_KLUDGE)) {
                                        errno = EINTR;
                                }
                        }
                }
                if (rc == -1) {
                        rc = -errno;
                        errno = e;
                }
        } else {
        Polyfill:
                rc = nsync_futex_polyfill_ (w, expect, timeout);
        }

Finished:
        STRACE ("futex(%t [%d], %s, %#x, %s) → %s",
                w, atomic_load_explicit (w, memory_order_relaxed),
                DescribeFutexOp (op), expect,
                DescribeTimespec (0, abstime),
                DescribeErrno (rc));

        return rc;
}

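// Wakes threads blocked on *w; `count` must be 1 to wake a single
// waiter or INT_MAX to wake them all. Uses WakeByAddress on Windows,
// ulock on XNU, umtx on FreeBSD, and futex(2) elsewhere; without futex
// support it simply yields. Returns the number of woken waiters when
// the backend reports it, otherwise 0.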
int nsync_futex_wake_ (atomic_int *w, int count, char pshare) {
        int rc, op, fop;

        ASSERT (count == 1 || count == INT_MAX);

        cosmo_once (&nsync_futex_.once, nsync_futex_init_);

        op = FUTEX_WAKE;
        if (pshare == PTHREAD_PROCESS_PRIVATE) {
                op |= nsync_futex_.FUTEX_PRIVATE_FLAG_;
        }

        if (nsync_futex_.is_supported) {
                if (IsWindows ()) {
                        if (pshare) {
                                goto Polyfill;
                        }
                        if (count == 1) {
                                WakeByAddressSingle (w);
                        } else {
                                WakeByAddressAll (w);
                        }
                        rc = 0;
                } else if (IsXnu ()) {
                        uint32_t op;
                        if (pshare) {
                                op = UL_COMPARE_AND_WAIT_SHARED;
                        } else {
                                op = UL_COMPARE_AND_WAIT;
                        }
                        if (count > 1) {
                                op |= ULF_WAKE_ALL;
                        }
                        rc = ulock_wake (op, w, 0);
                        if (!rc) {
                                rc = 1;
                        } else if (rc == -ENOENT) {
                                rc = 0;
                        }
                } else if (IsFreebsd ()) {
                        if (pshare) {
                                fop = UMTX_OP_WAKE;
                        } else {
                                fop = UMTX_OP_WAKE_PRIVATE;
                        }
                        rc = _futex_wake (w, fop, count);
                } else {
                        rc = _futex_wake (w, op, count);
                }
        } else {
        Polyfill:
                pthread_yield ();
                rc = 0;
        }

        STRACE ("futex(%t [%d], %s, %d) → %d woken",
                w, atomic_load_explicit (w, memory_order_relaxed),
                DescribeFutexOp (op), count, rc);

        return rc;
}