Mirror of https://github.com/jart/cosmopolitan.git, synced 2025-06-30 08:18:30 +00:00
Make forking off threads reliable on Windows
This change makes posix_spawn_test no longer flaky on Windows by (1) fixing a race condition in wait() and (2) removing a misguided vfork implementation that let Windows bypass pthread_atfork().
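For context on point (2): pthread_atfork() lets a runtime register prepare/parent/child handlers that are run around fork(), typically so locks held across the fork are left in a sane state in both processes; a vfork-style shortcut skips those handlers entirely. The sketch below is a minimal, generic illustration of that contract, not code from this commit (the mutex and handler names are hypothetical):

    #include <pthread.h>
    #include <stdio.h>
    #include <sys/wait.h>
    #include <unistd.h>

    static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;  // hypothetical shared lock

    static void prepare(void) { pthread_mutex_lock(&g_lock); }   // parent, before fork()
    static void parent(void) { pthread_mutex_unlock(&g_lock); }  // parent, after fork()
    static void child(void) { pthread_mutex_unlock(&g_lock); }   // child, after fork()

    int main(void) {
      // fork() runs these handlers; a raw vfork-style clone would not.
      pthread_atfork(prepare, parent, child);
      pid_t pid = fork();
      if (!pid) {
        // Child: child() already released the lock, so it is safe to use here.
        _exit(0);
      }
      int ws;
      waitpid(pid, &ws, 0);
      printf("child exited with %d\n", WEXITSTATUS(ws));
    }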
parent 2ebc5781a1
commit 58352df0a4
30 changed files with 230 additions and 187 deletions
@@ -24,16 +24,19 @@
#include "libc/calls/struct/rusage.h"
#include "libc/calls/syscall_support-nt.internal.h"
#include "libc/fmt/conv.h"
#include "libc/intrin/kprintf.h"
#include "libc/intrin/strace.internal.h"
#include "libc/macros.internal.h"
#include "libc/nt/accounting.h"
#include "libc/nt/enum/accessmask.h"
#include "libc/nt/enum/processaccess.h"
#include "libc/nt/enum/status.h"
#include "libc/nt/enum/th32cs.h"
#include "libc/nt/enum/wait.h"
#include "libc/nt/process.h"
#include "libc/nt/runtime.h"
#include "libc/nt/struct/filetime.h"
#include "libc/nt/struct/processentry32.h"
#include "libc/nt/struct/processmemorycounters.h"
#include "libc/nt/synchronization.h"
#include "libc/runtime/runtime.h"
@@ -46,105 +49,101 @@
#ifdef __x86_64__

static textwindows int sys_wait4_nt_impl(int pid, int *opt_out_wstatus,
static textwindows void AddProcessStats(int64_t h, struct rusage *ru) {
struct NtProcessMemoryCountersEx memcount = {
.cb = sizeof(struct NtProcessMemoryCountersEx)};
if (GetProcessMemoryInfo(h, &memcount, sizeof(memcount))) {
ru->ru_maxrss = MAX(ru->ru_maxrss, memcount.PeakWorkingSetSize / 1024);
ru->ru_majflt += memcount.PageFaultCount;
} else {
STRACE("%s failed %u", "GetProcessMemoryInfo", GetLastError());
}
struct NtFileTime createfiletime, exitfiletime;
struct NtFileTime kernelfiletime, userfiletime;
if (GetProcessTimes(h, &createfiletime, &exitfiletime, &kernelfiletime,
&userfiletime)) {
ru->ru_utime = timeval_add(
ru->ru_utime, WindowsDurationToTimeVal(ReadFileTime(userfiletime)));
ru->ru_stime = timeval_add(
ru->ru_stime, WindowsDurationToTimeVal(ReadFileTime(kernelfiletime)));
} else {
STRACE("%s failed %u", "GetProcessTimes", GetLastError());
}
}

static textwindows int sys_wait4_nt_impl(int *pid, int *opt_out_wstatus,
int options,
struct rusage *opt_out_rusage) {
int64_t handle;
int rc, pids[64];
int64_t handles[64];
uint32_t dwExitCode;
bool shouldinterrupt;
uint32_t i, j, base, count, timeout;
struct NtProcessMemoryCountersEx memcount;
struct NtFileTime createfiletime, exitfiletime, kernelfiletime, userfiletime;
if (_check_interrupts(true, g_fds.p)) return -1;
__fds_lock();
if (pid != -1 && pid != 0) {
if (pid < 0) {
/* XXX: this is sloppy */
pid = -pid;
uint32_t i, j, count;
if (*pid != -1 && *pid != 0) {
if (*pid < 0) {
// XXX: this is sloppy
*pid = -*pid;
}
if (!__isfdkind(pid, kFdProcess)) {
/* XXX: this is sloppy (see fork-nt.c) */
if (!__isfdopen(pid) &&
if (!__isfdkind(*pid, kFdProcess)) {
// XXX: this is sloppy (see fork-nt.c)
if (!__isfdopen(*pid) &&
(handle = OpenProcess(kNtSynchronize | kNtProcessQueryInformation,
true, pid))) {
if ((pid = __reservefd_unlocked(-1)) != -1) {
g_fds.p[pid].kind = kFdProcess;
g_fds.p[pid].handle = handle;
g_fds.p[pid].flags = O_CLOEXEC;
true, *pid))) {
if ((*pid = __reservefd_unlocked(-1)) != -1) {
g_fds.p[*pid].kind = kFdProcess;
g_fds.p[*pid].handle = handle;
g_fds.p[*pid].flags = O_CLOEXEC;
} else {
__fds_unlock();
CloseHandle(handle);
return echild();
}
} else {
__fds_unlock();
return echild();
}
}
handles[0] = g_fds.p[pid].handle;
pids[0] = pid;
handles[0] = g_fds.p[*pid].handle;
pids[0] = *pid;
count = 1;
} else {
count = __sample_pids(pids, handles, false);
if (!count) {
__fds_unlock();
return echild();
}
}
__fds_unlock();
for (;;) {
if (_check_interrupts(true, 0)) return -1;
dwExitCode = kNtStillActive;
if (options & WNOHANG) {
i = WaitForMultipleObjects(count, handles, false, 0);
if (i == kNtWaitTimeout) {
return 0;
}
} else {
i = WaitForMultipleObjects(count, handles, false,
__SIG_POLLING_INTERVAL_MS);
if (i == kNtWaitTimeout) {
continue;
}
dwExitCode = kNtStillActive;
if (options & WNOHANG) {
i = WaitForMultipleObjects(count, handles, false, 0);
if (i == kNtWaitTimeout) {
return 0;
}
if (i == kNtWaitFailed) {
STRACE("%s failed %u", "WaitForMultipleObjects", GetLastError());
return __winerr();
} else {
i = WaitForMultipleObjects(count, handles, false,
__SIG_POLLING_INTERVAL_MS);
if (i == kNtWaitTimeout) {
return -2;
}
if (!GetExitCodeProcess(handles[i], &dwExitCode)) {
STRACE("%s failed %u", "GetExitCodeProcess", GetLastError());
return __winerr();
}
if (dwExitCode == kNtStillActive) continue;
if (opt_out_wstatus) { /* @see WEXITSTATUS() */
*opt_out_wstatus = (dwExitCode & 0xff) << 8;
}
if (opt_out_rusage) {
bzero(opt_out_rusage, sizeof(*opt_out_rusage));
bzero(&memcount, sizeof(memcount));
memcount.cb = sizeof(struct NtProcessMemoryCountersEx);
if (GetProcessMemoryInfo(handles[i], &memcount, sizeof(memcount))) {
opt_out_rusage->ru_maxrss = memcount.PeakWorkingSetSize / 1024;
opt_out_rusage->ru_majflt = memcount.PageFaultCount;
} else {
STRACE("%s failed %u", "GetProcessMemoryInfo", GetLastError());
}
if (GetProcessTimes(handles[i], &createfiletime, &exitfiletime,
&kernelfiletime, &userfiletime)) {
opt_out_rusage->ru_utime =
WindowsDurationToTimeVal(ReadFileTime(userfiletime));
opt_out_rusage->ru_stime =
WindowsDurationToTimeVal(ReadFileTime(kernelfiletime));
} else {
STRACE("%s failed %u", "GetProcessTimes", GetLastError());
}
}
CloseHandle(handles[i]);
__releasefd(pids[i]);
return pids[i];
}
if (i == kNtWaitFailed) {
STRACE("%s failed %u", "WaitForMultipleObjects", GetLastError());
return __winerr();
}
if (!GetExitCodeProcess(handles[i], &dwExitCode)) {
STRACE("%s failed %u", "GetExitCodeProcess", GetLastError());
return __winerr();
}
if (dwExitCode == kNtStillActive) {
return -2;
}
if (opt_out_wstatus) { // @see WEXITSTATUS()
*opt_out_wstatus = (dwExitCode & 0xff) << 8;
}
if (opt_out_rusage) {
bzero(opt_out_rusage, sizeof(*opt_out_rusage));
AddProcessStats(handles[i], opt_out_rusage);
}
CloseHandle(handles[i]);
__releasefd(pids[i]);
return pids[i];
}

textwindows int sys_wait4_nt(int pid, int *opt_out_wstatus, int options,
@@ -153,7 +152,13 @@ textwindows int sys_wait4_nt(int pid, int *opt_out_wstatus, int options,
sigset_t oldmask, mask = {0};
sigaddset(&mask, SIGCHLD);
__sig_mask(SIG_BLOCK, &mask, &oldmask);
rc = sys_wait4_nt_impl(pid, opt_out_wstatus, options, opt_out_rusage);
do {
rc = _check_interrupts(kSigOpRestartable | kSigOpNochld, 0);
if (rc == -1) break;
__fds_lock();
rc = sys_wait4_nt_impl(&pid, opt_out_wstatus, options, opt_out_rusage);
__fds_unlock();
} while (rc == -2);
__sig_mask(SIG_SETMASK, &oldmask, 0);
return rc;
}
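This final hunk carries the wait() race fix: SIGCHLD is blocked, pending interrupts are checked, and sys_wait4_nt_impl() is retried while it reports -2 (child still running), so a wakeup can no longer slip in between the poll and the wait. The pattern is analogous to the portable sketch below, which blocks SIGCHLD around a waitpid() retry loop (the helper name is hypothetical, not part of this commit):

    #include <errno.h>
    #include <signal.h>
    #include <sys/wait.h>

    // Hypothetical helper: wait for a child with SIGCHLD blocked so the
    // "has a child exited yet?" check and the wait itself cannot race a handler.
    static pid_t wait_child(pid_t pid, int *wstatus) {
      sigset_t mask, oldmask;
      sigemptyset(&mask);
      sigaddset(&mask, SIGCHLD);
      sigprocmask(SIG_BLOCK, &mask, &oldmask);  // block SIGCHLD for the duration
      pid_t rc;
      do {
        rc = waitpid(pid, wstatus, 0);          // retry if a signal interrupts us
      } while (rc == -1 && errno == EINTR);
      sigprocmask(SIG_SETMASK, &oldmask, 0);    // restore the caller's signal mask
      return rc;
    }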