	Introduce cosmo_futex_wait and cosmo_futex_wake
Cosmopolitan Futexes are now exposed as a public API.
commit 9ddbfd921e
parent 729f7045e3

66 changed files with 886 additions and 917 deletions
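Before the file-by-file diff, here is a minimal sketch of how the newly public pair is meant to be used together: a one-shot latch where one thread parks until another publishes a flag. Only cosmo_futex_wait() and cosmo_futex_wake() come from this commit; the include names and helper functions around them are illustrative assumptions, not part of the change.

// hedged sketch: a one-shot latch built on the new public futex calls
#include <limits.h>      // INT_MAX
#include <pthread.h>     // PTHREAD_PROCESS_PRIVATE
#include <stdatomic.h>
#include <time.h>        // CLOCK_MONOTONIC
#include "libc/cosmo.h"  // cosmo_futex_wait(), cosmo_futex_wake()

static atomic_int g_ready;  // futex word: 0 = closed, 1 = open

// parks the calling thread until another thread opens the latch
void latch_wait(void) {
  while (!atomic_load_explicit(&g_ready, memory_order_acquire)) {
    // returns 0 on wakeup, -EAGAIN if g_ready was no longer 0 on entry,
    // or -EINTR if a signal handler ran; the loop re-checks in all cases
    cosmo_futex_wait(&g_ready, /*expect=*/0, PTHREAD_PROCESS_PRIVATE,
                     CLOCK_MONOTONIC, /*abstime=*/0);
  }
}

// publishes the flag, then wakes every thread parked on it
void latch_open(void) {
  atomic_store_explicit(&g_ready, 1, memory_order_release);
  cosmo_futex_wake(&g_ready, INT_MAX, PTHREAD_PROCESS_PRIVATE);
}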
				
			
		
							
								
								
									
libc/calls/sig.c (717 lines deleted)

@@ -1,717 +0,0 @@
| /*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
 |  | ||||||
| │ vi: set et ft=c ts=2 sts=2 sw=2 fenc=utf-8                               :vi │ |  | ||||||
| ╞══════════════════════════════════════════════════════════════════════════════╡ |  | ||||||
| │ Copyright 2022 Justine Alexandra Roberts Tunney                              │ |  | ||||||
| │                                                                              │ |  | ||||||
| │ Permission to use, copy, modify, and/or distribute this software for         │ |  | ||||||
| │ any purpose with or without fee is hereby granted, provided that the         │ |  | ||||||
| │ above copyright notice and this permission notice appear in all copies.      │ |  | ||||||
| │                                                                              │ |  | ||||||
| │ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL                │ |  | ||||||
| │ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED                │ |  | ||||||
| │ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE             │ |  | ||||||
| │ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL         │ |  | ||||||
| │ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR        │ |  | ||||||
| │ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER               │ |  | ||||||
| │ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR             │ |  | ||||||
| │ PERFORMANCE OF THIS SOFTWARE.                                                │ |  | ||||||
| ╚─────────────────────────────────────────────────────────────────────────────*/ |  | ||||||
| #include "libc/sysv/consts/sig.h" |  | ||||||
| #include "ape/sections.internal.h" |  | ||||||
| #include "libc/calls/calls.h" |  | ||||||
| #include "libc/calls/sig.internal.h" |  | ||||||
| #include "libc/calls/state.internal.h" |  | ||||||
| #include "libc/calls/struct/sigaction.h" |  | ||||||
| #include "libc/calls/struct/siginfo.h" |  | ||||||
| #include "libc/calls/struct/sigset.internal.h" |  | ||||||
| #include "libc/calls/struct/ucontext.internal.h" |  | ||||||
| #include "libc/calls/syscall_support-nt.internal.h" |  | ||||||
| #include "libc/calls/ucontext.h" |  | ||||||
| #include "libc/dce.h" |  | ||||||
| #include "libc/errno.h" |  | ||||||
| #include "libc/intrin/atomic.h" |  | ||||||
| #include "libc/intrin/bsf.h" |  | ||||||
| #include "libc/intrin/describebacktrace.h" |  | ||||||
| #include "libc/intrin/dll.h" |  | ||||||
| #include "libc/intrin/maps.h" |  | ||||||
| #include "libc/intrin/strace.h" |  | ||||||
| #include "libc/intrin/weaken.h" |  | ||||||
| #include "libc/nt/console.h" |  | ||||||
| #include "libc/nt/enum/context.h" |  | ||||||
| #include "libc/nt/enum/exceptionhandleractions.h" |  | ||||||
| #include "libc/nt/enum/processcreationflags.h" |  | ||||||
| #include "libc/nt/enum/signal.h" |  | ||||||
| #include "libc/nt/enum/status.h" |  | ||||||
| #include "libc/nt/events.h" |  | ||||||
| #include "libc/nt/runtime.h" |  | ||||||
| #include "libc/nt/signals.h" |  | ||||||
| #include "libc/nt/struct/ntexceptionpointers.h" |  | ||||||
| #include "libc/nt/synchronization.h" |  | ||||||
| #include "libc/nt/thread.h" |  | ||||||
| #include "libc/runtime/internal.h" |  | ||||||
| #include "libc/runtime/symbols.internal.h" |  | ||||||
| #include "libc/str/str.h" |  | ||||||
| #include "libc/sysv/consts/sa.h" |  | ||||||
| #include "libc/sysv/consts/sicode.h" |  | ||||||
| #include "libc/sysv/consts/ss.h" |  | ||||||
| #include "libc/thread/posixthread.internal.h" |  | ||||||
| #ifdef __x86_64__ |  | ||||||
| 
 |  | ||||||
| /**
 |  | ||||||
|  * @fileoverview Cosmopolitan Signals for Windows. |  | ||||||
|  */ |  | ||||||
| 
 |  | ||||||
| #define STKSZ 65536 |  | ||||||
| 
 |  | ||||||
| struct SignalFrame { |  | ||||||
|   unsigned rva; |  | ||||||
|   unsigned flags; |  | ||||||
|   siginfo_t si; |  | ||||||
|   ucontext_t ctx; |  | ||||||
| }; |  | ||||||
| 
 |  | ||||||
| static textwindows bool __sig_ignored_by_default(int sig) { |  | ||||||
|   return sig == SIGURG ||   //
 |  | ||||||
|          sig == SIGCONT ||  //
 |  | ||||||
|          sig == SIGCHLD ||  //
 |  | ||||||
|          sig == SIGWINCH; |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| textwindows bool __sig_ignored(int sig) { |  | ||||||
|   return __sighandrvas[sig] == (intptr_t)SIG_IGN || |  | ||||||
|          (__sighandrvas[sig] == (intptr_t)SIG_DFL && |  | ||||||
|           __sig_ignored_by_default(sig)); |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| textwindows void __sig_delete(int sig) { |  | ||||||
|   struct Dll *e; |  | ||||||
|   atomic_fetch_and_explicit(__sig.process, ~(1ull << (sig - 1)), |  | ||||||
|                             memory_order_relaxed); |  | ||||||
|   _pthread_lock(); |  | ||||||
|   for (e = dll_last(_pthread_list); e; e = dll_prev(_pthread_list, e)) |  | ||||||
|     atomic_fetch_and_explicit(&POSIXTHREAD_CONTAINER(e)->tib->tib_sigpending, |  | ||||||
|                               ~(1ull << (sig - 1)), memory_order_relaxed); |  | ||||||
|   _pthread_unlock(); |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| static textwindows int __sig_getter(atomic_ulong *sigs, sigset_t masked) { |  | ||||||
|   int sig; |  | ||||||
|   sigset_t bit, pending, deliverable; |  | ||||||
|   for (;;) { |  | ||||||
|     pending = atomic_load_explicit(sigs, memory_order_acquire); |  | ||||||
|     if ((deliverable = pending & ~masked)) { |  | ||||||
|       sig = bsfl(deliverable) + 1; |  | ||||||
|       bit = 1ull << (sig - 1); |  | ||||||
|       if (atomic_fetch_and_explicit(sigs, ~bit, memory_order_acq_rel) & bit) |  | ||||||
|         return sig; |  | ||||||
|     } else { |  | ||||||
|       return 0; |  | ||||||
|     } |  | ||||||
|   } |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| textwindows int __sig_get(sigset_t masked) { |  | ||||||
|   int sig; |  | ||||||
|   if (!(sig = __sig_getter(&__get_tls()->tib_sigpending, masked))) |  | ||||||
|     sig = __sig_getter(__sig.process, masked); |  | ||||||
|   return sig; |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| static textwindows bool __sig_should_use_altstack(unsigned flags, |  | ||||||
|                                                   struct CosmoTib *tib) { |  | ||||||
|   if (!(flags & SA_ONSTACK)) |  | ||||||
|     return false;  // signal handler didn't enable it
 |  | ||||||
|   if (!tib->tib_sigstack_size) |  | ||||||
|     return false;  // sigaltstack() wasn't installed on this thread
 |  | ||||||
|   if (tib->tib_sigstack_flags & SS_DISABLE) |  | ||||||
|     return false;  // sigaltstack() on this thread was disabled by user
 |  | ||||||
|   char *bp = __builtin_frame_address(0); |  | ||||||
|   if (tib->tib_sigstack_addr <= bp && |  | ||||||
|       bp <= tib->tib_sigstack_addr + tib->tib_sigstack_size) |  | ||||||
|     return false;  // we're already on the alternate stack
 |  | ||||||
|   return true; |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| static textwindows wontreturn void __sig_terminate(int sig) { |  | ||||||
|   TerminateThisProcess(sig); |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| textwindows static bool __sig_wake(struct PosixThread *pt, int sig) { |  | ||||||
|   atomic_int *blocker; |  | ||||||
|   blocker = atomic_load_explicit(&pt->pt_blocker, memory_order_acquire); |  | ||||||
|   if (!blocker) |  | ||||||
|     return false; |  | ||||||
|   // threads can create semaphores on an as-needed basis
 |  | ||||||
|   if (blocker == PT_BLOCKER_EVENT) { |  | ||||||
|     STRACE("%G set %d's event object", sig, _pthread_tid(pt)); |  | ||||||
|     SetEvent(pt->pt_event); |  | ||||||
|     return !!atomic_load_explicit(&pt->pt_blocker, memory_order_acquire); |  | ||||||
|   } |  | ||||||
|   // all other blocking ops that aren't overlap should use futexes
 |  | ||||||
|   // we force restartable futexes to churn by waking w/o releasing
 |  | ||||||
|   STRACE("%G waking %d's futex", sig, _pthread_tid(pt)); |  | ||||||
|   WakeByAddressSingle(blocker); |  | ||||||
|   return !!atomic_load_explicit(&pt->pt_blocker, memory_order_acquire); |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| textwindows static bool __sig_start(struct PosixThread *pt, int sig, |  | ||||||
|                                     unsigned *rva, unsigned *flags) { |  | ||||||
|   *rva = __sighandrvas[sig]; |  | ||||||
|   *flags = __sighandflags[sig]; |  | ||||||
|   if (*rva == (intptr_t)SIG_IGN || |  | ||||||
|       (*rva == (intptr_t)SIG_DFL && __sig_ignored_by_default(sig))) { |  | ||||||
|     STRACE("ignoring %G", sig); |  | ||||||
|     return false; |  | ||||||
|   } |  | ||||||
|   if (atomic_load_explicit(&pt->tib->tib_sigmask, memory_order_acquire) & |  | ||||||
|       (1ull << (sig - 1))) { |  | ||||||
|     STRACE("enqueing %G on %d", sig, _pthread_tid(pt)); |  | ||||||
|     atomic_fetch_or_explicit(&pt->tib->tib_sigpending, 1ull << (sig - 1), |  | ||||||
|                              memory_order_relaxed); |  | ||||||
|     __sig_wake(pt, sig); |  | ||||||
|     return false; |  | ||||||
|   } |  | ||||||
|   if (*rva == (intptr_t)SIG_DFL) { |  | ||||||
|     STRACE("terminating on %G due to no handler", sig); |  | ||||||
|     __sig_terminate(sig); |  | ||||||
|   } |  | ||||||
|   return true; |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| textwindows static sigaction_f __sig_handler(unsigned rva) { |  | ||||||
|   atomic_fetch_add_explicit(&__sig.count, 1, memory_order_relaxed); |  | ||||||
|   return (sigaction_f)(__executable_start + rva); |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| textwindows int __sig_raise(volatile int sig, int sic) { |  | ||||||
| 
 |  | ||||||
|   // bitset of kinds of handlers called
 |  | ||||||
|   volatile int handler_was_called = 0; |  | ||||||
| 
 |  | ||||||
|   // loop over pending signals
 |  | ||||||
|   ucontext_t ctx; |  | ||||||
|   getcontext(&ctx); |  | ||||||
|   if (!sig) { |  | ||||||
|     if ((sig = __sig_get(ctx.uc_sigmask))) { |  | ||||||
|       sic = SI_KERNEL; |  | ||||||
|     } else { |  | ||||||
|       return handler_was_called; |  | ||||||
|     } |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   // process signal(s)
 |  | ||||||
|   unsigned rva, flags; |  | ||||||
|   struct PosixThread *pt = _pthread_self(); |  | ||||||
|   if (__sig_start(pt, sig, &rva, &flags)) { |  | ||||||
| 
 |  | ||||||
|     if (flags & SA_RESETHAND) { |  | ||||||
|       STRACE("resetting %G handler", sig); |  | ||||||
|       __sighandrvas[sig] = (int32_t)(intptr_t)SIG_DFL; |  | ||||||
|     } |  | ||||||
| 
 |  | ||||||
|     // update the signal mask in preparation for signal handler
 |  | ||||||
|     sigset_t blocksigs = __sighandmask[sig]; |  | ||||||
|     if (!(flags & SA_NODEFER)) |  | ||||||
|       blocksigs |= 1ull << (sig - 1); |  | ||||||
|     ctx.uc_sigmask = atomic_fetch_or_explicit(&pt->tib->tib_sigmask, blocksigs, |  | ||||||
|                                               memory_order_acquire); |  | ||||||
| 
 |  | ||||||
|     // call the user's signal handler
 |  | ||||||
|     char ssbuf[128]; |  | ||||||
|     siginfo_t si = {.si_signo = sig, .si_code = sic}; |  | ||||||
|     STRACE("__sig_raise(%G, %t) mask %s", sig, __sig_handler(rva), |  | ||||||
|            _DescribeSigset(ssbuf, 0, (sigset_t *)&pt->tib->tib_sigmask)); |  | ||||||
|     __sig_handler(rva)(sig, &si, &ctx); |  | ||||||
| 
 |  | ||||||
|     // record this handler
 |  | ||||||
|     if (flags & SA_RESTART) { |  | ||||||
|       handler_was_called |= SIG_HANDLED_SA_RESTART; |  | ||||||
|     } else { |  | ||||||
|       handler_was_called |= SIG_HANDLED_NO_RESTART; |  | ||||||
|     } |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   // restore sigmask
 |  | ||||||
|   // loop back to top
 |  | ||||||
|   // jump where handler says
 |  | ||||||
|   sig = 0; |  | ||||||
|   return setcontext(&ctx); |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| textwindows int __sig_relay(int sig, int sic, sigset_t waitmask) { |  | ||||||
|   sigset_t m; |  | ||||||
|   int handler_was_called; |  | ||||||
|   m = atomic_exchange_explicit(&__get_tls()->tib_sigmask, waitmask, |  | ||||||
|                                memory_order_acquire); |  | ||||||
|   handler_was_called = __sig_raise(sig, SI_KERNEL); |  | ||||||
|   atomic_store_explicit(&__get_tls()->tib_sigmask, m, memory_order_release); |  | ||||||
|   return handler_was_called; |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // the user's signal handler callback is wrapped with this trampoline
 |  | ||||||
| static textwindows wontreturn void __sig_tramp(struct SignalFrame *sf) { |  | ||||||
|   int sig = sf->si.si_signo; |  | ||||||
|   struct CosmoTib *tib = __get_tls(); |  | ||||||
|   struct PosixThread *pt = (struct PosixThread *)tib->tib_pthread; |  | ||||||
|   atomic_store_explicit(&pt->pt_intoff, 0, memory_order_release); |  | ||||||
|   for (;;) { |  | ||||||
| 
 |  | ||||||
|     // update the signal mask in preparation for signal handler
 |  | ||||||
|     sigset_t blocksigs = __sighandmask[sig]; |  | ||||||
|     if (!(sf->flags & SA_NODEFER)) |  | ||||||
|       blocksigs |= 1ull << (sig - 1); |  | ||||||
|     sf->ctx.uc_sigmask = atomic_fetch_or_explicit(&tib->tib_sigmask, blocksigs, |  | ||||||
|                                                   memory_order_acquire); |  | ||||||
| 
 |  | ||||||
|     // call the user's signal handler
 |  | ||||||
|     char ssbuf[2][128]; |  | ||||||
|     STRACE("__sig_tramp(%G, %t) mask %s → %s", sig, __sig_handler(sf->rva), |  | ||||||
|            _DescribeSigset(ssbuf[0], 0, &sf->ctx.uc_sigmask), |  | ||||||
|            _DescribeSigset(ssbuf[1], 0, (sigset_t *)&tib->tib_sigmask)); |  | ||||||
|     __sig_handler(sf->rva)(sig, &sf->si, &sf->ctx); |  | ||||||
| 
 |  | ||||||
|     // restore the signal mask that was used by the interrupted code
 |  | ||||||
|     // this may have been modified by the signal handler in the callback
 |  | ||||||
|     atomic_store_explicit(&tib->tib_sigmask, sf->ctx.uc_sigmask, |  | ||||||
|                           memory_order_release); |  | ||||||
| 
 |  | ||||||
|     // jump back into original code if there aren't any pending signals
 |  | ||||||
|     do { |  | ||||||
|       if (!(sig = __sig_get(sf->ctx.uc_sigmask))) |  | ||||||
|         __sig_restore(&sf->ctx); |  | ||||||
|     } while (!__sig_start(pt, sig, &sf->rva, &sf->flags)); |  | ||||||
| 
 |  | ||||||
|     // tail recurse into another signal handler
 |  | ||||||
|     sf->si.si_signo = sig; |  | ||||||
|     sf->si.si_code = SI_KERNEL; |  | ||||||
|     if (sf->flags & SA_RESETHAND) { |  | ||||||
|       STRACE("resetting %G handler", sig); |  | ||||||
|       __sighandrvas[sig] = (int32_t)(intptr_t)SIG_DFL; |  | ||||||
|     } |  | ||||||
|   } |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // sends signal to another specific thread which is ref'd
 |  | ||||||
| static textwindows int __sig_killer(struct PosixThread *pt, int sig, int sic) { |  | ||||||
|   unsigned rva = __sighandrvas[sig]; |  | ||||||
|   unsigned flags = __sighandflags[sig]; |  | ||||||
| 
 |  | ||||||
|   // do nothing if signal is ignored
 |  | ||||||
|   if (rva == (intptr_t)SIG_IGN || |  | ||||||
|       (rva == (intptr_t)SIG_DFL && __sig_ignored_by_default(sig))) { |  | ||||||
|     STRACE("ignoring %G", sig); |  | ||||||
|     return 0; |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   // we can't preempt threads that masked sigs or are blocked on i/o
 |  | ||||||
|   while ((atomic_load_explicit(&pt->tib->tib_sigmask, memory_order_acquire) & |  | ||||||
|           (1ull << (sig - 1)))) { |  | ||||||
|     if (atomic_fetch_or_explicit(&pt->tib->tib_sigpending, 1ull << (sig - 1), |  | ||||||
|                                  memory_order_acq_rel) & |  | ||||||
|         (1ull << (sig - 1))) |  | ||||||
|       // we believe signal was already enqueued
 |  | ||||||
|       return 0; |  | ||||||
|     if (__sig_wake(pt, sig)) |  | ||||||
|       // we believe i/o routine will handle signal
 |  | ||||||
|       return 0; |  | ||||||
|     if (atomic_load_explicit(&pt->tib->tib_sigmask, memory_order_acquire) & |  | ||||||
|         (1ull << (sig - 1))) |  | ||||||
|       // we believe ALLOW_SIGNALS will handle signal
 |  | ||||||
|       return 0; |  | ||||||
|     if (!(atomic_fetch_and_explicit(&pt->tib->tib_sigpending, |  | ||||||
|                                     ~(1ull << (sig - 1)), |  | ||||||
|                                     memory_order_acq_rel) & |  | ||||||
|           (1ull << (sig - 1)))) |  | ||||||
|       // we believe another thread sniped our signal
 |  | ||||||
|       return 0; |  | ||||||
|     break; |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   // avoid race conditions and deadlocks with thread suspend process
 |  | ||||||
|   if (atomic_exchange_explicit(&pt->pt_intoff, 1, memory_order_acquire)) { |  | ||||||
|     // we believe another thread is asynchronously waking the mark
 |  | ||||||
|     if (atomic_fetch_or_explicit(&pt->tib->tib_sigpending, 1ull << (sig - 1), |  | ||||||
|                                  memory_order_acq_rel) & |  | ||||||
|         (1ull << (sig - 1))) |  | ||||||
|       // we believe our signal is already being delivered
 |  | ||||||
|       return 0; |  | ||||||
|     if (atomic_load_explicit(&pt->pt_intoff, memory_order_acquire) || |  | ||||||
|         atomic_exchange_explicit(&pt->pt_intoff, 1, memory_order_acquire)) |  | ||||||
|       // we believe __sig_tramp will deliver our signal
 |  | ||||||
|       return 0; |  | ||||||
|     if (!(atomic_fetch_and_explicit(&pt->tib->tib_sigpending, |  | ||||||
|                                     ~(1ull << (sig - 1)), |  | ||||||
|                                     memory_order_acq_rel) & |  | ||||||
|           (1ull << (sig - 1)))) |  | ||||||
|       // we believe another thread sniped our signal
 |  | ||||||
|       return 0; |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   // if there's no handler then killing a thread kills the process
 |  | ||||||
|   if (rva == (intptr_t)SIG_DFL) { |  | ||||||
|     STRACE("terminating on %G due to no handler", sig); |  | ||||||
|     __sig_terminate(sig); |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   // take control of thread
 |  | ||||||
|   // suspending the thread happens asynchronously
 |  | ||||||
|   // however getting the context blocks until it's frozen
 |  | ||||||
|   uintptr_t th = _pthread_syshand(pt); |  | ||||||
|   if (SuspendThread(th) == -1u) { |  | ||||||
|     STRACE("SuspendThread failed w/ %d", GetLastError()); |  | ||||||
|     atomic_store_explicit(&pt->pt_intoff, 0, memory_order_release); |  | ||||||
|     return ESRCH; |  | ||||||
|   } |  | ||||||
|   struct NtContext nc; |  | ||||||
|   nc.ContextFlags = kNtContextFull; |  | ||||||
|   if (!GetThreadContext(th, &nc)) { |  | ||||||
|     STRACE("GetThreadContext failed w/ %d", GetLastError()); |  | ||||||
|     ResumeThread(th); |  | ||||||
|     atomic_store_explicit(&pt->pt_intoff, 0, memory_order_release); |  | ||||||
|     return ESRCH; |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   // we can't preempt threads that masked sig or are blocked
 |  | ||||||
|   // we can't preempt threads that are running in win32 code
 |  | ||||||
|   // so we shall unblock the thread and let it signal itself
 |  | ||||||
|   if (!((uintptr_t)__executable_start <= nc.Rip && |  | ||||||
|         nc.Rip < (uintptr_t)__privileged_start)) { |  | ||||||
|     atomic_fetch_or_explicit(&pt->tib->tib_sigpending, 1ull << (sig - 1), |  | ||||||
|                              memory_order_relaxed); |  | ||||||
|     ResumeThread(th); |  | ||||||
|     atomic_store_explicit(&pt->pt_intoff, 0, memory_order_release); |  | ||||||
|     __sig_wake(pt, sig); |  | ||||||
|     return 0; |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   // preferring to live dangerously
 |  | ||||||
|   // the thread will be signaled asynchronously
 |  | ||||||
|   if (flags & SA_RESETHAND) { |  | ||||||
|     STRACE("resetting %G handler", sig); |  | ||||||
|     __sighandrvas[sig] = (int32_t)(intptr_t)SIG_DFL; |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   // inject call to trampoline function into thread
 |  | ||||||
|   uintptr_t sp; |  | ||||||
|   if (__sig_should_use_altstack(flags, pt->tib)) { |  | ||||||
|     sp = (uintptr_t)pt->tib->tib_sigstack_addr + pt->tib->tib_sigstack_size; |  | ||||||
|   } else { |  | ||||||
|     sp = nc.Rsp; |  | ||||||
|   } |  | ||||||
|   sp -= sizeof(struct SignalFrame); |  | ||||||
|   sp &= -16; |  | ||||||
|   struct SignalFrame *sf = (struct SignalFrame *)sp; |  | ||||||
|   _ntcontext2linux(&sf->ctx, &nc); |  | ||||||
|   bzero(&sf->si, sizeof(sf->si)); |  | ||||||
|   sf->rva = rva; |  | ||||||
|   sf->flags = flags; |  | ||||||
|   sf->si.si_code = sic; |  | ||||||
|   sf->si.si_signo = sig; |  | ||||||
|   *(uintptr_t *)(sp -= sizeof(uintptr_t)) = nc.Rip; |  | ||||||
|   nc.Rip = (intptr_t)__sig_tramp; |  | ||||||
|   nc.Rdi = (intptr_t)sf; |  | ||||||
|   nc.Rsp = sp; |  | ||||||
|   if (!SetThreadContext(th, &nc)) { |  | ||||||
|     STRACE("SetThreadContext failed w/ %d", GetLastError()); |  | ||||||
|     atomic_store_explicit(&pt->pt_intoff, 0, memory_order_release); |  | ||||||
|     return ESRCH; |  | ||||||
|   } |  | ||||||
|   ResumeThread(th); |  | ||||||
|   __sig_wake(pt, sig); |  | ||||||
|   return 0; |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // sends signal to another specific thread
 |  | ||||||
| textwindows int __sig_kill(struct PosixThread *pt, int sig, int sic) { |  | ||||||
|   int rc; |  | ||||||
|   BLOCK_SIGNALS; |  | ||||||
|   rc = __sig_killer(pt, sig, sic); |  | ||||||
|   ALLOW_SIGNALS; |  | ||||||
|   return rc; |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // sends signal to any other thread
 |  | ||||||
| // this should only be called by non-posix threads
 |  | ||||||
| textwindows void __sig_generate(int sig, int sic) { |  | ||||||
|   struct Dll *e; |  | ||||||
|   struct PosixThread *pt, *mark = 0; |  | ||||||
|   if (__sig_ignored(sig)) { |  | ||||||
|     STRACE("ignoring %G", sig); |  | ||||||
|     return; |  | ||||||
|   } |  | ||||||
|   if (__sighandrvas[sig] == (intptr_t)SIG_DFL) { |  | ||||||
|     STRACE("terminating on %G due to no handler", sig); |  | ||||||
|     __sig_terminate(sig); |  | ||||||
|   } |  | ||||||
|   if (atomic_load_explicit(__sig.process, memory_order_acquire) & |  | ||||||
|       (1ull << (sig - 1))) { |  | ||||||
|     return; |  | ||||||
|   } |  | ||||||
|   _pthread_lock(); |  | ||||||
|   for (e = dll_first(_pthread_list); e; e = dll_next(_pthread_list, e)) { |  | ||||||
|     pt = POSIXTHREAD_CONTAINER(e); |  | ||||||
|     // we don't want to signal ourself
 |  | ||||||
|     if (pt == _pthread_self()) |  | ||||||
|       continue; |  | ||||||
|     // we don't want to signal a thread that isn't running
 |  | ||||||
|     if (atomic_load_explicit(&pt->pt_status, memory_order_acquire) >= |  | ||||||
|         kPosixThreadTerminated) { |  | ||||||
|       continue; |  | ||||||
|     } |  | ||||||
|     // choose this thread if it isn't masking sig
 |  | ||||||
|     if (!(atomic_load_explicit(&pt->tib->tib_sigmask, memory_order_acquire) & |  | ||||||
|           (1ull << (sig - 1)))) { |  | ||||||
|       _pthread_ref(pt); |  | ||||||
|       mark = pt; |  | ||||||
|       break; |  | ||||||
|     } |  | ||||||
|     // if a thread is blocking then we check to see if it's planning
 |  | ||||||
|     // to unblock our sig once the wait operation is completed; when
 |  | ||||||
|     // that's the case we can cancel the thread's i/o to deliver sig
 |  | ||||||
|     if (atomic_load_explicit(&pt->pt_blocker, memory_order_acquire) && |  | ||||||
|         !(pt->pt_blkmask & (1ull << (sig - 1)))) { |  | ||||||
|       _pthread_ref(pt); |  | ||||||
|       mark = pt; |  | ||||||
|       break; |  | ||||||
|     } |  | ||||||
|   } |  | ||||||
|   _pthread_unlock(); |  | ||||||
|   if (mark) { |  | ||||||
|     // no lock needed since current thread is nameless and formless
 |  | ||||||
|     __sig_killer(mark, sig, sic); |  | ||||||
|     _pthread_unref(mark); |  | ||||||
|   } else { |  | ||||||
|     atomic_fetch_or_explicit(__sig.process, 1ull << (sig - 1), |  | ||||||
|                              memory_order_relaxed); |  | ||||||
|   } |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| static textwindows char *__sig_stpcpy(char *d, const char *s) { |  | ||||||
|   size_t i; |  | ||||||
|   for (i = 0;; ++i) |  | ||||||
|     if (!(d[i] = s[i])) |  | ||||||
|       return d + i; |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| static textwindows wontreturn void __sig_death(int sig, const char *thing) { |  | ||||||
| #ifndef TINY |  | ||||||
|   intptr_t hStderr; |  | ||||||
|   char sigbuf[21], s[128], *p; |  | ||||||
|   hStderr = GetStdHandle(kNtStdErrorHandle); |  | ||||||
|   p = __sig_stpcpy(s, "Terminating on "); |  | ||||||
|   p = __sig_stpcpy(p, thing); |  | ||||||
|   p = __sig_stpcpy(p, strsignal_r(sig, sigbuf)); |  | ||||||
|   p = __sig_stpcpy(p, |  | ||||||
|                    ". Pass --strace and/or ShowCrashReports() for details.\n"); |  | ||||||
|   WriteFile(hStderr, s, p - s, 0, 0); |  | ||||||
| #endif |  | ||||||
|   __sig_terminate(sig); |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| static textwindows void __sig_unmaskable(struct NtExceptionPointers *ep, |  | ||||||
|                                          int code, int sig, |  | ||||||
|                                          struct CosmoTib *tib) { |  | ||||||
| 
 |  | ||||||
|   // log vital crash information reliably for --strace before doing much
 |  | ||||||
|   // we don't print this without the flag since raw numbers scare people
 |  | ||||||
|   // this needs at least one page of stack memory in order to get logged
 |  | ||||||
|   // otherwise it'll print a warning message about the lack of stack mem
 |  | ||||||
|   STRACE("win32 vectored exception 0x%08Xu raising %G " |  | ||||||
|          "cosmoaddr2line %s %lx %s", |  | ||||||
|          ep->ExceptionRecord->ExceptionCode, sig, |  | ||||||
|          _weaken(FindDebugBinary) ? _weaken(FindDebugBinary)() |  | ||||||
|                                   : program_invocation_name, |  | ||||||
|          ep->ContextRecord->Rip, |  | ||||||
|          DescribeBacktrace((struct StackFrame *)ep->ContextRecord->Rbp)); |  | ||||||
| 
 |  | ||||||
|   // if the user didn't install a signal handler for this unmaskable
 |  | ||||||
|   // exception, then print a friendly helpful hint message to stderr
 |  | ||||||
|   unsigned rva = __sighandrvas[sig]; |  | ||||||
|   if (rva == (intptr_t)SIG_DFL || rva == (intptr_t)SIG_IGN) |  | ||||||
|     __sig_death(sig, "uncaught "); |  | ||||||
| 
 |  | ||||||
|   // if this signal handler is configured to auto-reset to the default
 |  | ||||||
|   // then that reset needs to happen before the user handler is called
 |  | ||||||
|   unsigned flags = __sighandflags[sig]; |  | ||||||
|   if (flags & SA_RESETHAND) { |  | ||||||
|     STRACE("resetting %G handler", sig); |  | ||||||
|     __sighandrvas[sig] = (int32_t)(intptr_t)SIG_DFL; |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   // determine the true memory address at which fault occurred
 |  | ||||||
|   // if this is a stack overflow then reapply guard protection
 |  | ||||||
|   void *si_addr; |  | ||||||
|   if (ep->ExceptionRecord->ExceptionCode == kNtSignalGuardPage) { |  | ||||||
|     si_addr = (void *)ep->ExceptionRecord->ExceptionInformation[1]; |  | ||||||
|   } else { |  | ||||||
|     si_addr = ep->ExceptionRecord->ExceptionAddress; |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   // call the user signal handler
 |  | ||||||
|   // and a modifiable view of the faulting code's cpu state
 |  | ||||||
|   // temporarily replace signal mask while calling crash handler
 |  | ||||||
|   // abort process if sig is already blocked to avoid crash loop
 |  | ||||||
|   // note ucontext_t is a hefty data structure on top of NtContext
 |  | ||||||
|   ucontext_t ctx = {0}; |  | ||||||
|   siginfo_t si = {.si_signo = sig, .si_code = code, .si_addr = si_addr}; |  | ||||||
|   _ntcontext2linux(&ctx, ep->ContextRecord); |  | ||||||
|   sigset_t blocksigs = __sighandmask[sig]; |  | ||||||
|   if (!(flags & SA_NODEFER)) |  | ||||||
|     blocksigs |= 1ull << (sig - 1); |  | ||||||
|   ctx.uc_sigmask = atomic_fetch_or_explicit(&tib->tib_sigmask, blocksigs, |  | ||||||
|                                             memory_order_acquire); |  | ||||||
|   if (ctx.uc_sigmask & (1ull << (sig - 1))) { |  | ||||||
|     __sig_death(sig, "masked "); |  | ||||||
|     __sig_terminate(sig); |  | ||||||
|   } |  | ||||||
|   __sig_handler(rva)(sig, &si, &ctx); |  | ||||||
|   atomic_store_explicit(&tib->tib_sigmask, ctx.uc_sigmask, |  | ||||||
|                         memory_order_release); |  | ||||||
|   _ntlinux2context(ep->ContextRecord, &ctx); |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| void __stack_call(struct NtExceptionPointers *, int, int, struct CosmoTib *, |  | ||||||
|                   void (*)(struct NtExceptionPointers *, int, int, |  | ||||||
|                            struct CosmoTib *), |  | ||||||
|                   void *); |  | ||||||
| 
 |  | ||||||
| //                         abashed the devil stood
 |  | ||||||
| //                      and felt how awful goodness is
 |  | ||||||
| __msabi dontinstrument unsigned __sig_crash(struct NtExceptionPointers *ep) { |  | ||||||
| 
 |  | ||||||
|   // translate win32 to unix si_signo and si_code
 |  | ||||||
|   int code, sig = __sig_crash_sig(ep->ExceptionRecord->ExceptionCode, &code); |  | ||||||
| 
 |  | ||||||
|   // advance the instruction pointer to skip over debugger breakpoints
 |  | ||||||
|   // this behavior is consistent with how unix kernels are implemented
 |  | ||||||
|   if (sig == SIGTRAP) { |  | ||||||
|     ep->ContextRecord->Rip++; |  | ||||||
|     if (__sig_ignored(sig)) |  | ||||||
|       return kNtExceptionContinueExecution; |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   // win32 stack overflow detection executes INSIDE the guard page
 |  | ||||||
|   // thus switch to the alternate signal stack as soon as possible
 |  | ||||||
|   struct CosmoTib *tib = __get_tls(); |  | ||||||
|   unsigned flags = __sighandflags[sig]; |  | ||||||
|   if (__sig_should_use_altstack(flags, tib)) { |  | ||||||
|     __stack_call(ep, code, sig, tib, __sig_unmaskable, |  | ||||||
|                  tib->tib_sigstack_addr + tib->tib_sigstack_size); |  | ||||||
|   } else { |  | ||||||
|     __sig_unmaskable(ep, code, sig, tib); |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   // resume running user program
 |  | ||||||
|   // hopefully the user fixed the cpu state
 |  | ||||||
|   // otherwise the crash will keep happening
 |  | ||||||
|   return kNtExceptionContinueExecution; |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| static textwindows int __sig_console_sig(uint32_t dwCtrlType) { |  | ||||||
|   switch (dwCtrlType) { |  | ||||||
|     case kNtCtrlCEvent: |  | ||||||
|       return SIGINT; |  | ||||||
|     case kNtCtrlBreakEvent: |  | ||||||
|       return SIGQUIT; |  | ||||||
|     case kNtCtrlCloseEvent: |  | ||||||
|     case kNtCtrlLogoffEvent:    // only received by services
 |  | ||||||
|     case kNtCtrlShutdownEvent:  // only received by services
 |  | ||||||
|       return SIGHUP; |  | ||||||
|     default: |  | ||||||
|       return SIGSTKFLT; |  | ||||||
|   } |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| __msabi textwindows dontinstrument bool32 __sig_console(uint32_t dwCtrlType) { |  | ||||||
|   // win32 launches a thread to deliver ctrl-c and ctrl-break when typed
 |  | ||||||
|   // it only happens when kNtEnableProcessedInput is in play on console.
 |  | ||||||
|   // otherwise we need to wait until read-nt.c discovers that keystroke.
 |  | ||||||
|   struct CosmoTib tls; |  | ||||||
|   __bootstrap_tls(&tls, __builtin_frame_address(0)); |  | ||||||
|   __sig_generate(__sig_console_sig(dwCtrlType), SI_KERNEL); |  | ||||||
|   return true; |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // returns 0 if no signal handlers were called, otherwise a bitmask
 |  | ||||||
| // consisting of `1` which means a signal handler was invoked which
 |  | ||||||
| // didn't have the SA_RESTART flag, and `2`, which means SA_RESTART
 |  | ||||||
| // handlers were called (or `3` if both were the case).
 |  | ||||||
| textwindows int __sig_check(void) { |  | ||||||
|   int sig, res = 0; |  | ||||||
|   while ((sig = __sig_get(atomic_load_explicit(&__get_tls()->tib_sigmask, |  | ||||||
|                                                memory_order_acquire)))) |  | ||||||
|     res |= __sig_raise(sig, SI_KERNEL); |  | ||||||
|   return res; |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
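The comment above documents __sig_check()'s return bitmask. As a hedged illustration only, a blocking wait might consume it as follows; __sig_check() and the SIG_HANDLED_* constants are the ones defined in this file, while the loop, the callback, and the -EINTR convention are this editor's assumptions.

// hypothetical consumer of the bitmask documented above
static int wait_until_done(bool (*operation_completed)(void)) {
  for (;;) {
    if (operation_completed())
      return 0;                   // the event being waited on arrived
    int rc = __sig_check();       // run any deliverable signal handlers
    if (rc & SIG_HANDLED_NO_RESTART)
      return -EINTR;              // a handler without SA_RESTART ran
    // rc == 0, or only SA_RESTART handlers ran: keep waiting
  }
}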
| // background thread for delivering inter-process signals asynchronously
 |  | ||||||
| // this checks for undelivered process-wide signals, once per scheduling
 |  | ||||||
| // quantum, which on windows should be every ~15ms or so, unless somehow
 |  | ||||||
| // the process was tuned to have more fine-grained event timing. we want
 |  | ||||||
| // signals to happen faster when possible; that happens when cancelation
 |  | ||||||
| // points, e.g. read need to wait on i/o; they too check for new signals
 |  | ||||||
| textwindows dontinstrument static uint32_t __sig_worker(void *arg) { |  | ||||||
|   struct CosmoTib tls; |  | ||||||
|   __bootstrap_tls(&tls, __builtin_frame_address(0)); |  | ||||||
|   char *sp = __builtin_frame_address(0); |  | ||||||
|   __maps_track((char *)(((uintptr_t)sp + __pagesize - 1) & -__pagesize) - STKSZ, |  | ||||||
|                STKSZ); |  | ||||||
|   for (;;) { |  | ||||||
| 
 |  | ||||||
|     // dequeue all pending signals and fire them off. if there's no
 |  | ||||||
|     // thread that can handle them then __sig_generate will requeue
 |  | ||||||
|     // those signals back to __sig.process; hence the need for xchg
 |  | ||||||
|     unsigned long sigs = |  | ||||||
|         atomic_exchange_explicit(__sig.process, 0, memory_order_acq_rel); |  | ||||||
|     while (sigs) { |  | ||||||
|       int sig = bsfl(sigs) + 1; |  | ||||||
|       sigs &= ~(1ull << (sig - 1)); |  | ||||||
|       __sig_generate(sig, SI_KERNEL); |  | ||||||
|     } |  | ||||||
| 
 |  | ||||||
|     // unblock stalled asynchronous signals in threads
 |  | ||||||
|     _pthread_lock(); |  | ||||||
|     for (struct Dll *e = dll_first(_pthread_list); e; |  | ||||||
|          e = dll_next(_pthread_list, e)) { |  | ||||||
|       struct PosixThread *pt = POSIXTHREAD_CONTAINER(e); |  | ||||||
|       if (atomic_load_explicit(&pt->pt_status, memory_order_acquire) >= |  | ||||||
|           kPosixThreadTerminated) { |  | ||||||
|         break; |  | ||||||
|       } |  | ||||||
|       sigset_t pending = |  | ||||||
|           atomic_load_explicit(&pt->tib->tib_sigpending, memory_order_acquire); |  | ||||||
|       sigset_t mask = |  | ||||||
|           atomic_load_explicit(&pt->tib->tib_sigmask, memory_order_acquire); |  | ||||||
|       if (pending & ~mask) { |  | ||||||
|         _pthread_ref(pt); |  | ||||||
|         _pthread_unlock(); |  | ||||||
|         while (!atomic_compare_exchange_weak_explicit( |  | ||||||
|             &pt->tib->tib_sigpending, &pending, pending & ~mask, |  | ||||||
|             memory_order_acq_rel, memory_order_relaxed)) { |  | ||||||
|         } |  | ||||||
|         while ((pending = pending & ~mask)) { |  | ||||||
|           int sig = bsfl(pending) + 1; |  | ||||||
|           pending &= ~(1ull << (sig - 1)); |  | ||||||
|           __sig_killer(pt, sig, SI_KERNEL); |  | ||||||
|         } |  | ||||||
|         _pthread_lock(); |  | ||||||
|         _pthread_unref(pt); |  | ||||||
|       } |  | ||||||
|     } |  | ||||||
|     _pthread_unlock(); |  | ||||||
| 
 |  | ||||||
|     // wait until next scheduler quantum
 |  | ||||||
|     Sleep(POLL_INTERVAL_MS); |  | ||||||
|   } |  | ||||||
|   return 0; |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| __attribute__((__constructor__(10))) textstartup void __sig_init(void) { |  | ||||||
|   if (!IsWindows()) |  | ||||||
|     return; |  | ||||||
|   AddVectoredExceptionHandler(true, (void *)__sig_crash); |  | ||||||
|   SetConsoleCtrlHandler((void *)__sig_console, true); |  | ||||||
|   CreateThread(0, STKSZ, __sig_worker, 0, kNtStackSizeParamIsAReservation, 0); |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| #endif /* __x86_64__ */ |  | ||||||
libc/cosmo.h

@@ -1,5 +1,6 @@
 #ifndef COSMOPOLITAN_LIBC_COSMO_H_
 #define COSMOPOLITAN_LIBC_COSMO_H_
+#include "libc/calls/struct/timespec.h"
 COSMOPOLITAN_C_START_
 
 #ifndef __cplusplus
@@ -17,6 +18,9 @@ int __is_mangled(const char *) libcesque;
 bool32 IsLinuxModern(void) libcesque;
 int LoadZipArgs(int *, char ***) libcesque;
 int cosmo_args(const char *, char ***) libcesque;
+int cosmo_futex_wake(_COSMO_ATOMIC(int) *, int, char);
+int cosmo_futex_wait(_COSMO_ATOMIC(int) *, int, char, int,
+                     const struct timespec *);
 
 COSMOPOLITAN_C_END_
 #endif /* COSMOPOLITAN_LIBC_COSMO_H_ */
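The abstime parameter declared above is an absolute timestamp on the chosen clock, not a duration. Here is a hedged sketch of turning a relative timeout into such a deadline; the helper name and the includes are assumptions.

// hedged sketch: wait for *w to stop being `expect`, for at most `ms`
// milliseconds, using an absolute CLOCK_REALTIME deadline
#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>
#include <time.h>
#include "libc/cosmo.h"

int wait_for_change(atomic_int *w, int expect, int ms) {
  struct timespec deadline;
  clock_gettime(CLOCK_REALTIME, &deadline);
  deadline.tv_sec += ms / 1000;
  deadline.tv_nsec += (long)(ms % 1000) * 1000000;
  if (deadline.tv_nsec >= 1000000000) {
    deadline.tv_sec += 1;
    deadline.tv_nsec -= 1000000000;
  }
  for (;;) {
    if (atomic_load_explicit(w, memory_order_acquire) != expect)
      return 0;  // the value already changed
    int rc = cosmo_futex_wait(w, expect, PTHREAD_PROCESS_PRIVATE,
                              CLOCK_REALTIME, &deadline);
    if (rc == -ETIMEDOUT)
      return rc;  // deadline passed without a change
    // 0, -EAGAIN, or -EINTR: re-check the value and keep waiting
  }
}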
|  | @ -30,9 +30,11 @@ LIBC_INTRIN_A_CHECKS =					\ | ||||||
| LIBC_INTRIN_A_DIRECTDEPS =				\
 | LIBC_INTRIN_A_DIRECTDEPS =				\
 | ||||||
| 	LIBC_NEXGEN32E					\
 | 	LIBC_NEXGEN32E					\
 | ||||||
| 	LIBC_NT_KERNEL32				\
 | 	LIBC_NT_KERNEL32				\
 | ||||||
|  | 	LIBC_NT_REALTIME				\
 | ||||||
|  | 	LIBC_NT_SYNCHRONIZATION				\
 | ||||||
| 	LIBC_NT_WS2_32					\
 | 	LIBC_NT_WS2_32					\
 | ||||||
| 	LIBC_SYSV					\
 | 	LIBC_SYSV					\
 | ||||||
| 	LIBC_SYSV_CALLS | 	LIBC_SYSV_CALLS					\
 | ||||||
| 
 | 
 | ||||||
| LIBC_INTRIN_A_DEPS :=					\
 | LIBC_INTRIN_A_DEPS :=					\
 | ||||||
| 	$(call uniq,$(foreach x,$(LIBC_INTRIN_A_DIRECTDEPS),$($(x)))) | 	$(call uniq,$(foreach x,$(LIBC_INTRIN_A_DIRECTDEPS),$($(x)))) | ||||||
|  | @ -106,6 +108,16 @@ o//libc/intrin/demangle.o: private			\ | ||||||
| 		CFLAGS +=				\
 | 		CFLAGS +=				\
 | ||||||
| 			-mgeneral-regs-only | 			-mgeneral-regs-only | ||||||
| 
 | 
 | ||||||
|  | # ensure that division is optimized
 | ||||||
|  | o/$(MODE)/libc/intrin/windowsdurationtotimeval.o	\ | ||||||
|  | o/$(MODE)/libc/intrin/windowsdurationtotimespec.o	\ | ||||||
|  | o/$(MODE)/libc/intrin/timevaltowindowstime.o		\ | ||||||
|  | o/$(MODE)/libc/intrin/timespectowindowstime.o		\ | ||||||
|  | o/$(MODE)/libc/intrin/windowstimetotimeval.o		\ | ||||||
|  | o/$(MODE)/libc/intrin/windowstimetotimespec.o: private	\ | ||||||
|  | 		CFLAGS +=				\
 | ||||||
|  | 			-O2 | ||||||
|  | 
 | ||||||
| # these assembly files are safe to build on aarch64
 | # these assembly files are safe to build on aarch64
 | ||||||
| o/$(MODE)/libc/intrin/aarch64/%.o: libc/intrin/aarch64/%.S | o/$(MODE)/libc/intrin/aarch64/%.o: libc/intrin/aarch64/%.S | ||||||
| 	@$(COMPILE) -AOBJECTIFY.S $(OBJECTIFY.S) $(OUTPUT_OPTION) -c $< | 	@$(COMPILE) -AOBJECTIFY.S $(OBJECTIFY.S) $(OUTPUT_OPTION) -c $< | ||||||
|  |  | ||||||
|  | @ -16,18 +16,14 @@ | ||||||
| │ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR             │ | │ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR             │ | ||||||
| │ PERFORMANCE OF THIS SOFTWARE.                                                │ | │ PERFORMANCE OF THIS SOFTWARE.                                                │ | ||||||
| ╚─────────────────────────────────────────────────────────────────────────────*/ | ╚─────────────────────────────────────────────────────────────────────────────*/ | ||||||
| #include "libc/sysv/consts/futex.h" |  | ||||||
| #include "libc/assert.h" | #include "libc/assert.h" | ||||||
| #include "libc/atomic.h" | #include "libc/atomic.h" | ||||||
| #include "libc/calls/calls.h" |  | ||||||
| #include "libc/calls/internal.h" | #include "libc/calls/internal.h" | ||||||
| #include "libc/calls/sig.internal.h" | #include "libc/calls/sig.internal.h" | ||||||
| #include "libc/calls/state.internal.h" |  | ||||||
| #include "libc/calls/struct/sigset.h" | #include "libc/calls/struct/sigset.h" | ||||||
| #include "libc/calls/struct/sigset.internal.h" | #include "libc/calls/struct/sigset.internal.h" | ||||||
| #include "libc/calls/struct/timespec.h" | #include "libc/calls/struct/timespec.h" | ||||||
| #include "libc/calls/struct/timespec.internal.h" | #include "libc/calls/struct/timespec.internal.h" | ||||||
| #include "libc/calls/syscall_support-nt.internal.h" |  | ||||||
| #include "libc/cosmo.h" | #include "libc/cosmo.h" | ||||||
| #include "libc/dce.h" | #include "libc/dce.h" | ||||||
| #include "libc/errno.h" | #include "libc/errno.h" | ||||||
|  | @ -37,62 +33,56 @@ | ||||||
| #include "libc/intrin/ulock.h" | #include "libc/intrin/ulock.h" | ||||||
| #include "libc/intrin/weaken.h" | #include "libc/intrin/weaken.h" | ||||||
| #include "libc/limits.h" | #include "libc/limits.h" | ||||||
| #include "libc/nexgen32e/vendor.internal.h" |  | ||||||
| #include "libc/nt/runtime.h" | #include "libc/nt/runtime.h" | ||||||
| #include "libc/nt/synchronization.h" | #include "libc/nt/synchronization.h" | ||||||
| #include "libc/runtime/clktck.h" | #include "libc/sysv/consts/clock.h" | ||||||
|  | #include "libc/sysv/consts/futex.h" | ||||||
| #include "libc/sysv/consts/sicode.h" | #include "libc/sysv/consts/sicode.h" | ||||||
| #include "libc/sysv/consts/timer.h" |  | ||||||
| #include "libc/sysv/errfuns.h" | #include "libc/sysv/errfuns.h" | ||||||
| #include "libc/thread/freebsd.internal.h" | #include "libc/thread/freebsd.internal.h" | ||||||
| #include "libc/thread/posixthread.internal.h" | #include "libc/thread/posixthread.internal.h" | ||||||
| #include "libc/thread/thread.h" | #include "libc/thread/thread.h" | ||||||
| #include "libc/thread/tls.h" | // clang-format off
 | ||||||
| #include "third_party/nsync/atomic.h" |  | ||||||
| #include "third_party/nsync/time.h" |  | ||||||
| #include "third_party/nsync/common.internal.h" |  | ||||||
| #include "third_party/nsync/futex.internal.h" |  | ||||||
| #include "third_party/nsync/time.h" |  | ||||||
| 
 | 
 | ||||||
| #define FUTEX_WAIT_BITS_ FUTEX_BITSET_MATCH_ANY | #define FUTEX_WAIT_BITS_ FUTEX_BITSET_MATCH_ANY | ||||||
| 
 | 
 | ||||||
| errno_t _futex (atomic_int *, int, int, const struct timespec *, int *, int); | errno_t cosmo_futex_thunk (atomic_int *, int, int, const struct timespec *, int *, int); | ||||||
| errno_t _futex_wake (atomic_int *, int, int) asm ("_futex"); | errno_t _futex_wake (atomic_int *, int, int) asm ("cosmo_futex_thunk"); | ||||||
| int sys_futex_cp (atomic_int *, int, int, const struct timespec *, int *, int); | int sys_futex_cp (atomic_int *, int, int, const struct timespec *, int *, int); | ||||||
| 
 | 
 | ||||||
| static struct NsyncFutex { | static struct CosmoFutex { | ||||||
| 	atomic_uint once; | 	atomic_uint once; | ||||||
| 	int FUTEX_WAIT_; | 	int FUTEX_WAIT_; | ||||||
| 	int FUTEX_PRIVATE_FLAG_; | 	int FUTEX_PRIVATE_FLAG_; | ||||||
| 	int FUTEX_CLOCK_REALTIME_; | 	int FUTEX_CLOCK_REALTIME_; | ||||||
| 	bool is_supported; | 	bool is_supported; | ||||||
| 	bool timeout_is_relative; | 	bool timeout_is_relative; | ||||||
| } nsync_futex_; | } g_cosmo_futex; | ||||||
| 
 | 
 | ||||||
| static void nsync_futex_init_ (void) { | static void cosmo_futex_init (void) { | ||||||
| 	int e; | 	int e; | ||||||
| 	atomic_int x; | 	atomic_int x; | ||||||
| 
 | 
 | ||||||
| 	nsync_futex_.FUTEX_WAIT_ = FUTEX_WAIT; | 	g_cosmo_futex.FUTEX_WAIT_ = FUTEX_WAIT; | ||||||
| 
 | 
 | ||||||
| 	if (IsWindows ()) { | 	if (IsWindows ()) { | ||||||
| 		nsync_futex_.is_supported = true; | 		g_cosmo_futex.is_supported = true; | ||||||
| 		return; | 		return; | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	if (IsXnu ()) { | 	if (IsXnu ()) { | ||||||
| 		nsync_futex_.is_supported = true; | 		g_cosmo_futex.is_supported = true; | ||||||
| 		nsync_futex_.timeout_is_relative = true; | 		g_cosmo_futex.timeout_is_relative = true; | ||||||
| 		return; | 		return; | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	if (IsFreebsd ()) { | 	if (IsFreebsd ()) { | ||||||
| 		nsync_futex_.is_supported = true; | 		g_cosmo_futex.is_supported = true; | ||||||
| 		nsync_futex_.FUTEX_PRIVATE_FLAG_ = FUTEX_PRIVATE_FLAG; | 		g_cosmo_futex.FUTEX_PRIVATE_FLAG_ = FUTEX_PRIVATE_FLAG; | ||||||
| 		return; | 		return; | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	if (!(nsync_futex_.is_supported = IsLinux () || IsOpenbsd ())) | 	if (!(g_cosmo_futex.is_supported = IsLinux () || IsOpenbsd ())) | ||||||
| 		return; | 		return; | ||||||
| 
 | 
 | ||||||
| 	// In our testing, we found that the monotonic clock on various
 | 	// In our testing, we found that the monotonic clock on various
 | ||||||
|  | @ -100,7 +90,7 @@ static void nsync_futex_init_ (void) { | ||||||
| 	// better behaved than the realtime clock, and routinely took
 | 	// better behaved than the realtime clock, and routinely took
 | ||||||
| 	// large steps backwards, especially on multiprocessors. Given
 | 	// large steps backwards, especially on multiprocessors. Given
 | ||||||
| 	// that "monotonic" doesn't seem to mean what it says,
 | 	// that "monotonic" doesn't seem to mean what it says,
 | ||||||
| 	// implementers of nsync_time might consider retaining the
 | 	// implementers of cosmo_time might consider retaining the
 | ||||||
| 	// simplicity of a single epoch within an address space, by
 | 	// simplicity of a single epoch within an address space, by
 | ||||||
| 	// configuring any time synchronization mechanism (like ntp) to
 | 	// configuring any time synchronization mechanism (like ntp) to
 | ||||||
| 	// adjust for leap seconds by adjusting the rate, rather than
 | 	// adjust for leap seconds by adjusting the rate, rather than
 | ||||||
|  | @ -108,31 +98,32 @@ static void nsync_futex_init_ (void) { | ||||||
| 	e = errno; | 	e = errno; | ||||||
| 	atomic_store_explicit (&x, 0, memory_order_relaxed); | 	atomic_store_explicit (&x, 0, memory_order_relaxed); | ||||||
| 	if (IsLinux () && | 	if (IsLinux () && | ||||||
| 	    _futex (&x, FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME, | 	    cosmo_futex_thunk (&x, FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME, | ||||||
| 			       1, 0, 0, FUTEX_BITSET_MATCH_ANY) == -EAGAIN) { | 			       1, 0, 0, FUTEX_BITSET_MATCH_ANY) == -EAGAIN) { | ||||||
| 		nsync_futex_.FUTEX_WAIT_ = FUTEX_WAIT_BITSET; | 		g_cosmo_futex.FUTEX_WAIT_ = FUTEX_WAIT_BITSET; | ||||||
| 		nsync_futex_.FUTEX_PRIVATE_FLAG_ = FUTEX_PRIVATE_FLAG; | 		g_cosmo_futex.FUTEX_PRIVATE_FLAG_ = FUTEX_PRIVATE_FLAG; | ||||||
| 		nsync_futex_.FUTEX_CLOCK_REALTIME_ = FUTEX_CLOCK_REALTIME; | 		g_cosmo_futex.FUTEX_CLOCK_REALTIME_ = FUTEX_CLOCK_REALTIME; | ||||||
| 	} else if (IsOpenbsd () || | 	} else if (IsOpenbsd () || | ||||||
| 		   (IsLinux () && | 		   (IsLinux () && | ||||||
| 		    !_futex_wake (&x, FUTEX_WAKE_PRIVATE, 1))) { | 		    !_futex_wake (&x, FUTEX_WAKE_PRIVATE, 1))) { | ||||||
| 		nsync_futex_.FUTEX_WAIT_ = FUTEX_WAIT; | 		g_cosmo_futex.FUTEX_WAIT_ = FUTEX_WAIT; | ||||||
| 		nsync_futex_.FUTEX_PRIVATE_FLAG_ = FUTEX_PRIVATE_FLAG; | 		g_cosmo_futex.FUTEX_PRIVATE_FLAG_ = FUTEX_PRIVATE_FLAG; | ||||||
| 		nsync_futex_.timeout_is_relative = true; | 		g_cosmo_futex.timeout_is_relative = true; | ||||||
| 	} else { | 	} else { | ||||||
| 		nsync_futex_.FUTEX_WAIT_ = FUTEX_WAIT; | 		g_cosmo_futex.FUTEX_WAIT_ = FUTEX_WAIT; | ||||||
| 		nsync_futex_.timeout_is_relative = true; | 		g_cosmo_futex.timeout_is_relative = true; | ||||||
| 	} | 	} | ||||||
| 	errno = e; | 	errno = e; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static uint32_t nsync_time_64to32u (uint64_t duration) { | static uint32_t cosmo_time_64to32u (uint64_t duration) { | ||||||
| 	if (duration <= -1u) | 	if (duration <= -1u) | ||||||
| 		return duration; | 		return duration; | ||||||
| 	return -1u; | 	return -1u; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static int nsync_futex_polyfill_ (atomic_int *w, int expect, int clock, struct timespec *abstime) { | static int cosmo_futex_polyfill (atomic_int *w, int expect, int clock, | ||||||
|  | 				 struct timespec *abstime) { | ||||||
| 	for (;;) { | 	for (;;) { | ||||||
| 		if (atomic_load_explicit (w, memory_order_acquire) != expect) | 		if (atomic_load_explicit (w, memory_order_acquire) != expect) | ||||||
| 			return 0; | 			return 0; | ||||||
|  | @ -148,7 +139,7 @@ static int nsync_futex_polyfill_ (atomic_int *w, int expect, int clock, struct t | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static int nsync_futex_wait_win32_ (atomic_int *w, int expect, char pshare, | static int cosmo_futex_wait_win32 (atomic_int *w, int expect, char pshare, | ||||||
| 				   int clock, const struct timespec *timeout, | 				   int clock, const struct timespec *timeout, | ||||||
| 				   struct PosixThread *pt, | 				   struct PosixThread *pt, | ||||||
| 				   sigset_t waitmask) { | 				   sigset_t waitmask) { | ||||||
|  | @ -183,7 +174,7 @@ static int nsync_futex_wait_win32_ (atomic_int *w, int expect, char pshare, | ||||||
| 			pt->pt_blkmask = waitmask; | 			pt->pt_blkmask = waitmask; | ||||||
| 			atomic_store_explicit (&pt->pt_blocker, w, memory_order_release); | 			atomic_store_explicit (&pt->pt_blocker, w, memory_order_release); | ||||||
| 		} | 		} | ||||||
| 		ok = WaitOnAddress (w, &expect, sizeof(int), nsync_time_64to32u (timespec_tomillis (wait))); | 		ok = WaitOnAddress (w, &expect, sizeof(int), cosmo_time_64to32u (timespec_tomillis (wait))); | ||||||
| 		if (pt) { | 		if (pt) { | ||||||
| 			/* __sig_wake wakes our futex without changing `w` after enqueing signals */ | 			/* __sig_wake wakes our futex without changing `w` after enqueing signals */ | ||||||
| 			atomic_store_explicit (&pt->pt_blocker, 0, memory_order_release); | 			atomic_store_explicit (&pt->pt_blocker, 0, memory_order_release); | ||||||
|  | @ -197,7 +188,7 @@ static int nsync_futex_wait_win32_ (atomic_int *w, int expect, char pshare, | ||||||
| 		if (ok) { | 		if (ok) { | ||||||
| 			return 0; | 			return 0; | ||||||
| 		} else { | 		} else { | ||||||
| 			ASSERT (GetLastError () == ETIMEDOUT); | 			unassert (GetLastError () == ETIMEDOUT); | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| #else | #else | ||||||
|  | @ -205,14 +196,14 @@ static int nsync_futex_wait_win32_ (atomic_int *w, int expect, char pshare, | ||||||
| #endif /* __x86_64__ */ | #endif /* __x86_64__ */ | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static int nsync_futex_fix_timeout_ (struct timespec *memory, int clock, | static int cosmo_futex_fix_timeout (struct timespec *memory, int clock, | ||||||
| 				    const struct timespec *abstime, | 				    const struct timespec *abstime, | ||||||
| 				    struct timespec **result) { | 				    struct timespec **result) { | ||||||
| 	struct timespec now; | 	struct timespec now; | ||||||
| 	if (!abstime) { | 	if (!abstime) { | ||||||
| 		*result = 0; | 		*result = 0; | ||||||
| 		return 0; | 		return 0; | ||||||
| 	} else if (!nsync_futex_.timeout_is_relative) { | 	} else if (!g_cosmo_futex.timeout_is_relative) { | ||||||
| 		*memory = *abstime; | 		*memory = *abstime; | ||||||
| 		*result = memory; | 		*result = memory; | ||||||
| 		return 0; | 		return 0; | ||||||
|  | @ -225,7 +216,24 @@ static int nsync_futex_fix_timeout_ (struct timespec *memory, int clock, | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
-int nsync_futex_wait_ (atomic_int *w, int expect, char pshare,
+/**
+ * Waits on futex.
+ *
+ * This function may be used to ask the OS to park the calling thread
+ * until cosmo_futex_wake() is called on the memory address `w`.
+ *
+ * @param w is your futex
+ * @param expect is the value `*w` is expected to have on entry
+ * @param pshare is `PTHREAD_PROCESS_PRIVATE` / `PTHREAD_PROCESS_SHARED`
+ * @param clock is `CLOCK_MONOTONIC`, `CLOCK_REALTIME`, etc.
+ * @param abstime is null to wait forever or absolute timestamp to stop
+ * @return 0 on success, or -errno on error
+ * @raise EINVAL on bad parameter
+ * @raise EAGAIN if `*w` wasn't `expect`
+ * @raise EINTR if a signal handler was called while waiting
+ * @raise ECANCELED if calling thread was canceled while waiting
+ */
+int cosmo_futex_wait (atomic_int *w, int expect, char pshare,
 		      int clock, const struct timespec *abstime) {
| 	int e, rc, op; | 	int e, rc, op; | ||||||
| 	struct CosmoTib *tib; | 	struct CosmoTib *tib; | ||||||
|  | @ -233,14 +241,14 @@ int nsync_futex_wait_ (atomic_int *w, int expect, char pshare, | ||||||
| 	struct timespec tsmem; | 	struct timespec tsmem; | ||||||
| 	struct timespec *timeout = 0; | 	struct timespec *timeout = 0; | ||||||
| 
 | 
 | ||||||
| 	cosmo_once (&nsync_futex_.once, nsync_futex_init_); | 	cosmo_once (&g_cosmo_futex.once, cosmo_futex_init); | ||||||
| 
 | 
 | ||||||
| 	op = nsync_futex_.FUTEX_WAIT_; | 	op = g_cosmo_futex.FUTEX_WAIT_; | ||||||
| 	if (pshare == PTHREAD_PROCESS_PRIVATE) | 	if (pshare == PTHREAD_PROCESS_PRIVATE) | ||||||
| 		op |= nsync_futex_.FUTEX_PRIVATE_FLAG_; | 		op |= g_cosmo_futex.FUTEX_PRIVATE_FLAG_; | ||||||
| 	if (clock == CLOCK_REALTIME || | 	if (clock == CLOCK_REALTIME || | ||||||
| 	    clock == CLOCK_REALTIME_COARSE) | 	    clock == CLOCK_REALTIME_COARSE) | ||||||
| 		op |= nsync_futex_.FUTEX_CLOCK_REALTIME_; | 		op |= g_cosmo_futex.FUTEX_CLOCK_REALTIME_; | ||||||
| 
 | 
 | ||||||
| 	if (abstime && timespec_cmp (*abstime, timespec_zero) <= 0) { | 	if (abstime && timespec_cmp (*abstime, timespec_zero) <= 0) { | ||||||
| 		rc = -ETIMEDOUT; | 		rc = -ETIMEDOUT; | ||||||
|  | @ -252,7 +260,7 @@ int nsync_futex_wait_ (atomic_int *w, int expect, char pshare, | ||||||
| 		goto Finished; | 		goto Finished; | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	if ((rc = nsync_futex_fix_timeout_ (&tsmem, clock, abstime, &timeout))) | 	if ((rc = cosmo_futex_fix_timeout (&tsmem, clock, abstime, &timeout))) | ||||||
| 		goto Finished; | 		goto Finished; | ||||||
| 
 | 
 | ||||||
| 	LOCKTRACE ("futex(%t [%d], %s, %#x, %s) → ...", | 	LOCKTRACE ("futex(%t [%d], %s, %#x, %s) → ...", | ||||||
|  | @ -263,13 +271,13 @@ int nsync_futex_wait_ (atomic_int *w, int expect, char pshare, | ||||||
| 	tib = __get_tls(); | 	tib = __get_tls(); | ||||||
| 	pt = (struct PosixThread *)tib->tib_pthread; | 	pt = (struct PosixThread *)tib->tib_pthread; | ||||||
| 
 | 
 | ||||||
| 	if (nsync_futex_.is_supported) { | 	if (g_cosmo_futex.is_supported) { | ||||||
| 		e = errno; | 		e = errno; | ||||||
| 		if (IsWindows ()) { | 		if (IsWindows ()) { | ||||||
| 			// Windows 8 futexes don't support multiple processes :(
 | 			// Windows 8 futexes don't support multiple processes :(
 | ||||||
| 			if (pshare) goto Polyfill; | 			if (pshare) goto Polyfill; | ||||||
| 			sigset_t m = __sig_block (); | 			sigset_t m = __sig_block (); | ||||||
| 			rc = nsync_futex_wait_win32_ (w, expect, pshare, clock, timeout, pt, m); | 			rc = cosmo_futex_wait_win32 (w, expect, pshare, clock, timeout, pt, m); | ||||||
| 			__sig_unblock (m); | 			__sig_unblock (m); | ||||||
| 		} else if (IsXnu ()) { | 		} else if (IsXnu ()) { | ||||||
| 
 | 
 | ||||||
|  | @ -293,7 +301,7 @@ int nsync_futex_wait_ (atomic_int *w, int expect, char pshare, | ||||||
| 				op = UL_COMPARE_AND_WAIT; | 				op = UL_COMPARE_AND_WAIT; | ||||||
| 			} | 			} | ||||||
| 			if (timeout) { | 			if (timeout) { | ||||||
| 				us = nsync_time_64to32u (timespec_tomicros (*timeout)); | 				us = cosmo_time_64to32u (timespec_tomicros (*timeout)); | ||||||
| 			} else { | 			} else { | ||||||
| 				us = -1u; | 				us = -1u; | ||||||
| 			} | 			} | ||||||
|  | @ -333,7 +341,7 @@ int nsync_futex_wait_ (atomic_int *w, int expect, char pshare, | ||||||
| 		} | 		} | ||||||
| 	} else { | 	} else { | ||||||
| 	Polyfill: | 	Polyfill: | ||||||
| 		rc = nsync_futex_polyfill_ (w, expect, clock, timeout); | 		rc = cosmo_futex_polyfill (w, expect, clock, timeout); | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| Finished: | Finished: | ||||||
|  | @ -346,18 +354,24 @@ Finished: | ||||||
| 	return rc; | 	return rc; | ||||||
| } | } | ||||||
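The two entry points documented here compose in the usual futex pattern: publish a value with a release store, then wake; wait loops recheck the value after every return. Below is a minimal one-shot event sketch, for illustration only (g_ready, event_wait, and event_set are hypothetical names, not part of this commit):

// minimal one-shot event built on the public futex API introduced here
#include "libc/cosmo.h"            // cosmo_futex_wait, cosmo_futex_wake
#include "libc/intrin/atomic.h"    // atomic_int, atomic_load_explicit, ...
#include "libc/limits.h"           // INT_MAX
#include "libc/thread/thread.h"    // PTHREAD_PROCESS_PRIVATE

static atomic_int g_ready;         // hypothetical shared flag

void event_wait(void) {
  // sleep while the word is still 0; -EAGAIN (word already changed),
  // -EINTR, and spurious wakeups are all handled by rechecking the word
  while (!atomic_load_explicit(&g_ready, memory_order_acquire))
    cosmo_futex_wait(&g_ready, /*expect=*/0, PTHREAD_PROCESS_PRIVATE,
                     /*clock=*/0, /*abstime=*/0);  // null abstime = forever
}

void event_set(void) {
  atomic_store_explicit(&g_ready, 1, memory_order_release);
  cosmo_futex_wake(&g_ready, INT_MAX, PTHREAD_PROCESS_PRIVATE);  // wake all
}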
| 
 | 
 | ||||||
| int nsync_futex_wake_ (atomic_int *w, int count, char pshare) { | /**
 | ||||||
|  |  * Wakes futex. | ||||||
|  |  * | ||||||
|  |  * @param w is your futex | ||||||
|  |  * @param count is number of threads to wake (usually 1 or `INT_MAX`) | ||||||
|  |  * @param pshare is `PTHREAD_PROCESS_PRIVATE` / `PTHREAD_PROCESS_SHARED` | ||||||
|  |  * @return number of threads woken on success, or -errno on error | ||||||
|  |  */ | ||||||
|  | int cosmo_futex_wake (atomic_int *w, int count, char pshare) { | ||||||
| 	int rc, op, fop; | 	int rc, op, fop; | ||||||
| 
 | 
 | ||||||
| 	ASSERT (count == 1 || count == INT_MAX); | 	cosmo_once (&g_cosmo_futex.once, cosmo_futex_init); | ||||||
| 
 |  | ||||||
| 	cosmo_once (&nsync_futex_.once, nsync_futex_init_); |  | ||||||
| 
 | 
 | ||||||
| 	op = FUTEX_WAKE; | 	op = FUTEX_WAKE; | ||||||
| 	if (pshare == PTHREAD_PROCESS_PRIVATE) | 	if (pshare == PTHREAD_PROCESS_PRIVATE) | ||||||
| 		op |= nsync_futex_.FUTEX_PRIVATE_FLAG_; | 		op |= g_cosmo_futex.FUTEX_PRIVATE_FLAG_; | ||||||
| 
 | 
 | ||||||
| 	if (nsync_futex_.is_supported) { | 	if (g_cosmo_futex.is_supported) { | ||||||
| 		if (IsWindows ()) { | 		if (IsWindows ()) { | ||||||
| 			if (pshare) { | 			if (pshare) { | ||||||
| 				goto Polyfill; | 				goto Polyfill; | ||||||
|  | @ -379,7 +393,7 @@ int nsync_futex_wake_ (atomic_int *w, int count, char pshare) { | ||||||
| 				op |= ULF_WAKE_ALL; | 				op |= ULF_WAKE_ALL; | ||||||
| 			} | 			} | ||||||
| 			rc = ulock_wake (op, w, 0); | 			rc = ulock_wake (op, w, 0); | ||||||
| 			ASSERT (!rc || rc == -ENOENT); | 			unassert (!rc || rc == -ENOENT); | ||||||
| 			if (!rc) { | 			if (!rc) { | ||||||
| 				rc = 1; | 				rc = 1; | ||||||
| 			} else if (rc == -ENOENT) { | 			} else if (rc == -ENOENT) { | ||||||
|  | @ -20,7 +20,7 @@ | ||||||
| #include "libc/macros.h" | #include "libc/macros.h" | ||||||
| .privileged | .privileged | ||||||
| 
 | 
 | ||||||
| _futex: | cosmo_futex_thunk: | ||||||
| #ifdef __x86_64__ | #ifdef __x86_64__ | ||||||
| 	push	%rbp | 	push	%rbp | ||||||
| 	mov	%rsp,%rbp | 	mov	%rsp,%rbp | ||||||
|  | @ -47,4 +47,4 @@ _futex: | ||||||
| #error "unsupported architecture" | #error "unsupported architecture" | ||||||
| #endif /* __x86_64__ */ | #endif /* __x86_64__ */ | ||||||
| 1:	ret | 1:	ret | ||||||
| 	.endfn	_futex,globl,hidden | 	.endfn	cosmo_futex_thunk,globl,hidden | ||||||
|  | @ -27,6 +27,6 @@ | ||||||
| 	.ftrace1 | 	.ftrace1 | ||||||
| getcontext: | getcontext: | ||||||
| 	.ftrace2 | 	.ftrace2 | ||||||
| #include "libc/calls/getcontext.inc" | #include "libc/intrin/getcontext.inc" | ||||||
| 	jmp	__getcontextsig | 	jmp	__getcontextsig | ||||||
| 	.endfn	getcontext,globl | 	.endfn	getcontext,globl | ||||||
|  | @ -19,6 +19,7 @@ | ||||||
| #include "libc/calls/blockcancel.internal.h" | #include "libc/calls/blockcancel.internal.h" | ||||||
| #include "libc/calls/calls.h" | #include "libc/calls/calls.h" | ||||||
| #include "libc/calls/state.internal.h" | #include "libc/calls/state.internal.h" | ||||||
|  | #include "libc/cosmo.h" | ||||||
| #include "libc/dce.h" | #include "libc/dce.h" | ||||||
| #include "libc/errno.h" | #include "libc/errno.h" | ||||||
| #include "libc/intrin/atomic.h" | #include "libc/intrin/atomic.h" | ||||||
|  | @ -28,25 +29,8 @@ | ||||||
| #include "libc/runtime/internal.h" | #include "libc/runtime/internal.h" | ||||||
| #include "libc/thread/lock.h" | #include "libc/thread/lock.h" | ||||||
| #include "libc/thread/thread.h" | #include "libc/thread/thread.h" | ||||||
| #include "third_party/nsync/futex.internal.h" |  | ||||||
| #include "third_party/nsync/mu.h" | #include "third_party/nsync/mu.h" | ||||||
| 
 | 
 | ||||||
| static void pthread_mutex_lock_spin(atomic_int *word) { |  | ||||||
|   int backoff = 0; |  | ||||||
|   if (atomic_exchange_explicit(word, 1, memory_order_acquire)) { |  | ||||||
|     LOCKTRACE("acquiring pthread_mutex_lock_spin(%t)...", word); |  | ||||||
|     for (;;) { |  | ||||||
|       for (;;) { |  | ||||||
|         if (!atomic_load_explicit(word, memory_order_relaxed)) |  | ||||||
|           break; |  | ||||||
|         backoff = pthread_delay_np(word, backoff); |  | ||||||
|       } |  | ||||||
|       if (!atomic_exchange_explicit(word, 1, memory_order_acquire)) |  | ||||||
|         break; |  | ||||||
|     } |  | ||||||
|   } |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // see "take 3" algorithm in "futexes are tricky" by ulrich drepper
 | // see "take 3" algorithm in "futexes are tricky" by ulrich drepper
 | ||||||
| // slightly improved to attempt acquiring multiple times b4 syscall
 | // slightly improved to attempt acquiring multiple times b4 syscall
 | ||||||
| static void pthread_mutex_lock_drepper(atomic_int *futex, char pshare) { | static void pthread_mutex_lock_drepper(atomic_int *futex, char pshare) { | ||||||
|  | @ -59,7 +43,7 @@ static void pthread_mutex_lock_drepper(atomic_int *futex, char pshare) { | ||||||
|     word = atomic_exchange_explicit(futex, 2, memory_order_acquire); |     word = atomic_exchange_explicit(futex, 2, memory_order_acquire); | ||||||
|   BLOCK_CANCELATION; |   BLOCK_CANCELATION; | ||||||
|   while (word > 0) { |   while (word > 0) { | ||||||
|     _weaken(nsync_futex_wait_)(futex, 2, pshare, 0, 0); |     cosmo_futex_wait(futex, 2, pshare, 0, 0); | ||||||
|     word = atomic_exchange_explicit(futex, 2, memory_order_acquire); |     word = atomic_exchange_explicit(futex, 2, memory_order_acquire); | ||||||
|   } |   } | ||||||
|   ALLOW_CANCELATION; |   ALLOW_CANCELATION; | ||||||
|  | @ -164,11 +148,7 @@ static errno_t pthread_mutex_lock_impl(pthread_mutex_t *mutex) { | ||||||
| 
 | 
 | ||||||
|   // handle normal mutexes
 |   // handle normal mutexes
 | ||||||
|   if (MUTEX_TYPE(word) == PTHREAD_MUTEX_NORMAL) { |   if (MUTEX_TYPE(word) == PTHREAD_MUTEX_NORMAL) { | ||||||
|     if (_weaken(nsync_futex_wait_)) { |  | ||||||
|     pthread_mutex_lock_drepper(&mutex->_futex, MUTEX_PSHARED(word)); |     pthread_mutex_lock_drepper(&mutex->_futex, MUTEX_PSHARED(word)); | ||||||
|     } else { |  | ||||||
|       pthread_mutex_lock_spin(&mutex->_futex); |  | ||||||
|     } |  | ||||||
|     return 0; |     return 0; | ||||||
|   } |   } | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -24,15 +24,8 @@ | ||||||
| #include "libc/runtime/internal.h" | #include "libc/runtime/internal.h" | ||||||
| #include "libc/thread/lock.h" | #include "libc/thread/lock.h" | ||||||
| #include "libc/thread/thread.h" | #include "libc/thread/thread.h" | ||||||
| #include "third_party/nsync/futex.internal.h" |  | ||||||
| #include "third_party/nsync/mu.h" | #include "third_party/nsync/mu.h" | ||||||
| 
 | 
 | ||||||
| static errno_t pthread_mutex_trylock_spin(atomic_int *word) { |  | ||||||
|   if (!atomic_exchange_explicit(word, 1, memory_order_acquire)) |  | ||||||
|     return 0; |  | ||||||
|   return EBUSY; |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| static errno_t pthread_mutex_trylock_drepper(atomic_int *futex) { | static errno_t pthread_mutex_trylock_drepper(atomic_int *futex) { | ||||||
|   int word = 0; |   int word = 0; | ||||||
|   if (atomic_compare_exchange_strong_explicit( |   if (atomic_compare_exchange_strong_explicit( | ||||||
|  | @ -142,13 +135,8 @@ errno_t pthread_mutex_trylock(pthread_mutex_t *mutex) { | ||||||
| #endif | #endif | ||||||
| 
 | 
 | ||||||
|   // handle normal mutexes
 |   // handle normal mutexes
 | ||||||
|   if (MUTEX_TYPE(word) == PTHREAD_MUTEX_NORMAL) { |   if (MUTEX_TYPE(word) == PTHREAD_MUTEX_NORMAL) | ||||||
|     if (_weaken(nsync_futex_wait_)) { |  | ||||||
|     return pthread_mutex_trylock_drepper(&mutex->_futex); |     return pthread_mutex_trylock_drepper(&mutex->_futex); | ||||||
|     } else { |  | ||||||
|       return pthread_mutex_trylock_spin(&mutex->_futex); |  | ||||||
|     } |  | ||||||
|   } |  | ||||||
| 
 | 
 | ||||||
|   // handle recursive and error checking mutexes
 |   // handle recursive and error checking mutexes
 | ||||||
| #if PTHREAD_USE_NSYNC | #if PTHREAD_USE_NSYNC | ||||||
|  |  | ||||||
|  | @ -18,6 +18,7 @@ | ||||||
| ╚─────────────────────────────────────────────────────────────────────────────*/ | ╚─────────────────────────────────────────────────────────────────────────────*/ | ||||||
| #include "libc/calls/calls.h" | #include "libc/calls/calls.h" | ||||||
| #include "libc/calls/state.internal.h" | #include "libc/calls/state.internal.h" | ||||||
|  | #include "libc/cosmo.h" | ||||||
| #include "libc/dce.h" | #include "libc/dce.h" | ||||||
| #include "libc/errno.h" | #include "libc/errno.h" | ||||||
| #include "libc/intrin/atomic.h" | #include "libc/intrin/atomic.h" | ||||||
|  | @ -26,19 +27,14 @@ | ||||||
| #include "libc/runtime/internal.h" | #include "libc/runtime/internal.h" | ||||||
| #include "libc/thread/lock.h" | #include "libc/thread/lock.h" | ||||||
| #include "libc/thread/thread.h" | #include "libc/thread/thread.h" | ||||||
| #include "third_party/nsync/futex.internal.h" |  | ||||||
| #include "third_party/nsync/mu.h" | #include "third_party/nsync/mu.h" | ||||||
| 
 | 
 | ||||||
| static void pthread_mutex_unlock_spin(atomic_int *word) { |  | ||||||
|   atomic_store_explicit(word, 0, memory_order_release); |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // see "take 3" algorithm in "futexes are tricky" by ulrich drepper
 | // see "take 3" algorithm in "futexes are tricky" by ulrich drepper
 | ||||||
| static void pthread_mutex_unlock_drepper(atomic_int *futex, char pshare) { | static void pthread_mutex_unlock_drepper(atomic_int *futex, char pshare) { | ||||||
|   int word = atomic_fetch_sub_explicit(futex, 1, memory_order_release); |   int word = atomic_fetch_sub_explicit(futex, 1, memory_order_release); | ||||||
|   if (word == 2) { |   if (word == 2) { | ||||||
|     atomic_store_explicit(futex, 0, memory_order_release); |     atomic_store_explicit(futex, 0, memory_order_release); | ||||||
|     _weaken(nsync_futex_wake_)(futex, 1, pshare); |     cosmo_futex_wake(futex, 1, pshare); | ||||||
|   } |   } | ||||||
| } | } | ||||||
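For readers following the Drepper reference in the comments above, here is the lock/unlock pair restated side by side as one self-contained sketch. It only mirrors what the pthread_mutex_* changes in this diff already do; demo_lock and demo_unlock are hypothetical names, and the real lock path additionally blocks cancelation around the wait:

// "take 3" futex mutex states: 0 = unlocked, 1 = locked uncontended,
// 2 = locked with possible waiters (illustration only, not commit code)
#include "libc/cosmo.h"
#include "libc/intrin/atomic.h"

void demo_lock(atomic_int *futex, char pshare) {
  int word = 0;
  // fast path: try to go 0 -> 1 without touching the kernel
  if (atomic_compare_exchange_strong_explicit(futex, &word, 1,
                                              memory_order_acquire,
                                              memory_order_relaxed))
    return;
  // slow path: advertise contention (2), then sleep until it drops to 0
  if (word != 2)
    word = atomic_exchange_explicit(futex, 2, memory_order_acquire);
  while (word > 0) {
    cosmo_futex_wait(futex, 2, pshare, 0, 0);
    word = atomic_exchange_explicit(futex, 2, memory_order_acquire);
  }
}

void demo_unlock(atomic_int *futex, char pshare) {
  // if the previous value was 2, somebody may be parked in the kernel
  if (atomic_fetch_sub_explicit(futex, 1, memory_order_release) == 2) {
    atomic_store_explicit(futex, 0, memory_order_release);
    cosmo_futex_wake(futex, 1, pshare);
  }
}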
| 
 | 
 | ||||||
|  | @ -137,11 +133,7 @@ errno_t pthread_mutex_unlock(pthread_mutex_t *mutex) { | ||||||
| 
 | 
 | ||||||
|   // implement barebones normal mutexes
 |   // implement barebones normal mutexes
 | ||||||
|   if (MUTEX_TYPE(word) == PTHREAD_MUTEX_NORMAL) { |   if (MUTEX_TYPE(word) == PTHREAD_MUTEX_NORMAL) { | ||||||
|     if (_weaken(nsync_futex_wake_)) { |  | ||||||
|     pthread_mutex_unlock_drepper(&mutex->_futex, MUTEX_PSHARED(word)); |     pthread_mutex_unlock_drepper(&mutex->_futex, MUTEX_PSHARED(word)); | ||||||
|     } else { |  | ||||||
|       pthread_mutex_unlock_spin(&mutex->_futex); |  | ||||||
|     } |  | ||||||
|     return 0; |     return 0; | ||||||
|   } |   } | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -1,7 +1,7 @@ | ||||||
| /*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
 | /*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
 | ||||||
| │ vi: set et ft=c ts=2 sts=2 sw=2 fenc=utf-8                               :vi │ | │ vi: set et ft=c ts=2 sts=2 sw=2 fenc=utf-8                               :vi │ | ||||||
| ╞══════════════════════════════════════════════════════════════════════════════╡ | ╞══════════════════════════════════════════════════════════════════════════════╡ | ||||||
| │ Copyright 2023 Justine Alexandra Roberts Tunney                              │ | │ Copyright 2022 Justine Alexandra Roberts Tunney                              │ | ||||||
| │                                                                              │ | │                                                                              │ | ||||||
| │ Permission to use, copy, modify, and/or distribute this software for         │ | │ Permission to use, copy, modify, and/or distribute this software for         │ | ||||||
| │ any purpose with or without fee is hereby granted, provided that the         │ | │ any purpose with or without fee is hereby granted, provided that the         │ | ||||||
|  | @ -17,37 +17,701 @@ | ||||||
| │ PERFORMANCE OF THIS SOFTWARE.                                                │ | │ PERFORMANCE OF THIS SOFTWARE.                                                │ | ||||||
| ╚─────────────────────────────────────────────────────────────────────────────*/ | ╚─────────────────────────────────────────────────────────────────────────────*/ | ||||||
| #include "libc/sysv/consts/sig.h" | #include "libc/sysv/consts/sig.h" | ||||||
|  | #include "ape/sections.internal.h" | ||||||
|  | #include "libc/calls/calls.h" | ||||||
| #include "libc/calls/sig.internal.h" | #include "libc/calls/sig.internal.h" | ||||||
|  | #include "libc/calls/state.internal.h" | ||||||
|  | #include "libc/calls/struct/sigaction.h" | ||||||
|  | #include "libc/calls/struct/siginfo.h" | ||||||
| #include "libc/calls/struct/sigset.internal.h" | #include "libc/calls/struct/sigset.internal.h" | ||||||
|  | #include "libc/calls/struct/ucontext.internal.h" | ||||||
|  | #include "libc/calls/syscall_support-nt.internal.h" | ||||||
|  | #include "libc/calls/ucontext.h" | ||||||
| #include "libc/dce.h" | #include "libc/dce.h" | ||||||
|  | #include "libc/errno.h" | ||||||
| #include "libc/intrin/atomic.h" | #include "libc/intrin/atomic.h" | ||||||
|  | #include "libc/intrin/bsf.h" | ||||||
|  | #include "libc/intrin/describebacktrace.h" | ||||||
|  | #include "libc/intrin/dll.h" | ||||||
|  | #include "libc/intrin/maps.h" | ||||||
|  | #include "libc/intrin/strace.h" | ||||||
| #include "libc/intrin/weaken.h" | #include "libc/intrin/weaken.h" | ||||||
| #include "libc/thread/tls.h" | #include "libc/nt/console.h" | ||||||
|  | #include "libc/nt/enum/context.h" | ||||||
|  | #include "libc/nt/enum/exceptionhandleractions.h" | ||||||
|  | #include "libc/nt/enum/processcreationflags.h" | ||||||
|  | #include "libc/nt/enum/signal.h" | ||||||
|  | #include "libc/nt/enum/status.h" | ||||||
|  | #include "libc/nt/events.h" | ||||||
|  | #include "libc/nt/runtime.h" | ||||||
|  | #include "libc/nt/signals.h" | ||||||
|  | #include "libc/nt/struct/ntexceptionpointers.h" | ||||||
|  | #include "libc/nt/synchronization.h" | ||||||
|  | #include "libc/nt/thread.h" | ||||||
|  | #include "libc/runtime/internal.h" | ||||||
|  | #include "libc/runtime/symbols.internal.h" | ||||||
|  | #include "libc/str/str.h" | ||||||
|  | #include "libc/sysv/consts/sa.h" | ||||||
|  | #include "libc/sysv/consts/sicode.h" | ||||||
|  | #include "libc/sysv/consts/ss.h" | ||||||
|  | #include "libc/thread/posixthread.internal.h" | ||||||
|  | #ifdef __x86_64__ | ||||||
| 
 | 
 | ||||||
| struct Signals __sig; | /**
 | ||||||
|  |  * @fileoverview Cosmopolitan Signals for Windows. | ||||||
|  |  */ | ||||||
| 
 | 
 | ||||||
| sigset_t __sig_block(void) { | #define STKSZ 65536 | ||||||
|   if (IsWindows() || IsMetal()) { | 
 | ||||||
|     if (__tls_enabled) | struct SignalFrame { | ||||||
|       return atomic_exchange_explicit(&__get_tls()->tib_sigmask, -1, |   unsigned rva; | ||||||
|                                       memory_order_acquire); |   unsigned flags; | ||||||
|     else |   siginfo_t si; | ||||||
|  |   ucontext_t ctx; | ||||||
|  | }; | ||||||
|  | 
 | ||||||
|  | static textwindows bool __sig_ignored_by_default(int sig) { | ||||||
|  |   return sig == SIGURG ||   //
 | ||||||
|  |          sig == SIGCONT ||  //
 | ||||||
|  |          sig == SIGCHLD ||  //
 | ||||||
|  |          sig == SIGWINCH; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | textwindows bool __sig_ignored(int sig) { | ||||||
|  |   return __sighandrvas[sig] == (intptr_t)SIG_IGN || | ||||||
|  |          (__sighandrvas[sig] == (intptr_t)SIG_DFL && | ||||||
|  |           __sig_ignored_by_default(sig)); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | textwindows void __sig_delete(int sig) { | ||||||
|  |   struct Dll *e; | ||||||
|  |   atomic_fetch_and_explicit(__sig.process, ~(1ull << (sig - 1)), | ||||||
|  |                             memory_order_relaxed); | ||||||
|  |   _pthread_lock(); | ||||||
|  |   for (e = dll_last(_pthread_list); e; e = dll_prev(_pthread_list, e)) | ||||||
|  |     atomic_fetch_and_explicit(&POSIXTHREAD_CONTAINER(e)->tib->tib_sigpending, | ||||||
|  |                               ~(1ull << (sig - 1)), memory_order_relaxed); | ||||||
|  |   _pthread_unlock(); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | static textwindows int __sig_getter(atomic_ulong *sigs, sigset_t masked) { | ||||||
|  |   int sig; | ||||||
|  |   sigset_t bit, pending, deliverable; | ||||||
|  |   for (;;) { | ||||||
|  |     pending = atomic_load_explicit(sigs, memory_order_acquire); | ||||||
|  |     if ((deliverable = pending & ~masked)) { | ||||||
|  |       sig = bsfl(deliverable) + 1; | ||||||
|  |       bit = 1ull << (sig - 1); | ||||||
|  |       if (atomic_fetch_and_explicit(sigs, ~bit, memory_order_acq_rel) & bit) | ||||||
|  |         return sig; | ||||||
|  |     } else { | ||||||
|       return 0; |       return 0; | ||||||
|   } else { |     } | ||||||
|     sigset_t res, neu = -1; |  | ||||||
|     sys_sigprocmask(SIG_SETMASK, &neu, &res); |  | ||||||
|     return res; |  | ||||||
|   } |   } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| void __sig_unblock(sigset_t m) { | textwindows int __sig_get(sigset_t masked) { | ||||||
|   if (IsWindows() || IsMetal()) { |   int sig; | ||||||
|     if (__tls_enabled) { |   if (!(sig = __sig_getter(&__get_tls()->tib_sigpending, masked))) | ||||||
|       atomic_store_explicit(&__get_tls()->tib_sigmask, m, memory_order_release); |     sig = __sig_getter(__sig.process, masked); | ||||||
|       if (_weaken(__sig_check)) |   return sig; | ||||||
|         _weaken(__sig_check)(); | } | ||||||
|  | 
 | ||||||
|  | static textwindows bool __sig_should_use_altstack(unsigned flags, | ||||||
|  |                                                   struct CosmoTib *tib) { | ||||||
|  |   if (!(flags & SA_ONSTACK)) | ||||||
|  |     return false;  // signal handler didn't enable it
 | ||||||
|  |   if (!tib->tib_sigstack_size) | ||||||
|  |     return false;  // sigaltstack() wasn't installed on this thread
 | ||||||
|  |   if (tib->tib_sigstack_flags & SS_DISABLE) | ||||||
|  |     return false;  // sigaltstack() on this thread was disabled by user
 | ||||||
|  |   char *bp = __builtin_frame_address(0); | ||||||
|  |   if (tib->tib_sigstack_addr <= bp && | ||||||
|  |       bp <= tib->tib_sigstack_addr + tib->tib_sigstack_size) | ||||||
|  |     return false;  // we're already on the alternate stack
 | ||||||
|  |   return true; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | static textwindows wontreturn void __sig_terminate(int sig) { | ||||||
|  |   TerminateThisProcess(sig); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | textwindows static bool __sig_wake(struct PosixThread *pt, int sig) { | ||||||
|  |   atomic_int *blocker; | ||||||
|  |   blocker = atomic_load_explicit(&pt->pt_blocker, memory_order_acquire); | ||||||
|  |   if (!blocker) | ||||||
|  |     return false; | ||||||
|  |   // threads can create semaphores on an as-needed basis
 | ||||||
|  |   if (blocker == PT_BLOCKER_EVENT) { | ||||||
|  |     STRACE("%G set %d's event object", sig, _pthread_tid(pt)); | ||||||
|  |     SetEvent(pt->pt_event); | ||||||
|  |     return !!atomic_load_explicit(&pt->pt_blocker, memory_order_acquire); | ||||||
|   } |   } | ||||||
|  |   // all other blocking ops that aren't overlapped i/o should use futexes
 | ||||||
|  |   // we force restartable futexes to churn by waking w/o releasing
 | ||||||
|  |   STRACE("%G waking %d's futex", sig, _pthread_tid(pt)); | ||||||
|  |   WakeByAddressSingle(blocker); | ||||||
|  |   return !!atomic_load_explicit(&pt->pt_blocker, memory_order_acquire); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | textwindows static bool __sig_start(struct PosixThread *pt, int sig, | ||||||
|  |                                     unsigned *rva, unsigned *flags) { | ||||||
|  |   *rva = __sighandrvas[sig]; | ||||||
|  |   *flags = __sighandflags[sig]; | ||||||
|  |   if (*rva == (intptr_t)SIG_IGN || | ||||||
|  |       (*rva == (intptr_t)SIG_DFL && __sig_ignored_by_default(sig))) { | ||||||
|  |     STRACE("ignoring %G", sig); | ||||||
|  |     return false; | ||||||
|  |   } | ||||||
|  |   if (atomic_load_explicit(&pt->tib->tib_sigmask, memory_order_acquire) & | ||||||
|  |       (1ull << (sig - 1))) { | ||||||
|  |     STRACE("enqueing %G on %d", sig, _pthread_tid(pt)); | ||||||
|  |     atomic_fetch_or_explicit(&pt->tib->tib_sigpending, 1ull << (sig - 1), | ||||||
|  |                              memory_order_relaxed); | ||||||
|  |     __sig_wake(pt, sig); | ||||||
|  |     return false; | ||||||
|  |   } | ||||||
|  |   if (*rva == (intptr_t)SIG_DFL) { | ||||||
|  |     STRACE("terminating on %G due to no handler", sig); | ||||||
|  |     __sig_terminate(sig); | ||||||
|  |   } | ||||||
|  |   return true; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | textwindows static sigaction_f __sig_handler(unsigned rva) { | ||||||
|  |   atomic_fetch_add_explicit(&__sig.count, 1, memory_order_relaxed); | ||||||
|  |   return (sigaction_f)(__executable_start + rva); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | textwindows int __sig_raise(volatile int sig, int sic) { | ||||||
|  | 
 | ||||||
|  |   // bitset of kinds of handlers called
 | ||||||
|  |   volatile int handler_was_called = 0; | ||||||
|  | 
 | ||||||
|  |   // loop over pending signals
 | ||||||
|  |   ucontext_t ctx; | ||||||
|  |   getcontext(&ctx); | ||||||
|  |   if (!sig) { | ||||||
|  |     if ((sig = __sig_get(ctx.uc_sigmask))) { | ||||||
|  |       sic = SI_KERNEL; | ||||||
|     } else { |     } else { | ||||||
|     sys_sigprocmask(SIG_SETMASK, &m, 0); |       return handler_was_called; | ||||||
|  |     } | ||||||
|  |   } | ||||||
|  | 
 | ||||||
|  |   // process signal(s)
 | ||||||
|  |   unsigned rva, flags; | ||||||
|  |   struct PosixThread *pt = _pthread_self(); | ||||||
|  |   if (__sig_start(pt, sig, &rva, &flags)) { | ||||||
|  | 
 | ||||||
|  |     if (flags & SA_RESETHAND) { | ||||||
|  |       STRACE("resetting %G handler", sig); | ||||||
|  |       __sighandrvas[sig] = (int32_t)(intptr_t)SIG_DFL; | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     // update the signal mask in preparation for signal handler
 | ||||||
|  |     sigset_t blocksigs = __sighandmask[sig]; | ||||||
|  |     if (!(flags & SA_NODEFER)) | ||||||
|  |       blocksigs |= 1ull << (sig - 1); | ||||||
|  |     ctx.uc_sigmask = atomic_fetch_or_explicit(&pt->tib->tib_sigmask, blocksigs, | ||||||
|  |                                               memory_order_acquire); | ||||||
|  | 
 | ||||||
|  |     // call the user's signal handler
 | ||||||
|  |     char ssbuf[128]; | ||||||
|  |     siginfo_t si = {.si_signo = sig, .si_code = sic}; | ||||||
|  |     STRACE("__sig_raise(%G, %t) mask %s", sig, __sig_handler(rva), | ||||||
|  |            _DescribeSigset(ssbuf, 0, (sigset_t *)&pt->tib->tib_sigmask)); | ||||||
|  |     __sig_handler(rva)(sig, &si, &ctx); | ||||||
|  | 
 | ||||||
|  |     // record this handler
 | ||||||
|  |     if (flags & SA_RESTART) { | ||||||
|  |       handler_was_called |= SIG_HANDLED_SA_RESTART; | ||||||
|  |     } else { | ||||||
|  |       handler_was_called |= SIG_HANDLED_NO_RESTART; | ||||||
|  |     } | ||||||
|  |   } | ||||||
|  | 
 | ||||||
|  |   // restore sigmask
 | ||||||
|  |   // loop back to top
 | ||||||
|  |   // jump where handler says
 | ||||||
|  |   sig = 0; | ||||||
|  |   return setcontext(&ctx); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | textwindows int __sig_relay(int sig, int sic, sigset_t waitmask) { | ||||||
|  |   sigset_t m; | ||||||
|  |   int handler_was_called; | ||||||
|  |   m = atomic_exchange_explicit(&__get_tls()->tib_sigmask, waitmask, | ||||||
|  |                                memory_order_acquire); | ||||||
|  |   handler_was_called = __sig_raise(sig, SI_KERNEL); | ||||||
|  |   atomic_store_explicit(&__get_tls()->tib_sigmask, m, memory_order_release); | ||||||
|  |   return handler_was_called; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // the user's signal handler callback is wrapped with this trampoline
 | ||||||
|  | static textwindows wontreturn void __sig_tramp(struct SignalFrame *sf) { | ||||||
|  |   int sig = sf->si.si_signo; | ||||||
|  |   struct CosmoTib *tib = __get_tls(); | ||||||
|  |   struct PosixThread *pt = (struct PosixThread *)tib->tib_pthread; | ||||||
|  |   atomic_store_explicit(&pt->pt_intoff, 0, memory_order_release); | ||||||
|  |   for (;;) { | ||||||
|  | 
 | ||||||
|  |     // update the signal mask in preparation for signal handler
 | ||||||
|  |     sigset_t blocksigs = __sighandmask[sig]; | ||||||
|  |     if (!(sf->flags & SA_NODEFER)) | ||||||
|  |       blocksigs |= 1ull << (sig - 1); | ||||||
|  |     sf->ctx.uc_sigmask = atomic_fetch_or_explicit(&tib->tib_sigmask, blocksigs, | ||||||
|  |                                                   memory_order_acquire); | ||||||
|  | 
 | ||||||
|  |     // call the user's signal handler
 | ||||||
|  |     char ssbuf[2][128]; | ||||||
|  |     STRACE("__sig_tramp(%G, %t) mask %s → %s", sig, __sig_handler(sf->rva), | ||||||
|  |            _DescribeSigset(ssbuf[0], 0, &sf->ctx.uc_sigmask), | ||||||
|  |            _DescribeSigset(ssbuf[1], 0, (sigset_t *)&tib->tib_sigmask)); | ||||||
|  |     __sig_handler(sf->rva)(sig, &sf->si, &sf->ctx); | ||||||
|  | 
 | ||||||
|  |     // restore the signal mask that was used by the interrupted code
 | ||||||
|  |     // this may have been modified by the signal handler in the callback
 | ||||||
|  |     atomic_store_explicit(&tib->tib_sigmask, sf->ctx.uc_sigmask, | ||||||
|  |                           memory_order_release); | ||||||
|  | 
 | ||||||
|  |     // jump back into original code if there aren't any pending signals
 | ||||||
|  |     do { | ||||||
|  |       if (!(sig = __sig_get(sf->ctx.uc_sigmask))) | ||||||
|  |         __sig_restore(&sf->ctx); | ||||||
|  |     } while (!__sig_start(pt, sig, &sf->rva, &sf->flags)); | ||||||
|  | 
 | ||||||
|  |     // tail recurse into another signal handler
 | ||||||
|  |     sf->si.si_signo = sig; | ||||||
|  |     sf->si.si_code = SI_KERNEL; | ||||||
|  |     if (sf->flags & SA_RESETHAND) { | ||||||
|  |       STRACE("resetting %G handler", sig); | ||||||
|  |       __sighandrvas[sig] = (int32_t)(intptr_t)SIG_DFL; | ||||||
|  |     } | ||||||
|   } |   } | ||||||
| } | } | ||||||
|  | 
 | ||||||
|  | // sends signal to another specific thread which is ref'd
 | ||||||
|  | static textwindows int __sig_killer(struct PosixThread *pt, int sig, int sic) { | ||||||
|  |   unsigned rva = __sighandrvas[sig]; | ||||||
|  |   unsigned flags = __sighandflags[sig]; | ||||||
|  | 
 | ||||||
|  |   // do nothing if signal is ignored
 | ||||||
|  |   if (rva == (intptr_t)SIG_IGN || | ||||||
|  |       (rva == (intptr_t)SIG_DFL && __sig_ignored_by_default(sig))) { | ||||||
|  |     STRACE("ignoring %G", sig); | ||||||
|  |     return 0; | ||||||
|  |   } | ||||||
|  | 
 | ||||||
|  |   // we can't preempt threads that masked sigs or are blocked on i/o
 | ||||||
|  |   while ((atomic_load_explicit(&pt->tib->tib_sigmask, memory_order_acquire) & | ||||||
|  |           (1ull << (sig - 1)))) { | ||||||
|  |     if (atomic_fetch_or_explicit(&pt->tib->tib_sigpending, 1ull << (sig - 1), | ||||||
|  |                                  memory_order_acq_rel) & | ||||||
|  |         (1ull << (sig - 1))) | ||||||
|  |       // we believe signal was already enqueued
 | ||||||
|  |       return 0; | ||||||
|  |     if (__sig_wake(pt, sig)) | ||||||
|  |       // we believe i/o routine will handle signal
 | ||||||
|  |       return 0; | ||||||
|  |     if (atomic_load_explicit(&pt->tib->tib_sigmask, memory_order_acquire) & | ||||||
|  |         (1ull << (sig - 1))) | ||||||
|  |       // we believe ALLOW_SIGNALS will handle signal
 | ||||||
|  |       return 0; | ||||||
|  |     if (!(atomic_fetch_and_explicit(&pt->tib->tib_sigpending, | ||||||
|  |                                     ~(1ull << (sig - 1)), | ||||||
|  |                                     memory_order_acq_rel) & | ||||||
|  |           (1ull << (sig - 1)))) | ||||||
|  |       // we believe another thread sniped our signal
 | ||||||
|  |       return 0; | ||||||
|  |     break; | ||||||
|  |   } | ||||||
|  | 
 | ||||||
|  |   // avoid race conditions and deadlocks with thread suspend process
 | ||||||
|  |   if (atomic_exchange_explicit(&pt->pt_intoff, 1, memory_order_acquire)) { | ||||||
|  |     // we believe another thread is asynchronously waking the mark
 | ||||||
|  |     if (atomic_fetch_or_explicit(&pt->tib->tib_sigpending, 1ull << (sig - 1), | ||||||
|  |                                  memory_order_acq_rel) & | ||||||
|  |         (1ull << (sig - 1))) | ||||||
|  |       // we believe our signal is already being delivered
 | ||||||
|  |       return 0; | ||||||
|  |     if (atomic_load_explicit(&pt->pt_intoff, memory_order_acquire) || | ||||||
|  |         atomic_exchange_explicit(&pt->pt_intoff, 1, memory_order_acquire)) | ||||||
|  |       // we believe __sig_tramp will deliver our signal
 | ||||||
|  |       return 0; | ||||||
|  |     if (!(atomic_fetch_and_explicit(&pt->tib->tib_sigpending, | ||||||
|  |                                     ~(1ull << (sig - 1)), | ||||||
|  |                                     memory_order_acq_rel) & | ||||||
|  |           (1ull << (sig - 1)))) | ||||||
|  |       // we believe another thread sniped our signal
 | ||||||
|  |       return 0; | ||||||
|  |   } | ||||||
|  | 
 | ||||||
|  |   // if there's no handler then killing a thread kills the process
 | ||||||
|  |   if (rva == (intptr_t)SIG_DFL) { | ||||||
|  |     STRACE("terminating on %G due to no handler", sig); | ||||||
|  |     __sig_terminate(sig); | ||||||
|  |   } | ||||||
|  | 
 | ||||||
|  |   // take control of thread
 | ||||||
|  |   // suspending the thread happens asynchronously
 | ||||||
|  |   // however getting the context blocks until it's frozen
 | ||||||
|  |   uintptr_t th = _pthread_syshand(pt); | ||||||
|  |   if (SuspendThread(th) == -1u) { | ||||||
|  |     STRACE("SuspendThread failed w/ %d", GetLastError()); | ||||||
|  |     atomic_store_explicit(&pt->pt_intoff, 0, memory_order_release); | ||||||
|  |     return ESRCH; | ||||||
|  |   } | ||||||
|  |   struct NtContext nc; | ||||||
|  |   nc.ContextFlags = kNtContextFull; | ||||||
|  |   if (!GetThreadContext(th, &nc)) { | ||||||
|  |     STRACE("GetThreadContext failed w/ %d", GetLastError()); | ||||||
|  |     ResumeThread(th); | ||||||
|  |     atomic_store_explicit(&pt->pt_intoff, 0, memory_order_release); | ||||||
|  |     return ESRCH; | ||||||
|  |   } | ||||||
|  | 
 | ||||||
|  |   // we can't preempt threads that masked sig or are blocked
 | ||||||
|  |   // we can't preempt threads that are running in win32 code
 | ||||||
|  |   // so we shall unblock the thread and let it signal itself
 | ||||||
|  |   if (!((uintptr_t)__executable_start <= nc.Rip && | ||||||
|  |         nc.Rip < (uintptr_t)__privileged_start)) { | ||||||
|  |     atomic_fetch_or_explicit(&pt->tib->tib_sigpending, 1ull << (sig - 1), | ||||||
|  |                              memory_order_relaxed); | ||||||
|  |     ResumeThread(th); | ||||||
|  |     atomic_store_explicit(&pt->pt_intoff, 0, memory_order_release); | ||||||
|  |     __sig_wake(pt, sig); | ||||||
|  |     return 0; | ||||||
|  |   } | ||||||
|  | 
 | ||||||
|  |   // preferring to live dangerously
 | ||||||
|  |   // the thread will be signaled asynchronously
 | ||||||
|  |   if (flags & SA_RESETHAND) { | ||||||
|  |     STRACE("resetting %G handler", sig); | ||||||
|  |     __sighandrvas[sig] = (int32_t)(intptr_t)SIG_DFL; | ||||||
|  |   } | ||||||
|  | 
 | ||||||
|  |   // inject call to trampoline function into thread
 | ||||||
|  |   uintptr_t sp; | ||||||
|  |   if (__sig_should_use_altstack(flags, pt->tib)) { | ||||||
|  |     sp = (uintptr_t)pt->tib->tib_sigstack_addr + pt->tib->tib_sigstack_size; | ||||||
|  |   } else { | ||||||
|  |     sp = nc.Rsp; | ||||||
|  |   } | ||||||
|  |   sp -= sizeof(struct SignalFrame); | ||||||
|  |   sp &= -16; | ||||||
|  |   struct SignalFrame *sf = (struct SignalFrame *)sp; | ||||||
|  |   _ntcontext2linux(&sf->ctx, &nc); | ||||||
|  |   bzero(&sf->si, sizeof(sf->si)); | ||||||
|  |   sf->rva = rva; | ||||||
|  |   sf->flags = flags; | ||||||
|  |   sf->si.si_code = sic; | ||||||
|  |   sf->si.si_signo = sig; | ||||||
|  |   *(uintptr_t *)(sp -= sizeof(uintptr_t)) = nc.Rip; | ||||||
|  |   nc.Rip = (intptr_t)__sig_tramp; | ||||||
|  |   nc.Rdi = (intptr_t)sf; | ||||||
|  |   nc.Rsp = sp; | ||||||
|  |   if (!SetThreadContext(th, &nc)) { | ||||||
|  |     STRACE("SetThreadContext failed w/ %d", GetLastError()); | ||||||
|  |     atomic_store_explicit(&pt->pt_intoff, 0, memory_order_release); | ||||||
|  |     return ESRCH; | ||||||
|  |   } | ||||||
|  |   ResumeThread(th); | ||||||
|  |   __sig_wake(pt, sig); | ||||||
|  |   return 0; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // sends signal to another specific thread
 | ||||||
|  | textwindows int __sig_kill(struct PosixThread *pt, int sig, int sic) { | ||||||
|  |   int rc; | ||||||
|  |   BLOCK_SIGNALS; | ||||||
|  |   rc = __sig_killer(pt, sig, sic); | ||||||
|  |   ALLOW_SIGNALS; | ||||||
|  |   return rc; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // sends signal to any other thread
 | ||||||
|  | // this should only be called by non-posix threads
 | ||||||
|  | textwindows void __sig_generate(int sig, int sic) { | ||||||
|  |   struct Dll *e; | ||||||
|  |   struct PosixThread *pt, *mark = 0; | ||||||
|  |   if (__sig_ignored(sig)) { | ||||||
|  |     STRACE("ignoring %G", sig); | ||||||
|  |     return; | ||||||
|  |   } | ||||||
|  |   if (__sighandrvas[sig] == (intptr_t)SIG_DFL) { | ||||||
|  |     STRACE("terminating on %G due to no handler", sig); | ||||||
|  |     __sig_terminate(sig); | ||||||
|  |   } | ||||||
|  |   if (atomic_load_explicit(__sig.process, memory_order_acquire) & | ||||||
|  |       (1ull << (sig - 1))) { | ||||||
|  |     return; | ||||||
|  |   } | ||||||
|  |   _pthread_lock(); | ||||||
|  |   for (e = dll_first(_pthread_list); e; e = dll_next(_pthread_list, e)) { | ||||||
|  |     pt = POSIXTHREAD_CONTAINER(e); | ||||||
|  |     // we don't want to signal ourself
 | ||||||
|  |     if (pt == _pthread_self()) | ||||||
|  |       continue; | ||||||
|  |     // we don't want to signal a thread that isn't running
 | ||||||
|  |     if (atomic_load_explicit(&pt->pt_status, memory_order_acquire) >= | ||||||
|  |         kPosixThreadTerminated) { | ||||||
|  |       continue; | ||||||
|  |     } | ||||||
|  |     // choose this thread if it isn't masking sig
 | ||||||
|  |     if (!(atomic_load_explicit(&pt->tib->tib_sigmask, memory_order_acquire) & | ||||||
|  |           (1ull << (sig - 1)))) { | ||||||
|  |       _pthread_ref(pt); | ||||||
|  |       mark = pt; | ||||||
|  |       break; | ||||||
|  |     } | ||||||
|  |     // if a thread is blocking then we check to see if it's planning
 | ||||||
|  |     // to unblock our sig once the wait operation is completed; when
 | ||||||
|  |     // that's the case we can cancel the thread's i/o to deliver sig
 | ||||||
|  |     if (atomic_load_explicit(&pt->pt_blocker, memory_order_acquire) && | ||||||
|  |         !(pt->pt_blkmask & (1ull << (sig - 1)))) { | ||||||
|  |       _pthread_ref(pt); | ||||||
|  |       mark = pt; | ||||||
|  |       break; | ||||||
|  |     } | ||||||
|  |   } | ||||||
|  |   _pthread_unlock(); | ||||||
|  |   if (mark) { | ||||||
|  |     // no lock needed since current thread is nameless and formless
 | ||||||
|  |     __sig_killer(mark, sig, sic); | ||||||
|  |     _pthread_unref(mark); | ||||||
|  |   } else { | ||||||
|  |     atomic_fetch_or_explicit(__sig.process, 1ull << (sig - 1), | ||||||
|  |                              memory_order_relaxed); | ||||||
|  |   } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | static textwindows char *__sig_stpcpy(char *d, const char *s) { | ||||||
|  |   size_t i; | ||||||
|  |   for (i = 0;; ++i) | ||||||
|  |     if (!(d[i] = s[i])) | ||||||
|  |       return d + i; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | static textwindows wontreturn void __sig_death(int sig, const char *thing) { | ||||||
|  | #ifndef TINY | ||||||
|  |   intptr_t hStderr; | ||||||
|  |   char sigbuf[21], s[128], *p; | ||||||
|  |   hStderr = GetStdHandle(kNtStdErrorHandle); | ||||||
|  |   p = __sig_stpcpy(s, "Terminating on "); | ||||||
|  |   p = __sig_stpcpy(p, thing); | ||||||
|  |   p = __sig_stpcpy(p, strsignal_r(sig, sigbuf)); | ||||||
|  |   p = __sig_stpcpy(p, | ||||||
|  |                    ". Pass --strace and/or ShowCrashReports() for details.\n"); | ||||||
|  |   WriteFile(hStderr, s, p - s, 0, 0); | ||||||
|  | #endif | ||||||
|  |   __sig_terminate(sig); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | static textwindows void __sig_unmaskable(struct NtExceptionPointers *ep, | ||||||
|  |                                          int code, int sig, | ||||||
|  |                                          struct CosmoTib *tib) { | ||||||
|  | 
 | ||||||
|  |   // log vital crash information reliably for --strace before doing much
 | ||||||
|  |   // we don't print this without the flag since raw numbers scare people
 | ||||||
|  |   // this needs at least one page of stack memory in order to get logged
 | ||||||
|  |   // otherwise it'll print a warning message about the lack of stack mem
 | ||||||
|  |   STRACE("win32 vectored exception 0x%08Xu raising %G " | ||||||
|  |          "cosmoaddr2line %s %lx %s", | ||||||
|  |          ep->ExceptionRecord->ExceptionCode, sig, | ||||||
|  |          _weaken(FindDebugBinary) ? _weaken(FindDebugBinary)() | ||||||
|  |                                   : program_invocation_name, | ||||||
|  |          ep->ContextRecord->Rip, | ||||||
|  |          DescribeBacktrace((struct StackFrame *)ep->ContextRecord->Rbp)); | ||||||
|  | 
 | ||||||
|  |   // if the user didn't install a signal handler for this unmaskable
 | ||||||
|  |   // exception, then print a friendly helpful hint message to stderr
 | ||||||
|  |   unsigned rva = __sighandrvas[sig]; | ||||||
|  |   if (rva == (intptr_t)SIG_DFL || rva == (intptr_t)SIG_IGN) | ||||||
|  |     __sig_death(sig, "uncaught "); | ||||||
|  | 
 | ||||||
|  |   // if this signal handler is configured to auto-reset to the default
 | ||||||
|  |   // then that reset needs to happen before the user handler is called
 | ||||||
|  |   unsigned flags = __sighandflags[sig]; | ||||||
|  |   if (flags & SA_RESETHAND) { | ||||||
|  |     STRACE("resetting %G handler", sig); | ||||||
|  |     __sighandrvas[sig] = (int32_t)(intptr_t)SIG_DFL; | ||||||
|  |   } | ||||||
|  | 
 | ||||||
|  |   // determine the true memory address at which fault occurred
 | ||||||
|  |   // if this is a stack overflow then reapply guard protection
 | ||||||
|  |   void *si_addr; | ||||||
|  |   if (ep->ExceptionRecord->ExceptionCode == kNtSignalGuardPage) { | ||||||
|  |     si_addr = (void *)ep->ExceptionRecord->ExceptionInformation[1]; | ||||||
|  |   } else { | ||||||
|  |     si_addr = ep->ExceptionRecord->ExceptionAddress; | ||||||
|  |   } | ||||||
|  | 
 | ||||||
|  |   // call the user signal handler
 | ||||||
|  |   // and a modifiable view of the faulting code's cpu state
 | ||||||
|  |   // temporarily replace signal mask while calling crash handler
 | ||||||
|  |   // abort process if sig is already blocked to avoid crash loop
 | ||||||
|  |   // note ucontext_t is a hefty data structure on top of NtContext
 | ||||||
|  |   ucontext_t ctx = {0}; | ||||||
|  |   siginfo_t si = {.si_signo = sig, .si_code = code, .si_addr = si_addr}; | ||||||
|  |   _ntcontext2linux(&ctx, ep->ContextRecord); | ||||||
|  |   sigset_t blocksigs = __sighandmask[sig]; | ||||||
|  |   if (!(flags & SA_NODEFER)) | ||||||
|  |     blocksigs |= 1ull << (sig - 1); | ||||||
|  |   ctx.uc_sigmask = atomic_fetch_or_explicit(&tib->tib_sigmask, blocksigs, | ||||||
|  |                                             memory_order_acquire); | ||||||
|  |   if (ctx.uc_sigmask & (1ull << (sig - 1))) { | ||||||
|  |     __sig_death(sig, "masked "); | ||||||
|  |     __sig_terminate(sig); | ||||||
|  |   } | ||||||
|  |   __sig_handler(rva)(sig, &si, &ctx); | ||||||
|  |   atomic_store_explicit(&tib->tib_sigmask, ctx.uc_sigmask, | ||||||
|  |                         memory_order_release); | ||||||
|  |   _ntlinux2context(ep->ContextRecord, &ctx); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | void __stack_call(struct NtExceptionPointers *, int, int, struct CosmoTib *, | ||||||
|  |                   void (*)(struct NtExceptionPointers *, int, int, | ||||||
|  |                            struct CosmoTib *), | ||||||
|  |                   void *); | ||||||
|  | 
 | ||||||
|  | //                         abashed the devil stood
 | ||||||
|  | //                      and felt how awful goodness is
 | ||||||
|  | __msabi dontinstrument unsigned __sig_crash(struct NtExceptionPointers *ep) { | ||||||
|  | 
 | ||||||
|  |   // translate win32 to unix si_signo and si_code
 | ||||||
|  |   int code, sig = __sig_crash_sig(ep->ExceptionRecord->ExceptionCode, &code); | ||||||
|  | 
 | ||||||
|  |   // advance the instruction pointer to skip over debugger breakpoints
 | ||||||
|  |   // this behavior is consistent with how unix kernels are implemented
 | ||||||
|  |   if (sig == SIGTRAP) { | ||||||
|  |     ep->ContextRecord->Rip++; | ||||||
|  |     if (__sig_ignored(sig)) | ||||||
|  |       return kNtExceptionContinueExecution; | ||||||
|  |   } | ||||||
|  | 
 | ||||||
|  |   // win32 stack overflow detection executes INSIDE the guard page
 | ||||||
|  |   // thus switch to the alternate signal stack as soon as possible
 | ||||||
|  |   struct CosmoTib *tib = __get_tls(); | ||||||
|  |   unsigned flags = __sighandflags[sig]; | ||||||
|  |   if (__sig_should_use_altstack(flags, tib)) { | ||||||
|  |     __stack_call(ep, code, sig, tib, __sig_unmaskable, | ||||||
|  |                  tib->tib_sigstack_addr + tib->tib_sigstack_size); | ||||||
|  |   } else { | ||||||
|  |     __sig_unmaskable(ep, code, sig, tib); | ||||||
|  |   } | ||||||
|  | 
 | ||||||
|  |   // resume running user program
 | ||||||
|  |   // hopefully the user fixed the cpu state
 | ||||||
|  |   // otherwise the crash will keep happening
 | ||||||
|  |   return kNtExceptionContinueExecution; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | static textwindows int __sig_console_sig(uint32_t dwCtrlType) { | ||||||
|  |   switch (dwCtrlType) { | ||||||
|  |     case kNtCtrlCEvent: | ||||||
|  |       return SIGINT; | ||||||
|  |     case kNtCtrlBreakEvent: | ||||||
|  |       return SIGQUIT; | ||||||
|  |     case kNtCtrlCloseEvent: | ||||||
|  |     case kNtCtrlLogoffEvent:    // only received by services
 | ||||||
|  |     case kNtCtrlShutdownEvent:  // only received by services
 | ||||||
|  |       return SIGHUP; | ||||||
|  |     default: | ||||||
|  |       return SIGSTKFLT; | ||||||
|  |   } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | __msabi textwindows dontinstrument bool32 __sig_console(uint32_t dwCtrlType) { | ||||||
|  |   // win32 launches a thread to deliver ctrl-c and ctrl-break when typed
 | ||||||
|  |   // it only happens when kNtEnableProcessedInput is in play on console.
 | ||||||
|  |   // otherwise we need to wait until read-nt.c discovers that keystroke.
 | ||||||
|  |   struct CosmoTib tls; | ||||||
|  |   __bootstrap_tls(&tls, __builtin_frame_address(0)); | ||||||
|  |   __sig_generate(__sig_console_sig(dwCtrlType), SI_KERNEL); | ||||||
|  |   return true; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // returns 0 if no signal handlers were called, otherwise a bitmask
 | ||||||
|  | // consisting of `1` which means a signal handler was invoked which
 | ||||||
|  | // didn't have the SA_RESTART flag, and `2`, which means SA_RESTART
 | ||||||
|  | // handlers were called (or `3` if both were the case).
 | ||||||
|  | textwindows int __sig_check(void) { | ||||||
|  |   int sig, res = 0; | ||||||
|  |   while ((sig = __sig_get(atomic_load_explicit(&__get_tls()->tib_sigmask, | ||||||
|  |                                                memory_order_acquire)))) | ||||||
|  |     res |= __sig_raise(sig, SI_KERNEL); | ||||||
|  |   return res; | ||||||
|  | } | ||||||
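A hypothetical caller of that bitmask might look like the sketch below. wait_for_io is an illustrative placeholder, and the SIG_HANDLED_* constants are assumed to be the same ones set by __sig_raise above; none of this code is part of the commit:

// sketch: how a blocking operation could consume __sig_check()'s result
// (assumes libc/errno.h and the sig internals header are included)
static int demo_blocking_call(void) {
  for (;;) {
    if (wait_for_io())                 // illustrative placeholder wait
      return 0;                        // the i/o completed normally
    int handled = __sig_check();       // run any newly deliverable handlers
    if (handled & SIG_HANDLED_NO_RESTART) {
      errno = EINTR;                   // a handler without SA_RESTART ran
      return -1;
    }
    // only SA_RESTART handlers (or none at all) ran, so retry the wait
  }
}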
|  | 
 | ||||||
|  | // background thread for delivering inter-process signals asynchronously
 | ||||||
|  | // this checks for undelivered process-wide signals, once per scheduling
 | ||||||
|  | // quantum, which on windows should be every ~15ms or so, unless somehow
 | ||||||
|  | // the process was tuned to have more fine-grained event timing. we want
 | ||||||
|  | // signals to happen faster when possible; that happens when cancelation
 | ||||||
|  | // points, e.g. read, need to wait on i/o; they too check for new signals
 | ||||||
|  | textwindows dontinstrument static uint32_t __sig_worker(void *arg) { | ||||||
|  |   struct CosmoTib tls; | ||||||
|  |   __bootstrap_tls(&tls, __builtin_frame_address(0)); | ||||||
|  |   char *sp = __builtin_frame_address(0); | ||||||
|  |   __maps_track((char *)(((uintptr_t)sp + __pagesize - 1) & -__pagesize) - STKSZ, | ||||||
|  |                STKSZ); | ||||||
|  |   for (;;) { | ||||||
|  | 
 | ||||||
|  |     // dequeue all pending signals and fire them off. if there's no
 | ||||||
|  |     // thread that can handle them then __sig_generate will requeue
 | ||||||
|  |     // those signals back to __sig.process; hence the need for xchg
 | ||||||
|  |     unsigned long sigs = | ||||||
|  |         atomic_exchange_explicit(__sig.process, 0, memory_order_acq_rel); | ||||||
|  |     while (sigs) { | ||||||
|  |       int sig = bsfl(sigs) + 1; | ||||||
|  |       sigs &= ~(1ull << (sig - 1)); | ||||||
|  |       __sig_generate(sig, SI_KERNEL); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     // unblock stalled asynchronous signals in threads
 | ||||||
|  |     _pthread_lock(); | ||||||
|  |     for (struct Dll *e = dll_first(_pthread_list); e; | ||||||
|  |          e = dll_next(_pthread_list, e)) { | ||||||
|  |       struct PosixThread *pt = POSIXTHREAD_CONTAINER(e); | ||||||
|  |       if (atomic_load_explicit(&pt->pt_status, memory_order_acquire) >= | ||||||
|  |           kPosixThreadTerminated) { | ||||||
|  |         break; | ||||||
|  |       } | ||||||
|  |       sigset_t pending = | ||||||
|  |           atomic_load_explicit(&pt->tib->tib_sigpending, memory_order_acquire); | ||||||
|  |       sigset_t mask = | ||||||
|  |           atomic_load_explicit(&pt->tib->tib_sigmask, memory_order_acquire); | ||||||
|  |       if (pending & ~mask) { | ||||||
|  |         _pthread_ref(pt); | ||||||
|  |         _pthread_unlock(); | ||||||
|  |         while (!atomic_compare_exchange_weak_explicit( | ||||||
|  |             &pt->tib->tib_sigpending, &pending, pending & ~mask, | ||||||
|  |             memory_order_acq_rel, memory_order_relaxed)) { | ||||||
|  |         } | ||||||
|  |         while ((pending = pending & ~mask)) { | ||||||
|  |           int sig = bsfl(pending) + 1; | ||||||
|  |           pending &= ~(1ull << (sig - 1)); | ||||||
|  |           __sig_killer(pt, sig, SI_KERNEL); | ||||||
|  |         } | ||||||
|  |         _pthread_lock(); | ||||||
|  |         _pthread_unref(pt); | ||||||
|  |       } | ||||||
|  |     } | ||||||
|  |     _pthread_unlock(); | ||||||
|  | 
 | ||||||
|  |     // wait until next scheduler quantum
 | ||||||
|  |     Sleep(POLL_INTERVAL_MS); | ||||||
|  |   } | ||||||
|  |   return 0; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | __attribute__((__constructor__(10))) textstartup void __sig_init(void) { | ||||||
|  |   if (!IsWindows()) | ||||||
|  |     return; | ||||||
|  |   AddVectoredExceptionHandler(true, (void *)__sig_crash); | ||||||
|  |   SetConsoleCtrlHandler((void *)__sig_console, true); | ||||||
|  |   CreateThread(0, STKSZ, __sig_worker, 0, kNtStackSizeParamIsAReservation, 0); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | #endif /* __x86_64__ */ | ||||||
|  |  | ||||||
							
								
								
									
53  libc/intrin/sigblock.c  Normal file
							|  | @ -0,0 +1,53 @@ | ||||||
|  | /*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
 | ||||||
|  | │ vi: set et ft=c ts=2 sts=2 sw=2 fenc=utf-8                               :vi │ | ||||||
|  | ╞══════════════════════════════════════════════════════════════════════════════╡ | ||||||
|  | │ Copyright 2023 Justine Alexandra Roberts Tunney                              │ | ||||||
|  | │                                                                              │ | ||||||
|  | │ Permission to use, copy, modify, and/or distribute this software for         │ | ||||||
|  | │ any purpose with or without fee is hereby granted, provided that the         │ | ||||||
|  | │ above copyright notice and this permission notice appear in all copies.      │ | ||||||
|  | │                                                                              │ | ||||||
|  | │ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL                │ | ||||||
|  | │ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED                │ | ||||||
|  | │ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE             │ | ||||||
|  | │ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL         │ | ||||||
|  | │ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR        │ | ||||||
|  | │ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER               │ | ||||||
|  | │ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR             │ | ||||||
|  | │ PERFORMANCE OF THIS SOFTWARE.                                                │ | ||||||
|  | ╚─────────────────────────────────────────────────────────────────────────────*/ | ||||||
|  | #include "libc/sysv/consts/sig.h" | ||||||
|  | #include "libc/calls/sig.internal.h" | ||||||
|  | #include "libc/calls/struct/sigset.internal.h" | ||||||
|  | #include "libc/dce.h" | ||||||
|  | #include "libc/intrin/atomic.h" | ||||||
|  | #include "libc/intrin/weaken.h" | ||||||
|  | #include "libc/thread/tls.h" | ||||||
|  | 
 | ||||||
|  | struct Signals __sig; | ||||||
|  | 
 | ||||||
|  | sigset_t __sig_block(void) { | ||||||
|  |   if (IsWindows() || IsMetal()) { | ||||||
|  |     if (__tls_enabled) | ||||||
|  |       return atomic_exchange_explicit(&__get_tls()->tib_sigmask, -1, | ||||||
|  |                                       memory_order_acquire); | ||||||
|  |     else | ||||||
|  |       return 0; | ||||||
|  |   } else { | ||||||
|  |     sigset_t res, neu = -1; | ||||||
|  |     sys_sigprocmask(SIG_SETMASK, &neu, &res); | ||||||
|  |     return res; | ||||||
|  |   } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | void __sig_unblock(sigset_t m) { | ||||||
|  |   if (IsWindows() || IsMetal()) { | ||||||
|  |     if (__tls_enabled) { | ||||||
|  |       atomic_store_explicit(&__get_tls()->tib_sigmask, m, memory_order_release); | ||||||
|  |       if (_weaken(__sig_check)) | ||||||
|  |         _weaken(__sig_check)(); | ||||||
|  |     } | ||||||
|  |   } else { | ||||||
|  |     sys_sigprocmask(SIG_SETMASK, &m, 0); | ||||||
|  |   } | ||||||
|  | } | ||||||
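These two helpers bracket win32 operations that must not observe asynchronous signals, as the futex wait path earlier in this commit does. A minimal usage sketch, with a hypothetical critical section:

// sketch: mask everything, do the sensitive work, then restore the old
// mask; __sig_unblock() also delivers anything that queued up meanwhile
// (via the weakly linked __sig_check). do_sensitive_work is hypothetical.
static void demo_critical_section(void) {
  sigset_t m = __sig_block();
  do_sensitive_work();
  __sig_unblock(m);
}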
|  | @ -32,7 +32,7 @@ | ||||||
| 	.ftrace1 | 	.ftrace1 | ||||||
| swapcontext: | swapcontext: | ||||||
| 	.ftrace2 | 	.ftrace2 | ||||||
| #include "libc/calls/getcontext.inc" | #include "libc/intrin/getcontext.inc" | ||||||
| #ifdef __x86_64__ | #ifdef __x86_64__ | ||||||
| 	push	%rbp | 	push	%rbp | ||||||
| 	mov	%rsp,%rbp | 	mov	%rsp,%rbp | ||||||
|  | @ -77,13 +77,7 @@ o/$(MODE)/libc/str/iswseparator.o: private			\ | ||||||
| 
 | 
 | ||||||
| # ensure that division is optimized
 | # ensure that division is optimized
 | ||||||
| o/$(MODE)/libc/str/bcmp.o					\ | o/$(MODE)/libc/str/bcmp.o					\ | ||||||
| o/$(MODE)/libc/str/strcmp.o					\ | o/$(MODE)/libc/str/strcmp.o: private				\ | ||||||
| o/$(MODE)/libc/str/windowsdurationtotimeval.o			\ |  | ||||||
| o/$(MODE)/libc/str/windowsdurationtotimespec.o			\ |  | ||||||
| o/$(MODE)/libc/str/timevaltowindowstime.o			\ |  | ||||||
| o/$(MODE)/libc/str/timespectowindowstime.o			\ |  | ||||||
| o/$(MODE)/libc/str/windowstimetotimeval.o			\ |  | ||||||
| o/$(MODE)/libc/str/windowstimetotimespec.o: private		\ |  | ||||||
| 		CFLAGS +=					\
 | 		CFLAGS +=					\
 | ||||||
| 			-O2 | 			-O2 | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -17,11 +17,11 @@ | ||||||
| │ PERFORMANCE OF THIS SOFTWARE.                                                │ | │ PERFORMANCE OF THIS SOFTWARE.                                                │ | ||||||
| ╚─────────────────────────────────────────────────────────────────────────────*/ | ╚─────────────────────────────────────────────────────────────────────────────*/ | ||||||
| #include "libc/calls/blockcancel.internal.h" | #include "libc/calls/blockcancel.internal.h" | ||||||
|  | #include "libc/cosmo.h" | ||||||
| #include "libc/errno.h" | #include "libc/errno.h" | ||||||
| #include "libc/intrin/atomic.h" | #include "libc/intrin/atomic.h" | ||||||
| #include "libc/limits.h" | #include "libc/limits.h" | ||||||
| #include "libc/thread/thread.h" | #include "libc/thread/thread.h" | ||||||
| #include "third_party/nsync/futex.internal.h" |  | ||||||
| 
 | 
 | ||||||
| /**
 | /**
 | ||||||
|  * Waits for all threads to arrive at barrier. |  * Waits for all threads to arrive at barrier. | ||||||
|  | @ -54,14 +54,14 @@ errno_t pthread_barrier_wait(pthread_barrier_t *barrier) { | ||||||
|     atomic_store_explicit(&barrier->_counter, barrier->_count, |     atomic_store_explicit(&barrier->_counter, barrier->_count, | ||||||
|                           memory_order_release); |                           memory_order_release); | ||||||
|     atomic_store_explicit(&barrier->_waiters, 0, memory_order_release); |     atomic_store_explicit(&barrier->_waiters, 0, memory_order_release); | ||||||
|     nsync_futex_wake_(&barrier->_waiters, INT_MAX, barrier->_pshared); |     cosmo_futex_wake(&barrier->_waiters, INT_MAX, barrier->_pshared); | ||||||
|     return PTHREAD_BARRIER_SERIAL_THREAD; |     return PTHREAD_BARRIER_SERIAL_THREAD; | ||||||
|   } |   } | ||||||
| 
 | 
 | ||||||
|   // wait for everyone else to arrive at barrier
 |   // wait for everyone else to arrive at barrier
 | ||||||
|   BLOCK_CANCELATION; |   BLOCK_CANCELATION; | ||||||
|   while ((n = atomic_load_explicit(&barrier->_waiters, memory_order_acquire))) |   while ((n = atomic_load_explicit(&barrier->_waiters, memory_order_acquire))) | ||||||
|     nsync_futex_wait_(&barrier->_waiters, n, barrier->_pshared, 0, 0); |     cosmo_futex_wait(&barrier->_waiters, n, barrier->_pshared, 0, 0); | ||||||
|   ALLOW_CANCELATION; |   ALLOW_CANCELATION; | ||||||
| 
 | 
 | ||||||
|   return 0; |   return 0; | ||||||
|  |  | ||||||
@@ -16,12 +16,12 @@
 │ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR             │
 │ PERFORMANCE OF THIS SOFTWARE.                                                │
 ╚─────────────────────────────────────────────────────────────────────────────*/
+#include "libc/cosmo.h"
 #include "libc/dce.h"
 #include "libc/intrin/atomic.h"
 #include "libc/limits.h"
 #include "libc/thread/thread.h"
 #include "third_party/nsync/cv.h"
-#include "third_party/nsync/futex.internal.h"
 
 __static_yoink("nsync_mu_lock");
 __static_yoink("nsync_mu_unlock");
@@ -63,6 +63,6 @@ errno_t pthread_cond_broadcast(pthread_cond_t *cond) {
   // roll forward the monotonic sequence
   atomic_fetch_add_explicit(&cond->_sequence, 1, memory_order_acq_rel);
   if (atomic_load_explicit(&cond->_waiters, memory_order_acquire))
-    nsync_futex_wake_((atomic_int *)&cond->_sequence, INT_MAX, cond->_pshared);
+    cosmo_futex_wake((atomic_int *)&cond->_sequence, INT_MAX, cond->_pshared);
   return 0;
 }
@@ -16,11 +16,11 @@
 │ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR             │
 │ PERFORMANCE OF THIS SOFTWARE.                                                │
 ╚─────────────────────────────────────────────────────────────────────────────*/
+#include "libc/cosmo.h"
 #include "libc/dce.h"
 #include "libc/intrin/atomic.h"
 #include "libc/thread/thread.h"
 #include "third_party/nsync/cv.h"
-#include "third_party/nsync/futex.internal.h"
 
 __static_yoink("nsync_mu_lock");
 __static_yoink("nsync_mu_unlock");
@@ -62,6 +62,6 @@ errno_t pthread_cond_signal(pthread_cond_t *cond) {
   // roll forward the monotonic sequence
   atomic_fetch_add_explicit(&cond->_sequence, 1, memory_order_acq_rel);
   if (atomic_load_explicit(&cond->_waiters, memory_order_acquire))
-    nsync_futex_wake_((atomic_int *)&cond->_sequence, 1, cond->_pshared);
+    cosmo_futex_wake((atomic_int *)&cond->_sequence, 1, cond->_pshared);
   return 0;
 }
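pthread_cond_signal() and pthread_cond_broadcast() now differ only in the wake count they pass: 1 releases at most one sleeper, INT_MAX releases them all. A small sketch of that contrast, under the same assumed prototypes as above.

// Wake-one versus wake-all on a sequence word, in the style of the two
// condition-variable call sites above. Illustrative names only.
#include <cosmo.h>
#include <limits.h>
#include <pthread.h>
#include <stdatomic.h>

static atomic_int seq;  // stand-in for cond->_sequence

static void signal_one(void) {   // pthread_cond_signal() style
  atomic_fetch_add_explicit(&seq, 1, memory_order_acq_rel);
  cosmo_futex_wake(&seq, 1, PTHREAD_PROCESS_PRIVATE);
}

static void broadcast_all(void) {  // pthread_cond_broadcast() style
  atomic_fetch_add_explicit(&seq, 1, memory_order_acq_rel);
  cosmo_futex_wake(&seq, INT_MAX, PTHREAD_PROCESS_PRIVATE);
}

int main(void) {
  signal_one();     // at most one sleeping thread is released
  broadcast_all();  // every sleeping thread is released
}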
@@ -18,6 +18,8 @@
 ╚─────────────────────────────────────────────────────────────────────────────*/
 #include "libc/calls/calls.h"
 #include "libc/calls/cp.internal.h"
+#include "libc/calls/struct/timespec.h"
+#include "libc/cosmo.h"
 #include "libc/dce.h"
 #include "libc/errno.h"
 #include "libc/intrin/atomic.h"
@@ -28,7 +30,6 @@
 #include "libc/thread/thread2.h"
 #include "third_party/nsync/common.internal.h"
 #include "third_party/nsync/cv.h"
-#include "third_party/nsync/futex.internal.h"
 #include "third_party/nsync/time.h"
 
 __static_yoink("nsync_mu_lock");
@@ -74,7 +75,7 @@ static errno_t pthread_cond_timedwait_impl(pthread_cond_t *cond,
   int rc;
   struct PthreadWait waiter = {cond, mutex};
   pthread_cleanup_push(pthread_cond_leave, &waiter);
-  rc = nsync_futex_wait_((atomic_int *)&cond->_sequence, seq1, cond->_pshared,
+  rc = cosmo_futex_wait((atomic_int *)&cond->_sequence, seq1, cond->_pshared,
                         cond->_clock, abstime);
   pthread_cleanup_pop(true);
   if (rc == -EAGAIN)
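The timed wait above passes the condition's clock and an absolute deadline straight through, and a negative return is a negated errno (the caller checks -EAGAIN for a changed sequence value). Here is a hedged sketch of the same calling pattern with CLOCK_REALTIME and a one-second deadline; the deadline handling is illustrative, not taken from the commit.

// Absolute-deadline wait in the style of pthread_cond_timedwait() above.
#include <cosmo.h>
#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

int main(void) {
  atomic_int word = 0;
  struct timespec deadline;
  clock_gettime(CLOCK_REALTIME, &deadline);
  deadline.tv_sec += 1;  // give up after roughly one second
  int rc = cosmo_futex_wait(&word, 0, PTHREAD_PROCESS_PRIVATE,
                            CLOCK_REALTIME, &deadline);
  if (rc == -EAGAIN)
    puts("word no longer held the expected value");
  else if (rc == -ETIMEDOUT)
    puts("deadline expired");
  else if (rc < 0)
    printf("wait failed: %s\n", strerror(-rc));
  else
    puts("woken");
}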
@@ -18,6 +18,7 @@
 ╚─────────────────────────────────────────────────────────────────────────────*/
 #include "libc/assert.h"
 #include "libc/atomic.h"
+#include "libc/cosmo.h"
 #include "libc/cxxabi.h"
 #include "libc/dce.h"
 #include "libc/intrin/atomic.h"
@@ -33,7 +34,6 @@
 #include "libc/thread/posixthread.internal.h"
 #include "libc/thread/thread.h"
 #include "libc/thread/tls.h"
-#include "third_party/nsync/futex.internal.h"
 #include "third_party/nsync/wait_s.internal.h"
 
 /**
@@ -137,7 +137,7 @@ wontreturn void pthread_exit(void *rc) {
   // note that the main thread is joinable by child threads
   if (pt->pt_flags & PT_STATIC) {
     atomic_store_explicit(&tib->tib_tid, 0, memory_order_release);
-    nsync_futex_wake_((atomic_int *)&tib->tib_tid, INT_MAX,
+    cosmo_futex_wake((atomic_int *)&tib->tib_tid, INT_MAX,
                      !IsWindows() && !IsXnu());
     _Exit1(0);
   }
@@ -20,6 +20,8 @@
 #include "libc/calls/cp.internal.h"
 #include "libc/calls/struct/timespec.h"
 #include "libc/calls/struct/timespec.internal.h"
+#include "libc/cosmo.h"
+#include "libc/dce.h"
 #include "libc/errno.h"
 #include "libc/fmt/itoa.h"
 #include "libc/intrin/atomic.h"
@@ -30,7 +32,6 @@
 #include "libc/thread/posixthread.internal.h"
 #include "libc/thread/thread2.h"
 #include "libc/thread/tls.h"
-#include "third_party/nsync/futex.internal.h"
 
 static const char *DescribeReturnValue(char buf[30], int err, void **value) {
   char *p = buf;
@@ -75,7 +76,7 @@ static errno_t _pthread_wait(atomic_int *ctid, struct timespec *abstime) {
     if (!(err = pthread_testcancel_np())) {
       BEGIN_CANCELATION_POINT;
       while ((x = atomic_load_explicit(ctid, memory_order_acquire))) {
-        e = nsync_futex_wait_(ctid, x, !IsWindows() && !IsXnu(), CLOCK_REALTIME,
+        e = cosmo_futex_wait(ctid, x, !IsWindows() && !IsXnu(), CLOCK_REALTIME,
                              abstime);
        if (e == -ECANCELED) {
          err = ECANCELED;
@@ -19,6 +19,7 @@
 #include "libc/assert.h"
 #include "libc/calls/calls.h"
 #include "libc/calls/syscall-sysv.internal.h"
+#include "libc/cosmo.h"
 #include "libc/dce.h"
 #include "libc/errno.h"
 #include "libc/intrin/atomic.h"
@@ -26,7 +27,6 @@
 #include "libc/runtime/syslib.internal.h"
 #include "libc/sysv/errfuns.h"
 #include "libc/thread/semaphore.h"
-#include "third_party/nsync/futex.internal.h"
 
 /**
  * Unlocks semaphore.
@@ -46,7 +46,7 @@ int sem_post(sem_t *sem) {
   old = atomic_fetch_add_explicit(&sem->sem_value, 1, memory_order_acq_rel);
   unassert(old > INT_MIN);
   if (old >= 0) {
-    wakeups = nsync_futex_wake_(&sem->sem_value, 1, sem->sem_pshared);
+    wakeups = cosmo_futex_wake(&sem->sem_value, 1, sem->sem_pshared);
     npassert(wakeups >= 0);
     rc = 0;
   } else {
@@ -22,6 +22,7 @@
 #include "libc/calls/struct/timespec.h"
 #include "libc/calls/struct/timespec.internal.h"
 #include "libc/calls/syscall-sysv.internal.h"
+#include "libc/cosmo.h"
 #include "libc/dce.h"
 #include "libc/errno.h"
 #include "libc/intrin/atomic.h"
@@ -32,7 +33,6 @@
 #include "libc/sysv/errfuns.h"
 #include "libc/thread/semaphore.h"
 #include "libc/thread/thread.h"
-#include "third_party/nsync/futex.internal.h"
 
 static void sem_delay(int n) {
   volatile int i;
@@ -119,7 +119,7 @@ int sem_timedwait(sem_t *sem, const struct timespec *abstime) {
 
   do {
     if (!(v = atomic_load_explicit(&sem->sem_value, memory_order_relaxed))) {
-      rc = nsync_futex_wait_(&sem->sem_value, v, sem->sem_pshared,
+      rc = cosmo_futex_wait(&sem->sem_value, v, sem->sem_pshared,
                             CLOCK_REALTIME, abstime);
       if (rc == -EINTR || rc == -ECANCELED) {
         errno = -rc;
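sem_timedwait() keeps its usual structure: reread the count, futex-sleep while it is zero, otherwise try to take a unit. Below is a compact, untimed acquire loop in that spirit, assuming the public wait call behaves like the internal one it replaces (0 on wake, -EAGAIN on a stale expected value, or a negative errno such as -EINTR or -ECANCELED); it is a sketch, not the libc implementation.

// Counting-semaphore acquire: CAS a decrement, sleep while the count is zero.
#include <cosmo.h>
#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <time.h>

static bool sem_acquire(atomic_int *count, char pshared) {
  for (;;) {
    int v = atomic_load_explicit(count, memory_order_relaxed);
    if (!v) {
      // sleeps only while *count still reads 0; NULL deadline waits forever
      int rc = cosmo_futex_wait(count, 0, pshared, CLOCK_REALTIME, NULL);
      if (rc && rc != -EAGAIN && rc != -EINTR)
        return false;  // e.g. -ECANCELED
      continue;
    }
    if (atomic_compare_exchange_weak_explicit(count, &v, v - 1,
                                              memory_order_acquire,
                                              memory_order_relaxed))
      return true;
  }
}

int main(void) {
  static atomic_int sem;
  atomic_fetch_add_explicit(&sem, 1, memory_order_release);   // "post" once
  cosmo_futex_wake(&sem, 1, PTHREAD_PROCESS_PRIVATE);
  return sem_acquire(&sem, PTHREAD_PROCESS_PRIVATE) ? 0 : 1;  // "wait"
}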
@@ -9,8 +9,6 @@
 
 #ifdef __COSMOPOLITAN__
 #include <cosmo.h>
-#include "libc/thread/thread.h"
-#include "third_party/nsync/futex.internal.h"
 #endif
 
 #include <assert.h>
							
								
								
									
third_party/lua/lunix.c (vendored): 7 changes
@@ -109,8 +109,9 @@
 #include "third_party/lua/lgc.h"
 #include "third_party/lua/lua.h"
 #include "third_party/lua/luaconf.h"
-#include "third_party/nsync/futex.internal.h"
 #include "libc/sysv/consts/clock.h"
+#include "libc/cosmo.h"
+#include "libc/cosmo.h"
 #include "tool/net/luacheck.h"
 
 #define DNS_NAME_MAX  253
@@ -2855,7 +2856,7 @@ static int LuaUnixMemoryWait(lua_State *L) {
     deadline = &ts;
   }
   BEGIN_CANCELATION_POINT;
-  rc = nsync_futex_wait_((atomic_int *)GetWord(L), expect,
+  rc = cosmo_futex_wait((atomic_int *)GetWord(L), expect,
                          PTHREAD_PROCESS_SHARED, CLOCK_REALTIME, deadline);
   END_CANCELATION_POINT;
   if (rc < 0) errno = -rc, rc = -1;
@@ -2867,7 +2868,7 @@
 static int LuaUnixMemoryWake(lua_State *L) {
   int count, woken;
   count = luaL_optinteger(L, 3, INT_MAX);
-  woken = nsync_futex_wake_((atomic_int *)GetWord(L), count,
+  woken = cosmo_futex_wake((atomic_int *)GetWord(L), count,
                             PTHREAD_PROCESS_SHARED);
   npassert(woken >= 0);
   return ReturnInteger(L, woken);
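The Lua binding passes PTHREAD_PROCESS_SHARED because the futex word may live in memory shared between processes. Here is a hedged sketch of that mode using an anonymous MAP_SHARED mapping and a fork; the setup code is illustrative, not taken from the commit.

// One process wakes a waiter in another through a shared futex word.
#include <cosmo.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void) {
  atomic_int *word = mmap(0, sizeof(atomic_int), PROT_READ | PROT_WRITE,
                          MAP_SHARED | MAP_ANONYMOUS, -1, 0);
  if (word == MAP_FAILED) return 1;
  if (!fork()) {
    // child: block until the parent changes *word away from 0
    while (!atomic_load_explicit(word, memory_order_acquire))
      cosmo_futex_wait(word, 0, PTHREAD_PROCESS_SHARED, 0, 0);
    _exit(0);
  }
  sleep(1);  // crude way to let the child reach its wait
  atomic_store_explicit(word, 1, memory_order_release);
  cosmo_futex_wake(word, 1, PTHREAD_PROCESS_SHARED);
  wait(0);
  puts("child observed the update");
}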
							
								
								
									
third_party/nsync/BUILD.mk (vendored): 1 change
@@ -27,7 +27,6 @@ THIRD_PARTY_NSYNC_A_DIRECTDEPS =			\
 	LIBC_INTRIN					\
 	LIBC_NEXGEN32E					\
 	LIBC_NT_KERNEL32				\
-	LIBC_NT_SYNCHRONIZATION				\
 	LIBC_STR					\
 	LIBC_SYSV					\
 	LIBC_SYSV_CALLS
							
								
								
									
third_party/nsync/futex.internal.h (vendored): 17 changes
@@ -1,17 +0,0 @@
-#ifndef NSYNC_FUTEX_INTERNAL_H_
-#define NSYNC_FUTEX_INTERNAL_H_
-#include "libc/calls/struct/timespec.h"
-#include "libc/dce.h"
-COSMOPOLITAN_C_START_
-
-#ifndef __cplusplus
-#define _FUTEX_ATOMIC(x) _Atomic(x)
-#else
-#define _FUTEX_ATOMIC(x) x
-#endif
-
-int nsync_futex_wake_(_FUTEX_ATOMIC(int) *, int, char);
-int nsync_futex_wait_(_FUTEX_ATOMIC(int) *, int, char, int, const struct timespec *);
-
-COSMOPOLITAN_C_END_
-#endif /* NSYNC_FUTEX_INTERNAL_H_ */
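With this internal header gone, callers are expected to pick the declarations up from libc/cosmo.h (included as <cosmo.h> by third-party code). The new header itself is not part of this excerpt, so the following only shows the shape implied by the deleted prototypes and by the call sites in this diff, together with the return conventions those call sites check for; it is an assumption, not a copy of the header.

/* Presumed public replacements, inferred from the call sites above and not
 * copied from libc/cosmo.h: wake returns how many waiters were roused
 * (>= 0); wait returns 0, or a negative errno such as -EAGAIN, -EINTR,
 * -ECANCELED, or -ETIMEDOUT. */
#include <stdatomic.h>
#include <time.h>

int cosmo_futex_wake(atomic_int *addr, int count, char pshared);
int cosmo_futex_wait(atomic_int *addr, int expect, char pshared,
                     int clock, const struct timespec *abstime);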
							
								
								
									
third_party/nsync/mu_semaphore_futex.c (vendored): 10 changes
@@ -21,7 +21,9 @@
 #include "libc/thread/thread.h"
 #include "third_party/nsync/atomic.h"
 #include "third_party/nsync/atomic.internal.h"
-#include "third_party/nsync/futex.internal.h"
+#include "libc/cosmo.h"
+#include "libc/calls/struct/timespec.h"
+#include "libc/cosmo.h"
 #include "third_party/nsync/mu_semaphore.internal.h"
 
 /**
@@ -61,7 +63,7 @@ errno_t nsync_mu_semaphore_p_futex (nsync_semaphore *s) {
 		i = ATM_LOAD ((nsync_atomic_uint32_ *) &f->i);
 		if (i == 0) {
 			int futex_result;
-			futex_result = -nsync_futex_wait_ (
+			futex_result = -cosmo_futex_wait (
 				(atomic_int *)&f->i, i,
 				PTHREAD_PROCESS_PRIVATE, 0, 0);
 			ASSERT (futex_result == 0 ||
@@ -100,7 +102,7 @@ errno_t nsync_mu_semaphore_p_with_deadline_futex (nsync_semaphore *s, int clock,
 				ts_buf.tv_nsec = NSYNC_TIME_NSEC (abs_deadline);
 				ts = &ts_buf;
 			}
-			futex_result = nsync_futex_wait_ ((atomic_int *)&f->i, i,
+			futex_result = cosmo_futex_wait ((atomic_int *)&f->i, i,
 							 PTHREAD_PROCESS_PRIVATE,
 							 clock, ts);
 			ASSERT (futex_result == 0 ||
@@ -136,5 +138,5 @@ void nsync_mu_semaphore_v_futex (nsync_semaphore *s) {
 		       (nsync_atomic_uint32_ *) &f->i, &old_value, old_value+1,
 		       memory_order_release, memory_order_relaxed)) {
 	}
-	ASSERT (nsync_futex_wake_ ((atomic_int *)&f->i, 1, PTHREAD_PROCESS_PRIVATE) >= 0);
+	ASSERT (cosmo_futex_wake ((atomic_int *)&f->i, 1, PTHREAD_PROCESS_PRIVATE) >= 0);
 }
							
								
								
									
third_party/openmp/kmp_lock.cpp (vendored): 6 changes
@@ -23,7 +23,7 @@
 
 #if KMP_USE_FUTEX
 #ifdef __COSMOPOLITAN__
-#include "third_party/nsync/futex.internal.h"
+#include <cosmo.h>
 #else
 #include <sys/syscall.h>
 #include <unistd.h>
@@ -380,7 +380,7 @@ __kmp_acquire_futex_lock_timed_template(kmp_futex_lock_t *lck, kmp_int32 gtid) {
 
     long rc;
 #ifdef __COSMOPOLITAN__
-    if ((rc = nsync_futex_wait_((int *)&(lck->lk.poll), poll_val, false, 0, NULL)) != 0) {
+    if ((rc = cosmo_futex_wait((int *)&(lck->lk.poll), poll_val, false, 0, NULL)) != 0) {
 #else
     if ((rc = syscall(__NR_futex, (int *)&(lck->lk.poll), FUTEX_WAIT, poll_val, NULL,
                       NULL, 0)) != 0) {
@@ -462,7 +462,7 @@ int __kmp_release_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
             ("__kmp_release_futex_lock: lck:%p, T#%d futex_wake 1 thread\n",
              lck, gtid));
 #ifdef __COSMOPOLITAN__
-    nsync_futex_wake_((int *)&(lck->lk.poll), 1, false);
+    cosmo_futex_wake((int *)&(lck->lk.poll), 1, false);
 #else
     syscall(__NR_futex, &(lck->lk.poll), FUTEX_WAKE, KMP_LOCK_BUSY(1, futex),
             NULL, NULL, 0);
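The OpenMP lock shows the classic futex locking shape: spin on an atomic word, sleep in the kernel only under contention, and wake one waiter on release. Below is a standalone three-state variant (0 free, 1 held, 2 held with possible waiters) rebuilt on the public calls; it follows Drepper's well-known futex mutex rather than the kmp_lock code, and assumes the same prototypes as the sketches above.

// Minimal futex-backed mutex: fast path is a single CAS, slow path parks in
// the kernel while the word reads 2.
#include <cosmo.h>
#include <pthread.h>
#include <stdatomic.h>

static void futex_lock(atomic_int *m) {
  int c = 0;
  if (atomic_compare_exchange_strong_explicit(m, &c, 1, memory_order_acquire,
                                              memory_order_relaxed))
    return;  // uncontended fast path: 0 -> 1
  if (c != 2)
    c = atomic_exchange_explicit(m, 2, memory_order_acquire);
  while (c != 0) {
    // sleep while the word still reads 2; spurious wakeups just loop
    cosmo_futex_wait(m, 2, PTHREAD_PROCESS_PRIVATE, 0, 0);
    c = atomic_exchange_explicit(m, 2, memory_order_acquire);
  }
}

static void futex_unlock(atomic_int *m) {
  if (atomic_exchange_explicit(m, 0, memory_order_release) == 2)
    cosmo_futex_wake(m, 1, PTHREAD_PROCESS_PRIVATE);  // someone may be asleep
}

int main(void) {
  static atomic_int m;
  futex_lock(&m);
  futex_unlock(&m);
}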