mirror of
https://github.com/jart/cosmopolitan.git
synced 2025-07-02 17:28:30 +00:00
Delve into clock rabbit hole
The worst issue I had with consts.sh for clock_gettime is how it defined too many clocks. So I looked into these clocks all day to figure out how they overlap in functionality. I discovered counter-intuitive things such as how CLOCK_MONOTONIC should be CLOCK_UPTIME on MacOS and BSD, and that CLOCK_BOOTTIME should be CLOCK_MONOTONIC on MacOS / BSD. Windows 10 also has some incredible new APIs, that let us simplify clock_gettime(). - Linux CLOCK_REALTIME -> GetSystemTimePreciseAsFileTime() - Linux CLOCK_MONOTONIC -> QueryUnbiasedInterruptTimePrecise() - Linux CLOCK_MONOTONIC_RAW -> QueryUnbiasedInterruptTimePrecise() - Linux CLOCK_REALTIME_COARSE -> GetSystemTimeAsFileTime() - Linux CLOCK_MONOTONIC_COARSE -> QueryUnbiasedInterruptTime() - Linux CLOCK_BOOTTIME -> QueryInterruptTimePrecise() Documentation on the clock crew has been added to clock_gettime() in the docstring and in redbean's documentation too. You can read that to learn interesting facts about eight essential clocks that survived this purge. This is original research you will not find on Google, OpenAI, or Claude. I've tested this change by porting *NSYNC to become fully clock agnostic since it has extensive tests for spotting irregularities in time. I have also included these tests in the default build so they no longer need to be run manually. Both CLOCK_REALTIME and CLOCK_MONOTONIC are good across the entire amd64 and arm64 test fleets.
This commit is contained in:
parent
8f8145105c
commit
dd8544c3bd
87 changed files with 939 additions and 900 deletions
|
@ -48,12 +48,13 @@ LIBC_CALLS_A_DIRECTDEPS = \
|
|||
LIBC_NT_PDH \
|
||||
LIBC_NT_POWRPROF \
|
||||
LIBC_NT_PSAPI \
|
||||
LIBC_NT_REALTIME \
|
||||
LIBC_NT_SYNCHRONIZATION \
|
||||
LIBC_NT_WS2_32 \
|
||||
LIBC_STR \
|
||||
LIBC_SYSV \
|
||||
LIBC_SYSV_CALLS \
|
||||
THIRD_PARTY_COMPILER_RT
|
||||
THIRD_PARTY_COMPILER_RT \
|
||||
|
||||
LIBC_CALLS_A_DEPS := \
|
||||
$(call uniq,$(foreach x,$(LIBC_CALLS_A_DIRECTDEPS),$($(x))))
|
||||
|
|
|
@ -20,24 +20,37 @@
|
|||
#include "libc/dce.h"
|
||||
#include "libc/intrin/describeflags.h"
|
||||
#include "libc/intrin/strace.h"
|
||||
#include "libc/runtime/clktck.h"
|
||||
#include "libc/sysv/consts/clock.h"
|
||||
#include "libc/sysv/errfuns.h"
|
||||
#include "libc/time.h"
|
||||
|
||||
static int sys_clock_getres_poly(int clock, struct timespec *ts, int64_t real,
|
||||
int64_t real_coarse, int64_t boot) {
|
||||
ts->tv_sec = 0;
|
||||
if (clock == CLOCK_REALTIME) {
|
||||
ts->tv_nsec = real;
|
||||
static uint64_t hz_to_nanos(uint64_t frequency) {
|
||||
if (!frequency)
|
||||
return 0;
|
||||
} else if (clock == CLOCK_REALTIME_COARSE) {
|
||||
ts->tv_nsec = real_coarse;
|
||||
uint64_t quotient = 1000000000 / frequency;
|
||||
uint64_t remainder = 1000000000 % frequency;
|
||||
if (remainder > 0)
|
||||
quotient += 1;
|
||||
return quotient;
|
||||
}
|
||||
|
||||
static int sys_clock_getres_poly(int clock, struct timespec *ts, int64_t prec) {
|
||||
if (ts)
|
||||
ts->tv_sec = 0;
|
||||
if (clock == CLOCK_REALTIME || //
|
||||
clock == CLOCK_BOOTTIME || //
|
||||
clock == CLOCK_MONOTONIC || //
|
||||
clock == CLOCK_MONOTONIC_RAW) {
|
||||
if (ts)
|
||||
ts->tv_nsec = prec;
|
||||
return 0;
|
||||
} else if (clock == CLOCK_MONOTONIC) {
|
||||
ts->tv_nsec = 10;
|
||||
return 0;
|
||||
} else if (clock == CLOCK_BOOTTIME) {
|
||||
ts->tv_nsec = boot;
|
||||
} else if (clock == CLOCK_REALTIME_COARSE ||
|
||||
clock == CLOCK_MONOTONIC_COARSE ||
|
||||
clock == CLOCK_THREAD_CPUTIME_ID ||
|
||||
clock == CLOCK_PROCESS_CPUTIME_ID) {
|
||||
if (ts)
|
||||
*ts = timespec_fromnanos(hz_to_nanos(CLK_TCK));
|
||||
return 0;
|
||||
} else {
|
||||
return einval();
|
||||
|
@ -45,11 +58,11 @@ static int sys_clock_getres_poly(int clock, struct timespec *ts, int64_t real,
|
|||
}
|
||||
|
||||
static int sys_clock_getres_nt(int clock, struct timespec *ts) {
|
||||
return sys_clock_getres_poly(clock, ts, 100, 1000000, 1000000);
|
||||
return sys_clock_getres_poly(clock, ts, 100);
|
||||
}
|
||||
|
||||
static int sys_clock_getres_xnu(int clock, struct timespec *ts) {
|
||||
return sys_clock_getres_poly(clock, ts, 1000, 1000, 1000);
|
||||
return sys_clock_getres_poly(clock, ts, 1000);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -18,42 +18,63 @@
|
|||
╚─────────────────────────────────────────────────────────────────────────────*/
|
||||
#include "libc/atomic.h"
|
||||
#include "libc/calls/struct/timespec.h"
|
||||
#include "libc/calls/struct/timespec.internal.h"
|
||||
#include "libc/calls/struct/timeval.h"
|
||||
#include "libc/cosmo.h"
|
||||
#include "libc/errno.h"
|
||||
#include "libc/dce.h"
|
||||
#include "libc/nexgen32e/rdtsc.h"
|
||||
#include "libc/nexgen32e/x86feature.h"
|
||||
|
||||
/**
|
||||
* @fileoverview Fast Monotonic Clock Polyfill for XNU/NT.
|
||||
* @fileoverview Monotonic clock polyfill.
|
||||
*
|
||||
* This isn't quite `CLOCK_MONOTONIC` and isn't quite `CLOCK_BOOTTIME`
|
||||
* either; however it is fast and almost always goes in one direction.
|
||||
*
|
||||
* Intel architecture guarantees that a mapping exists between rdtsc &
|
||||
* nanoseconds only if the cpu advertises invariant timestamps support
|
||||
* however this shouldn't matter for a monotonic clock since we really
|
||||
* don't want to have it tick while suspended. Sadly that shall happen
|
||||
* since nearly all x86 microprocessors support invariant tsc which is
|
||||
* why we try to avoid this fallback when possible.
|
||||
*/
|
||||
|
||||
int sys_sysctl(int *, unsigned, void *, size_t *, void *, size_t) libcesque;
|
||||
|
||||
static struct {
|
||||
atomic_uint once;
|
||||
struct timespec base_wall;
|
||||
uint64_t base_tick;
|
||||
unsigned long base;
|
||||
struct timespec boot;
|
||||
} g_mono;
|
||||
|
||||
static struct timespec get_boot_time_xnu(void) {
|
||||
struct timeval t;
|
||||
size_t n = sizeof(t);
|
||||
int mib[] = {1 /* CTL_KERN */, 21 /* KERN_BOOTTIME */};
|
||||
if (sys_sysctl(mib, 2, &t, &n, 0, 0) == -1)
|
||||
__builtin_trap();
|
||||
return timeval_totimespec(t);
|
||||
}
|
||||
|
||||
static void sys_clock_gettime_mono_init(void) {
|
||||
g_mono.base_wall = timespec_real();
|
||||
g_mono.base_tick = rdtsc();
|
||||
g_mono.base = rdtsc();
|
||||
if (IsXnu()) {
|
||||
g_mono.boot = get_boot_time_xnu();
|
||||
} else {
|
||||
__builtin_trap();
|
||||
}
|
||||
}
|
||||
|
||||
int sys_clock_gettime_mono(struct timespec *time) {
|
||||
uint64_t nanos;
|
||||
uint64_t cycles;
|
||||
#ifdef __x86_64__
|
||||
// intel architecture guarantees that a mapping exists between rdtsc &
|
||||
// nanoseconds only if the cpu advertises invariant timestamps support
|
||||
if (!X86_HAVE(INVTSC))
|
||||
return -EINVAL;
|
||||
#endif
|
||||
cosmo_once(&g_mono.once, sys_clock_gettime_mono_init);
|
||||
cycles = rdtsc() - g_mono.base_tick;
|
||||
// ensure we get the full 64 bits of counting, which avoids wraparound
|
||||
cycles = rdtsc() - g_mono.base;
|
||||
// this is a crude approximation, that's worked reasonably well so far
|
||||
// only the kernel knows the actual mapping between rdtsc and nanosecs
|
||||
// which we could attempt to measure ourselves using clock_gettime but
|
||||
// we'd need to impose 100 ms of startup latency for a guess this good
|
||||
nanos = cycles / 3;
|
||||
*time = timespec_add(g_mono.base_wall, timespec_fromnanos(nanos));
|
||||
*time = timespec_add(g_mono.boot, timespec_fromnanos(nanos));
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -25,6 +25,7 @@
|
|||
#include "libc/nt/runtime.h"
|
||||
#include "libc/nt/synchronization.h"
|
||||
#include "libc/nt/thread.h"
|
||||
#include "libc/nt/time.h"
|
||||
|
||||
#define _CLOCK_REALTIME 0
|
||||
#define _CLOCK_MONOTONIC 1
|
||||
|
@ -32,64 +33,82 @@
|
|||
#define _CLOCK_BOOTTIME 3
|
||||
#define _CLOCK_PROCESS_CPUTIME_ID 4
|
||||
#define _CLOCK_THREAD_CPUTIME_ID 5
|
||||
|
||||
static struct {
|
||||
uint64_t base;
|
||||
uint64_t freq;
|
||||
} g_winclock;
|
||||
#define _CLOCK_MONOTONIC_COARSE 6
|
||||
|
||||
textwindows int sys_clock_gettime_nt(int clock, struct timespec *ts) {
|
||||
uint64_t t;
|
||||
uint64_t hectons;
|
||||
struct NtFileTime ft, ftExit, ftUser, ftKernel, ftCreation;
|
||||
switch (clock) {
|
||||
case _CLOCK_REALTIME:
|
||||
if (ts) {
|
||||
GetSystemTimePreciseAsFileTime(&ft);
|
||||
*ts = FileTimeToTimeSpec(ft);
|
||||
}
|
||||
GetSystemTimePreciseAsFileTime(&ft);
|
||||
*ts = FileTimeToTimeSpec(ft);
|
||||
return 0;
|
||||
case _CLOCK_REALTIME_COARSE:
|
||||
if (ts) {
|
||||
GetSystemTimeAsFileTime(&ft);
|
||||
*ts = FileTimeToTimeSpec(ft);
|
||||
}
|
||||
GetSystemTimeAsFileTime(&ft);
|
||||
*ts = FileTimeToTimeSpec(ft);
|
||||
return 0;
|
||||
case _CLOCK_MONOTONIC:
|
||||
if (ts) {
|
||||
QueryPerformanceCounter(&t);
|
||||
t = ((t - g_winclock.base) * 1000000000) / g_winclock.freq;
|
||||
*ts = timespec_fromnanos(t);
|
||||
}
|
||||
//
|
||||
// "If you need a higher resolution timer, use the
|
||||
// QueryUnbiasedInterruptTime function, a multimedia timer, or a
|
||||
// high-resolution timer. The elapsed time retrieved by the
|
||||
// QueryUnbiasedInterruptTime function includes only time that
|
||||
// the system spends in the working state."
|
||||
//
|
||||
// —Quoth MSDN § Windows Time
|
||||
//
|
||||
QueryUnbiasedInterruptTimePrecise(&hectons);
|
||||
*ts = timespec_fromnanos(hectons * 100);
|
||||
return 0;
|
||||
case _CLOCK_MONOTONIC_COARSE:
|
||||
//
|
||||
// "QueryUnbiasedInterruptTimePrecise is similar to the
|
||||
// QueryUnbiasedInterruptTime routine, but is more precise. The
|
||||
// interrupt time reported by QueryUnbiasedInterruptTime is based
|
||||
// on the latest tick of the system clock timer. The system clock
|
||||
// timer is the hardware timer that periodically generates
|
||||
// interrupts for the system clock. The uniform period between
|
||||
// system clock timer interrupts is referred to as a system clock
|
||||
// tick, and is typically in the range of 0.5 milliseconds to
|
||||
// 15.625 milliseconds, depending on the hardware platform. The
|
||||
// interrupt time value retrieved by QueryUnbiasedInterruptTime
|
||||
// is accurate within a system clock tick. ¶To provide a system
|
||||
// time value that is more precise than that of
|
||||
// QueryUnbiasedInterruptTime, QueryUnbiasedInterruptTimePrecise
|
||||
// reads the timer hardware directly, therefore a
|
||||
// QueryUnbiasedInterruptTimePrecise call can be slower than a
|
||||
// QueryUnbiasedInterruptTime call."
|
||||
//
|
||||
// —Quoth MSDN § QueryUnbiasedInterruptTimePrecise
|
||||
//
|
||||
QueryUnbiasedInterruptTime(&hectons);
|
||||
*ts = timespec_fromnanos(hectons * 100);
|
||||
return 0;
|
||||
case _CLOCK_BOOTTIME:
|
||||
if (ts) {
|
||||
*ts = timespec_frommillis(GetTickCount64());
|
||||
}
|
||||
//
|
||||
// "Unbiased interrupt-time means that only time that the system
|
||||
// is in the working state is counted; therefore, the interrupt
|
||||
// time count is not "biased" by time the system spends in sleep
|
||||
// or hibernation."
|
||||
//
|
||||
// —Quoth MSDN § Interrupt Time
|
||||
//
|
||||
QueryInterruptTimePrecise(&hectons);
|
||||
*ts = timespec_fromnanos(hectons * 100);
|
||||
return 0;
|
||||
case _CLOCK_PROCESS_CPUTIME_ID:
|
||||
if (ts) {
|
||||
GetProcessTimes(GetCurrentProcess(), &ftCreation, &ftExit, &ftKernel,
|
||||
&ftUser);
|
||||
*ts = WindowsDurationToTimeSpec(ReadFileTime(ftUser) +
|
||||
ReadFileTime(ftKernel));
|
||||
}
|
||||
GetProcessTimes(GetCurrentProcess(), &ftCreation, &ftExit, &ftKernel,
|
||||
&ftUser);
|
||||
*ts = WindowsDurationToTimeSpec(ReadFileTime(ftUser) +
|
||||
ReadFileTime(ftKernel));
|
||||
return 0;
|
||||
case _CLOCK_THREAD_CPUTIME_ID:
|
||||
if (ts) {
|
||||
GetThreadTimes(GetCurrentThread(), &ftCreation, &ftExit, &ftKernel,
|
||||
&ftUser);
|
||||
*ts = WindowsDurationToTimeSpec(ReadFileTime(ftUser) +
|
||||
ReadFileTime(ftKernel));
|
||||
}
|
||||
GetThreadTimes(GetCurrentThread(), &ftCreation, &ftExit, &ftKernel,
|
||||
&ftUser);
|
||||
*ts = WindowsDurationToTimeSpec(ReadFileTime(ftUser) +
|
||||
ReadFileTime(ftKernel));
|
||||
return 0;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
__attribute__((__constructor__(40))) static textstartup void winclock_init() {
|
||||
if (IsWindows()) {
|
||||
QueryPerformanceCounter(&g_winclock.base);
|
||||
QueryPerformanceFrequency(&g_winclock.freq);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -25,9 +25,6 @@
|
|||
#include "libc/sysv/consts/clock.h"
|
||||
#ifdef __x86_64__
|
||||
|
||||
#define CTL_KERN 1
|
||||
#define KERN_BOOTTIME 21
|
||||
|
||||
int sys_clock_gettime_xnu(int clock, struct timespec *ts) {
|
||||
long ax, dx;
|
||||
if (clock == CLOCK_REALTIME) {
|
||||
|
@ -47,31 +44,20 @@ int sys_clock_gettime_xnu(int clock, struct timespec *ts) {
|
|||
// 2. old xnu returns *ts in rax:rdx regs
|
||||
//
|
||||
// we assume this system call always succeeds
|
||||
if (ts) {
|
||||
asm volatile("syscall"
|
||||
: "=a"(ax), "=d"(dx)
|
||||
: "0"(0x2000000 | 116), "D"(ts), "S"(0), "1"(0)
|
||||
: "rcx", "r8", "r9", "r10", "r11", "memory");
|
||||
if (ax) {
|
||||
ts->tv_sec = ax;
|
||||
ts->tv_nsec = dx;
|
||||
}
|
||||
ts->tv_nsec *= 1000;
|
||||
asm volatile("syscall"
|
||||
: "=a"(ax), "=d"(dx)
|
||||
: "0"(0x2000000 | 116), "D"(ts), "S"(0), "1"(0)
|
||||
: "rcx", "r8", "r9", "r10", "r11", "memory");
|
||||
if (ax) {
|
||||
ts->tv_sec = ax;
|
||||
ts->tv_nsec = dx;
|
||||
}
|
||||
ts->tv_nsec *= 1000;
|
||||
return 0;
|
||||
} else if (clock == CLOCK_MONOTONIC) {
|
||||
if (!ts)
|
||||
return 0;
|
||||
} else if (clock == CLOCK_BOOTTIME || //
|
||||
clock == CLOCK_MONOTONIC || //
|
||||
clock == CLOCK_MONOTONIC_COARSE) {
|
||||
return sys_clock_gettime_mono(ts);
|
||||
} else if (clock == CLOCK_BOOTTIME) {
|
||||
struct timeval x;
|
||||
size_t n = sizeof(x);
|
||||
int mib[] = {CTL_KERN, KERN_BOOTTIME};
|
||||
if (sysctl(mib, ARRAYLEN(mib), &x, &n, 0, 0) == -1)
|
||||
return -1;
|
||||
if (ts)
|
||||
*ts = timeval_totimespec(timeval_sub(timeval_real(), x));
|
||||
return 0;
|
||||
} else {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
|
|
@ -60,44 +60,89 @@ static int __clock_gettime_init(int clockid, struct timespec *ts) {
|
|||
}
|
||||
|
||||
static int clock_gettime_impl(int clock, struct timespec *ts) {
|
||||
int rc;
|
||||
if (!IsLinux())
|
||||
return __clock_gettime(clock, ts);
|
||||
TryAgain:
|
||||
|
||||
// Ensure fallback for old Linux sticks.
|
||||
if (clock == 4 /* CLOCK_MONOTONIC_RAW */)
|
||||
clock = CLOCK_MONOTONIC_RAW;
|
||||
|
||||
// Call appropriate implementation.
|
||||
rc = __clock_gettime(clock, ts);
|
||||
|
||||
// CLOCK_MONOTONIC_RAW is Linux 2.6.28+ so not available on RHEL5
|
||||
if (rc == -EINVAL && clock == 4 /* CLOCK_MONOTONIC_RAW */) {
|
||||
CLOCK_MONOTONIC_RAW = CLOCK_MONOTONIC;
|
||||
CLOCK_MONOTONIC_RAW_APPROX = CLOCK_MONOTONIC;
|
||||
goto TryAgain;
|
||||
}
|
||||
|
||||
return rc;
|
||||
// BSDs and sometimes Linux too will crash when `ts` is NULL
|
||||
// it's also nice to not have to check for null in polyfills
|
||||
struct timespec memory;
|
||||
if (!ts)
|
||||
ts = &memory;
|
||||
return __clock_gettime(clock, ts);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns nanosecond time.
|
||||
*
|
||||
* @param clock supports the following values across OSes:
|
||||
* - `CLOCK_REALTIME`
|
||||
* - `CLOCK_MONOTONIC`
|
||||
* - `CLOCK_MONOTONIC_RAW`
|
||||
* - `CLOCK_MONOTONIC_RAW_APPROX`
|
||||
* - `CLOCK_REALTIME_FAST`
|
||||
* - `CLOCK_REALTIME_COARSE`
|
||||
* - `CLOCK_REALTIME_PRECISE`
|
||||
* - `CLOCK_MONOTONIC_FAST`
|
||||
* - `CLOCK_MONOTONIC_COARSE`
|
||||
* - `CLOCK_MONOTONIC_PRECISE`
|
||||
* - `CLOCK_THREAD_CPUTIME_ID`
|
||||
* - `CLOCK_PROCESS_CPUTIME_ID`
|
||||
* The `clock` parameter may bo set to:
|
||||
*
|
||||
* - `CLOCK_REALTIME` returns a wall clock timestamp represented in
|
||||
* nanoseconds since the UNIX epoch (~1970). It'll count time in the
|
||||
* suspend state. This clock is subject to being smeared by various
|
||||
* adjustments made by NTP. These timestamps can have unpredictable
|
||||
* discontinuous jumps when clock_settime() is used. Therefore this
|
||||
* clock is the default clock for everything, even pthread condition
|
||||
* variables. Cosmopolitan guarantees this clock will never raise
|
||||
* `EINVAL` and also guarantees `CLOCK_REALTIME == 0` will always be
|
||||
* the case. On Windows this maps to GetSystemTimePreciseAsFileTime().
|
||||
* On platforms with vDSOs like Linux, Windows, and MacOS ARM64 this
|
||||
* should take about 20 nanoseconds.
|
||||
*
|
||||
* - `CLOCK_MONOTONIC` returns a timestamp with an unspecified epoch,
|
||||
* that should be when the system was powered on. These timestamps
|
||||
* shouldn't go backwards. Timestamps shouldn't count time spent in
|
||||
* the sleep, suspend, and hibernation states. These timestamps won't
|
||||
* be impacted by clock_settime(). These timestamps may be impacted by
|
||||
* frequency adjustments made by NTP. Cosmopolitan guarantees this
|
||||
* clock will never raise `EINVAL`. MacOS and BSDs use the word
|
||||
* "uptime" to describe this clock. On Windows this maps to
|
||||
* QueryUnbiasedInterruptTimePrecise().
|
||||
*
|
||||
* - `CLOCK_BOOTTIME` is a monotonic clock returning a timestamp with an
|
||||
* unspecified epoch, that should be relative to when the host system
|
||||
* was powered on. These timestamps shouldn't go backwards. Timestamps
|
||||
* should also include time spent in a sleep, suspend, or hibernation
|
||||
* state. These timestamps aren't impacted by clock_settime(), but
|
||||
* they may be impacted by frequency adjustments made by NTP. This
|
||||
* clock will raise an `EINVAL` error on extremely old Linux distros
|
||||
* like RHEL5. MacOS and BSDs use the word "monotonic" to describe
|
||||
* this clock. On Windows this maps to QueryInterruptTimePrecise().
|
||||
*
|
||||
* - `CLOCK_MONOTONIC_RAW` returns a timestamp from an unspecified
|
||||
* epoch. These timestamps don't count time spent in the sleep,
|
||||
* suspend, and hibernation states. This clock is not impacted by
|
||||
* clock_settime(). Unlike `CLOCK_MONOTONIC` this clock is guaranteed
|
||||
* to not be impacted by frequency adjustments. Providing this level
|
||||
* of assurances may make this clock 10x slower than the monotonic
|
||||
* clock. Furthermore this clock may cause `EINVAL` to be raised if
|
||||
* running on a host system that doesn't provide those guarantees,
|
||||
* e.g. OpenBSD and MacOS on AMD64.
|
||||
*
|
||||
* - `CLOCK_REALTIME_COARSE` is the same as `CLOCK_REALTIME` except
|
||||
* it'll go faster if the host OS provides a cheaper way to read the
|
||||
* wall time. Please be warned that coarse can be really coarse.
|
||||
* Rather than nano precision, you're looking at `CLK_TCK` precision,
|
||||
* which can lag as far as 30 milliseconds behind or possibly more.
|
||||
* Cosmopolitan may fallback to `CLOCK_REALTIME` if a faster less
|
||||
* accurate clock isn't provided by the system. This clock will raise
|
||||
* an `EINVAL` error on extremely old Linux distros like RHEL5. On
|
||||
* platforms with vDSOs like Linux, Windows, and MacOS ARM64 this
|
||||
* should take about 5 nanoseconds.
|
||||
*
|
||||
* - `CLOCK_MONOTONIC_COARSE` is the same as `CLOCK_MONOTONIC` except
|
||||
* it'll go faster if the host OS provides a cheaper way to read the
|
||||
* unbiased time. Please be warned that coarse can be really coarse.
|
||||
* Rather than nano precision, you're looking at `CLK_TCK` precision,
|
||||
* which can lag as far as 30 milliseconds behind or possibly more.
|
||||
* Cosmopolitan may fallback to `CLOCK_MONOTONIC` if a faster less
|
||||
* accurate clock isn't provided by the system. This clock will raise
|
||||
* an `EINVAL` error on extremely old Linux distros like RHEL5. On
|
||||
* platforms with vDSOs like Linux, Windows, and MacOS ARM64 this
|
||||
* should take about 5 nanoseconds.
|
||||
*
|
||||
* - `CLOCK_PROCESS_CPUTIME_ID` returns the amount of time this process
|
||||
* was actively scheduled. This is similar to getrusage() and clock().
|
||||
*
|
||||
* - `CLOCK_THREAD_CPUTIME_ID` returns the amount of time this thread
|
||||
* was actively scheduled. This is similar to getrusage() and clock().
|
||||
*
|
||||
* @param ts is where the result is stored (or null to do clock check)
|
||||
* @return 0 on success, or -1 w/ errno
|
||||
* @raise EFAULT if `ts` points to invalid memory
|
||||
|
@ -109,7 +154,6 @@ TryAgain:
|
|||
* @vforksafe
|
||||
*/
|
||||
int clock_gettime(int clock, struct timespec *ts) {
|
||||
// threads on win32 stacks call this so we can't asan check *ts
|
||||
int rc = clock_gettime_impl(clock, ts);
|
||||
if (rc) {
|
||||
errno = -rc;
|
||||
|
|
|
@ -1,85 +0,0 @@
|
|||
/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
|
||||
│ vi: set et ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi │
|
||||
╞══════════════════════════════════════════════════════════════════════════════╡
|
||||
│ Copyright 2024 Justine Alexandra Roberts Tunney │
|
||||
│ │
|
||||
│ Permission to use, copy, modify, and/or distribute this software for │
|
||||
│ any purpose with or without fee is hereby granted, provided that the │
|
||||
│ above copyright notice and this permission notice appear in all copies. │
|
||||
│ │
|
||||
│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │
|
||||
│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │
|
||||
│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │
|
||||
│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │
|
||||
│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │
|
||||
│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │
|
||||
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
|
||||
│ PERFORMANCE OF THIS SOFTWARE. │
|
||||
╚─────────────────────────────────────────────────────────────────────────────*/
|
||||
#include "libc/calls/calls.h"
|
||||
#include "libc/calls/internal.h"
|
||||
#include "libc/calls/struct/timespec.h"
|
||||
#include "libc/errno.h"
|
||||
#include "libc/runtime/clktck.h"
|
||||
#include "libc/runtime/runtime.h"
|
||||
#include "libc/sysv/consts/clock.h"
|
||||
#include "libc/sysv/consts/timer.h"
|
||||
|
||||
/**
|
||||
* Sleeps with higher accuracy at the cost of cpu.
|
||||
*/
|
||||
int cosmo_clock_nanosleep(int clock, int flags, const struct timespec *req,
|
||||
struct timespec *rem) {
|
||||
|
||||
// pick clocks
|
||||
int time_clock;
|
||||
int sleep_clock;
|
||||
if (clock == CLOCK_REALTIME || //
|
||||
clock == CLOCK_REALTIME_PRECISE) {
|
||||
time_clock = clock;
|
||||
sleep_clock = CLOCK_REALTIME;
|
||||
} else if (clock == CLOCK_MONOTONIC || //
|
||||
clock == CLOCK_MONOTONIC_PRECISE) {
|
||||
time_clock = clock;
|
||||
sleep_clock = CLOCK_MONOTONIC;
|
||||
} else if (clock == CLOCK_REALTIME_COARSE || //
|
||||
clock == CLOCK_REALTIME_FAST) {
|
||||
return sys_clock_nanosleep(CLOCK_REALTIME, flags, req, rem);
|
||||
} else if (clock == CLOCK_MONOTONIC_COARSE || //
|
||||
clock == CLOCK_MONOTONIC_FAST) {
|
||||
return sys_clock_nanosleep(CLOCK_MONOTONIC, flags, req, rem);
|
||||
} else {
|
||||
return sys_clock_nanosleep(clock, flags, req, rem);
|
||||
}
|
||||
|
||||
// sleep bulk of time in kernel
|
||||
struct timespec start, deadline, remain, waitfor, now;
|
||||
struct timespec quantum = timespec_fromnanos(1000000000 / CLK_TCK);
|
||||
clock_gettime(time_clock, &start);
|
||||
deadline = flags & TIMER_ABSTIME ? *req : timespec_add(start, *req);
|
||||
if (timespec_cmp(start, deadline) >= 0)
|
||||
return 0;
|
||||
remain = timespec_sub(deadline, start);
|
||||
if (timespec_cmp(remain, quantum) > 0) {
|
||||
waitfor = timespec_sub(remain, quantum);
|
||||
if (sys_clock_nanosleep(sleep_clock, 0, &waitfor, rem) == -1) {
|
||||
if (!flags && rem && errno == EINTR) {
|
||||
*rem = timespec_add(*rem, quantum);
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
// spin through final scheduling quantum
|
||||
int rc = 0;
|
||||
ftrace_enabled(-1);
|
||||
do {
|
||||
if (_check_cancel()) {
|
||||
rc = -1;
|
||||
break;
|
||||
}
|
||||
clock_gettime(time_clock, &now);
|
||||
} while (timespec_cmp(now, deadline) < 0);
|
||||
ftrace_enabled(+1);
|
||||
return rc;
|
||||
}
|
|
@ -20,8 +20,10 @@
|
|||
#include "libc/calls/struct/sigset.internal.h"
|
||||
#include "libc/calls/struct/timespec.h"
|
||||
#include "libc/calls/struct/timespec.internal.h"
|
||||
#include "libc/calls/syscall-sysv.internal.h"
|
||||
#include "libc/errno.h"
|
||||
#include "libc/intrin/atomic.h"
|
||||
#include "libc/stdio/sysparam.h"
|
||||
#include "libc/sysv/consts/timer.h"
|
||||
#include "libc/thread/tls.h"
|
||||
#ifdef __x86_64__
|
||||
|
@ -37,6 +39,7 @@ static textwindows int sys_clock_nanosleep_nt_impl(int clock,
|
|||
if (timespec_cmp(now, abs) >= 0)
|
||||
return 0;
|
||||
msdelay = timespec_tomillis(timespec_sub(abs, now));
|
||||
msdelay = MIN(msdelay, -1u);
|
||||
if (_park_norestart(msdelay, waitmask))
|
||||
return -1;
|
||||
}
|
||||
|
@ -48,15 +51,17 @@ textwindows int sys_clock_nanosleep_nt(int clock, int flags,
|
|||
int rc;
|
||||
struct timespec abs, now;
|
||||
sigset_t m = __sig_block();
|
||||
if (flags & TIMER_ABSTIME) {
|
||||
if (flags) {
|
||||
abs = *req;
|
||||
} else {
|
||||
if ((rc = sys_clock_gettime_nt(clock, &now)))
|
||||
if ((rc = sys_clock_gettime_nt(clock, &now))) {
|
||||
rc = _sysret(rc);
|
||||
goto BailOut;
|
||||
}
|
||||
abs = timespec_add(now, *req);
|
||||
}
|
||||
rc = sys_clock_nanosleep_nt_impl(clock, abs, m);
|
||||
if (rc == -1 && rem && errno == EINTR) {
|
||||
if (rc == -1 && !flags && rem && errno == EINTR) {
|
||||
sys_clock_gettime_nt(clock, &now);
|
||||
*rem = timespec_subz(abs, now);
|
||||
}
|
||||
|
|
|
@ -18,6 +18,8 @@
|
|||
╚─────────────────────────────────────────────────────────────────────────────*/
|
||||
#include "libc/calls/struct/timespec.h"
|
||||
#include "libc/calls/struct/timespec.internal.h"
|
||||
#include "libc/calls/syscall-sysv.internal.h"
|
||||
#include "libc/errno.h"
|
||||
#include "libc/sysv/consts/clock.h"
|
||||
#include "libc/sysv/errfuns.h"
|
||||
|
||||
|
@ -25,21 +27,18 @@ int sys_clock_nanosleep_openbsd(int clock, int flags,
|
|||
const struct timespec *req,
|
||||
struct timespec *rem) {
|
||||
int res;
|
||||
struct timespec now, rel;
|
||||
if (clock == CLOCK_REALTIME) {
|
||||
if (!flags) {
|
||||
res = sys_nanosleep(req, rem);
|
||||
} else {
|
||||
sys_clock_gettime(clock, &now);
|
||||
if (timespec_cmp(*req, now) > 0) {
|
||||
rel = timespec_sub(*req, now);
|
||||
res = sys_nanosleep(&rel, 0);
|
||||
} else {
|
||||
res = 0;
|
||||
}
|
||||
}
|
||||
struct timespec start, relative, remainder;
|
||||
if (!flags) {
|
||||
relative = *req;
|
||||
} else {
|
||||
res = enotsup();
|
||||
if ((res = sys_clock_gettime(clock, &start)))
|
||||
return _sysret(res);
|
||||
if (timespec_cmp(start, *req) >= 0)
|
||||
return 0;
|
||||
relative = timespec_sub(*req, start);
|
||||
}
|
||||
res = sys_nanosleep(&relative, &remainder);
|
||||
if (res == -1 && errno == EINTR && rem && !flags)
|
||||
*rem = remainder;
|
||||
return res;
|
||||
}
|
||||
|
|
|
@ -35,8 +35,10 @@ int sys_clock_nanosleep_xnu(int clock, int flags, const struct timespec *req,
|
|||
struct timespec *rem) {
|
||||
#ifdef __x86_64__
|
||||
if (flags & TIMER_ABSTIME) {
|
||||
int nerr;
|
||||
struct timespec now;
|
||||
sys_clock_gettime_xnu(clock, &now);
|
||||
if ((nerr = sys_clock_gettime_xnu(clock, &now)))
|
||||
return _sysret(nerr);
|
||||
if (timespec_cmp(*req, now) > 0) {
|
||||
struct timeval rel = timespec_totimeval(timespec_sub(*req, now));
|
||||
return sys_select(0, 0, 0, 0, &rel);
|
||||
|
@ -47,12 +49,13 @@ int sys_clock_nanosleep_xnu(int clock, int flags, const struct timespec *req,
|
|||
int rc;
|
||||
struct timespec beg;
|
||||
if (rem)
|
||||
sys_clock_gettime_xnu(CLOCK_REALTIME, &beg);
|
||||
if ((rc = sys_clock_gettime_xnu(clock, &beg)))
|
||||
return _sysret(rc);
|
||||
struct timeval rel = timespec_totimeval(*req); // rounds up
|
||||
rc = sys_select(0, 0, 0, 0, &rel);
|
||||
if (rc == -1 && rem && errno == EINTR) {
|
||||
struct timespec end;
|
||||
sys_clock_gettime_xnu(CLOCK_REALTIME, &end);
|
||||
sys_clock_gettime_xnu(clock, &end);
|
||||
*rem = timespec_subz(*req, timespec_sub(end, beg));
|
||||
}
|
||||
return rc;
|
||||
|
@ -61,9 +64,8 @@ int sys_clock_nanosleep_xnu(int clock, int flags, const struct timespec *req,
|
|||
long res;
|
||||
struct timespec abs, now, rel;
|
||||
if (_weaken(pthread_testcancel_np) && //
|
||||
_weaken(pthread_testcancel_np)()) {
|
||||
_weaken(pthread_testcancel_np)())
|
||||
return ecanceled();
|
||||
}
|
||||
if (flags & TIMER_ABSTIME) {
|
||||
abs = *req;
|
||||
if (!(res = __syslib->__clock_gettime(clock, &now))) {
|
||||
|
@ -73,7 +75,10 @@ int sys_clock_nanosleep_xnu(int clock, int flags, const struct timespec *req,
|
|||
}
|
||||
}
|
||||
} else {
|
||||
res = __syslib->__nanosleep(req, rem);
|
||||
struct timespec remainder;
|
||||
res = __syslib->__nanosleep(req, &remainder);
|
||||
if (res == -EINTR && rem)
|
||||
*rem = remainder;
|
||||
}
|
||||
if (res == -EINTR && //
|
||||
(_weaken(pthread_testcancel_np) && //
|
||||
|
|
|
@ -82,6 +82,10 @@ errno_t clock_nanosleep(int clock, int flags, //
|
|||
struct timespec *rem) {
|
||||
if (IsMetal())
|
||||
return ENOSYS;
|
||||
if (IsLinux() && clock == CLOCK_REALTIME_COARSE)
|
||||
clock = CLOCK_REALTIME;
|
||||
if (IsLinux() && clock == CLOCK_MONOTONIC_COARSE)
|
||||
clock = CLOCK_MONOTONIC;
|
||||
if (clock == 127 || //
|
||||
(flags & ~TIMER_ABSTIME) || //
|
||||
req->tv_sec < 0 || //
|
||||
|
@ -89,22 +93,7 @@ errno_t clock_nanosleep(int clock, int flags, //
|
|||
return EINVAL;
|
||||
int rc;
|
||||
errno_t err, old = errno;
|
||||
|
||||
TryAgain:
|
||||
// Ensure fallback for old Linux sticks.
|
||||
if (IsLinux() && clock == 4 /* CLOCK_MONOTONIC_RAW */)
|
||||
clock = CLOCK_MONOTONIC_RAW;
|
||||
|
||||
rc = sys_clock_nanosleep(clock, flags, req, rem);
|
||||
|
||||
// CLOCK_MONOTONIC_RAW is Linux 2.6.28+ so not available on RHEL5
|
||||
if (IsLinux() && rc && errno == EINVAL &&
|
||||
clock == 4 /* CLOCK_MONOTONIC_RAW */) {
|
||||
CLOCK_MONOTONIC_RAW = CLOCK_MONOTONIC;
|
||||
CLOCK_MONOTONIC_RAW_APPROX = CLOCK_MONOTONIC;
|
||||
goto TryAgain;
|
||||
}
|
||||
|
||||
err = !rc ? 0 : errno;
|
||||
errno = old;
|
||||
return err;
|
||||
|
|
|
@ -20,7 +20,6 @@ int timespec_get(struct timespec *, int) libcesque;
|
|||
|
||||
#ifdef _COSMO_SOURCE
|
||||
int sys_clock_nanosleep(int, int, const struct timespec *, struct timespec *);
|
||||
int cosmo_clock_nanosleep(int, int, const struct timespec *, struct timespec *);
|
||||
#define timespec_zero ((struct timespec){0})
|
||||
#define timespec_max ((struct timespec){0x7fffffffffffffff, 999999999})
|
||||
libcesque int timespec_cmp(struct timespec, struct timespec) pureconst;
|
||||
|
@ -34,8 +33,8 @@ libcesque struct timespec timespec_frommicros(int64_t) pureconst;
|
|||
libcesque struct timespec timespec_frommillis(int64_t) pureconst;
|
||||
libcesque struct timespec timespec_real(void) libcesque;
|
||||
libcesque struct timespec timespec_mono(void) libcesque;
|
||||
libcesque struct timespec timespec_sleep(struct timespec) libcesque;
|
||||
libcesque int timespec_sleep_until(struct timespec) libcesque;
|
||||
libcesque struct timespec timespec_sleep(int, struct timespec) libcesque;
|
||||
libcesque int timespec_sleep_until(int, struct timespec) libcesque;
|
||||
libcesque struct timespec timespec_sub(struct timespec,
|
||||
struct timespec) pureconst;
|
||||
libcesque struct timespec timespec_subz(struct timespec,
|
||||
|
|
|
@ -16,7 +16,6 @@
|
|||
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
|
||||
│ PERFORMANCE OF THIS SOFTWARE. │
|
||||
╚─────────────────────────────────────────────────────────────────────────────*/
|
||||
#include "libc/assert.h"
|
||||
#include "libc/calls/struct/timespec.h"
|
||||
#include "libc/sysv/consts/clock.h"
|
||||
|
||||
|
@ -31,6 +30,6 @@
|
|||
*/
|
||||
struct timespec timespec_real(void) {
|
||||
struct timespec ts;
|
||||
unassert(!clock_gettime(CLOCK_REALTIME, &ts));
|
||||
clock_gettime(CLOCK_REALTIME, &ts);
|
||||
return ts;
|
||||
}
|
||||
|
|
|
@ -34,19 +34,16 @@
|
|||
* @return unslept time which may be non-zero if the call was interrupted
|
||||
* @cancelationpoint
|
||||
*/
|
||||
struct timespec timespec_sleep(struct timespec delay) {
|
||||
struct timespec timespec_sleep(int clock, struct timespec delay) {
|
||||
int cs = -1;
|
||||
errno_t err;
|
||||
struct timespec remain;
|
||||
remain = timespec_zero;
|
||||
if (_pthread_self()->pt_flags & PT_MASKED) {
|
||||
if (_pthread_self()->pt_flags & PT_MASKED)
|
||||
cs = _pthread_block_cancelation();
|
||||
}
|
||||
if ((err = clock_nanosleep(CLOCK_REALTIME, 0, &delay, &remain))) {
|
||||
if ((err = clock_nanosleep(clock, 0, &delay, &remain)))
|
||||
unassert(err == EINTR);
|
||||
}
|
||||
if (cs != -1) {
|
||||
if (cs != -1)
|
||||
_pthread_allow_cancelation(cs);
|
||||
}
|
||||
return remain;
|
||||
}
|
||||
|
|
|
@ -30,9 +30,9 @@
|
|||
* @raise EINTR if signal was delivered
|
||||
* @cancelationpoint
|
||||
*/
|
||||
errno_t timespec_sleep_until(struct timespec abs_deadline) {
|
||||
errno_t timespec_sleep_until(int clock, struct timespec abs_deadline) {
|
||||
errno_t rc;
|
||||
rc = clock_nanosleep(CLOCK_REALTIME, TIMER_ABSTIME, &abs_deadline, 0);
|
||||
rc = clock_nanosleep(clock, TIMER_ABSTIME, &abs_deadline, 0);
|
||||
unassert(!rc || rc == EINTR || rc == ECANCELED);
|
||||
return rc;
|
||||
}
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue