Use better memory strategy on Windows

Rather than using the rollo global to pick addresses, we now select
them randomly from a conservative vaspace.
Justine Tunney 2024-07-20 02:20:03 -07:00
parent 6a5d4ed65b
commit 2018cac11f
13 changed files with 113 additions and 124 deletions
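To make the message above concrete: on Windows the executable image, DLLs, stacks, and heap already occupy parts of the address space, so instead of advancing a global cursor (the old rollo), a mapping base is drawn at random from a range conservatively assumed to be free. Below is a rough standalone sketch of that idea; the bounds, the granularity constant, and the helper name are illustrative assumptions, not the commit's actual code.

#include <stdint.h>
#include <stdlib.h>

// Illustrative only: a "conservative vaspace" here means a slice of the
// 64-bit address space chosen to avoid the executable image, DLLs,
// stacks, and heap. These bounds and names are assumptions for this
// sketch, not the values used by the commit.
#define kVaspaceMin 0x0000100000000000ull
#define kVaspaceMax 0x0000600000000000ull
#define kGranule    0x10000ull  // Windows allocation granularity (64kb)

// Picks a random, granularity-aligned base address for a mapping of
// `size` bytes. A real implementation would draw from a proper entropy
// source (e.g. the library's _rand64()) and retry if the chosen region
// turns out to be occupied.
static uint64_t pick_random_base(uint64_t size) {
  uint64_t slots = (kVaspaceMax - kVaspaceMin - size) / kGranule;
  uint64_t r = ((uint64_t)rand() << 31) | (uint64_t)rand();  // stand-in entropy
  return kVaspaceMin + (r % slots) * kGranule;
}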


@@ -25,11 +25,9 @@
 #include "libc/thread/thread.h"
 #include "libc/thread/tls.h"
-static struct {
-  int thepid;
-  uint128_t thepool;
-  pthread_spinlock_t lock;
-} g_rand64;
+static int _rand64_pid;
+static unsigned __int128 _rand64_pool;
+pthread_mutex_t _rand64_lock_obj = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
 /**
  * Returns nondeterministic random data.
@@ -39,19 +37,17 @@ static struct {
  * the case even across forks and threads, whose sequences will differ.
  *
  * @see rdseed(), rdrand(), rand(), random(), rngset()
- * @note this function takes 5 cycles (30 if `__threaded`)
  * @note this function is not intended for cryptography
  * @note this function passes bigcrush and practrand
  * @asyncsignalsafe
  */
 uint64_t _rand64(void) {
   void *p;
   uint128_t s;
-  if (__threaded)
-    pthread_spin_lock(&g_rand64.lock);
-  if (__pid == g_rand64.thepid) {
-    s = g_rand64.thepool;  // normal path
+  pthread_mutex_lock(&_rand64_lock_obj);
+  if (__pid == _rand64_pid) {
+    s = _rand64_pool;  // normal path
   } else {
-    if (!g_rand64.thepid) {
+    if (!_rand64_pid) {
       if (AT_RANDOM && (p = (void *)__getauxval(AT_RANDOM).value)) {
         // linux / freebsd kernel supplied entropy
         memcpy(&s, p, 16);
@@ -61,13 +57,13 @@ uint64_t _rand64(void) {
       }
     } else {
       // blend another timestamp on fork contention
-      s = g_rand64.thepool ^ rdtsc();
+      s = _rand64_pool ^ rdtsc();
     }
     // blend the pid on startup and fork contention
     s ^= __pid;
-    g_rand64.thepid = __pid;
+    _rand64_pid = __pid;
   }
-  g_rand64.thepool = (s *= 15750249268501108917ull);  // lemur64
-  pthread_spin_unlock(&g_rand64.lock);
+  _rand64_pool = (s *= 15750249268501108917ull);  // lemur64
+  pthread_mutex_unlock(&_rand64_lock_obj);
   return s >> 64;
 }
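For context on the code being guarded above: _rand64 advances a 128-bit pool with the lemur64 step and returns its high half. A minimal standalone sketch of just that step, assuming a GCC/Clang target with __int128 support; seeding from AT_RANDOM and the locking are deliberately omitted.

#include <stdint.h>

// Sketch of the lemur64 step from the diff: multiply the 128-bit pool
// by a fixed odd 64-bit constant, keep the product as the new pool,
// and output the high 64 bits. Not the library's code; the pool must
// be seeded nonzero (the real function uses AT_RANDOM or timestamps).
static unsigned __int128 pool = 1;

static uint64_t lemur64_step(void) {
  pool *= 15750249268501108917ull;  // full 128-bit multiply of the state
  return (uint64_t)(pool >> 64);    // high half is the returned sample
}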