Make malloc() go 200x faster

If pthread_create() is linked into the binary, then the cosmo runtime
will create an independent dlmalloc arena for each core. Whenever the
malloc() function is used, it will index `g_heaps[sched_getcpu() / 2]`
to find the arena with the greatest hyperthread / NUMA locality. This
may be configured via an environment variable. For example, if you say
`export COSMOPOLITAN_HEAP_COUNT=1` then you can restore the old ways.
Your process may be configured to have anywhere from 1 to 128 heaps.
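
To make the dispatch concrete, here's a minimal sketch of the idea,
assuming dlmalloc is compiled with its MSPACES option so each heap is
an independent arena; apart from the `g_heaps` / `sched_getcpu()`
indexing named above, the names below are illustrative, not the
actual cosmo glue:

```c
// Minimal sketch of per-CPU arena dispatch (illustrative, not the real glue).
// Assumes dlmalloc's MSPACES API (mspace, create_mspace, mspace_malloc).
#define _GNU_SOURCE
#include <sched.h>                          // sched_getcpu()
#include <stddef.h>
#include "third_party/dlmalloc/dlmalloc.h"

#define MAX_HEAPS 128

static mspace g_heaps[MAX_HEAPS];  // one arena per hyperthread pair
static int g_heap_count = 1;       // raised once pthread_create() is seen

static void init_heaps(int want) {
  g_heap_count = want < 1 ? 1 : want > MAX_HEAPS ? MAX_HEAPS : want;
  for (int i = 0; i < g_heap_count; ++i)
    g_heaps[i] = create_mspace(0, 1);  // 0 = default capacity, 1 = locked
}

static mspace pick_heap(void) {
  int cpu = sched_getcpu();        // CPU this thread is running on right now
  if (cpu < 0) cpu = 0;            // sched_getcpu() can fail on some systems
  return g_heaps[(cpu / 2) % g_heap_count];
}

void *scalable_malloc(size_t n) {
  // Dividing by 2 maps both hyperthreads of a core onto the same arena,
  // which keeps locality high while still avoiding cross-core contention.
  return mspace_malloc(pick_heap(), n);
}
```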

We need this revision because it makes multithreaded C++ applications
faster. For example, an HTTP server I'm working on that makes extreme
use of the STL went from 16k to 2000k requests per second after this
change was made. To understand why, try out the malloc_test benchmark,
which calls malloc() + realloc() in a loop across many threads and
sees a 250x improvement in process clock time and 200x in wall time.
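
For context, the benchmark is essentially an allocator stress loop
run on every thread; a rough approximation of that workload (not the
actual malloc_test source) looks like this:

```c
// Hedged approximation of the malloc_test workload: each thread hammers
// malloc()/realloc()/free() in a loop, so with a single shared arena the
// heap lock becomes the bottleneck as the thread count grows.
#include <pthread.h>
#include <stdlib.h>

#define THREADS    16
#define ITERATIONS 100000

static void *worker(void *arg) {
  (void)arg;
  for (int i = 0; i < ITERATIONS; ++i) {
    char *p = malloc(64);
    p = realloc(p, 128);  // forces the allocator to touch metadata again
    free(p);
  }
  return 0;
}

int main(void) {
  pthread_t th[THREADS];
  for (int i = 0; i < THREADS; ++i)
    pthread_create(&th[i], 0, worker, 0);
  for (int i = 0; i < THREADS; ++i)
    pthread_join(th[i], 0);
}
```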

The tradeoff is that this adds ~25ns of latency to individual malloc
calls compared to MODE=tiny, once the cosmo runtime has transitioned
into a fully multi-threaded state. If you don't need malloc() to be
scalable, then cosmo provides several options. For starters, the heap
count variable above can be set to put the process back in single
heap mode. You can go faster still by including tinymalloc.inc, as
many of the programs in tool/build/.. already do; that'll also shave
tens of kb off your binary footprint. There's also MODE=tiny, which
is configured to use just one plain old dlmalloc arena by default.
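
If you go the tinymalloc route, the pattern is simply to include the
allocator into your program's main translation unit; the sketch below
assumes the path is libc/mem/tinymalloc.inc and that the included
definitions take precedence over the default allocator, so check both
against the tree:

```c
// Hedged sketch: opting into the tiny allocator the way tool/build programs
// do. The include path and the override behavior are assumptions; verify.
#include "libc/mem/tinymalloc.inc"  // defines malloc/free/etc. in this unit

int main(void) {
  void *p = malloc(123);  // now served by the tiny allocator, not dlmalloc
  free(p);
  return 0;
}
```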

Another tradeoff is that we need more memory now (except in MODE=tiny)
to track the provenance of each memory allocation. This is necessary
because allocations can be freely shared across threads, and because
OSes can reschedule code to different CPUs at any time.
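
In other words, free() can no longer assume a pointer came from the
arena of the CPU it happens to be running on. One illustrative way to
track provenance (a sketch, not the actual cosmo layout) is to stamp
every block with the arena that produced it:

```c
// Illustrative provenance scheme, not the actual layout: each block carries a
// small header naming the arena that allocated it, so free() can route the
// memory back to that arena no matter which thread or CPU calls it. This
// per-block header is the kind of extra memory the paragraph above means.
#include <stddef.h>
#include <stdint.h>
#include "third_party/dlmalloc/dlmalloc.h"  // mspace API, as in the sketch above

extern mspace g_heaps[];  // per-CPU arenas (see the earlier sketch)

struct blockhdr {
  _Alignas(16) uint32_t heap;  // owning arena; alignment keeps user memory 16-aligned
};

void *tracked_malloc(uint32_t heap, size_t n) {
  struct blockhdr *h = mspace_malloc(g_heaps[heap], sizeof(*h) + n);
  if (!h) return NULL;
  h->heap = heap;   // remember provenance
  return h + 1;     // caller's memory starts just past the header
}

void tracked_free(void *p) {
  if (!p) return;
  struct blockhdr *h = (struct blockhdr *)p - 1;
  mspace_free(g_heaps[h->heap], h);  // hand back to the owning arena
}
```
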
Justine Tunney 2024-06-05 01:31:21 -07:00
parent 9906f299bb
commit 3609f65de3
60 changed files with 858 additions and 1064 deletions

@@ -86,13 +86,6 @@ o//libc/intrin/memmove.o: private \
-finline \
-foptimize-sibling-calls
# make asan stack traces shorter
o/$(MODE)/libc/intrin/asanthunk.o: private \
CFLAGS += \
-Os \
$(NO_MAGIC) \
-foptimize-sibling-calls
o/$(MODE)/libc/intrin/bzero.o \
o/$(MODE)/libc/intrin/memcmp.o \
o/$(MODE)/libc/intrin/memmove.o: private \

@@ -35,7 +35,6 @@
#include "libc/log/libfatal.internal.h"
#include "libc/log/log.h"
#include "libc/macros.internal.h"
#include "libc/mem/hook.internal.h"
#include "libc/mem/mem.h"
#include "libc/nexgen32e/gc.internal.h"
#include "libc/nexgen32e/stackframe.h"
@@ -56,25 +55,10 @@
#include "libc/thread/tls.h"
#include "third_party/dlmalloc/dlmalloc.h"
#ifdef __x86_64__
#ifdef __SANITIZE_ADDRESS__
__static_yoink("_init_asan");
#if IsModeDbg()
// MODE=dbg
// O(32mb) of morgue memory
// Θ(64) bytes of malloc overhead
#define ASAN_MORGUE_ITEMS 512
#define ASAN_MORGUE_THRESHOLD 65536
#define ASAN_TRACE_ITEMS 16
#else
// MODE=asan
// O(32mb) of morgue memory
// Θ(32) bytes of malloc overhead
#define ASAN_MORGUE_ITEMS 512
#define ASAN_MORGUE_THRESHOLD 65536
#define ASAN_TRACE_ITEMS 4
#endif
/**
* @fileoverview Cosmopolitan Address Sanitizer Runtime.
*
@@ -110,13 +94,6 @@ __static_yoink("_init_asan");
#define ASAN_LOG(...) (void)0 // kprintf(__VA_ARGS__)
#define HOOK(HOOK, IMPL) \
do { \
if (_weaken(HOOK)) { \
*_weaken(HOOK) = IMPL; \
} \
} while (0)
#define REQUIRE(FUNC) \
do { \
if (!_weaken(FUNC)) { \
@@ -124,15 +101,6 @@ __static_yoink("_init_asan");
} \
} while (0)
struct AsanTrace {
uint32_t p[ASAN_TRACE_ITEMS]; // assumes linkage into 32-bit space
};
struct AsanExtra {
uint64_t size;
struct AsanTrace bt;
};
struct AsanSourceLocation {
char *filename;
int line;
@@ -158,16 +126,6 @@ struct AsanGlobal {
char *odr_indicator;
};
struct ReportOriginHeap {
const unsigned char *a;
int z;
};
static struct AsanMorgue {
_Atomic(unsigned) i;
_Atomic(void *) p[ASAN_MORGUE_ITEMS];
} __asan_morgue;
int __asan_option_detect_stack_use_after_return = 0;
void __asan_version_mismatch_check_v8(void) {
@@ -180,26 +138,11 @@ static bool __asan_once(void) {
&once, &want, true, memory_order_relaxed, memory_order_relaxed);
}
#define __asan_unreachable() \
do { \
kprintf("%s:%d: __asan_unreachable()\n", __FILE__, __LINE__); \
__builtin_trap(); \
} while (0)
static int __asan_bsf(uint64_t x) {
_Static_assert(sizeof(long long) == sizeof(uint64_t), "");
return __builtin_ctzll(x);
}
static int __asan_bsr(uint64_t x) {
_Static_assert(sizeof(long long) == sizeof(uint64_t), "");
return __builtin_clzll(x) ^ 63;
}
static uint64_t __asan_roundup2pow(uint64_t x) {
return 2ull << __asan_bsr(x - 1);
}
static char *__asan_utf8cpy(char *p, unsigned c) {
uint64_t z;
z = tpenc(c);
@@ -356,7 +299,7 @@ static void __asan_exit(void) {
_Exit(99);
}
static __wur __asan_die_f *__asan_die(void) {
__wur __asan_die_f *__asan_die(void) {
if (_weaken(__die)) {
return _weaken(__die);
} else {
@@ -410,7 +353,7 @@ void __asan_unpoison(void *p, long n) {
}
}
static bool __asan_is_mapped(int x) {
bool __asan_is_mapped(int x) {
// xxx: we can't lock because no reentrant locks yet
int i;
bool res;
@@ -724,12 +667,6 @@ static const char *__asan_describe_access_poison(signed char kind) {
}
}
static __wur __asan_die_f *__asan_report_invalid_pointer(const void *addr) {
kprintf("\n\e[J\e[1;31masan error\e[0m: this corruption at %p shadow %p\n",
addr, SHADOW(addr));
return __asan_die();
}
static char *__asan_format_interval(char *p, intptr_t a, intptr_t b) {
p = __asan_hexcpy(p, a, 48), *p++ = '-';
p = __asan_hexcpy(p, b, 48);
@@ -750,7 +687,7 @@ static char *__asan_format_section(char *p, const void *p1, const void *p2,
return p;
}
static void __asan_report_memory_origin_image(intptr_t a, int z) {
void __asan_report_memory_origin_image(intptr_t a, int z) {
unsigned l, m, r, n, k;
struct SymbolTable *st;
kprintf("\nthe memory belongs to image symbols\n");
@@ -787,32 +724,8 @@ static void __asan_report_memory_origin_image(intptr_t a, int z) {
}
}
static void __asan_onmemory(void *x, void *y, size_t n, void *a) {
const unsigned char *p = x;
struct ReportOriginHeap *t = a;
if ((p <= t->a && t->a < p + n) ||
(p <= t->a + t->z && t->a + t->z < p + n) ||
(t->a < p && p + n <= t->a + t->z)) {
kprintf("%p %,lu bytes [dlmalloc]", x, n);
__asan_print_trace(x);
kprintf("\n");
}
}
static void __asan_report_memory_origin_heap(const unsigned char *a, int z) {
struct ReportOriginHeap t;
kprintf("\nthe memory was allocated by\n");
if (_weaken(malloc_inspect_all)) {
t.a = a;
t.z = z;
_weaken(malloc_inspect_all)(__asan_onmemory, &t);
} else {
kprintf("\tunknown please __static_yoink(\"malloc_inspect_all\");\n");
}
}
static void __asan_report_memory_origin(const unsigned char *addr, int size,
signed char kind) {
void __asan_report_memory_origin(const unsigned char *addr, int size,
signed char kind) {
switch (kind) {
case kAsanStackOverrun:
case kAsanGlobalOverrun:
@@ -837,7 +750,8 @@ static void __asan_report_memory_origin(const unsigned char *addr, int size,
if (__executable_start <= addr && addr < _end) {
__asan_report_memory_origin_image((intptr_t)addr, size);
} else if (IsAutoFrame((intptr_t)addr >> 16)) {
__asan_report_memory_origin_heap(addr, size);
if (_weaken(__asan_report_memory_origin_heap))
_weaken(__asan_report_memory_origin_heap)(addr, size);
}
}
@@ -972,49 +886,7 @@ static __wur __asan_die_f *__asan_report_memory_fault(void *addr, int size,
__asan_fault(SHADOW(addr), -128).kind);
}
static void *__asan_morgue_add(void *p) {
return atomic_exchange_explicit(
__asan_morgue.p + (atomic_fetch_add_explicit(&__asan_morgue.i, 1,
memory_order_acq_rel) &
(ARRAYLEN(__asan_morgue.p) - 1)),
p, memory_order_acq_rel);
}
__attribute__((__destructor__)) static void __asan_morgue_flush(void) {
unsigned i;
for (i = 0; i < ARRAYLEN(__asan_morgue.p); ++i) {
if (atomic_load_explicit(__asan_morgue.p + i, memory_order_acquire)) {
_weaken(dlfree)(atomic_exchange_explicit(__asan_morgue.p + i, 0,
memory_order_release));
}
}
}
static size_t __asan_heap_size(size_t n) {
if (n < 0x7fffffff0000) {
n = ROUNDUP(n, _Alignof(struct AsanExtra));
return __asan_roundup2pow(n + sizeof(struct AsanExtra));
} else {
return -1;
}
}
static void __asan_write48(uint64_t *value, uint64_t x) {
uint64_t cookie;
cookie = 'J' | 'T' << 8;
cookie ^= x & 0xffff;
*value = (x & 0xffffffffffff) | cookie << 48;
}
static bool __asan_read48(uint64_t value, uint64_t *x) {
uint64_t cookie;
cookie = value >> 48;
cookie ^= value & 0xffff;
*x = (int64_t)(value << 16) >> 16;
return cookie == ('J' | 'T' << 8);
}
static void __asan_rawtrace(struct AsanTrace *bt, const struct StackFrame *bp) {
void __asan_rawtrace(struct AsanTrace *bt, const struct StackFrame *bp) {
size_t i;
for (i = 0; bp && i < ARRAYLEN(bt->p); ++i, bp = bp->next) {
if (kisdangerous(bp))
@@ -1058,234 +930,6 @@ static void __asan_trace(struct AsanTrace *bt, const struct StackFrame *bp) {
}
}
#define __asan_trace __asan_rawtrace
static void *__asan_allocate(size_t a, size_t n, struct AsanTrace *bt,
int underrun, int overrun, int initializer) {
char *p;
size_t c;
struct AsanExtra *e;
if ((p = _weaken(dlmemalign)(a, __asan_heap_size(n)))) {
c = _weaken(dlmalloc_usable_size)(p);
e = (struct AsanExtra *)(p + c - sizeof(*e));
__asan_unpoison(p, n);
__asan_poison(p - 16, 16, underrun); /* see dlmalloc design */
__asan_poison(p + n, c - n, overrun);
__asan_memset(p, initializer, n);
__asan_write48(&e->size, n);
__asan_memcpy(&e->bt, bt, sizeof(*bt));
}
return p;
}
static void *__asan_allocate_heap(size_t a, size_t n, struct AsanTrace *bt) {
return __asan_allocate(a, n, bt, kAsanHeapUnderrun, kAsanHeapOverrun, 0xf9);
}
static struct AsanExtra *__asan_get_extra(const void *p, size_t *c) {
int f;
long x, n;
struct AsanExtra *e;
f = (intptr_t)p >> 16;
if (!kisdangerous(p) &&
(n = _weaken(dlmalloc_usable_size)((void *)p)) > sizeof(*e) &&
!ckd_add(&x, (intptr_t)p, n) && x <= 0x800000000000 &&
(LIKELY(f == (int)((x - 1) >> 16)) || !kisdangerous((void *)(x - 1))) &&
(LIKELY(f == (int)((x = x - sizeof(*e)) >> 16)) ||
__asan_is_mapped(x >> 16)) &&
!(x & (_Alignof(struct AsanExtra) - 1))) {
*c = n;
return (struct AsanExtra *)x;
} else {
return 0;
}
}
size_t __asan_get_heap_size(const void *p) {
size_t n, c;
struct AsanExtra *e;
if ((e = __asan_get_extra(p, &c)) && __asan_read48(e->size, &n)) {
return n;
}
return 0;
}
static size_t __asan_malloc_usable_size(void *p) {
size_t n, c;
struct AsanExtra *e;
if ((e = __asan_get_extra(p, &c)) && __asan_read48(e->size, &n)) {
return n;
}
__asan_report_invalid_pointer(p)();
__asan_unreachable();
}
int __asan_print_trace(void *p) {
size_t c, i, n;
struct AsanExtra *e;
if (!(e = __asan_get_extra(p, &c))) {
kprintf(" bad pointer");
return einval();
}
if (!__asan_read48(e->size, &n)) {
kprintf(" bad cookie");
return -1;
}
kprintf("\n%p %,lu bytes [asan]", (char *)p, n);
if (!__asan_is_mapped((((intptr_t)p >> 3) + 0x7fff8000) >> 16)) {
kprintf(" (shadow not mapped?!)");
}
for (i = 0; i < ARRAYLEN(e->bt.p) && e->bt.p[i]; ++i) {
kprintf("\n%*lx %t", 12, e->bt.p[i], e->bt.p[i]);
}
return 0;
}
// Returns true if `p` was allocated by an IGNORE_LEAKS(function).
int __asan_is_leaky(void *p) {
int sym;
size_t c, i, n;
intptr_t f, *l;
struct AsanExtra *e;
struct SymbolTable *st;
if (!_weaken(GetSymbolTable))
notpossible;
if (!(e = __asan_get_extra(p, &c)))
return 0;
if (!__asan_read48(e->size, &n))
return 0;
if (!__asan_is_mapped((((intptr_t)p >> 3) + 0x7fff8000) >> 16))
return 0;
if (!(st = GetSymbolTable()))
return 0;
for (i = 0; i < ARRAYLEN(e->bt.p) && e->bt.p[i]; ++i) {
if ((sym = _weaken(__get_symbol)(st, e->bt.p[i])) == -1)
continue;
f = st->addr_base + st->symbols[sym].x;
for (l = _leaky_start; l < _leaky_end; ++l) {
if (f == *l) {
return 1;
}
}
}
return 0;
}
static void __asan_deallocate(char *p, long kind) {
size_t c, n;
struct AsanExtra *e;
if ((e = __asan_get_extra(p, &c))) {
if (__asan_read48(e->size, &n)) {
__asan_poison(p, c, kind);
if (c <= ASAN_MORGUE_THRESHOLD) {
p = __asan_morgue_add(p);
}
_weaken(dlfree)(p);
} else {
__asan_report_invalid_pointer(p)();
__asan_unreachable();
}
} else {
__asan_report_invalid_pointer(p)();
__asan_unreachable();
}
}
void __asan_free(void *p) {
if (!p)
return;
__asan_deallocate(p, kAsanHeapFree);
}
size_t __asan_bulk_free(void *p[], size_t n) {
size_t i;
for (i = 0; i < n; ++i) {
if (p[i]) {
__asan_deallocate(p[i], kAsanHeapFree);
p[i] = 0;
}
}
return 0;
}
static void *__asan_realloc_nogrow(void *p, size_t n, size_t m,
struct AsanTrace *bt) {
return 0;
}
static void *__asan_realloc_grow(void *p, size_t n, size_t m,
struct AsanTrace *bt) {
char *q;
if ((q = __asan_allocate_heap(16, n, bt))) {
__asan_memcpy(q, p, m);
__asan_deallocate(p, kAsanHeapRelocated);
}
return q;
}
static void *__asan_realloc_impl(void *p, size_t n,
void *grow(void *, size_t, size_t,
struct AsanTrace *)) {
size_t c, m;
struct AsanExtra *e;
if ((e = __asan_get_extra(p, &c))) {
if (__asan_read48(e->size, &m)) {
if (n <= m) { // shrink
__asan_poison((char *)p + n, m - n, kAsanHeapOverrun);
__asan_write48(&e->size, n);
return p;
} else if (n <= c - sizeof(struct AsanExtra)) { // small growth
__asan_unpoison((char *)p + m, n - m);
__asan_write48(&e->size, n);
return p;
} else { // exponential growth
return grow(p, n, m, &e->bt);
}
}
}
__asan_report_invalid_pointer(p)();
__asan_unreachable();
}
void *__asan_malloc(size_t size) {
struct AsanTrace bt;
__asan_trace(&bt, RBP);
return __asan_allocate_heap(16, size, &bt);
}
void *__asan_memalign(size_t align, size_t size) {
struct AsanTrace bt;
__asan_trace(&bt, RBP);
return __asan_allocate_heap(align, size, &bt);
}
void *__asan_calloc(size_t n, size_t m) {
struct AsanTrace bt;
__asan_trace(&bt, RBP);
if (ckd_mul(&n, n, m))
n = -1;
return __asan_allocate(16, n, &bt, kAsanHeapUnderrun, kAsanHeapOverrun, 0x00);
}
void *__asan_realloc(void *p, size_t n) {
struct AsanTrace bt;
if (p) {
return __asan_realloc_impl(p, n, __asan_realloc_grow);
} else {
__asan_trace(&bt, RBP);
return __asan_allocate_heap(16, n, &bt);
}
}
void *__asan_realloc_in_place(void *p, size_t n) {
return p ? __asan_realloc_impl(p, n, __asan_realloc_nogrow) : 0;
}
int __asan_malloc_trim(size_t pad) {
__asan_morgue_flush();
return _weaken(dlmalloc_trim) ? _weaken(dlmalloc_trim)(pad) : 0;
}
void __asan_register_globals(struct AsanGlobal g[], int n) {
int i;
__asan_poison(g, sizeof(*g) * n, kAsanProtected);
@@ -1328,19 +972,6 @@ void __asan_report_store(uint8_t *addr, int size) {
__asan_unreachable();
}
void *__asan_stack_malloc(size_t size, int classid) {
struct AsanTrace bt;
ASAN_LOG("__asan_stack_malloc(%zu, %d)\n", size, classid);
__asan_trace(&bt, RBP);
return __asan_allocate(16, size, &bt, kAsanStackUnderrun, kAsanStackOverrun,
0xf9);
}
void __asan_stack_free(char *p, size_t size, int classid) {
ASAN_LOG("__asan_stack_free(%p, %zu, %d)\n", p, size, classid);
__asan_deallocate(p, kAsanStackFree);
}
void __asan_handle_no_return(void) {
// this check is stupid and has far-reaching toilsome ramifications
}
@@ -1402,18 +1033,6 @@ void __asan_after_dynamic_init(void) {
ASAN_LOG("__asan_after_dynamic_init()\n");
}
void __asan_install_malloc_hooks(void) {
HOOK(hook_free, __asan_free);
HOOK(hook_malloc, __asan_malloc);
HOOK(hook_calloc, __asan_calloc);
HOOK(hook_realloc, __asan_realloc);
HOOK(hook_memalign, __asan_memalign);
HOOK(hook_bulk_free, __asan_bulk_free);
HOOK(hook_malloc_trim, __asan_malloc_trim);
HOOK(hook_realloc_in_place, __asan_realloc_in_place);
HOOK(hook_malloc_usable_size, __asan_malloc_usable_size);
}
void __asan_map_shadow(uintptr_t p, size_t n) {
// assume _mmi.lock is held
void *addr;
@@ -1496,13 +1115,6 @@ void __asan_init(int argc, char **argv, char **envp, unsigned long *auxv) {
static bool once;
if (!_cmpxchg(&once, false, true))
return;
if (_weaken(hook_malloc) || _weaken(hook_calloc) || _weaken(hook_realloc) ||
_weaken(hook_realloc_in_place) || _weaken(hook_free) ||
_weaken(hook_malloc_usable_size)) {
REQUIRE(dlfree);
REQUIRE(dlmemalign);
REQUIRE(dlmalloc_usable_size);
}
__asan_shadow_existing_mappings();
__asan_map_shadow((uintptr_t)__executable_start, _end - __executable_start);
__asan_map_shadow(0, 4096);
@@ -1510,7 +1122,6 @@ void __asan_init(int argc, char **argv, char **envp, unsigned long *auxv) {
if (!IsWindows()) {
sys_mprotect((void *)0x7fff8000, 0x10000, PROT_READ);
}
__asan_install_malloc_hooks();
STRACE(" _ ____ _ _ _ ");
STRACE(" / \\ / ___| / \\ | \\ | |");
STRACE(" / _ \\ \\___ \\ / _ \\ | \\| |");
@@ -1522,4 +1133,5 @@ void __asan_init(int argc, char **argv, char **envp, unsigned long *auxv) {
__weak_reference(__asan_poison, __asan_poison_memory_region);
__weak_reference(__asan_unpoison, __asan_unpoison_memory_region);
#endif /* __SANITIZE_ADDRESS__ */
#endif /* __x86_64__ */

@@ -3,10 +3,33 @@
#include "libc/calls/struct/iovec.h"
#include "libc/intrin/asancodes.h"
#include "libc/macros.internal.h"
#include "libc/nexgen32e/stackframe.h"
COSMOPOLITAN_C_START_
#ifdef MODE_DBG
// MODE=dbg
// O(32mb) of morgue memory
// Θ(64) bytes of malloc overhead
#define ASAN_MORGUE_ITEMS 512
#define ASAN_MORGUE_THRESHOLD 65536
#define ASAN_TRACE_ITEMS 16
#else
// MODE=asan
// O(32mb) of morgue memory
// Θ(32) bytes of malloc overhead
#define ASAN_MORGUE_ITEMS 512
#define ASAN_MORGUE_THRESHOLD 65536
#define ASAN_TRACE_ITEMS 4
#endif
#define SHADOW(x) ((signed char *)(((intptr_t)(x) >> kAsanScale) + kAsanMagic))
#define UNSHADOW(x) ((void *)(MAX(0, (intptr_t)(x)-kAsanMagic) << kAsanScale))
#define UNSHADOW(x) ((void *)(MAX(0, (intptr_t)(x) - kAsanMagic) << kAsanScale))
#define __asan_unreachable() \
do { \
kprintf("%s:%d: __asan_unreachable()\n", __FILE__, __LINE__); \
__builtin_trap(); \
} while (0)
typedef void __asan_die_f(void);
@@ -15,6 +38,10 @@ struct AsanFault {
const signed char *shadow;
};
struct AsanTrace {
uint32_t p[ASAN_TRACE_ITEMS]; // assumes linkage into 32-bit space
};
void __asan_unpoison(void *, long);
void __asan_poison(void *, long, signed char);
void __asan_verify(const void *, size_t);
@@ -27,19 +54,17 @@ bool __asan_is_valid_iov(const struct iovec *, int) nosideeffect;
struct AsanFault __asan_check(const void *, long) nosideeffect;
struct AsanFault __asan_check_str(const char *) nosideeffect;
void __asan_free(void *);
void *__asan_malloc(size_t);
bool __asan_is_mapped(int);
int __asan_is_leaky(void *);
int __asan_malloc_trim(size_t);
int __asan_print_trace(void *);
void *__asan_calloc(size_t, size_t);
void *__asan_realloc(void *, size_t);
void *__asan_memalign(size_t, size_t);
size_t __asan_get_heap_size(const void *);
void *__asan_realloc_in_place(void *, size_t);
__asan_die_f *__asan_die(void) __wur;
void __asan_memset(void *, char, size_t);
size_t __asan_get_heap_size(const void *);
void *__asan_memcpy(void *, const void *, size_t);
void __asan_rawtrace(struct AsanTrace *, const struct StackFrame *);
void __asan_report_memory_origin(const unsigned char *, int, signed char);
void __asan_report_memory_origin_image(intptr_t, int);
void __asan_report_memory_origin_heap(const unsigned char *, int);
COSMOPOLITAN_C_END_
#endif /* COSMOPOLITAN_LIBC_INTRIN_ASAN_H_ */

@@ -25,6 +25,7 @@
mov %r13,%rsi
mov %r14,%rdx
mov %r15,%rcx
.weak __asan_init
call __asan_init
pop %rsi
pop %rdi

@@ -1,176 +0,0 @@
/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
vi: set et ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi
Copyright 2023 Justine Alexandra Roberts Tunney
Permission to use, copy, modify, and/or distribute this software for
any purpose with or without fee is hereby granted, provided that the
above copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
*/
#ifdef __x86_64__
void __asan_report_load(void *, int);
void __asan_report_store(void *, int);
void *__asan_stack_malloc(size_t, int);
void __asan_stack_free(char *, size_t, int);
void __asan_report_load1(void *p) {
__asan_report_load(p, 1);
}
void __asan_report_load2(void *p) {
__asan_report_load(p, 2);
}
void __asan_report_load4(void *p) {
__asan_report_load(p, 4);
}
void __asan_report_load8(void *p) {
__asan_report_load(p, 8);
}
void __asan_report_load16(void *p) {
__asan_report_load(p, 16);
}
void __asan_report_load32(void *p) {
__asan_report_load(p, 32);
}
void __asan_report_load_n(void *p, int n) {
__asan_report_load(p, n);
}
void __asan_report_store1(void *p) {
__asan_report_store(p, 1);
}
void __asan_report_store2(void *p) {
__asan_report_store(p, 2);
}
void __asan_report_store4(void *p) {
__asan_report_store(p, 4);
}
void __asan_report_store8(void *p) {
__asan_report_store(p, 8);
}
void __asan_report_store16(void *p) {
__asan_report_store(p, 16);
}
void __asan_report_store32(void *p) {
__asan_report_store(p, 32);
}
void __asan_report_store_n(void *p, int n) {
__asan_report_store(p, n);
}
void *__asan_stack_malloc_0(size_t n) {
return __asan_stack_malloc(n, 0);
}
void *__asan_stack_malloc_1(size_t n) {
return __asan_stack_malloc(n, 1);
}
void *__asan_stack_malloc_2(size_t n) {
return __asan_stack_malloc(n, 2);
}
void *__asan_stack_malloc_3(size_t n) {
return __asan_stack_malloc(n, 3);
}
void *__asan_stack_malloc_4(size_t n) {
return __asan_stack_malloc(n, 4);
}
void *__asan_stack_malloc_5(size_t n) {
return __asan_stack_malloc(n, 5);
}
void *__asan_stack_malloc_6(size_t n) {
return __asan_stack_malloc(n, 6);
}
void *__asan_stack_malloc_7(size_t n) {
return __asan_stack_malloc(n, 7);
}
void *__asan_stack_malloc_8(size_t n) {
return __asan_stack_malloc(n, 8);
}
void *__asan_stack_malloc_9(size_t n) {
return __asan_stack_malloc(n, 9);
}
void *__asan_stack_malloc_10(size_t n) {
return __asan_stack_malloc(n, 10);
}
void __asan_stack_free_0(char *p, size_t n) {
__asan_stack_free(p, n, 0);
}
void __asan_stack_free_1(char *p, size_t n) {
__asan_stack_free(p, n, 1);
}
void __asan_stack_free_2(char *p, size_t n) {
__asan_stack_free(p, n, 2);
}
void __asan_stack_free_3(char *p, size_t n) {
__asan_stack_free(p, n, 3);
}
void __asan_stack_free_4(char *p, size_t n) {
__asan_stack_free(p, n, 4);
}
void __asan_stack_free_5(char *p, size_t n) {
__asan_stack_free(p, n, 5);
}
void __asan_stack_free_6(char *p, size_t n) {
__asan_stack_free(p, n, 6);
}
void __asan_stack_free_7(char *p, size_t n) {
__asan_stack_free(p, n, 7);
}
void __asan_stack_free_8(char *p, size_t n) {
__asan_stack_free(p, n, 8);
}
void __asan_stack_free_9(char *p, size_t n) {
__asan_stack_free(p, n, 9);
}
void __asan_stack_free_10(char *p, size_t n) {
__asan_stack_free(p, n, 10);
}
void __asan_load1() {
__builtin_trap();
}
void __asan_load2() {
__builtin_trap();
}
void __asan_load4() {
__builtin_trap();
}
void __asan_load8() {
__builtin_trap();
}
void __asan_load16() {
__builtin_trap();
}
void __asan_load32() {
__builtin_trap();
}
void __asan_store1() {
__builtin_trap();
}
void __asan_store2() {
__builtin_trap();
}
void __asan_store4() {
__builtin_trap();
}
void __asan_store8() {
__builtin_trap();
}
void __asan_store16() {
__builtin_trap();
}
void __asan_store32() {
__builtin_trap();
}
#endif /* __x86_64__ */

@@ -816,7 +816,7 @@ privileged static size_t kformat(char *b, size_t n, const char *fmt,
goto FormatString;
}
base = 4;
hash = '&';
/* hash = '&'; */
goto FormatNumber;
}

libc/intrin/magicu.c Normal file
@@ -0,0 +1,68 @@
/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
vi: set et ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi
Copyright 2023 Justine Alexandra Roberts Tunney
Permission to use, copy, modify, and/or distribute this software for
any purpose with or without fee is hereby granted, provided that the
above copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
*/
#include "libc/intrin/magicu.h"
#include "libc/assert.h"
/**
* Precomputes magic numbers for unsigned division by constant.
*
* The returned divisor may be passed to __magic_div() to perform
* unsigned integer division way faster than normal division e.g.
*
* assert(77 / 7 == __magicu_div(77, __magicu_get(7)));
*
* @param d is intended divisor, which must not be zero
* @return magic divisor (never zero)
*/
struct magicu __magicu_get(uint32_t d) {
// From Hacker's Delight by Henry S. Warren Jr., 9780321842688
// Figure 103. Simplified algorithm for magic number unsigned
int a, p;
struct magicu magu;
uint32_t p32, q, r, delta;
npassert(d); // Can't divide by zero.
p32 = 0; // Avoid compiler warning.
a = 0; // Initialize "add" indicator.
p = 31; // Initialize p.
q = 0x7FFFFFFF / d; // Initialize q = (2**p - 1)/d.
r = 0x7FFFFFFF - q * d; // Init. r = rem(2**p - 1, d).
do {
p = p + 1;
if (p == 32) {
p32 = 1; // Set p32 = 2**(p-32).
} else {
p32 = 2 * p32;
}
if (r + 1 >= d - r) {
if (q >= 0x7FFFFFFF) a = 1;
q = 2 * q + 1; // Update q.
r = 2 * r + 1 - d; // Update r.
} else {
if (q >= 0x80000000) a = 1;
q = 2 * q;
r = 2 * r + 1;
}
delta = d - 1 - r;
} while (p < 64 && p32 < delta);
magu.M = q + 1; // Magic number and
magu.s = p - 32; // Shift amount to return
if (a) magu.s |= 64; // Sets "add" indicator
npassert(magu.M || magu.s); // Never returns zero.
return magu;
}

libc/intrin/magicu.h Normal file
@@ -0,0 +1,33 @@
#ifndef COSMOPOLITAN_LIBC_TINYMATH_MAGICU_H_
#define COSMOPOLITAN_LIBC_TINYMATH_MAGICU_H_
COSMOPOLITAN_C_START_
struct magicu {
uint32_t M;
uint32_t s;
};
struct magicu __magicu_get(uint32_t);
/**
* Performs fast division using precomputed magic for constant divisor.
*
* @param x is unsigned integer that shall be divided
* @param d should be `__magicu_get(y)` if computing `x / y`
* @return result of unsigned integer division
*/
forceinline uint32_t __magicu_div(uint32_t x, struct magicu d) {
return ((((uint64_t)x * d.M) >> 32) + ((d.s & 64) ? x : 0)) >> (d.s & 63);
}
/**
* Checks if 𝑑 contains a valid initialized divisor.
*/
static inline bool32 __magicu_valid(struct magicu d) {
if (!d.M && !d.s) return false; /* uninitialized */
if (d.s & ~(64 | 63)) return false; /* corrupted */
return true;
}
COSMOPOLITAN_C_END_
#endif /* COSMOPOLITAN_LIBC_TINYMATH_MAGICU_H_ */
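
A quick standalone check of the new magicu API (not part of this
commit) could look like the following; it just mirrors the assert in
the doc comment across a handful of divisors:

```c
// Standalone sanity check for __magicu_get()/__magicu_div(); not part of the
// commit, just a brute-force way to convince yourself the magic is right.
#include <assert.h>
#include <stdint.h>
#include "libc/intrin/magicu.h"

int main(void) {
  uint32_t divisors[] = {1, 2, 3, 7, 10, 1000, 0x7fffffff, 0xffffffff};
  for (size_t i = 0; i < sizeof(divisors) / sizeof(divisors[0]); ++i) {
    struct magicu d = __magicu_get(divisors[i]);
    assert(__magicu_valid(d));
    for (uint32_t x = 0; x < 1000000; ++x)
      assert(x / divisors[i] == __magicu_div(x, d));  // matches plain division
  }
  return 0;
}
```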