Always initialize thread local storage

We had previously not enabled TLS in MODE=tiny in order to keep the
smallest example programs (e.g. life.com) just 16kb in size. But it
was error-prone doing that, so now we just always enable it because
this change uses hacks to ensure it won't increase life.com's size.

This change also fixes a bug on NetBSD, where signal handlers would
break thread local storage if SA_SIGINFO was being used. This looks
like it might be a bug in NetBSD, but it's got a simple workaround.
This commit is contained in:
Justine Tunney 2022-07-18 22:26:11 -07:00
parent 057e8f5b54
commit 69f4152f38
33 changed files with 174 additions and 123 deletions

View file

@ -9,5 +9,7 @@ LICENSE
LOCAL CHANGES
- Introduce __oom_hook()
- Favor pause (rather than sched_yield) for spin locks
- Use faster two power roundup for memalign()
- Poison maps to integrate with Address Sanitizer
- Introduce __oom_hook() by using _mapanon() vs. mmap()
- Wrap locks with __threaded check to improve perf lots

View file

@ -5,7 +5,9 @@
#include "libc/dce.h"
#include "libc/errno.h"
#include "libc/intrin/kprintf.h"
#include "libc/macros.internal.h"
#include "libc/mem/mem.h"
#include "libc/nexgen32e/bsr.h"
#include "libc/nexgen32e/rdtsc.h"
#include "libc/rand/rand.h"
#include "libc/runtime/runtime.h"
@ -917,11 +919,8 @@ static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
void* mem = 0;
if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
alignment = MIN_CHUNK_SIZE;
if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */
size_t a = MALLOC_ALIGNMENT << 1;
while (a < alignment) a <<= 1;
alignment = a;
}
/* alignment is 32+ bytes rounded up to nearest two power */
alignment = 2ul << bsrl(MAX(MIN_CHUNK_SIZE, alignment) - 1);
if (bytes >= MAX_REQUEST - alignment) {
if (m != 0) { /* Test isn't needed but avoids compiler warning */
MALLOC_FAILURE_ACTION;

View file

@ -1,4 +1,5 @@
// clang-format off
#include "libc/calls/calls.h"
#include "libc/nexgen32e/threaded.h"
/* --------------------------- Lock preliminaries ------------------------ */
@ -94,7 +95,7 @@ FORCEINLINE void x86_clear_lock(int* sl) {
/* Plain spin locks use single word (embedded in malloc_states) */
static dontinline int spin_acquire_lock(int *sl) {
while (*(volatile int *)sl != 0 || CAS_LOCK(sl)) {
__builtin_ia32_pause();
sched_yield();
}
return 0;
}
@ -154,7 +155,7 @@ FORCEINLINE int recursive_acquire_lock(MLOCK_T *lk) {
++lk->c;
return 0;
}
__builtin_ia32_pause();
sched_yield();
}
}

View file

@ -179,6 +179,8 @@
/*
========================================================================
To make a fully customizable malloc.h header file, cut everything
#include "libc/sysv/consts/map.h"
#include "libc/runtime/runtime.h"
above this line, put into file malloc.h, edit to suit, and #include it
on the next line, as well as in programs that use this malloc.
========================================================================
@ -385,7 +387,7 @@ unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
/* MORECORE and MMAP must return MFAIL on failure */
#define MFAIL ((void*)(MAX_SIZE_T))
#define MFAIL NULL
#define CMFAIL ((char*)(MFAIL)) /* defined for convenience */
#if HAVE_MMAP
@ -398,7 +400,7 @@ unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
#endif /* MAP_ANON */
#ifdef MAP_ANONYMOUS
#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
#define MMAP_DEFAULT(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
#define MMAP_DEFAULT(s) _mapanon(s)
#else /* MAP_ANONYMOUS */
/*
Nearly all versions of mmap support MAP_ANONYMOUS, so the following
@ -408,8 +410,8 @@ unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
#define MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? \
(dev_zero_fd = open("/dev/zero", O_RDWR), \
mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
mmap_no(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
mmap_no(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
#endif /* MAP_ANONYMOUS */
#define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s)

View file

@ -16,24 +16,21 @@
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
*/
#include "libc/bits/weaken.h"
#include "libc/calls/calls.h"
#include "libc/dce.h"
#include "libc/intrin/asan.internal.h"
#include "libc/intrin/asancodes.h"
#include "libc/intrin/kprintf.h"
#include "libc/runtime/runtime.h"
#include "libc/sysv/consts/map.h"
#include "libc/sysv/consts/prot.h"
#include "third_party/dlmalloc/vespene.internal.h"
/**
* Acquires more system memory for dlmalloc.
* @return memory map address on success, or null w/ errno
*/
void *dlmalloc_requires_more_vespene_gas(size_t size) {
char *p;
if ((p = mmap(0, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS,
-1, 0)) != MAP_FAILED) {
if (weaken(__asan_poison)) {
weaken(__asan_poison)(p, size, kAsanHeapFree);
if ((p = _mapanon(size))) {
if (IsAsan()) {
__asan_poison(p, size, kAsanHeapFree);
}
}
return p;