Mint APE Loader v1.5
This change ports the APE Loader to Linux AARCH64, so that Raspberry Pi users can run programs like redbean without the executable needing to modify itself. Some progress has also slipped into this change on making the codebase better conform to user expectations and industry standards regarding which symbols we're allowed to declare.
This commit is contained in:
parent 6843150e0c
commit 7e0a09feec
510 changed files with 1783 additions and 1483 deletions
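Most of the 510-file diff below is a mechanical rename of internal spellings: the `noasan` function attribute becomes `dontasan`, `_unassert()` becomes `unassert()`, `STATIC_YOINK()` becomes `__static_yoink()`, `VEIL()` becomes `__veil()`, and the `APE_GUARDSIZE` constant is replaced with `getauxval(AT_PAGESZ)`. The following is a minimal sketch of what the new spellings look like at a call site; it is illustrative only, `check_positive` is a made-up function, and the assumption that `unassert()` comes from libc/assert.h is mine, not part of the diff:

// Illustrative sketch only; not part of this commit.
#include "libc/assert.h"  // unassert(), formerly _unassert() (assumed header)

__static_yoink("GetSymbolTable");  // formerly STATIC_YOINK("GetSymbolTable")

// `dontasan` (formerly `noasan`) asks the compiler not to instrument
// this function with ASAN checks.
dontasan int check_positive(int x) {
  unassert(x > 0);  // formerly _unassert(x > 0)
  return x;
}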
@@ -20,19 +20,15 @@
 #include "libc/runtime/runtime.h"

 /**
- * Returns auxiliary value, or zero if kernel didn't provide it.
- *
- * This function is typically regarded as a libc implementation detail;
- * thus, the source code is the documentation.
+ * Returns auxiliary value better.
  *
  * @param at is `AT_...` search key
  * @return true if value was found
  * @see libc/sysv/consts.sh
  * @see System Five Application Binary Interface § 3.4.3
  * @error ENOENT when value not found
  * @asyncsignalsafe
  */
-struct AuxiliaryValue _getauxval(unsigned long at) {
+dontasan struct AuxiliaryValue _getauxval(unsigned long at) {
   unsigned long *ap;
   for (ap = __auxv; ap[0]; ap += 2) {
     if (at == ap[0]) {
@@ -19,7 +19,7 @@
 #include "libc/intrin/strace.internal.h"
 #include "libc/runtime/memtrack.internal.h"

-noasan bool AreMemoryIntervalsOk(const struct MemoryIntervals *mm) {
+dontasan bool AreMemoryIntervalsOk(const struct MemoryIntervals *mm) {
   /* asan runtime depends on this function */
   int i;
   size_t wantsize;

@@ -52,7 +52,7 @@
 #include "third_party/dlmalloc/dlmalloc.h"
 #ifdef __x86_64__

-STATIC_YOINK("_init_asan");
+__static_yoink("_init_asan");

 #if IsModeDbg()
 // MODE=dbg

@@ -342,7 +342,7 @@ static char *__asan_hexcpy(char *p, uint64_t x, uint8_t k) {

 static void __asan_exit(void) {
   kprintf("your asan runtime needs\n"
-          "\tSTATIC_YOINK(\"__die\");\n"
+          "\t__static_yoink(\"__die\");\n"
          "in order to show you backtraces\n");
   _Exitr(99);
 }

@@ -457,7 +457,7 @@ static struct AsanFault __asan_checka(const signed char *s, long ndiv8) {
  * This is normally abstracted by the compiler. In some cases, it may be
  * desirable to perform an ASAN memory safety check explicitly, e.g. for
  * system call wrappers that need to vet memory passed to the kernel, or
- * string library routines that use the `noasan` keyword due to compiler
+ * string library routines that use the `dontasan` keyword due to compiler
  * generated ASAN being too costly. This function is fast especially for
  * large memory ranges since this takes a few picoseconds for each byte.
  *

@@ -766,11 +766,11 @@ static void __asan_report_memory_origin_image(intptr_t a, int z) {
       kprintf("\tunknown please supply .com.dbg symbols or set COMDBG\n");
     }
   } else {
-    kprintf("\tunknown please STATIC_YOINK(\"GetSymbolTable\");\n");
+    kprintf("\tunknown please __static_yoink(\"GetSymbolTable\");\n");
   }
 }

-static noasan void __asan_onmemory(void *x, void *y, size_t n, void *a) {
+static dontasan void __asan_onmemory(void *x, void *y, size_t n, void *a) {
   const unsigned char *p = x;
   struct ReportOriginHeap *t = a;
   if ((p <= t->a && t->a < p + n) ||

@@ -790,7 +790,7 @@ static void __asan_report_memory_origin_heap(const unsigned char *a, int z) {
     t.z = z;
     _weaken(malloc_inspect_all)(__asan_onmemory, &t);
   } else {
-    kprintf("\tunknown please STATIC_YOINK(\"malloc_inspect_all\");\n");
+    kprintf("\tunknown please __static_yoink(\"malloc_inspect_all\");\n");
   }
 }

@@ -1117,7 +1117,7 @@ int __asan_print_trace(void *p) {
     kprintf("\n%*lx %s", 12, e->bt.p[i],
             _weaken(GetSymbolByAddr)
                 ? _weaken(GetSymbolByAddr)(e->bt.p[i])
-                : "please STATIC_YOINK(\"GetSymbolByAddr\")");
+                : "please __static_yoink(\"GetSymbolByAddr\")");
   }
   return 0;
 }

@@ -1482,7 +1482,8 @@ static textstartup void __asan_shadow_mapping(struct MemoryIntervals *m,
 static textstartup void __asan_shadow_existing_mappings(void) {
   __asan_shadow_mapping(&_mmi, 0);
   __asan_map_shadow(GetStackAddr(), GetStackSize());
-  __asan_poison((void *)GetStackAddr(), APE_GUARDSIZE, kAsanStackOverflow);
+  __asan_poison((void *)GetStackAddr(), getauxval(AT_PAGESZ),
+                kAsanStackOverflow);
 }

 forceinline ssize_t __write_str(const char *s) {

@@ -1509,7 +1510,7 @@ void __asan_init(int argc, char **argv, char **envp, intptr_t *auxv) {
   __asan_shadow_existing_mappings();
   __asan_map_shadow((uintptr_t)__executable_start, _end - __executable_start);
   __asan_map_shadow(0, 4096);
-  __asan_poison(0, APE_GUARDSIZE, kAsanNullPage);
+  __asan_poison(0, getauxval(AT_PAGESZ), kAsanNullPage);
   if (!IsWindows()) {
     sys_mprotect((void *)0x7fff8000, 0x10000, PROT_READ);
   }
@@ -49,7 +49,7 @@ void _bt(const char *fmt, ...) {
     errno = e;
   } else {
     kprintf("_bt() can't show backtrace because you need:\n"
-            "\tSTATIC_YOINK(\"ShowBacktrace\");\n"
+            "\t__static_yoink(\"ShowBacktrace\");\n"
            "to be linked.\n");
     if (_weaken(PrintBacktraceUsingSymbols) && _weaken(GetSymbolTable)) {
       e = errno;

@@ -58,8 +58,8 @@ void _bt(const char *fmt, ...) {
       errno = e;
     } else {
       kprintf("_bt() can't show backtrace because you need:\n"
-              "\tSTATIC_YOINK(\"PrintBacktraceUsingSymbols\");\n"
-              "\tSTATIC_YOINK(\"GetSymbolTable\");\n"
+              "\t__static_yoink(\"PrintBacktraceUsingSymbols\");\n"
+              "\t__static_yoink(\"GetSymbolTable\");\n"
              "to be linked.\n");
     }
   }
@@ -26,7 +26,7 @@
 #include "libc/runtime/runtime.h"
 #include "libc/sysv/errfuns.h"

-STATIC_YOINK("__cxa_finalize");
+__static_yoink("__cxa_finalize");

 /**
  * Adds global destructor.

@@ -42,7 +42,7 @@ STATIC_YOINK("__cxa_finalize");
  * @return 0 on success or nonzero w/ errno
  * @note folks have forked libc in past just to unbloat atexit()
  */
-noasan int __cxa_atexit(void *fp, void *arg, void *pred) {
+dontasan int __cxa_atexit(void *fp, void *arg, void *pred) {
   /* asan runtime depends on this function */
   unsigned i;
   struct CxaAtexitBlock *b, *b2;

@@ -61,7 +61,7 @@ noasan int __cxa_atexit(void *fp, void *arg, void *pred) {
     }
   }
   i = _bsr(~b->mask);
-  _unassert(i < ARRAYLEN(b->p));
+  unassert(i < ARRAYLEN(b->p));
   b->mask |= 1u << i;
   b->p[i].fp = fp;
   b->p[i].arg = arg;

@@ -60,7 +60,7 @@ StartOverLocked:
   if (!pred) {
     b2 = b->next;
     if (b2) {
-      _unassert(b != &__cxa_blocks.root);
+      unassert(b != &__cxa_blocks.root);
       if (_weaken(free)) {
         _weaken(free)(b);
       }
@@ -32,15 +32,15 @@

 static uint64_t sys_mmap_metal_break;

-noasan static struct DirectMap bad_mmap(void) {
+dontasan static struct DirectMap bad_mmap(void) {
   struct DirectMap res;
   res.addr = (void *)-1;
   res.maphandle = -1;
   return res;
 }

-noasan struct DirectMap sys_mmap_metal(void *vaddr, size_t size, int prot,
-                                       int flags, int fd, int64_t off) {
+dontasan struct DirectMap sys_mmap_metal(void *vaddr, size_t size, int prot,
+                                         int flags, int fd, int64_t off) {
   /* asan runtime depends on this function */
   size_t i;
   struct mman *mm;

@@ -72,7 +72,7 @@ textwindows struct DirectMap sys_mmap_nt(void *addr, size_t size, int prot,
       }
     }
   } else {
-    _unassert(flags & MAP_ANONYMOUS);
+    unassert(flags & MAP_ANONYMOUS);
     fl = (struct ProtectNt){kNtPageExecuteReadwrite,
                             kNtFileMapWrite | kNtFileMapExecute};
   }
@@ -43,11 +43,11 @@ static void *_mapframe(void *p, int f) {
     if (!rc) {
       return p;
     } else {
-      _unassert(errno == ENOMEM);
+      unassert(errno == ENOMEM);
       return 0;
     }
   } else {
-    _unassert(errno == ENOMEM);
+    unassert(errno == ENOMEM);
     return 0;
   }
 }

@@ -71,14 +71,14 @@ static void *_mapframe(void *p, int f) {
  * @return new value for `e` or null w/ errno
  * @raise ENOMEM if we require more vespene gas
  */
-noasan void *_extend(void *p, size_t n, void *e, int f, intptr_t h) {
+dontasan void *_extend(void *p, size_t n, void *e, int f, intptr_t h) {
   char *q;
-  _unassert(!((uintptr_t)SHADOW(p) & (G - 1)));
-  _unassert((uintptr_t)p + (G << kAsanScale) <= h);
+  unassert(!((uintptr_t)SHADOW(p) & (G - 1)));
+  unassert((uintptr_t)p + (G << kAsanScale) <= h);
   // TODO(jart): Make this spin less in non-ASAN mode.
   for (q = e; q < ((char *)p + n); q += 8) {
     if (!((uintptr_t)q & (G - 1))) {
-      _unassert(q + G <= (char *)h);
+      unassert(q + G <= (char *)h);
       if (!_mapframe(q, f)) return 0;
       if (IsAsan()) {
         if (!((uintptr_t)SHADOW(q) & (G - 1))) {
@@ -19,7 +19,7 @@
 #include "libc/assert.h"
 #include "libc/runtime/memtrack.internal.h"

-noasan unsigned FindMemoryInterval(const struct MemoryIntervals *mm, int x) {
+dontasan unsigned FindMemoryInterval(const struct MemoryIntervals *mm, int x) {
   unsigned l, m, r;
   l = 0;
   r = mm->i;

@@ -31,6 +31,6 @@ noasan unsigned FindMemoryInterval(const struct MemoryIntervals *mm, int x) {
       r = m;
     }
   }
-  _unassert(l == mm->i || x <= mm->p[l].y);
+  unassert(l == mm->i || x <= mm->p[l].y);
   return l;
 }
@@ -31,7 +31,7 @@
 #include "libc/thread/thread.h"

 #ifdef __x86_64__
-STATIC_YOINK("_init_g_fds");
+__static_yoink("_init_g_fds");
 #endif

 struct Fds g_fds;

@@ -49,7 +49,7 @@ static textwindows dontinline void SetupWinStd(struct Fds *fds, int i, int x) {
 textstartup void __init_fds(void) {
   struct Fds *fds;
   __fds_lock_obj._type = PTHREAD_MUTEX_RECURSIVE;
-  fds = VEIL("r", &g_fds);
+  fds = __veil("r", &g_fds);
   fds->n = 4;
   atomic_store_explicit(&fds->f, 3, memory_order_relaxed);
   if (_weaken(_extend)) {

@@ -73,9 +73,9 @@ textstartup void __init_fds(void) {
       fds->p[1].kind = pushpop(kFdSerial);
       fds->p[2].kind = pushpop(kFdSerial);
     }
-    fds->p[0].handle = VEIL("r", 0x3F8ull);
-    fds->p[1].handle = VEIL("r", 0x3F8ull);
-    fds->p[2].handle = VEIL("r", 0x3F8ull);
+    fds->p[0].handle = __veil("r", 0x3F8ull);
+    fds->p[1].handle = __veil("r", 0x3F8ull);
+    fds->p[2].handle = __veil("r", 0x3F8ull);
   } else if (IsWindows()) {
     SetupWinStd(fds, 0, kNtStdInputHandle);
     SetupWinStd(fds, 1, kNtStdOutputHandle);
@@ -16,28 +16,33 @@
 │ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR            │
 │ PERFORMANCE OF THIS SOFTWARE.                                               │
 ╚─────────────────────────────────────────────────────────────────────────────*/
+#include "libc/errno.h"
+#include "libc/intrin/_getauxval.internal.h"
 #include "libc/runtime/runtime.h"
-#include "libc/sysv/errfuns.h"
+#include "libc/sysv/consts/auxv.h"

 /**
- * Returns auxiliary value, or zero if kernel didn't provide it.
+ * Returns auxiliary value.
  *
- * This function is typically regarded as a libc implementation detail;
- * thus, the source code is the documentation.
- *
- * @return auxiliary value or 0 if `at` not found
+ * @return auxiliary value or 0 if `key` not found
  * @see libc/sysv/consts.sh
  * @see System Five Application Binary Interface § 3.4.3
  * @error ENOENT when value not found
  * @asyncsignalsafe
  */
-unsigned long getauxval(unsigned long at) {
-  unsigned long res, *ap;
-  for (ap = __auxv; ap[0]; ap += 2) {
-    if (at == ap[0]) {
-      return ap[1];
+unsigned long getauxval(unsigned long key) {
+  struct AuxiliaryValue x;
+  x = _getauxval(key);
+  if (key == AT_PAGESZ) {
+    if (!x.isfound) {
+      x.value = 16384;
     }
+    x.isfound = true;
+  }
+  if (x.isfound) {
+    return x.value;
+  } else {
+    errno = ENOENT;
+    return 0;
   }
-  enoent();
-  return 0;
 }
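For context on the getauxval() hunk above: getauxval() now delegates to _getauxval(), which reports whether the kernel actually supplied a value, and AT_PAGESZ is special-cased so the lookup always succeeds, defaulting to 16384 bytes when the auxiliary vector doesn't provide a page size. A hedged sketch of a caller relying on that guarantee follows; `round_up_to_page` is invented for illustration and the header locations are assumed from the includes shown in the diff:

#include "libc/runtime/runtime.h"   // getauxval() (assumed declaration site)
#include "libc/sysv/consts/auxv.h"  // AT_PAGESZ

// Round a byte count up to a multiple of the system page size. After this
// change getauxval(AT_PAGESZ) always returns a nonzero power of two, so no
// fallback branch is needed here.
static unsigned long round_up_to_page(unsigned long n) {
  unsigned long pagesz = getauxval(AT_PAGESZ);
  return (n + pagesz - 1) & ~(pagesz - 1);
}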
@@ -31,7 +31,7 @@
 #define kBufSize 1024
 #define kPid "TracerPid:\t"

-static textwindows noasan bool IsBeingDebugged(void) {
+static textwindows dontasan bool IsBeingDebugged(void) {
   return !!NtGetPeb()->BeingDebugged;
 }

@@ -38,9 +38,9 @@ bool IsWsl1(void) {
   if (res) return res & 1;
   if (!IsLinux()) return res = 2, false;
   int e = errno;
-  _unassert(__sys_mmap((void *)1, 4096, PROT_READ | PROT_WRITE,
-                       MAP_FIXED | MAP_PRIVATE | ANONYMOUS | GROWSDOWN, -1, 0,
-                       0) == MAP_FAILED);
+  unassert(__sys_mmap((void *)1, 4096, PROT_READ | PROT_WRITE,
+                      MAP_FIXED | MAP_PRIVATE | ANONYMOUS | GROWSDOWN, -1, 0,
+                      0) == MAP_FAILED);
   bool tmp = errno == ENOTSUP;
   errno = e;
   res = 2 | tmp;
@@ -82,7 +82,7 @@ void *kmalloc(size_t size) {
   }
   __kmalloc_unlock();
   if (p) {
-    _unassert(!((intptr_t)(p + i) & (KMALLOC_ALIGN - 1)));
+    unassert(!((intptr_t)(p + i) & (KMALLOC_ALIGN - 1)));
     if (IsAsan()) __asan_poison(p + i + size, n - size, kAsanHeapOverrun);
     return p + i;
   } else {

@@ -57,6 +57,7 @@
 #include "libc/str/str.h"
 #include "libc/str/tab.internal.h"
 #include "libc/str/utf16.h"
+#include "libc/sysv/consts/auxv.h"
 #include "libc/sysv/consts/nr.h"
 #include "libc/sysv/consts/prot.h"
 #include "libc/thread/tls.h"
@@ -187,7 +188,7 @@ privileged bool kisdangerous(const void *p) {
     if (IsStackFrame(frame)) return false;
     if (kismapped(frame)) return false;
   }
-  if (GetStackAddr() + APE_GUARDSIZE <= (uintptr_t)p &&
+  if (GetStackAddr() + 16384 <= (uintptr_t)p &&
       (uintptr_t)p < GetStackAddr() + GetStackSize()) {
     return false;
   }

@@ -4,7 +4,7 @@
 COSMOPOLITAN_C_START_

 #define IGNORE_LEAKS(FUNC) \
-  STATIC_YOINK("_leaky_start"); \
+  __static_yoink("_leaky_start"); \
   void *_leaky_##FUNC[] _Section(".piro.relo.sort.leaky.2." #FUNC \
                                  ",\"aw\",@init_array #") = {FUNC}
@@ -36,9 +36,9 @@ static inline const unsigned char *memchr_pure(const unsigned char *s,
 }

 #ifdef __x86_64__
-noasan static inline const unsigned char *memchr_sse(const unsigned char *s,
-                                                     unsigned char c,
-                                                     size_t n) {
+dontasan static inline const unsigned char *memchr_sse(const unsigned char *s,
+                                                       unsigned char c,
+                                                       size_t n) {
   size_t i;
   unsigned k;
   unsigned m;

@@ -36,9 +36,9 @@ static inline const unsigned char *memrchr_pure(const unsigned char *s,
 }

 #ifdef __x86_64__
-noasan static inline const unsigned char *memrchr_sse(const unsigned char *s,
-                                                      unsigned char c,
-                                                      size_t n) {
+dontasan static inline const unsigned char *memrchr_sse(const unsigned char *s,
+                                                        unsigned char c,
+                                                        size_t n) {
   size_t i;
   unsigned k, m;
   xmm_t v, t = {c, c, c, c, c, c, c, c, c, c, c, c, c, c, c, c};
@@ -50,7 +50,7 @@
 static void *MoveMemoryIntervals(struct MemoryInterval *d,
                                  const struct MemoryInterval *s, int n) {
   int i;
-  _unassert(n >= 0);
+  unassert(n >= 0);
   if (d > s) {
     for (i = n; i--;) {
       d[i] = s[i];

@@ -64,8 +64,8 @@ static void *MoveMemoryIntervals(struct MemoryInterval *d,
 }

 static void RemoveMemoryIntervals(struct MemoryIntervals *mm, int i, int n) {
-  _unassert(i >= 0);
-  _unassert(i + n <= mm->i);
+  unassert(i >= 0);
+  unassert(i + n <= mm->i);
   MoveMemoryIntervals(mm->p + i, mm->p + i + n, mm->i - (i + n));
   mm->i -= n;
 }

@@ -109,9 +109,9 @@ static bool ExtendMemoryIntervals(struct MemoryIntervals *mm) {
 }

 int CreateMemoryInterval(struct MemoryIntervals *mm, int i) {
-  _unassert(i >= 0);
-  _unassert(i <= mm->i);
-  _unassert(mm->n >= 0);
+  unassert(i >= 0);
+  unassert(i <= mm->i);
+  unassert(mm->n >= 0);
   if (UNLIKELY(mm->i == mm->n) && !ExtendMemoryIntervals(mm)) return enomem();
   MoveMemoryIntervals(mm->p + i + 1, mm->p + i, mm->i++ - i);
   return 0;

@@ -130,7 +130,7 @@ int ReleaseMemoryIntervals(struct MemoryIntervals *mm, int x, int y,
                            void wf(struct MemoryIntervals *, int, int)) {
   unsigned l, r;
   ASSERT_MEMTRACK();
-  _unassert(y >= x);
+  unassert(y >= x);
   if (!mm->i) return 0;
   // binary search for the lefthand side
   l = FindMemoryInterval(mm, x);

@@ -140,8 +140,8 @@ int ReleaseMemoryIntervals(struct MemoryIntervals *mm, int x, int y,
   // binary search for the righthand side
   r = FindMemoryInterval(mm, y);
   if (r == mm->i || (r > l && y < mm->p[r].x)) --r;
-  _unassert(r >= l);
-  _unassert(x <= mm->p[r].y);
+  unassert(r >= l);
+  unassert(x <= mm->p[r].y);

   // remove the middle of an existing map
   //
@@ -162,11 +162,11 @@ int ReleaseMemoryIntervals(struct MemoryIntervals *mm, int x, int y,
   // ----|mmmm|----------------- after
   //
   if (x > mm->p[l].x && x <= mm->p[l].y) {
-    _unassert(y >= mm->p[l].y);
+    unassert(y >= mm->p[l].y);
     if (IsWindows()) return einval();
     mm->p[l].size -= (size_t)(mm->p[l].y - (x - 1)) * FRAMESIZE;
     mm->p[l].y = x - 1;
-    _unassert(mm->p[l].x <= mm->p[l].y);
+    unassert(mm->p[l].x <= mm->p[l].y);
     ++l;
   }

@@ -177,11 +177,11 @@ int ReleaseMemoryIntervals(struct MemoryIntervals *mm, int x, int y,
   // ---------------|mm|-------- after
   //
   if (y >= mm->p[r].x && y < mm->p[r].y) {
-    _unassert(x <= mm->p[r].x);
+    unassert(x <= mm->p[r].x);
     if (IsWindows()) return einval();
     mm->p[r].size -= (size_t)((y + 1) - mm->p[r].x) * FRAMESIZE;
     mm->p[r].x = y + 1;
-    _unassert(mm->p[r].x <= mm->p[r].y);
+    unassert(mm->p[r].x <= mm->p[r].y);
     --r;
   }

@@ -199,7 +199,7 @@ int TrackMemoryInterval(struct MemoryIntervals *mm, int x, int y, long h,
                         long offset, long size) {
   unsigned i;
   ASSERT_MEMTRACK();
-  _unassert(y >= x);
+  unassert(y >= x);
   i = FindMemoryInterval(mm, x);

   // try to extend the righthand side of the lefthand entry
@@ -54,14 +54,14 @@ struct ReclaimedPage {
 /**
  * Allocates new page of physical memory.
  */
-noasan texthead uint64_t __new_page(struct mman *mm) {
+dontasan texthead uint64_t __new_page(struct mman *mm) {
   uint64_t p = mm->frp;
   if (p != NOPAGE) {
     uint64_t q;
     struct ReclaimedPage *rp = (struct ReclaimedPage *)(BANE + p);
-    _unassert(p == (p & PAGE_TA));
+    unassert(p == (p & PAGE_TA));
     q = rp->next;
-    _unassert(q == (q & PAGE_TA) || q == NOPAGE);
+    unassert(q == (q & PAGE_TA) || q == NOPAGE);
     mm->frp = q;
     return p;
   }

@@ -81,8 +81,8 @@ noasan texthead uint64_t __new_page(struct mman *mm) {
  * Returns pointer to page table entry for page at virtual address.
  * Additional page tables are allocated if needed as a side-effect.
  */
-noasan textreal uint64_t *__get_virtual(struct mman *mm, uint64_t *t,
-                                        int64_t vaddr, bool maketables) {
+dontasan textreal uint64_t *__get_virtual(struct mman *mm, uint64_t *t,
+                                          int64_t vaddr, bool maketables) {
   uint64_t *e, p;
   unsigned char h;
   for (h = 39;; h -= 9) {

@@ -101,7 +101,7 @@ noasan textreal uint64_t *__get_virtual(struct mman *mm, uint64_t *t,
 /**
  * Sorts, rounds, and filters BIOS memory map.
  */
-static noasan textreal void __normalize_e820(struct mman *mm, uint64_t top) {
+static dontasan textreal void __normalize_e820(struct mman *mm, uint64_t top) {
   uint64_t a, b;
   uint64_t x, y;
   unsigned i, j, n;

@@ -134,9 +134,10 @@ static noasan textreal void __normalize_e820(struct mman *mm, uint64_t top) {
 /**
  * Identity maps an area of physical memory to its negative address.
  */
-noasan textreal uint64_t *__invert_memory_area(struct mman *mm, uint64_t *pml4t,
-                                               uint64_t ps, uint64_t size,
-                                               uint64_t pte_flags) {
+dontasan textreal uint64_t *__invert_memory_area(struct mman *mm,
+                                                 uint64_t *pml4t, uint64_t ps,
+                                                 uint64_t size,
+                                                 uint64_t pte_flags) {
   uint64_t pe = ps + size, p, *m = NULL;
   ps = ROUNDDOWN(ps, 4096);
   pe = ROUNDUP(pe, 4096);

@@ -152,7 +153,7 @@ noasan textreal uint64_t *__invert_memory_area(struct mman *mm, uint64_t *pml4t,
 /**
  * Increments the reference count for a page of physical memory.
  */
-noasan void __ref_page(struct mman *mm, uint64_t *pml4t, uint64_t p) {
+dontasan void __ref_page(struct mman *mm, uint64_t *pml4t, uint64_t p) {
   uint64_t *m, e;
   m = __invert_memory_area(mm, pml4t, p, 4096, PAGE_RW | PAGE_XD);
   if (m) {
@@ -167,8 +168,8 @@ noasan void __ref_page(struct mman *mm, uint64_t *pml4t, uint64_t p) {
 /**
  * Increments the reference counts for an area of physical memory.
  */
-noasan void __ref_pages(struct mman *mm, uint64_t *pml4t, uint64_t ps,
-                        uint64_t size) {
+dontasan void __ref_pages(struct mman *mm, uint64_t *pml4t, uint64_t ps,
+                          uint64_t size) {
   uint64_t p = ROUNDDOWN(ps, 4096), e = ROUNDUP(ps + size, 4096);
   while (p != e) {
     __ref_page(mm, pml4t, p);

@@ -179,9 +180,9 @@ noasan void __ref_pages(struct mman *mm, uint64_t *pml4t, uint64_t ps,
 /**
  * Reclaims a page of physical memory for later use.
  */
-static noasan void __reclaim_page(struct mman *mm, uint64_t p) {
+static dontasan void __reclaim_page(struct mman *mm, uint64_t p) {
   struct ReclaimedPage *rp = (struct ReclaimedPage *)(BANE + p);
-  _unassert(p == (p & PAGE_TA));
+  unassert(p == (p & PAGE_TA));
   rp->next = mm->frp;
   mm->frp = p;
 }

@@ -191,7 +192,7 @@ static noasan void __reclaim_page(struct mman *mm, uint64_t p) {
  * page if there are no virtual addresses (excluding the negative space)
  * referring to it.
  */
-noasan void __unref_page(struct mman *mm, uint64_t *pml4t, uint64_t p) {
+dontasan void __unref_page(struct mman *mm, uint64_t *pml4t, uint64_t p) {
   uint64_t *m, e;
   m = __invert_memory_area(mm, pml4t, p, 4096, PAGE_RW | PAGE_XD);
   if (m) {

@@ -207,7 +208,8 @@ noasan void __unref_page(struct mman *mm, uint64_t *pml4t, uint64_t p) {
 /**
  * Identity maps all usable physical memory to its negative address.
  */
-static noasan textreal void __invert_memory(struct mman *mm, uint64_t *pml4t) {
+static dontasan textreal void __invert_memory(struct mman *mm,
+                                              uint64_t *pml4t) {
   uint64_t i, j, *m, p, pe;
   for (i = 0; i < mm->e820n; ++i) {
     uint64_t ps = mm->e820[i].addr, size = mm->e820[i].size;

@@ -230,8 +232,8 @@ static noasan textreal void __invert_memory(struct mman *mm, uint64_t *pml4t) {
                : "i"(offsetof(type, member))); \
   } while (0)

-noasan textreal void __setup_mman(struct mman *mm, uint64_t *pml4t,
-                                  uint64_t top) {
+dontasan textreal void __setup_mman(struct mman *mm, uint64_t *pml4t,
+                                    uint64_t top) {
   export_offsetof(struct mman, pc_drive_base_table);
   export_offsetof(struct mman, pc_drive_last_sector);
   export_offsetof(struct mman, pc_drive_last_head);

@@ -257,8 +259,8 @@ noasan textreal void __setup_mman(struct mman *mm, uint64_t *pml4t,
 /**
  * Maps APE-defined ELF program headers into memory and clears BSS.
  */
-noasan textreal void __map_phdrs(struct mman *mm, uint64_t *pml4t, uint64_t b,
-                                 uint64_t top) {
+dontasan textreal void __map_phdrs(struct mman *mm, uint64_t *pml4t, uint64_t b,
+                                   uint64_t top) {
   struct Elf64_Phdr *p;
   uint64_t i, f, v, m, *e;
   extern char ape_phdrs[] __attribute__((__weak__));

@@ -292,8 +294,9 @@ noasan textreal void __map_phdrs(struct mman *mm, uint64_t *pml4t, uint64_t b,
  * Reclaims memory pages which were used at boot time but which can now be
  * made available for the application.
  */
-noasan textreal void __reclaim_boot_pages(struct mman *mm, uint64_t skip_start,
-                                          uint64_t skip_end) {
+dontasan textreal void __reclaim_boot_pages(struct mman *mm,
+                                            uint64_t skip_start,
+                                            uint64_t skip_end) {
   uint64_t p = mm->frp, q = IMAGE_BASE_REAL, i, n = mm->e820n, b, e;
   for (i = 0; i < n; ++i) {
     b = mm->e820[i].addr;
@@ -21,7 +21,7 @@
 #include "libc/thread/thread.h"

 #ifdef __x86_64__
-STATIC_YOINK("_init__mmi");
+__static_yoink("_init__mmi");
 #endif

 struct MemoryIntervals _mmi;

@@ -48,9 +48,9 @@
  */
 bool __nocolor;

-optimizesize textstartup noasan void __nocolor_init(int argc, char **argv,
-                                                    char **envp,
-                                                    intptr_t *auxv) {
+optimizesize textstartup dontasan void __nocolor_init(int argc, char **argv,
+                                                      char **envp,
+                                                      intptr_t *auxv) {
   char *s;
   __nocolor = (IsWindows() && !IsAtLeastWindows10()) ||
               ((s = getenv("TERM")) && IsDumb(s));
@@ -27,7 +27,7 @@
  *
  * @see IsAtLeastWindows10()
  */
-textwindows noasan int NtGetVersion(void) {
+textwindows dontasan int NtGetVersion(void) {
   return (NtGetPeb()->OSMajorVersion & 0xff) << 8 | NtGetPeb()->OSMinorVersion;
 }

@@ -24,7 +24,7 @@
 void(pthread_cleanup_pop)(struct _pthread_cleanup_buffer *cb, int execute) {
   struct PosixThread *pt;
   if (__tls_enabled && (pt = (struct PosixThread *)__get_tls()->tib_pthread)) {
-    _unassert(cb == pt->cleanup);
+    unassert(cb == pt->cleanup);
     pt->cleanup = cb->__prev;
   }
   if (execute) {
@@ -35,7 +35,7 @@ void *pthread_getspecific(pthread_key_t k) {
   // pthread_key_create() or after key has been deleted with
   // pthread_key_delete() is undefined."
   // ──Quoth POSIX.1-2017
-  _unassert(0 <= k && k < PTHREAD_KEYS_MAX);
-  _unassert(atomic_load_explicit(_pthread_key_dtor + k, memory_order_acquire));
+  unassert(0 <= k && k < PTHREAD_KEYS_MAX);
+  unassert(atomic_load_explicit(_pthread_key_dtor + k, memory_order_acquire));
   return __get_tls()->tib_keys[k];
 }

@@ -35,8 +35,8 @@
  */
 int pthread_key_delete(pthread_key_t k) {
   uint64_t mask;
-  _unassert(0 <= k && k < PTHREAD_KEYS_MAX);
-  _unassert(atomic_load_explicit(_pthread_key_dtor + k, memory_order_acquire));
+  unassert(0 <= k && k < PTHREAD_KEYS_MAX);
+  unassert(atomic_load_explicit(_pthread_key_dtor + k, memory_order_acquire));
   atomic_store_explicit(_pthread_key_dtor + k, 0, memory_order_release);
   return 0;
 }

@@ -35,8 +35,8 @@ int pthread_setspecific(pthread_key_t k, const void *val) {
   // pthread_key_create() or after key has been deleted with
   // pthread_key_delete() is undefined."
   // ──Quoth POSIX.1-2017
-  _unassert(0 <= k && k < PTHREAD_KEYS_MAX);
-  _unassert(atomic_load_explicit(_pthread_key_dtor + k, memory_order_acquire));
+  unassert(0 <= k && k < PTHREAD_KEYS_MAX);
+  unassert(atomic_load_explicit(_pthread_key_dtor + k, memory_order_acquire));
   __get_tls()->tib_keys[k] = val;
   return 0;
 }
@@ -48,14 +48,14 @@ errno_t(pthread_spin_lock)(pthread_spinlock_t *spin) {
       LOCKTRACE("pthread_spin_lock(%t)", spin);
       break;
     }
-    _unassert(x == 1);
+    unassert(x == 1);
     LOCKTRACE("pthread_spin_lock(%t) trying...", spin);
   }
 #else
   for (;;) {
     x = atomic_exchange_explicit(&spin->_lock, 1, memory_order_acquire);
     if (!x) break;
-    _unassert(x == 1);
+    unassert(x == 1);
   }
 #endif
   return 0;

@@ -34,6 +34,6 @@ errno_t(pthread_spin_trylock)(pthread_spinlock_t *spin) {
   int x;
   x = atomic_exchange_explicit(&spin->_lock, 1, memory_order_acquire);
   if (!x) return 0;
-  _unassert(x == 1);
+  unassert(x == 1);
   return EBUSY;
 }
@@ -33,7 +33,7 @@ typedef char xmm_t __attribute__((__vector_size__(16), __aligned__(16)));
  * @return pointer to nul byte
  * @asyncsignalsafe
  */
-noasan char *stpcpy(char *d, const char *s) {
+dontasan char *stpcpy(char *d, const char *s) {
   size_t i = 0;
   if (IsAsan()) {
     __asan_verify(d, strlen(s) + 1);

@@ -32,7 +32,7 @@ static inline const char *strchr_pure(const char *s, int c) {

 #ifdef __x86_64__
 typedef char xmm_t __attribute__((__vector_size__(16), __aligned__(16)));
-noasan static inline const char *strchr_sse(const char *s, unsigned char c) {
+dontasan static inline const char *strchr_sse(const char *s, unsigned char c) {
   unsigned k;
   unsigned m;
   xmm_t v, *p;

@@ -55,7 +55,7 @@ noasan static inline const char *strchr_sse(const char *s, unsigned char c) {
 }
 #endif

-static noasan inline const char *strchr_x64(const char *p, uint64_t c) {
+static dontasan inline const char *strchr_x64(const char *p, uint64_t c) {
   unsigned a, b;
   uint64_t w, x, y;
   for (c *= 0x0101010101010101;; p += 8) {
@@ -103,7 +103,7 @@ char *strchr(const char *s, int c) {
   } else {
     r = strchr_pure(s, c);
   }
-  _unassert(!r || *r || !(c & 255));
+  unassert(!r || *r || !(c & 255));
   return (char *)r;
 #else
   char *r;

@@ -112,7 +112,7 @@ char *strchr(const char *s, int c) {
     if (!*s) return NULL;
   }
   r = strchr_x64(s, c);
-  _unassert(!r || *r || !c);
+  unassert(!r || *r || !c);
   return r;
 #endif
 }

@@ -32,7 +32,8 @@ static inline const char *strchrnul_pure(const char *s, int c) {

 #ifdef __x86_64__
 typedef char xmm_t __attribute__((__vector_size__(16), __aligned__(16)));
-noasan static inline const char *strchrnul_sse(const char *s, unsigned char c) {
+dontasan static inline const char *strchrnul_sse(const char *s,
+                                                 unsigned char c) {
   unsigned k;
   unsigned m;
   xmm_t v, *p;
@@ -52,7 +53,7 @@ noasan static inline const char *strchrnul_sse(const char *s, unsigned char c) {
 }
 #endif

-noasan static const char *strchrnul_x64(const char *p, uint64_t c) {
+dontasan static const char *strchrnul_x64(const char *p, uint64_t c) {
   unsigned a, b;
   uint64_t w, x, y;
   for (c *= 0x0101010101010101;; p += 8) {

@@ -101,7 +102,7 @@ char *strchrnul(const char *s, int c) {
   } else {
     r = strchrnul_pure(s, c);
   }
-  _unassert((*r & 255) == (c & 255) || !*r);
+  unassert((*r & 255) == (c & 255) || !*r);
   return (char *)r;
 #else
   char *r;

@@ -29,7 +29,7 @@
  * @return is <0, 0, or >0 based on uint8_t comparison
  * @asyncsignalsafe
  */
-noasan int strcmp(const char *a, const char *b) {
+dontasan int strcmp(const char *a, const char *b) {
   int c;
   size_t i = 0;
   uint64_t v, w, d;
@@ -34,7 +34,7 @@ typedef char xmm_t __attribute__((__vector_size__(16), __aligned__(16)));
  * @return original dest
  * @asyncsignalsafe
  */
-noasan char *strcpy(char *d, const char *s) {
+dontasan char *strcpy(char *d, const char *s) {
   size_t i = 0;
   if (IsAsan()) {
     __asan_verify(d, strlen(s) + 1);

@@ -28,7 +28,7 @@
  * @return number of bytes (excluding NUL)
  * @asyncsignalsafe
  */
-noasan size_t strlen(const char *s) {
+dontasan size_t strlen(const char *s) {
   if (IsAsan()) __asan_verify_str(s);
 #ifdef __x86_64__
   typedef char xmm_t __attribute__((__vector_size__(16), __aligned__(16)));

@@ -23,7 +23,7 @@
 #include "libc/str/str.h"
 #ifndef __aarch64__

-static noasan size_t strnlen_x64(const char *s, size_t n, size_t i) {
+static dontasan size_t strnlen_x64(const char *s, size_t n, size_t i) {
   uint64_t w;
   for (; i + 8 < n; i += 8) {
     w = *(uint64_t *)(s + i);
@@ -43,7 +43,7 @@ static noasan size_t strnlen_x64(const char *s, size_t n, size_t i) {
  * @return byte length
  * @asyncsignalsafe
  */
-noasan size_t strnlen(const char *s, size_t n) {
+dontasan size_t strnlen(const char *s, size_t n) {
   size_t i;
   if (IsAsan() && n) __asan_verify(s, 1);
   for (i = 0; (uintptr_t)(s + i) & 7; ++i) {

@@ -53,7 +53,7 @@ noasan size_t strnlen(const char *s, size_t n) {
   for (;; ++i) {
     if (i == n || !s[i]) break;
   }
-  _unassert(i == n || (i < n && !s[i]));
+  unassert(i == n || (i < n && !s[i]));
   if (IsAsan()) __asan_verify(s, i);
   return i;
 }

@@ -204,7 +204,7 @@ static wontreturn void __ubsan_unreachable(void) {

 static void __ubsan_exit(void) {
   kprintf("your ubsan runtime needs\n"
-          "\tSTATIC_YOINK(\"__die\");\n"
+          "\t__static_yoink(\"__die\");\n"
          "in order to show you backtraces\n");
   _Exitr(99);
 }
@@ -44,7 +44,7 @@ textwindows int WSARecv(
     // Use NULL for this parameter if the lpOverlapped parameter is not
     // NULL to avoid potentially erroneous results. This parameter can
     // be NULL only if the lpOverlapped parameter is not NULL.
-    _unassert(!opt_out_lpNumberOfBytesRecvd);
+    unassert(!opt_out_lpNumberOfBytesRecvd);
   }
 #if defined(SYSDEBUG) && _NTTRACE
   uint32_t NumberOfBytesRecvd;