Mirror of https://github.com/jart/cosmopolitan.git (synced 2025-06-30 08:18:30 +00:00)
Replace COSMO define with _COSMO_SOURCE
This change might cause ABI breakages for /opt/cosmos. It's needed to help us better conform to header declaration practices.
parent a033b65a33
commit c776a32f75
238 changed files with 858 additions and 1069 deletions
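For context: the commit replaces the internal COSMO guard with a _COSMO_SOURCE feature-test macro, in the spirit of _GNU_SOURCE, so cosmo-specific declarations only appear when a translation unit asks for them before including the headers, and a number of formerly CamelCase internals pick up __-prefixed names. Below is a minimal sketch of the gating pattern, using an illustrative header and function names rather than the real cosmo ones:

/* example.h: illustrative only, not a cosmopolitan header */
int portable_call(void);      /* visible to everyone */
#ifdef _COSMO_SOURCE
int cosmo_only_call(void);    /* visible only to callers that opt in */
#endif

/* caller.c: opts in the same way _GNU_SOURCE is normally used */
#define _COSMO_SOURCE
#include "example.h"

The renames visible in the hunks below (for instance _Mmap becoming __mmap_unlocked and UntrackMemoryIntervals becoming __untrack_memories) are presumably the /opt/cosmos breakage the commit message warns about.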
@@ -48,7 +48,7 @@ textwindows dontasan void DecodeDosArgv(int ignore, struct DosArgv *st) {
 static textwindows dontasan void AppendDosArgv(wint_t wc, struct DosArgv *st) {
   uint64_t w;
-  w = _tpenc(wc);
+  w = tpenc(wc);
   do {
     if (st->p >= st->pe) break;
     *st->p++ = w & 0xff;
@@ -55,7 +55,7 @@ Recode16to8(char *dst, size_t dstsize, const char16_t *src) {
         x = ToUpper(x);
       }
     }
-    w = _tpenc(x);
+    w = tpenc(x);
     do {
       if (r.ax + 1 < dstsize) {
         dst[r.ax++] = w;
@@ -18,7 +18,7 @@
 ╚─────────────────────────────────────────────────────────────────────────────*/
 #include "libc/runtime/memtrack.internal.h"
 
-dontasan size_t GetMemtrackSize(struct MemoryIntervals *mm) {
+dontasan size_t __get_memtrack_size(struct MemoryIntervals *mm) {
   size_t i, n;
   for (n = i = 0; i < mm->i; ++i) {
     n += ((size_t)(mm->p[i].y - mm->p[i].x) + 1) << 16;
@@ -48,8 +48,8 @@ int GetDosEnviron(const char16_t *, char *, size_t, char **, size_t);
 bool __intercept_flag(int *, char *[], const char *);
 int sys_mprotect_nt(void *, size_t, int);
 int __inflate(void *, size_t, const void *, size_t);
-void *_Mmap(void *, size_t, int, int, int, int64_t) dontasan;
-int _Munmap(char *, size_t) dontasan;
+void *__mmap_unlocked(void *, size_t, int, int, int, int64_t) dontasan;
+int __munmap_unlocked(char *, size_t) dontasan;
 void __on_arithmetic_overflow(void);
 void __init_fds(int, char **, char **);
@@ -20,7 +20,7 @@
 
 static inline bool IsMemtrackedImpl(int x, int y) {
   unsigned i;
-  i = FindMemoryInterval(&_mmi, x);
+  i = __find_memory(&_mmi, x);
   if (i == _mmi.i) return false;
   if (x < _mmi.p[i].x) return false;
   for (;;) {
@@ -52,16 +52,16 @@ void __mmi_unlock(void);
 void __mmi_funlock(void);
 bool IsMemtracked(int, int);
 void PrintSystemMappings(int);
-unsigned FindMemoryInterval(const struct MemoryIntervals *, int) nosideeffect;
-bool AreMemoryIntervalsOk(const struct MemoryIntervals *) nosideeffect;
+unsigned __find_memory(const struct MemoryIntervals *, int) nosideeffect;
+bool __check_memtrack(const struct MemoryIntervals *) nosideeffect;
 void PrintMemoryIntervals(int, const struct MemoryIntervals *);
-int TrackMemoryInterval(struct MemoryIntervals *, int, int, long, int, int,
-                        bool, bool, long, long);
-int ReleaseMemoryIntervals(struct MemoryIntervals *, int, int,
-                           void (*)(struct MemoryIntervals *, int, int));
-void ReleaseMemoryNt(struct MemoryIntervals *, int, int);
-int UntrackMemoryIntervals(void *, size_t);
-size_t GetMemtrackSize(struct MemoryIntervals *);
+int __track_memory(struct MemoryIntervals *, int, int, long, int, int, bool,
+                   bool, long, long);
+int __untrack_memory(struct MemoryIntervals *, int, int,
+                     void (*)(struct MemoryIntervals *, int, int));
+void __release_memory_nt(struct MemoryIntervals *, int, int);
+int __untrack_memories(void *, size_t);
+size_t __get_memtrack_size(struct MemoryIntervals *);
 
 #ifdef _NOPL0
 #define __mmi_lock() _NOPL0("__threadcalls", __mmi_lock)
@@ -30,7 +30,7 @@ static inline dontasan void *GetFrameAddr(int f) {
   return (void *)a;
 }
 
-dontasan void ReleaseMemoryNt(struct MemoryIntervals *mm, int l, int r) {
+dontasan void __release_memory_nt(struct MemoryIntervals *mm, int l, int r) {
   int i;
   for (i = l; i <= r; ++i) {
     UnmapViewOfFile(GetFrameAddr(mm->p[i].x));
@@ -68,22 +68,22 @@
 #define SHADE(x) (((intptr_t)(x) >> 3) + 0x7fff8000)
 #define FRAME(x) ((int)((intptr_t)(x) >> 16))
 
-static pureconst unsigned long RoundDownTwoPow(unsigned long x) {
+static inline pureconst unsigned long __rounddown2pow(unsigned long x) {
   return x ? 1ul << _bsrl(x) : 0;
 }
 
-static wontreturn void OnUnrecoverableMmapError(const char *s) {
+static wontreturn void __mmap_die(const char *s) {
   if (_weaken(__die)) _weaken(__die)();
   STRACE("%s %m", s);
   _Exitr(199);
 }
 
-static dontasan inline bool OverlapsExistingMapping(char *p, size_t n) {
+static dontasan inline bool __overlaps_existing_mapping(char *p, size_t n) {
   int a, b, i;
   unassert(n > 0);
   a = FRAME(p);
   b = FRAME(p + (n - 1));
-  i = FindMemoryInterval(&_mmi, a);
+  i = __find_memory(&_mmi, a);
   if (i < _mmi.i) {
     if (a <= _mmi.p[i].x && _mmi.p[i].x <= b) return true;
     if (a <= _mmi.p[i].y && _mmi.p[i].y <= b) return true;
@@ -92,14 +92,14 @@ static dontasan inline bool OverlapsExistingMapping(char *p, size_t n) {
   return false;
 }
 
-static dontasan bool ChooseMemoryInterval(int x, int n, int align, int *res) {
+static dontasan bool __choose_memory(int x, int n, int align, int *res) {
   // TODO: improve performance
   int i, start, end;
   unassert(align > 0);
   if (_mmi.i) {
 
     // find the start of the automap memory region
-    i = FindMemoryInterval(&_mmi, x);
+    i = __find_memory(&_mmi, x);
     if (i < _mmi.i) {
 
       // check to see if there's space available before the first entry
@@ -153,23 +153,23 @@ static dontasan bool ChooseMemoryInterval(int x, int n, int align, int *res) {
   return false;
 }
 
-dontasan static bool Automap(int count, int align, int *res) {
-  return ChooseMemoryInterval(FRAME(kAutomapStart), count, align, res) &&
+dontasan static bool __auto_map(int count, int align, int *res) {
+  return __choose_memory(FRAME(kAutomapStart), count, align, res) &&
          *res + count <= FRAME(kAutomapStart + (kAutomapSize - 1));
 }
 
-static dontasan void *FinishMemory(void *addr, size_t size, int prot, int flags,
-                                   int fd, int64_t off, int f, int x, int n,
-                                   struct DirectMap dm) {
+static dontasan void *__finish_memory(void *addr, size_t size, int prot,
+                                      int flags, int fd, int64_t off, int f,
+                                      int x, int n, struct DirectMap dm) {
   if (!IsWindows() && (flags & MAP_FIXED)) {
-    if (UntrackMemoryIntervals(addr, size)) {
-      OnUnrecoverableMmapError("FIXED UNTRACK FAILED");
+    if (__untrack_memories(addr, size)) {
+      __mmap_die("FIXED UNTRACK FAILED");
     }
   }
-  if (TrackMemoryInterval(&_mmi, x, x + (n - 1), dm.maphandle, prot, flags,
-                          false, false, off, size)) {
+  if (__track_memory(&_mmi, x, x + (n - 1), dm.maphandle, prot, flags, false,
+                     false, off, size)) {
     if (sys_munmap(addr, n) == -1) {
-      OnUnrecoverableMmapError("TRACK MUNMAP FAILED");
+      __mmap_die("TRACK MUNMAP FAILED");
     }
     return MAP_FAILED;
   }
@@ -179,21 +179,20 @@ static dontasan void *FinishMemory(void *addr, size_t size, int prot, int flags,
   return addr;
 }
 
-static dontasan void *MapMemory(void *addr, size_t size, int prot, int flags,
-                                int fd, int64_t off, int f, int x, int n) {
+static dontasan void *__map_memory(void *addr, size_t size, int prot, int flags,
+                                   int fd, int64_t off, int f, int x, int n) {
   struct DirectMap dm;
   dm = sys_mmap(addr, size, prot, f, fd, off);
   if (VERY_UNLIKELY(dm.addr == MAP_FAILED)) {
     if (IsWindows() && (flags & MAP_FIXED)) {
-      OnUnrecoverableMmapError(
-          "can't recover from MAP_FIXED errors on Windows");
+      __mmap_die("can't recover from MAP_FIXED errors on Windows");
     }
     return MAP_FAILED;
   }
   if (VERY_UNLIKELY(dm.addr != addr)) {
-    OnUnrecoverableMmapError("KERNEL DIDN'T RESPECT MAP_FIXED");
+    __mmap_die("KERNEL DIDN'T RESPECT MAP_FIXED");
   }
-  return FinishMemory(addr, size, prot, flags, fd, off, f, x, n, dm);
+  return __finish_memory(addr, size, prot, flags, fd, off, f, x, n, dm);
 }
 
 /**
@@ -202,11 +201,9 @@ static dontasan void *MapMemory(void *addr, size_t size, int prot, int flags,
  * This is useful on Windows since it allows us to partially unmap or
  * punch holes into existing mappings.
  */
-static textwindows dontinline dontasan void *MapMemories(char *addr,
-                                                         size_t size, int prot,
-                                                         int flags, int fd,
-                                                         int64_t off, int f,
-                                                         int x, int n) {
+static textwindows dontinline dontasan void *__map_memories(
+    char *addr, size_t size, int prot, int flags, int fd, int64_t off, int f,
+    int x, int n) {
   size_t i, m;
   int64_t oi, sz;
   struct DirectMap dm;
@@ -221,19 +218,19 @@ static textwindows dontinline dontasan void *MapMemories(char *addr,
   iscow = (flags & MAP_TYPE) != MAP_SHARED && fd != -1;
   readonlyfile = (flags & MAP_TYPE) == MAP_SHARED && fd != -1 &&
                  (g_fds.p[fd].flags & O_ACCMODE) == O_RDONLY;
-  if (TrackMemoryInterval(&_mmi, x + (n - 1), x + (n - 1), dm.maphandle, prot,
-                          flags, readonlyfile, iscow, oi, sz) == -1) {
-    OnUnrecoverableMmapError("MapMemories unrecoverable #1");
+  if (__track_memory(&_mmi, x + (n - 1), x + (n - 1), dm.maphandle, prot, flags,
+                     readonlyfile, iscow, oi, sz) == -1) {
+    __mmap_die("__map_memories unrecoverable #1");
   }
   for (i = 0; i < m; i += FRAMESIZE) {
     oi = fd == -1 ? 0 : off + i;
     sz = FRAMESIZE;
     dm = sys_mmap(addr + i, sz, prot, f, fd, oi);
     if (dm.addr == MAP_FAILED ||
-        TrackMemoryInterval(&_mmi, x + i / FRAMESIZE, x + i / FRAMESIZE,
-                            dm.maphandle, prot, flags, readonlyfile, iscow, oi,
-                            sz) == -1) {
-      OnUnrecoverableMmapError("MapMemories unrecoverable #2");
+        __track_memory(&_mmi, x + i / FRAMESIZE, x + i / FRAMESIZE,
+                       dm.maphandle, prot, flags, readonlyfile, iscow, oi,
+                       sz) == -1) {
+      __mmap_die("__map_memories unrecoverable #2");
     }
   }
   if (_weaken(__asan_map_shadow) && !OverlapsShadowSpace(addr, size)) {
@@ -242,8 +239,8 @@ static textwindows dontinline dontasan void *MapMemories(char *addr,
   return addr;
 }
 
-dontasan inline void *_Mmap(void *addr, size_t size, int prot, int flags,
-                            int fd, int64_t off) {
+dontasan inline void *__mmap_unlocked(void *addr, size_t size, int prot,
+                                      int flags, int fd, int64_t off) {
   char *p = addr;
   struct DirectMap dm;
   size_t requested_size;
@@ -301,14 +298,15 @@ dontasan inline void *_Mmap(void *addr, size_t size, int prot, int flags,
   }
 
   if (__virtualmax < LONG_MAX &&
-      (ckd_add(&virtualneed, (virtualused = GetMemtrackSize(&_mmi)), size) ||
+      (ckd_add(&virtualneed, (virtualused = __get_memtrack_size(&_mmi)),
+               size) ||
        virtualneed > __virtualmax)) {
     STRACE("mmap %'zu size + %'zu inuse exceeds virtual memory limit %'zu",
            size, virtualused, __virtualmax);
     return VIP(enomem());
   }
 
-  clashes = OverlapsImageSpace(p, size) || OverlapsExistingMapping(p, size);
+  clashes = OverlapsImageSpace(p, size) || __overlaps_existing_mapping(p, size);
 
   if ((flags & MAP_FIXED_NOREPLACE) == MAP_FIXED_NOREPLACE && clashes) {
     STRACE("mmap noreplace overlaps existing");
@@ -320,18 +318,18 @@ dontasan inline void *_Mmap(void *addr, size_t size, int prot, int flags,
     return VIP(einval());
   }
 
-  a = MAX(1, RoundDownTwoPow(size) >> 16);
+  a = MAX(1, __rounddown2pow(size) >> 16);
   f = (flags & ~MAP_FIXED_NOREPLACE) | MAP_FIXED;
   if (flags & MAP_FIXED) {
     x = FRAME(p);
     if (IsWindows()) {
-      if (UntrackMemoryIntervals(p, size)) {
-        OnUnrecoverableMmapError("FIXED UNTRACK FAILED");
+      if (__untrack_memories(p, size)) {
+        __mmap_die("FIXED UNTRACK FAILED");
       }
     }
   } else if (p && !clashes && !OverlapsShadowSpace(p, size)) {
     x = FRAME(p);
-  } else if (!Automap(n, a, &x)) {
+  } else if (!__auto_map(n, a, &x)) {
    STRACE("automap has no room for %d frames with %d alignment", n, a);
    return VIP(enomem());
  }
@@ -379,7 +377,7 @@ dontasan inline void *_Mmap(void *addr, size_t size, int prot, int flags,
                      MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)
                 .addr == p);
     dm.addr = p;
-    p = FinishMemory(p, size, prot, flags, fd, off, f, x, n, dm);
+    p = __finish_memory(p, size, prot, flags, fd, off, f, x, n, dm);
     if (IsAsan() && p != MAP_FAILED) {
       __asan_poison(p, page_size, kAsanStackOverflow);
     }
@@ -398,9 +396,9 @@ dontasan inline void *_Mmap(void *addr, size_t size, int prot, int flags,
   }
 
   if (!IsWindows()) {
-    p = MapMemory(p, size, prot, flags, fd, off, f, x, n);
+    p = __map_memory(p, size, prot, flags, fd, off, f, x, n);
   } else {
-    p = MapMemories(p, size, prot, flags, fd, off, f, x, n);
+    p = __map_memories(p, size, prot, flags, fd, off, f, x, n);
   }
 
   if (p != MAP_FAILED) {
@@ -477,14 +475,14 @@ void *mmap(void *addr, size_t size, int prot, int flags, int fd, int64_t off) {
 #endif
   __mmi_lock();
   if (!__isfdkind(fd, kFdZip)) {
-    res = _Mmap(addr, size, prot, flags, fd, off);
+    res = __mmap_unlocked(addr, size, prot, flags, fd, off);
   } else {
-    res = _weaken(__zipos_Mmap)(
+    res = _weaken(__zipos_mmap)(
         addr, size, prot, flags,
         (struct ZiposHandle *)(intptr_t)g_fds.p[fd].handle, off);
   }
 #if SYSDEBUG
-  toto = __strace > 0 ? GetMemtrackSize(&_mmi) : 0;
+  toto = __strace > 0 ? __get_memtrack_size(&_mmi) : 0;
 #endif
   __mmi_unlock();
   STRACE("mmap(%p, %'zu, %s, %s, %d, %'ld) → %p% m (%'zu bytes total)", addr,
@@ -29,7 +29,7 @@ textwindows int sys_mprotect_nt(void *addr, size_t size, int prot) {
   __mmi_lock();
   size = (size + 4095) & -4096;
   p = addr;
-  i = FindMemoryInterval(&_mmi, (intptr_t)p >> 16);
+  i = __find_memory(&_mmi, (intptr_t)p >> 16);
   if (i == _mmi.i || (!i && p + size <= (char *)ADDR_32_TO_48(_mmi.p[0].x))) {
     // memory isn't in memtrack
     // let's just trust the user then
@@ -29,7 +29,7 @@ dontasan textwindows int sys_msync_nt(char *addr, size_t size, int flags) {
   int i, rc = 0;
   char *a, *b, *x, *y;
   __mmi_lock();
-  for (i = FindMemoryInterval(&_mmi, (intptr_t)addr >> 16); i < _mmi.i; ++i) {
+  for (i = __find_memory(&_mmi, (intptr_t)addr >> 16); i < _mmi.i; ++i) {
     x = (char *)ADDR_32_TO_48(_mmi.p[i].x);
     y = x + _mmi.p[i].size;
     if ((x <= addr && addr < y) || (x < addr + size && addr + size <= y) ||
@@ -39,9 +39,9 @@
 #define ALIGNED(p) (!(IP(p) & (FRAMESIZE - 1)))
 #define FRAME(x) ((int)((intptr_t)(x) >> 16))
 
-static dontasan void MunmapShadow(char *p, size_t n) {
+static dontasan void __munmap_shadow(char *p, size_t n) {
   intptr_t a, b, x, y;
-  KERNTRACE("MunmapShadow(%p, %'zu)", p, n);
+  KERNTRACE("__munmap_shadow(%p, %'zu)", p, n);
   a = ((intptr_t)p >> 3) + 0x7fff8000;
   b = a + (n >> 3);
   if (IsMemtracked(FRAME(a), FRAME(b - 1))) {
@@ -52,7 +52,7 @@ static dontasan void MunmapShadow(char *p, size_t n) {
     // to be >1mb since we can only unmap it if it's aligned, and
     // as such we poison the edges if there are any.
     __repstosb((void *)a, kAsanUnmapped, x - a);
-    _Munmap((void *)x, y - x);
+    __munmap_unlocked((void *)x, y - x);
     __repstosb((void *)y, kAsanUnmapped, b - y);
   } else {
     // otherwise just poison and assume reuse
@@ -66,15 +66,15 @@ static dontasan void MunmapShadow(char *p, size_t n) {
 // our api supports doing things like munmap(0, 0x7fffffffffff) but some
 // platforms (e.g. openbsd) require that we know the specific intervals
 // or else it returns EINVAL. so we munmap a piecewise.
-static dontasan void MunmapImpl(char *p, size_t n) {
+static dontasan void __munmap_impl(char *p, size_t n) {
   char *q;
   size_t m;
   intptr_t a, b, c;
   int i, l, r, rc, beg, end;
-  KERNTRACE("MunmapImpl(%p, %'zu)", p, n);
+  KERNTRACE("__munmap_impl(%p, %'zu)", p, n);
   l = FRAME(p);
   r = FRAME(p + n - 1);
-  i = FindMemoryInterval(&_mmi, l);
+  i = __find_memory(&_mmi, l);
   for (; i < _mmi.i && r >= _mmi.p[i].x; ++i) {
     if (l >= _mmi.p[i].x && r <= _mmi.p[i].y) {
@@ -104,15 +104,15 @@ static dontasan void MunmapImpl(char *p, size_t n) {
     if (!IsWindows()) {
       npassert(!sys_munmap(q, m));
     } else {
-      // Handled by UntrackMemoryIntervals() on Windows
+      // Handled by __untrack_memories() on Windows
     }
     if (IsAsan() && !OverlapsShadowSpace(p, n)) {
-      MunmapShadow(q, m);
+      __munmap_shadow(q, m);
    }
  }
 }
 
-dontasan int _Munmap(char *p, size_t n) {
+dontasan int __munmap_unlocked(char *p, size_t n) {
   unsigned i;
   char poison;
   intptr_t a, b, x, y;
@@ -137,8 +137,8 @@ dontasan int _Munmap(char *p, size_t n) {
     STRACE("munmap(%p) isn't 64kb aligned", p);
     return einval();
   }
-  MunmapImpl(p, n);
-  return UntrackMemoryIntervals(p, n);
+  __munmap_impl(p, n);
+  return __untrack_memories(p, n);
 }
 
 /**
@@ -156,9 +156,9 @@ int munmap(void *p, size_t n) {
   int rc;
   size_t toto;
   __mmi_lock();
-  rc = _Munmap(p, n);
+  rc = __munmap_unlocked(p, n);
 #if SYSDEBUG
-  toto = __strace > 0 ? GetMemtrackSize(&_mmi) : 0;
+  toto = __strace > 0 ? __get_memtrack_size(&_mmi) : 0;
 #endif
   __mmi_unlock();
   STRACE("munmap(%.12p, %'zu) → %d% m (%'zu bytes total)", p, n, rc, toto);
@@ -19,10 +19,10 @@
 #include "libc/calls/calls.h"
 #include "libc/runtime/runtime.h"
 
-int _OpenExecutable(void) {
+int __open_executable(void) {
   tinyprint(2,
             "error: redbean StoreAsset() support is currently unavailable "
-            "because _OpenExecutable() in a regressed state, due to the "
+            "because __open_executable() in a regressed state, due to the "
             "work we're doing on Arm64 support; we're hoping to address "
             "this soon; please try using redbean v2.2 or rewinding to "
             "commit 38112aeb206cc95ef615c268ca809cad693ecb9e\n",
@@ -20,8 +20,6 @@ typedef unsigned long jmp_buf[26];
 
 typedef long sigjmp_buf[12];
 
-extern char **environ;
-
 void mcount(void);
 int daemon(int, int);
 unsigned long getauxval(unsigned long);
@@ -65,7 +63,11 @@ int getdtablesize(void);
 int sethostname(const char *, size_t);
 int acct(const char *);
 
-#ifdef COSMO
+#if defined(_GNU_SOURCE) || defined(_COSMO_SOURCE)
+extern char **environ;
+#endif
+
+#ifdef _COSMO_SOURCE
 extern int __argc;
 extern char **__argv;
 extern char **__envp;
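The practical effect of the runtime.h hunk above: environ is now declared only when _GNU_SOURCE or _COSMO_SOURCE is defined, while __argc, __argv, and __envp stay behind _COSMO_SOURCE. A hedged usage sketch, assuming the macro is defined before the header is pulled in (at the top of the file or via -D_COSMO_SOURCE):

#define _COSMO_SOURCE              /* _GNU_SOURCE alone would expose environ */
#include "libc/runtime/runtime.h"  /* in-tree include path, as seen in this diff */

int main(void) {
  /* environ requires _GNU_SOURCE or _COSMO_SOURCE; __argc requires _COSMO_SOURCE */
  return (environ != 0 && __argc > 0) ? 0 : 1;
}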
@@ -100,7 +102,7 @@ int _cocmd(int, char **, char **);
 /* executable program */
 char *GetProgramExecutableName(void);
 char *GetInterpreterExecutableName(char *, size_t);
-int _OpenExecutable(void);
+int __open_executable(void);
 /* execution control */
 int verynice(void);
 axdx_t setlongerjmp(jmp_buf)
@@ -130,7 +132,7 @@ void GetCpuidBrand(char[13], uint32_t);
 long _GetResourceLimit(int);
 const char *__describe_os(void);
 int __arg_max(void);
-#endif
+#endif /* _COSMO_SOURCE */
 
 COSMOPOLITAN_C_END_
 #endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */
@@ -77,7 +77,6 @@ o/$(MODE)/libc/runtime/ftracer.o: private \
 o/$(MODE)/libc/runtime/cosmo2.o \
 o/$(MODE)/libc/runtime/fork-nt.o \
 o/$(MODE)/libc/runtime/printmemoryintervals.o \
 o/$(MODE)/libc/runtime/arememoryintervalsok.o \
 o/$(MODE)/libc/runtime/findmemoryinterval.o \
 o/$(MODE)/libc/runtime/sys_mprotect.greg.o \
 o/$(MODE)/libc/runtime/getdosargv.o \
@@ -1,7 +1,7 @@
 #ifndef COSMOPOLITAN_LIBC_RUNTIME_STACK_H_
 #define COSMOPOLITAN_LIBC_RUNTIME_STACK_H_
 #if !(__ASSEMBLER__ + __LINKER__ + 0)
-#ifdef COSMO
+#ifdef _COSMO_SOURCE
 
 /**
  * Tunes APE stack maximum size.
@@ -136,8 +136,8 @@ forceinline void CheckLargeStackAllocation(void *p, ssize_t n) {
 void *NewCosmoStack(void) vallocesque;
 int FreeCosmoStack(void *) libcesque;
 
-#endif /* COSMO */
 COSMOPOLITAN_C_END_
 #endif /* GNU ELF */
+#endif /* _COSMO_SOURCE */
 #endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */
 #endif /* COSMOPOLITAN_LIBC_RUNTIME_STACK_H_ */
@@ -21,11 +21,11 @@
 #include "libc/macros.internal.h"
 #include "libc/runtime/memtrack.internal.h"
 
-int UntrackMemoryIntervals(void *addr, size_t size) {
+int __untrack_memories(void *addr, size_t size) {
   int a, b;
   unassert(size > 0);
   a = ROUNDDOWN((intptr_t)addr, FRAMESIZE) >> 16;
   b = ROUNDDOWN((intptr_t)addr + size - 1, FRAMESIZE) >> 16;
-  return ReleaseMemoryIntervals(&_mmi, a, b,
-                                SupportsWindows() ? ReleaseMemoryNt : 0);
+  return __untrack_memory(&_mmi, a, b,
+                          SupportsWindows() ? __release_memory_nt : 0);
 }
@@ -37,6 +37,6 @@ void __warn_if_powersave(void) {
   if ((fd = open(FILE, O_RDONLY)) == -1) return;
   read(fd, buf, 15);
   close(fd);
-  if (!_startswith(buf, "powersave")) return;
+  if (!startswith(buf, "powersave")) return;
   write(2, WARN, sizeof(WARN) - 1);
 }
@@ -48,7 +48,7 @@
  * it does not need to be 64kb aligned.
  * @return virtual base address of new mapping, or MAP_FAILED w/ errno
  */
-dontasan void *__zipos_Mmap(void *addr, size_t size, int prot, int flags,
+dontasan void *__zipos_mmap(void *addr, size_t size, int prot, int flags,
                             struct ZiposHandle *h, int64_t off) {
   if (!(flags & MAP_PRIVATE) ||
       (flags & ~(MAP_PRIVATE | MAP_FILE | MAP_FIXED | MAP_FIXED_NOREPLACE)) ||
@@ -64,8 +64,8 @@ dontasan void *__zipos_Mmap(void *addr, size_t size, int prot, int flags,
   }
 
   const int tempProt = !IsXnu() ? prot | PROT_WRITE : PROT_WRITE;
-  void *outAddr =
-      _Mmap(addr, size, tempProt, (flags & (~MAP_FILE)) | MAP_ANONYMOUS, -1, 0);
+  void *outAddr = __mmap_unlocked(addr, size, tempProt,
+                                  (flags & (~MAP_FILE)) | MAP_ANONYMOUS, -1, 0);
   if (outAddr == MAP_FAILED) {
     return MAP_FAILED;
   }
@@ -83,7 +83,7 @@ dontasan void *__zipos_Mmap(void *addr, size_t size, int prot, int flags,
     return outAddr;
   } while (0);
   const int e = errno;
-  _Munmap(outAddr, size);
+  __munmap_unlocked(outAddr, size);
   errno = e;
   strace_enabled(+1);
   return MAP_FAILED;
@@ -32,7 +32,7 @@
 	.yoink	__zipos_read
 	.yoink	__zipos_stat
 	.yoink	__zipos_notat
-	.yoink	__zipos_Mmap
+	.yoink	__zipos_mmap
 
 //	TODO(jart): why does corruption happen when zip has no assets?
 	.yoink	.cosmo
@@ -50,7 +50,7 @@ ssize_t __zipos_write(struct ZiposHandle *, const struct iovec *, size_t,
 int64_t __zipos_lseek(struct ZiposHandle *, int64_t, unsigned);
 int __zipos_fcntl(int, int, uintptr_t);
 int __zipos_notat(int, const char *);
-void *__zipos_Mmap(void *, uint64_t, int32_t, int32_t, struct ZiposHandle *,
+void *__zipos_mmap(void *, uint64_t, int32_t, int32_t, struct ZiposHandle *,
                    int64_t) dontasan;
 
 #ifdef _NOPL0