diff --git a/examples/forkrand.c b/examples/forkrand.c
index 7f81afb6b..ea6417d50 100644
--- a/examples/forkrand.c
+++ b/examples/forkrand.c
@@ -33,8 +33,6 @@ dontinline void dostuff(const char *s) {
int main(int argc, char *argv[]) {
int rc, child, wstatus;
- /* puts(_gc(xiso8601ts(NULL))); */
- PrintMemoryIntervals(2, &_mmi);
CHECK_NE(-1, (child = fork()));
if (!child) {
/* child process */
diff --git a/libc/calls/calls.h b/libc/calls/calls.h
index 062fbb453..eb8f2f63b 100644
--- a/libc/calls/calls.h
+++ b/libc/calls/calls.h
@@ -148,7 +148,6 @@ int munlock(const void *, size_t);
int munlockall(void);
int nice(int);
int open(const char *, int, ...);
-int openanon(char *, unsigned);
int openat(int, const char *, int, ...);
int pause(void);
int personality(uint64_t);
@@ -169,7 +168,7 @@ int sched_setaffinity(int, uint64_t, const void *);
int sched_yield(void);
int setegid(uint32_t);
int seteuid(uint32_t);
-int setgid(uint32_t);
+int setgid(int);
int setpgid(int, int);
int setpriority(int, unsigned, int);
int setregid(uint32_t, uint32_t);
@@ -178,7 +177,7 @@ int setresuid(uint32_t, uint32_t, uint32_t);
int setreuid(uint32_t, uint32_t);
int setrlimit(int, const struct rlimit *);
int setsid(void);
-int setuid(uint32_t);
+int setuid(int);
int sigignore(int);
int siginterrupt(int, int);
int sigprocmask(int, const struct sigset *, struct sigset *);
diff --git a/libc/calls/calls.mk b/libc/calls/calls.mk
index 8511564f0..2708adc6a 100644
--- a/libc/calls/calls.mk
+++ b/libc/calls/calls.mk
@@ -65,8 +65,11 @@ $(LIBC_CALLS_A).pkg: \
$(LIBC_CALLS_A_OBJS) \
$(foreach x,$(LIBC_CALLS_A_DIRECTDEPS),$($(x)_A).pkg)
+o/$(MODE)/libc/calls/directmap.o \
+o/$(MODE)/libc/calls/directmap-nt.o \
o/$(MODE)/libc/calls/raise.o: \
OVERRIDE_COPTS += \
+ -ffreestanding \
$(NO_MAGIC)
o/$(MODE)/libc/calls/termios2linux.o \
diff --git a/libc/calls/close-nt.c b/libc/calls/close-nt.c
index 2c62a7069..8f6964bf0 100644
--- a/libc/calls/close-nt.c
+++ b/libc/calls/close-nt.c
@@ -16,30 +16,42 @@
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
│ PERFORMANCE OF THIS SOFTWARE. │
╚─────────────────────────────────────────────────────────────────────────────*/
+#include "libc/bits/weaken.h"
#include "libc/calls/internal.h"
#include "libc/nt/enum/filetype.h"
#include "libc/nt/files.h"
#include "libc/nt/runtime.h"
+#include "libc/sock/ntstdin.internal.h"
#include "libc/sysv/consts/o.h"
#include "libc/sysv/errfuns.h"
textwindows int sys_close_nt(struct Fd *fd) {
int e;
- bool32 ok;
+ bool ok = true;
+
+ // if this file descriptor is wrapped in a named pipe worker thread
+ // then we need to close our copy of the worker thread handle. it's
+ // also required that whatever install a worker use malloc, so free
+ if (fd->worker) {
+ if (!weaken(UnrefNtStdinWorker)(fd->worker)) ok = false;
+ fd->worker = 0;
+ }
+
if (fd->kind == kFdFile && ((fd->flags & O_ACCMODE) != O_RDONLY &&
GetFileType(fd->handle) == kNtFileTypeDisk)) {
- /*
- * Like Linux, closing a file on Windows doesn't guarantee it's
- * immediately synced to disk. But unlike Linux, this could cause
- * subsequent operations, e.g. unlink() to break w/ access error.
- */
+ // Like Linux, closing a file on Windows doesn't guarantee it's
+ // immediately synced to disk. But unlike Linux, this could cause
+ // subsequent operations, e.g. unlink() to break w/ access error.
e = errno;
FlushFileBuffers(fd->handle);
errno = e;
}
- ok = CloseHandle(fd->handle);
+
+ // now we can close the handle
+ if (!CloseHandle(fd->handle)) ok = false;
if (fd->kind == kFdConsole && fd->extra && fd->extra != -1) {
- ok &= CloseHandle(fd->extra);
+ if (!CloseHandle(fd->extra)) ok = false;
}
+
return ok ? 0 : -1;
}
diff --git a/libc/calls/close.c b/libc/calls/close.c
index d5f01d38d..c8137e0ee 100644
--- a/libc/calls/close.c
+++ b/libc/calls/close.c
@@ -73,7 +73,7 @@ int close(int fd) {
}
}
}
- __releasefd(fd);
+ if (!__vforked) __releasefd(fd);
}
STRACE("%s(%d) → %d% m", "close", fd, rc);
return rc;
diff --git a/libc/calls/createfileflags.c b/libc/calls/createfileflags.c
new file mode 100644
index 000000000..d90d3cc94
--- /dev/null
+++ b/libc/calls/createfileflags.c
@@ -0,0 +1,110 @@
+/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
+│vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│
+╞══════════════════════════════════════════════════════════════════════════════╡
+│ Copyright 2022 Justine Alexandra Roberts Tunney │
+│ │
+│ Permission to use, copy, modify, and/or distribute this software for │
+│ any purpose with or without fee is hereby granted, provided that the │
+│ above copyright notice and this permission notice appear in all copies. │
+│ │
+│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │
+│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │
+│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │
+│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │
+│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │
+│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │
+│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
+│ PERFORMANCE OF THIS SOFTWARE. │
+╚─────────────────────────────────────────────────────────────────────────────*/
+#include "libc/assert.h"
+#include "libc/calls/calls.h"
+#include "libc/nt/createfile.h"
+#include "libc/nt/enum/accessmask.h"
+#include "libc/nt/enum/creationdisposition.h"
+#include "libc/nt/enum/fileflagandattributes.h"
+#include "libc/nt/enum/filesharemode.h"
+#include "libc/sysv/consts/o.h"
+
+// code size optimization
+//
+#define _O_APPEND 0x00000400 // kNtFileAppendData
+#define _O_CREAT 0x00000040 // kNtOpenAlways
+#define _O_EXCL 0x00000080 // kNtCreateNew
+#define _O_TRUNC 0x00000200 // kNtCreateAlways
+#define _O_DIRECTORY 0x00010000 // kNtFileFlagBackupSemantics
+#define _O_TMPFILE 0x00410000 // AttributeTemporary|FlagDeleteOnClose
+#define _O_DIRECT 0x00004000 // kNtFileFlagNoBuffering
+#define _O_NDELAY 0x00000800 // kNtFileFlagWriteThrough
+#define _O_RANDOM 0x80000000 // kNtFileFlagRandomAccess
+#define _O_SEQUENTIAL 0x40000000 // kNtFileFlagSequentialScan
+#define _O_COMPRESSED 0x20000000 // kNtFileAttributeCompressed
+#define _O_INDEXED 0x10000000 // !kNtFileAttributeNotContentIndexed
+#define _O_NONBLOCK 0x00000800
+#define _O_CLOEXEC 0x00080000
+//
+
+textwindows int GetNtOpenFlags(int flags, int mode, uint32_t *out_perm,
+ uint32_t *out_share, uint32_t *out_disp,
+ uint32_t *out_attr) {
+ uint32_t perm, share, disp, attr;
+
+ switch (flags & O_ACCMODE) {
+ case O_RDONLY:
+ perm = kNtFileGenericRead | kNtGenericExecute;
+ break;
+ case O_WRONLY:
+ perm = kNtFileGenericWrite | kNtGenericExecute;
+ break;
+ case O_RDWR:
+ perm = kNtFileGenericRead | kNtFileGenericWrite | kNtGenericExecute;
+ break;
+ default:
+ return -1;
+ }
+ if (flags & _O_APPEND) {
+ perm = kNtFileAppendData;
+ }
+
+ if (flags & _O_EXCL) {
+ share = kNtFileShareExclusive;
+ } else {
+ share = kNtFileShareRead | kNtFileShareWrite | kNtFileShareDelete;
+ }
+
+ if ((flags & _O_CREAT) && (flags & _O_EXCL)) {
+ disp = kNtCreateNew;
+ } else if ((flags & _O_CREAT) && (flags & _O_TRUNC)) {
+ disp = kNtCreateAlways;
+ } else if (flags & _O_CREAT) {
+ disp = kNtOpenAlways;
+ } else if (flags & _O_TRUNC) {
+ disp = kNtTruncateExisting;
+ } else {
+ disp = kNtOpenExisting;
+ }
+
+ if ((flags & _O_TMPFILE) == _O_TMPFILE) {
+ attr = kNtFileAttributeTemporary | kNtFileFlagDeleteOnClose;
+ } else {
+ attr = kNtFileAttributeNormal;
+ if (flags & _O_DIRECTORY) {
+ attr |= kNtFileFlagBackupSemantics;
+ }
+ if (~mode & 0200) {
+ attr |= kNtFileAttributeReadonly;
+ }
+ }
+
+ if (~flags & _O_INDEXED) attr |= kNtFileAttributeNotContentIndexed;
+ if (flags & _O_COMPRESSED) attr |= kNtFileAttributeCompressed;
+ if (flags & _O_SEQUENTIAL) attr |= kNtFileFlagSequentialScan;
+ if (flags & _O_RANDOM) attr |= kNtFileFlagRandomAccess;
+ if (flags & _O_DIRECT) attr |= kNtFileFlagNoBuffering;
+ if (flags & _O_NDELAY) attr |= kNtFileFlagWriteThrough;
+
+ if (out_perm) *out_perm = perm;
+ if (out_share) *out_share = share;
+ if (out_disp) *out_disp = disp;
+ if (out_attr) *out_attr = attr;
+ return 0;
+}
diff --git a/libc/calls/createpipename.c b/libc/calls/createpipename.c
index 8b81f5e25..5c3464e5d 100644
--- a/libc/calls/createpipename.c
+++ b/libc/calls/createpipename.c
@@ -16,32 +16,35 @@
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
│ PERFORMANCE OF THIS SOFTWARE. │
╚─────────────────────────────────────────────────────────────────────────────*/
-#include "libc/alg/reverse.internal.h"
#include "libc/calls/calls.h"
+#include "libc/intrin/lockxadd.h"
#include "libc/nt/process.h"
-static const char kPipeNamePrefix[] = "\\\\?\\pipe\\cosmo\\";
-
-static textwindows size_t UintToChar16Array(char16_t *a, uint64_t i) {
- size_t j = 0;
+static textwindows char16_t *UintToChar16Array(char16_t p[21], uint64_t x) {
+ char t;
+ size_t a, b, i = 0;
do {
- a[j++] = i % 10 + '0';
- i /= 10;
- } while (i > 0);
- a[j] = 0;
- reverse(a, j);
- return j;
+ p[i++] = x % 10 + '0';
+ x = x / 10;
+ } while (x > 0);
+ if (i) {
+ for (a = 0, b = i - 1; a < b; ++a, --b) {
+ t = p[a];
+ p[a] = p[b];
+ p[b] = t;
+ }
+ }
+ return p + i;
}
textwindows char16_t *CreatePipeName(char16_t *a) {
static long x;
- unsigned i;
- for (i = 0; kPipeNamePrefix[i]; ++i) a[i] = kPipeNamePrefix[i];
- i += UintToChar16Array(a + i, GetCurrentProcessId());
- a[i++] = u'-';
- i += UintToChar16Array(a + i, GetCurrentProcessId());
- a[i++] = u'-';
- i += UintToChar16Array(a + i, x++);
- a[i] = u'\0';
+ char16_t *p = a;
+ const char *q = "\\\\?\\pipe\\cosmo\\";
+ while (*q) *p++ = *q++;
+ p = UintToChar16Array(p, GetCurrentProcessId());
+ *p++ = '-';
+ p = UintToChar16Array(p, _lockxadd(&x, 1));
+ *p = 0;
return a;
}
diff --git a/libc/calls/directmap-nt.c b/libc/calls/directmap-nt.c
index 7d30c7e98..7215ccc3e 100644
--- a/libc/calls/directmap-nt.c
+++ b/libc/calls/directmap-nt.c
@@ -18,18 +18,21 @@
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/assert.h"
#include "libc/calls/internal.h"
+#include "libc/intrin/kprintf.h"
#include "libc/nt/enum/filemapflags.h"
#include "libc/nt/enum/pageflags.h"
#include "libc/nt/memory.h"
+#include "libc/nt/process.h"
#include "libc/nt/runtime.h"
+#include "libc/nt/struct/processmemorycounters.h"
#include "libc/runtime/directmap.internal.h"
+#include "libc/runtime/runtime.h"
#include "libc/sysv/consts/map.h"
#include "libc/sysv/consts/o.h"
#include "libc/sysv/consts/prot.h"
-textwindows noasan struct DirectMap sys_mmap_nt(void *addr, size_t size,
- int prot, int flags, int fd,
- int64_t off) {
+textwindows struct DirectMap sys_mmap_nt(void *addr, size_t size, int prot,
+ int flags, int fd, int64_t off) {
size_t i;
bool iscow;
int64_t handle;
@@ -38,6 +41,18 @@ textwindows noasan struct DirectMap sys_mmap_nt(void *addr, size_t size,
struct ProtectNt fl;
const struct NtSecurityAttributes *sec;
+#ifndef NDEBUG
+ struct NtProcessMemoryCountersEx memcount = {
+ .cb = sizeof(struct NtProcessMemoryCountersEx),
+ };
+ if (GetProcessMemoryInfo(GetCurrentProcess(), &memcount, sizeof(memcount))) {
+ if (memcount.PeakWorkingSetSize > 5ull * 1024 * 1024 * 1024) {
+ kprintf("error: exceeded 5gb memory limit%n");
+ _Exit(201);
+ }
+ }
+#endif
+
if (fd != -1) {
handle = g_fds.p[fd].handle;
} else {
diff --git a/libc/calls/directmap.c b/libc/calls/directmap.c
index 261196338..0ad050478 100644
--- a/libc/calls/directmap.c
+++ b/libc/calls/directmap.c
@@ -33,8 +33,8 @@
* bypassed by calling this function. However the caller is responsible
* for passing the magic memory handle on Windows NT to CloseHandle().
*/
-noasan struct DirectMap sys_mmap(void *addr, size_t size, int prot, int flags,
- int fd, int64_t off) {
+struct DirectMap sys_mmap(void *addr, size_t size, int prot, int flags, int fd,
+ int64_t off) {
/* asan runtime depends on this function */
struct DirectMap d;
if (!IsWindows() && !IsMetal()) {
diff --git a/libc/calls/dup-nt.c b/libc/calls/dup-nt.c
index 21bbf0d38..ba52b9e20 100644
--- a/libc/calls/dup-nt.c
+++ b/libc/calls/dup-nt.c
@@ -24,38 +24,62 @@
#include "libc/nt/files.h"
#include "libc/nt/runtime.h"
#include "libc/sock/internal.h"
+#include "libc/sock/ntstdin.internal.h"
#include "libc/sysv/consts/o.h"
#include "libc/sysv/errfuns.h"
/**
- * Implements dup(), dup2(), and dup3() for Windows NT.
+ * Implements dup(), dup2(), dup3(), and F_DUPFD for Windows.
*/
-textwindows int sys_dup_nt(int oldfd, int newfd, int flags) {
- int64_t proc;
+textwindows int sys_dup_nt(int oldfd, int newfd, int flags, int start) {
+ int64_t proc, handle;
+
+ // validate the api usage
if (oldfd < 0) return einval();
+ if (flags & ~O_CLOEXEC) return einval();
if (oldfd >= g_fds.n ||
(g_fds.p[oldfd].kind != kFdFile && g_fds.p[oldfd].kind != kFdSocket &&
g_fds.p[oldfd].kind != kFdConsole)) {
return ebadf();
}
+
+ // allocate a new file descriptor
if (newfd == -1) {
- if ((newfd = __reservefd()) == -1) return -1;
+ if ((newfd = __reservefd(start)) == -1) {
+ return -1;
+ }
} else {
if (__ensurefds(newfd) == -1) return -1;
if (g_fds.p[newfd].kind) close(newfd);
g_fds.p[newfd].kind = kFdReserved;
}
+
+ // if this file descriptor is wrapped in a named pipe worker thread
+ // then we should clone the original authentic handle rather than the
+ // stdin worker's named pipe. we won't clone the worker, since that
+ // can always be recreated again on demand.
+ if (g_fds.p[oldfd].worker) {
+ handle = g_fds.p[oldfd].worker->reader;
+ } else {
+ handle = g_fds.p[oldfd].handle;
+ }
+
proc = GetCurrentProcess();
- if (DuplicateHandle(proc, g_fds.p[oldfd].handle, proc, &g_fds.p[newfd].handle,
- 0, true, kNtDuplicateSameAccess)) {
+ if (DuplicateHandle(proc, handle, proc, &g_fds.p[newfd].handle, 0, true,
+ kNtDuplicateSameAccess)) {
g_fds.p[newfd].kind = g_fds.p[oldfd].kind;
- g_fds.p[newfd].flags = flags;
+ g_fds.p[newfd].mode = g_fds.p[oldfd].mode;
+ g_fds.p[newfd].flags = g_fds.p[oldfd].flags & ~O_CLOEXEC;
+ if (flags & O_CLOEXEC) g_fds.p[newfd].flags |= O_CLOEXEC;
if (g_fds.p[oldfd].kind == kFdSocket && weaken(_dupsockfd)) {
g_fds.p[newfd].extra =
(intptr_t)weaken(_dupsockfd)((struct SockFd *)g_fds.p[oldfd].extra);
} else {
g_fds.p[newfd].extra = g_fds.p[oldfd].extra;
}
+ if (g_fds.p[oldfd].worker) {
+ g_fds.p[newfd].worker = weaken(RefNtStdinWorker)(g_fds.p[oldfd].worker);
+ }
return newfd;
} else {
__releasefd(newfd);
diff --git a/libc/calls/dup.c b/libc/calls/dup.c
index e8c010801..84ce876ee 100644
--- a/libc/calls/dup.c
+++ b/libc/calls/dup.c
@@ -34,7 +34,7 @@ int dup(int fd) {
if (!IsWindows()) {
fd2 = sys_dup(fd);
} else {
- fd2 = sys_dup_nt(fd, -1, 0);
+ fd2 = sys_dup_nt(fd, -1, 0, -1);
}
STRACE("%s(%d) → %d% m", "dup", fd, fd2);
return fd2;
diff --git a/libc/calls/dup2.c b/libc/calls/dup2.c
index 108f739df..ab8b79449 100644
--- a/libc/calls/dup2.c
+++ b/libc/calls/dup2.c
@@ -38,7 +38,7 @@ int dup2(int oldfd, int newfd) {
} else if (!IsWindows()) {
rc = sys_dup3(oldfd, newfd, 0);
} else {
- rc = sys_dup_nt(oldfd, newfd, 0);
+ rc = sys_dup_nt(oldfd, newfd, 0, -1);
}
STRACE("dup2(%d, %d) → %d% m", oldfd, newfd, rc);
return rc;
diff --git a/libc/calls/dup3.c b/libc/calls/dup3.c
index be3898bb4..f19dccab9 100644
--- a/libc/calls/dup3.c
+++ b/libc/calls/dup3.c
@@ -40,7 +40,7 @@ int dup3(int oldfd, int newfd, int flags) {
if (!IsWindows()) {
rc = sys_dup3(oldfd, newfd, flags);
} else {
- rc = sys_dup_nt(oldfd, newfd, flags);
+ rc = sys_dup_nt(oldfd, newfd, flags, -1);
}
STRACE("dup3(%d, %d, %d) → %d% m", oldfd, newfd, flags, rc);
return rc;
diff --git a/libc/calls/execve-nt.c b/libc/calls/execve-nt.c
index 44261bf45..c45128e8c 100644
--- a/libc/calls/execve-nt.c
+++ b/libc/calls/execve-nt.c
@@ -41,9 +41,9 @@ textwindows int sys_execve_nt(const char *program, char *const argv[],
bzero(&startinfo, sizeof(startinfo));
startinfo.cb = sizeof(struct NtStartupInfo);
startinfo.dwFlags = kNtStartfUsestdhandles;
- startinfo.hStdInput = g_fds.p[0].handle;
- startinfo.hStdOutput = g_fds.p[1].handle;
- startinfo.hStdError = g_fds.p[2].handle;
+ startinfo.hStdInput = __getfdhandleactual(0);
+ startinfo.hStdOutput = __getfdhandleactual(1);
+ startinfo.hStdError = __getfdhandleactual(2);
rc = ntspawn(program, argv, envp, 0, 0, 0, 1, 0, 0, &startinfo, &procinfo);
if (rc == -1) return -1;
CloseHandle(g_fds.p[0].handle);
diff --git a/libc/calls/fadvise-nt.c b/libc/calls/fadvise-nt.c
index 97aba7bda..82d818be8 100644
--- a/libc/calls/fadvise-nt.c
+++ b/libc/calls/fadvise-nt.c
@@ -17,49 +17,52 @@
│ PERFORMANCE OF THIS SOFTWARE. │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/calls/internal.h"
+#include "libc/nt/createfile.h"
#include "libc/nt/enum/fileflagandattributes.h"
#include "libc/nt/enum/filesharemode.h"
#include "libc/nt/enum/status.h"
#include "libc/nt/files.h"
#include "libc/nt/nt/file.h"
-#include "libc/nt/ntdll.h"
#include "libc/nt/runtime.h"
#include "libc/nt/struct/fileaccessinformation.h"
#include "libc/nt/struct/filebasicinformation.h"
#include "libc/nt/struct/iostatusblock.h"
#include "libc/runtime/runtime.h"
+#include "libc/sysv/consts/madv.h"
+#include "libc/sysv/consts/o.h"
#include "libc/sysv/errfuns.h"
textwindows int sys_fadvise_nt(int fd, uint64_t offset, uint64_t len,
int advice) {
- int64_t h2;
- NtStatus status;
- uint32_t sharemode;
- struct NtIoStatusBlock iostatus;
- struct NtFileBasicInformation basicinfo;
- struct NtFileAccessInformation accessinfo;
+ int64_t h1, h2;
+ int flags, mode;
+ uint32_t perm, share, attr;
if (!__isfdkind(fd, kFdFile)) return ebadf();
- sharemode = /* xxx: no clue how to query this */
- kNtFileShareRead | kNtFileShareWrite | kNtFileShareDelete;
- /* TODO(jart): can we do it in one call w/ NtQueryObject? */
- if (!NtError(status = NtQueryInformationFile(g_fds.p[fd].handle, &iostatus,
- &basicinfo, sizeof(basicinfo),
- kNtFileBasicInformation)) &&
- !NtError(status = NtQueryInformationFile(g_fds.p[fd].handle, &iostatus,
- &accessinfo, sizeof(accessinfo),
- kNtFileAccessInformation))) {
- if ((h2 = ReOpenFile(g_fds.p[fd].handle, accessinfo.AccessFlags, sharemode,
- advice | basicinfo.FileAttributes)) != -1) {
- if (h2 != g_fds.p[fd].handle) {
- CloseHandle(g_fds.p[fd].handle);
- g_fds.p[fd].handle = h2;
- }
- return 0;
+ h1 = g_fds.p[fd].handle;
+ mode = g_fds.p[fd].mode;
+ flags = g_fds.p[fd].flags;
+ flags &= ~(O_SEQUENTIAL | O_RANDOM);
+ switch (advice) {
+ case MADV_NORMAL:
+ break;
+ case MADV_RANDOM:
+ flags |= O_RANDOM;
+ break;
+ case MADV_SEQUENTIAL:
+ flags |= O_SEQUENTIAL;
+ break;
+ default:
+ return einval();
+ }
+ if (GetNtOpenFlags(flags, mode, &perm, &share, 0, &attr) == -1) return -1;
+ if ((h2 = ReOpenFile(h1, perm, share, attr)) != -1) {
+ if (h2 != h1) {
+ CloseHandle(h1);
+ g_fds.p[fd].handle = h2;
}
- return __winerr();
- } else if (status == kNtStatusDllNotFound) {
- return enosys();
+ g_fds.p[fd].flags = flags;
+ return 0;
} else {
- return ntreturn(status);
+ return __winerr();
}
}
diff --git a/libc/calls/fcntl-nt.c b/libc/calls/fcntl-nt.c
index 5498a6aea..d51cdc0a9 100644
--- a/libc/calls/fcntl-nt.c
+++ b/libc/calls/fcntl-nt.c
@@ -20,7 +20,9 @@
#include "libc/calls/internal.h"
#include "libc/calls/struct/flock.h"
#include "libc/intrin/cmpxchg.h"
+#include "libc/intrin/kprintf.h"
#include "libc/macros.internal.h"
+#include "libc/nt/createfile.h"
#include "libc/nt/enum/accessmask.h"
#include "libc/nt/enum/fileflagandattributes.h"
#include "libc/nt/enum/filelockflags.h"
@@ -38,38 +40,8 @@
#include "libc/sysv/consts/o.h"
#include "libc/sysv/errfuns.h"
-static textwindows int sys_fcntl_nt_reservefd(int start) {
- int fd;
- for (;;) {
- fd = start;
- if (fd >= g_fds.n) {
- if (__ensurefds(fd) == -1) return -1;
- }
- _cmpxchg(&g_fds.f, fd, fd + 1);
- if (_cmpxchg(&g_fds.p[fd].kind, kFdEmpty, kFdReserved)) {
- return fd;
- }
- }
-}
-
-static textwindows int sys_fcntl_nt_dupfd(int oldfd, int cmd, int start) {
- int newfd;
- int64_t proc;
- if ((newfd = sys_fcntl_nt_reservefd(start)) != -1) {
- proc = GetCurrentProcess();
- if (DuplicateHandle(proc, g_fds.p[oldfd].handle, proc,
- &g_fds.p[newfd].handle, 0, true,
- kNtDuplicateSameAccess)) {
- g_fds.p[newfd].kind = g_fds.p[oldfd].kind;
- g_fds.p[newfd].flags = cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0;
- return newfd;
- } else {
- __releasefd(newfd);
- return __winerr();
- }
- } else {
- return -1;
- }
+static textwindows int sys_fcntl_nt_dupfd(int fd, int cmd, int start) {
+ return sys_dup_nt(fd, -1, (cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0), start);
}
static textwindows int sys_fcntl_nt_lock(struct Fd *f, int cmd, uintptr_t arg) {
@@ -98,7 +70,7 @@ static textwindows int sys_fcntl_nt_lock(struct Fd *f, int cmd, uintptr_t arg) {
}
if (!len) len = size - off;
if (off < 0 || len < 0) return einval();
- _offset2overlap(off, &ov);
+ _offset2overlap(f->handle, off, &ov);
if (l->l_type == F_RDLCK || l->l_type == F_WRLCK) {
flags = 0;
if (cmd == F_SETLK) flags |= kNtLockfileFailImmediately;
@@ -136,11 +108,9 @@ textwindows int sys_fcntl_nt(int fd, int cmd, uintptr_t arg) {
return g_fds.p[fd].flags & (O_ACCMODE | O_APPEND | O_ASYNC | O_DIRECT |
O_NOATIME | O_NONBLOCK);
} else if (cmd == F_SETFL) {
- /*
- * - O_APPEND doesn't appear to be tunable at cursory glance
- * - O_NONBLOCK might require we start doing all i/o in threads
- * - O_DSYNC / O_RSYNC / O_SYNC maybe if we fsync() everything
- */
+ // O_APPEND doesn't appear to be tunable at cursory glance
+ // O_NONBLOCK might require we start doing all i/o in threads
+ // O_DSYNC / O_RSYNC / O_SYNC maybe if we fsync() everything
return einval();
} else if (cmd == F_GETFD) {
if (g_fds.p[fd].flags & O_CLOEXEC) {
diff --git a/libc/calls/fdatasync-nt.c b/libc/calls/fdatasync-nt.c
index 1bd858604..147d879fb 100644
--- a/libc/calls/fdatasync-nt.c
+++ b/libc/calls/fdatasync-nt.c
@@ -21,6 +21,7 @@
#include "libc/sysv/errfuns.h"
textwindows int sys_fdatasync_nt(int fd) {
+ // TODO(jart): what should we do with worker pipes?
if (!__isfdkind(fd, kFdFile)) return ebadf();
return FlushFileBuffers(g_fds.p[fd].handle) ? 0 : -1;
}
diff --git a/libc/calls/g_fds.c b/libc/calls/g_fds.c
index 5fd38837f..148031d4a 100644
--- a/libc/calls/g_fds.c
+++ b/libc/calls/g_fds.c
@@ -20,42 +20,32 @@
#include "libc/calls/internal.h"
#include "libc/calls/strace.internal.h"
#include "libc/nt/runtime.h"
-#include "libc/sysv/consts/fileno.h"
STATIC_YOINK("_init_g_fds");
-hidden struct Fds g_fds;
+struct Fds g_fds;
+_Alignas(64) char __fds_lock;
-static textwindows int64_t GetHandleNt(long a) {
- int64_t b;
- b = GetStdHandle(a);
- STRACE("GetStdHandle(%ld) → %ld% m", a, b);
- return b;
-}
-
-hidden textstartup void InitializeFileDescriptors(void) {
+textstartup void InitializeFileDescriptors(void) {
struct Fds *fds;
fds = VEIL("r", &g_fds);
pushmov(&fds->n, ARRAYLEN(fds->__init_p));
fds->p = fds->__init_p;
if (IsMetal()) {
pushmov(&fds->f, 3ull);
- fds->__init_p[STDIN_FILENO].kind = pushpop(kFdSerial);
- fds->__init_p[STDOUT_FILENO].kind = pushpop(kFdSerial);
- fds->__init_p[STDERR_FILENO].kind = pushpop(kFdSerial);
- fds->__init_p[STDIN_FILENO].handle = VEIL("r", 0x3F8ull);
- fds->__init_p[STDOUT_FILENO].handle = VEIL("r", 0x3F8ull);
- fds->__init_p[STDERR_FILENO].handle = VEIL("r", 0x3F8ull);
+ fds->__init_p[0].kind = pushpop(kFdSerial);
+ fds->__init_p[1].kind = pushpop(kFdSerial);
+ fds->__init_p[2].kind = pushpop(kFdSerial);
+ fds->__init_p[0].handle = VEIL("r", 0x3F8ull);
+ fds->__init_p[1].handle = VEIL("r", 0x3F8ull);
+ fds->__init_p[2].handle = VEIL("r", 0x3F8ull);
} else if (IsWindows()) {
pushmov(&fds->f, 3ull);
- fds->__init_p[STDIN_FILENO].kind = pushpop(kFdFile);
- fds->__init_p[STDOUT_FILENO].kind = pushpop(kFdFile);
- fds->__init_p[STDERR_FILENO].kind = pushpop(kFdFile);
- fds->__init_p[STDIN_FILENO].handle =
- GetHandleNt(pushpop(kNtStdInputHandle));
- fds->__init_p[STDOUT_FILENO].handle =
- GetHandleNt(pushpop(kNtStdOutputHandle));
- fds->__init_p[STDERR_FILENO].handle =
- GetHandleNt(pushpop(kNtStdErrorHandle));
+ fds->__init_p[0].kind = pushpop(kFdFile);
+ fds->__init_p[1].kind = pushpop(kFdFile);
+ fds->__init_p[2].kind = pushpop(kFdFile);
+ fds->__init_p[0].handle = GetStdHandle(pushpop(kNtStdInputHandle));
+ fds->__init_p[1].handle = GetStdHandle(pushpop(kNtStdOutputHandle));
+ fds->__init_p[2].handle = GetStdHandle(pushpop(kNtStdErrorHandle));
}
}
diff --git a/libc/calls/g_sighandrvas.c b/libc/calls/g_sighandrvas.c
index ef14a7d0b..97964a95d 100644
--- a/libc/calls/g_sighandrvas.c
+++ b/libc/calls/g_sighandrvas.c
@@ -18,6 +18,6 @@
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/calls/internal.h"
-cthread_spinlock_t __sig_lock;
+_Alignas(64) char __sig_lock;
unsigned __sighandrvas[NSIG];
unsigned __sighandflags[NSIG];
diff --git a/libc/intrin/spinlock.c b/libc/calls/geteuid.c
similarity index 76%
rename from libc/intrin/spinlock.c
rename to libc/calls/geteuid.c
index 0ee680400..f1990087c 100644
--- a/libc/intrin/spinlock.c
+++ b/libc/calls/geteuid.c
@@ -16,35 +16,16 @@
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
│ PERFORMANCE OF THIS SOFTWARE. │
╚─────────────────────────────────────────────────────────────────────────────*/
-#include "libc/assert.h"
#include "libc/calls/calls.h"
+#include "libc/calls/internal.h"
#include "libc/calls/strace.internal.h"
-#include "libc/intrin/lockcmpxchg.h"
-#include "libc/intrin/spinlock.h"
/**
- * Acquires spinlock.
+ * Returns effective user ID of calling process.
*/
-void cthread_spinlock(cthread_spinlock_t *lock) {
-#if 0
- // TODO(jart): possibly reenable for !NDEBUG when we have TLS
- int me = gettid();
- if (lock->x && lock->owner == me) {
- assert(!"cosmo spinlock not intended to be reentrant");
- return;
- }
-#endif
- do
- while (lock->x) asm("pause");
- while (!_lockcmpxchg(&lock->x, false, true));
-#if 0
- lock->owner = me;
-#endif
-}
-
-/**
- * Releases spinlock.
- */
-void cthread_spunlock(cthread_spinlock_t *lock) {
- lock->x = false;
+uint32_t geteuid(void) {
+ int rc;
+ rc = sys_geteuid();
+ STRACE("%s() → %d% m", "geteuid", rc);
+ return rc;
}
diff --git a/third_party/dlmalloc/initdlmalloc.S b/libc/calls/getfdhandleactual.greg.c
similarity index 78%
rename from third_party/dlmalloc/initdlmalloc.S
rename to libc/calls/getfdhandleactual.greg.c
index 2410df12d..b0f797b12 100644
--- a/third_party/dlmalloc/initdlmalloc.S
+++ b/libc/calls/getfdhandleactual.greg.c
@@ -1,7 +1,7 @@
-/*-*- mode:unix-assembly; indent-tabs-mode:t; tab-width:8; coding:utf-8 -*-│
-│vi: set et ft=asm ts=8 tw=8 fenc=utf-8 :vi│
+/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
+│vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│
╞══════════════════════════════════════════════════════════════════════════════╡
-│ Copyright 2020 Justine Alexandra Roberts Tunney │
+│ Copyright 2022 Justine Alexandra Roberts Tunney │
│ │
│ Permission to use, copy, modify, and/or distribute this software for │
│ any purpose with or without fee is hereby granted, provided that the │
@@ -16,13 +16,13 @@
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
│ PERFORMANCE OF THIS SOFTWARE. │
╚─────────────────────────────────────────────────────────────────────────────*/
-#include "libc/macros.internal.h"
+#include "libc/calls/internal.h"
+#include "libc/sock/ntstdin.internal.h"
-// Sneak ahead ctor list b/c runtime weakly links malloc.
- .init.start 800,_init_dlmalloc
- push %rdi
- push %rsi
- call dlmalloc_init
- pop %rsi
- pop %rdi
- .init.end 800,_init_dlmalloc,globl,hidden
+int64_t __getfdhandleactual(int fd) {
+ if (g_fds.p[fd].worker) {
+ return g_fds.p[fd].worker->reader;
+ } else {
+ return g_fds.p[fd].handle;
+ }
+}
diff --git a/libc/calls/getrusage-nt.c b/libc/calls/getrusage-nt.c
index 46f70282b..c96b449b4 100644
--- a/libc/calls/getrusage-nt.c
+++ b/libc/calls/getrusage-nt.c
@@ -47,6 +47,8 @@ textwindows int sys_getrusage_nt(int who, struct rusage *usage) {
} else {
return __winerr();
}
+ bzero(&memcount, sizeof(memcount));
+ memcount.cb = sizeof(struct NtProcessMemoryCountersEx);
if (GetProcessMemoryInfo(GetCurrentProcess(), &memcount, sizeof(memcount))) {
usage->ru_maxrss = memcount.PeakWorkingSetSize / 1024;
usage->ru_majflt = memcount.PageFaultCount;
diff --git a/libc/calls/internal.h b/libc/calls/internal.h
index 3cd6c360a..4a0f9e03e 100644
--- a/libc/calls/internal.h
+++ b/libc/calls/internal.h
@@ -16,11 +16,11 @@
#include "libc/calls/struct/winsize.h"
#include "libc/calls/ucontext.h"
#include "libc/dce.h"
-#include "libc/intrin/spinlock.h"
#include "libc/limits.h"
#include "libc/macros.internal.h"
#include "libc/nt/struct/context.h"
#include "libc/nt/struct/ntexceptionpointers.h"
+#include "libc/nt/struct/overlapped.h"
#include "libc/nt/struct/securityattributes.h"
#include "libc/nt/struct/startupinfo.h"
#include "libc/nt/struct/systeminfo.h"
@@ -58,8 +58,10 @@ enum FdKind {
struct Fd {
enum FdKind kind;
unsigned flags;
+ unsigned mode;
int64_t handle;
int64_t extra;
+ struct NtStdinWorker *worker;
bool zombie;
};
@@ -73,16 +75,19 @@ struct Fds {
extern const struct Fd kEmptyFd;
hidden extern int __vforked;
+hidden extern char __fds_lock;
+hidden extern char __sig_lock;
hidden extern bool __time_critical;
-hidden extern cthread_spinlock_t __sig_lock;
hidden extern unsigned __sighandrvas[NSIG];
hidden extern unsigned __sighandflags[NSIG];
hidden extern struct Fds g_fds;
hidden extern const struct NtSecurityAttributes kNtIsInheritable;
-int __reservefd(void) hidden;
+int __reservefd(int) hidden;
void __releasefd(int) hidden;
int __ensurefds(int) hidden;
+int64_t __getfdhandleactual(int) hidden;
+void __printfds(void);
forceinline bool __isfdopen(int fd) {
return 0 <= fd && fd < g_fds.n && g_fds.p[fd].kind != kFdEmpty;
@@ -178,6 +183,7 @@ i32 sys_posix_openpt(i32) hidden;
i32 sys_renameat(i32, const char *, i32, const char *) hidden;
i32 sys_sched_setaffinity(i32, u64, const void *) hidden;
i32 sys_sched_yield(void) hidden;
+i32 sys_setgid(i32) hidden;
i32 sys_setitimer(i32, const struct itimerval *, struct itimerval *) hidden;
i32 sys_setpgid(i32, i32) hidden;
i32 sys_setpriority(i32, u32, i32) hidden;
@@ -185,6 +191,7 @@ i32 sys_setresgid(uint32_t, uint32_t, uint32_t) hidden;
i32 sys_setresuid(uint32_t, uint32_t, uint32_t) hidden;
i32 sys_setrlimit(i32, const struct rlimit *) hidden;
i32 sys_setsid(void) hidden;
+i32 sys_setuid(i32) hidden;
i32 sys_sigaction(i32, const void *, void *, i64, i64) hidden;
i32 sys_sigaltstack(const void *, void *) hidden;
i32 sys_sigprocmask(i32, const sigset *, sigset *, u64) hidden;
@@ -216,6 +223,7 @@ i64 sys_sendfile(i32, i32, i64 *, u64) hidden;
i64 sys_splice(i32, i64 *, i32, i64 *, u64, u32) hidden;
i64 sys_vmsplice(i32, const struct iovec *, i64, u32) hidden;
i64 sys_write(i32, const void *, u64) hidden;
+u32 sys_geteuid(void) hidden;
u32 sys_getgid(void) hidden;
u32 sys_getsid(int) hidden;
u32 sys_gettid(void) hidden;
@@ -266,7 +274,7 @@ i64 sys_lseek_nt(int, i64, int) hidden;
int sys_chdir_nt(const char *) hidden;
int sys_close_epoll_nt(int) hidden;
int sys_close_nt(struct Fd *) hidden;
-int sys_dup_nt(int, int, int) hidden;
+int sys_dup_nt(int, int, int, int) hidden;
int sys_execve_nt(const char *, char *const[], char *const[]) hidden;
int sys_faccessat_nt(int, const char *, int, uint32_t) hidden;
int sys_fadvise_nt(int, u64, u64, int) hidden;
@@ -331,12 +339,13 @@ int64_t __winerr(void) nocallback privileged;
int64_t ntreturn(uint32_t);
ssize_t sys_readv_nt(struct Fd *, const struct iovec *, int) hidden;
ssize_t sys_writev_nt(int, const struct iovec *, int) hidden;
-struct NtOverlapped *_offset2overlap(int64_t, struct NtOverlapped *) hidden;
unsigned __wincrash_nt(struct NtExceptionPointers *);
void *GetProcAddressModule(const char *, const char *) hidden;
void WinMainForked(void) hidden;
void _ntcontext2linux(struct ucontext *, const struct NtContext *) hidden;
void _ntlinux2context(struct NtContext *, const ucontext_t *) hidden;
+struct NtOverlapped *_offset2overlap(int64_t, int64_t,
+ struct NtOverlapped *) hidden;
/*───────────────────────────────────────────────────────────────────────────│─╗
│ cosmopolitan § syscalls » metal ─╬─│┼
diff --git a/libc/calls/loadavg-nt.c b/libc/calls/loadavg-nt.c
index 977b992f2..28671071e 100644
--- a/libc/calls/loadavg-nt.c
+++ b/libc/calls/loadavg-nt.c
@@ -92,4 +92,6 @@ static textstartup void LoadavgNtInit(void) {
LoadavgNtPoll(hCounter, 0);
}
-const void *const LoadavgNtCtor[] initarray = {LoadavgNtInit};
+const void *const LoadavgNtCtor[] initarray = {
+ LoadavgNtInit,
+};
diff --git a/libc/calls/mkfifo.c b/libc/calls/mkfifo.c
index 82bc49cc7..2f85e7251 100644
--- a/libc/calls/mkfifo.c
+++ b/libc/calls/mkfifo.c
@@ -18,6 +18,7 @@
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/calls/calls.h"
#include "libc/calls/internal.h"
+#include "libc/calls/strace.internal.h"
#include "libc/dce.h"
#include "libc/intrin/asan.internal.h"
#include "libc/nt/ipc.h"
@@ -33,11 +34,15 @@
* @asyncsignalsafe
*/
int mkfifo(const char *pathname, unsigned mode) {
- /* TODO(jart): Windows? */
- if (IsAsan() && !__asan_is_valid(pathname, 1)) return efault();
- if (IsLinux()) {
- return sys_mknod(pathname, mode | S_IFIFO, 0);
+ // TODO(jart): Windows?
+ int rc;
+ if (IsAsan() && !__asan_is_valid(pathname, 1)) {
+ rc = efault();
+ } else if (IsLinux()) {
+ rc = sys_mknod(pathname, mode | S_IFIFO, 0);
} else {
- return sys_mkfifo(pathname, mode);
+ rc = sys_mkfifo(pathname, mode);
}
+ STRACE("mkfifo(%#s, %#o) → %d% m", pathname, mode, rc);
+ return rc;
}
diff --git a/libc/calls/nanosleep-nt.c b/libc/calls/nanosleep-nt.c
index a02a1188a..9d6e56a63 100644
--- a/libc/calls/nanosleep-nt.c
+++ b/libc/calls/nanosleep-nt.c
@@ -49,10 +49,8 @@ textwindows noinstrument int sys_nanosleep_nt(const struct timespec *req,
} else {
slice = ms;
}
- if (!__time_critical) {
- STRACE("SleepEx(%u, true)", slice);
- }
if (SleepEx(slice, true) == kNtWaitIoCompletion) {
+ STRACE("IOCP TRIGGERED EINTR");
rc = eintr();
break;
}
diff --git a/libc/calls/offset2overlap.c b/libc/calls/offset2overlap.c
index 38e00d3dc..e50d99811 100644
--- a/libc/calls/offset2overlap.c
+++ b/libc/calls/offset2overlap.c
@@ -18,9 +18,9 @@
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/calls/internal.h"
#include "libc/nt/struct/overlapped.h"
-#include "libc/str/str.h"
-textwindows struct NtOverlapped *_offset2overlap(int64_t opt_offset,
+textwindows struct NtOverlapped *_offset2overlap(int64_t handle,
+ int64_t opt_offset,
struct NtOverlapped *mem) {
if (opt_offset == -1) return NULL;
bzero(mem, sizeof(struct NtOverlapped));
diff --git a/libc/calls/open-nt.c b/libc/calls/open-nt.c
index a95a3da4a..6fcc16a41 100644
--- a/libc/calls/open-nt.c
+++ b/libc/calls/open-nt.c
@@ -36,89 +36,12 @@
#include "libc/sysv/consts/o.h"
#include "libc/sysv/errfuns.h"
-#define _O_APPEND 0x00000400 /* kNtFileAppendData */
-#define _O_CREAT 0x00000040 /* kNtOpenAlways */
-#define _O_EXCL 0x00000080 /* kNtCreateNew */
-#define _O_TRUNC 0x00000200 /* kNtCreateAlways */
-#define _O_DIRECTORY 0x00010000 /* kNtFileFlagBackupSemantics */
-#define _O_TMPFILE 0x00410000 /* AttributeTemporary|FlagDeleteOnClose */
-#define _O_DIRECT 0x00004000 /* kNtFileFlagNoBuffering */
-#define _O_NDELAY 0x00000800 /* kNtFileFlagWriteThrough */
-#define _O_RANDOM 0x80000000 /* kNtFileFlagRandomAccess */
-#define _O_SEQUENTIAL 0x40000000 /* kNtFileFlagSequentialScan */
-#define _O_COMPRESSED 0x20000000 /* kNtFileAttributeCompressed */
-#define _O_INDEXED 0x10000000 /* !kNtFileAttributeNotContentIndexed */
-#define _O_NONBLOCK 0x00000800
-#define _O_CLOEXEC 0x00080000
-
static textwindows int64_t sys_open_nt_impl(int dirfd, const char *path,
uint32_t flags, int32_t mode) {
- int64_t handle;
- uint32_t br, err;
char16_t path16[PATH_MAX];
uint32_t perm, share, disp, attr;
-
if (__mkntpathat(dirfd, path, flags, path16) == -1) return -1;
-
- switch (flags & O_ACCMODE) {
- case O_RDONLY:
- perm = kNtFileGenericRead | kNtGenericExecute;
- break;
- case O_WRONLY:
- perm = kNtFileGenericWrite | kNtGenericExecute;
- break;
- case O_RDWR:
- perm = kNtFileGenericRead | kNtFileGenericWrite | kNtGenericExecute;
- break;
- default:
- unreachable;
- }
-
- if (flags & _O_EXCL) {
- share = kNtFileShareExclusive;
- } else {
- share = kNtFileShareRead | kNtFileShareWrite | kNtFileShareDelete;
- }
-
- if ((flags & _O_CREAT) && (flags & _O_EXCL)) {
- disp = kNtCreateNew;
- } else if ((flags & _O_CREAT) && (flags & _O_TRUNC)) {
- disp = kNtCreateAlways;
- } else if (flags & _O_CREAT) {
- disp = kNtOpenAlways;
- } else if (flags & _O_TRUNC) {
- disp = kNtTruncateExisting;
- } else {
- disp = kNtOpenExisting;
- }
-
- if ((flags & _O_TMPFILE) == _O_TMPFILE) {
- attr = kNtFileAttributeTemporary | kNtFileFlagDeleteOnClose;
- } else {
- attr = kNtFileAttributeNormal;
- if (flags & _O_DIRECTORY) attr |= kNtFileFlagBackupSemantics;
- if (~mode & 0200) {
- attr |= kNtFileAttributeReadonly;
- if (!IsTiny() && disp == kNtCreateAlways) {
- // iron out esoteric unix/win32 inconsistency (golang #38225)
- if ((handle = CreateFile(path16, perm, share, &kNtIsInheritable,
- kNtTruncateExisting, kNtFileAttributeNormal,
- 0)) != -1 ||
- (errno != ENOENT && errno != ENOTDIR)) {
- return handle;
- }
- }
- }
- }
-
- flags |= kNtFileFlagOverlapped;
- if (~flags & _O_INDEXED) attr |= kNtFileAttributeNotContentIndexed;
- if (flags & _O_COMPRESSED) attr |= kNtFileAttributeCompressed;
- if (flags & _O_SEQUENTIAL) attr |= kNtFileFlagSequentialScan;
- if (flags & _O_RANDOM) attr |= kNtFileFlagRandomAccess;
- if (flags & _O_DIRECT) attr |= kNtFileFlagNoBuffering;
- if (flags & _O_NDELAY) attr |= kNtFileFlagWriteThrough;
-
+ if (GetNtOpenFlags(flags, mode, &perm, &share, &disp, &attr) == -1) return -1;
return CreateFile(path16, perm, share, &kNtIsInheritable, disp, attr, 0);
}
@@ -141,6 +64,7 @@ static textwindows ssize_t sys_open_nt_console(int dirfd,
}
g_fds.p[fd].kind = kFdConsole;
g_fds.p[fd].flags = flags;
+ g_fds.p[fd].mode = mode;
return fd;
}
@@ -150,6 +74,7 @@ static textwindows ssize_t sys_open_nt_file(int dirfd, const char *file,
if ((g_fds.p[fd].handle = sys_open_nt_impl(dirfd, file, flags, mode)) != -1) {
g_fds.p[fd].kind = kFdFile;
g_fds.p[fd].flags = flags;
+ g_fds.p[fd].mode = mode;
return fd;
} else {
return -1;
@@ -160,7 +85,7 @@ textwindows ssize_t sys_open_nt(int dirfd, const char *file, uint32_t flags,
int32_t mode) {
int fd;
ssize_t rc;
- if ((fd = __reservefd()) == -1) return -1;
+ if ((fd = __reservefd(-1)) == -1) return -1;
if ((flags & O_ACCMODE) == O_RDWR && !strcmp(file, kNtMagicPaths.devtty)) {
rc = sys_open_nt_console(dirfd, &kNtMagicPaths, flags, mode, fd);
} else {
diff --git a/libc/calls/openanon.c b/libc/calls/openanon.c
deleted file mode 100644
index aa36cf475..000000000
--- a/libc/calls/openanon.c
+++ /dev/null
@@ -1,99 +0,0 @@
-/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
-│vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│
-╞══════════════════════════════════════════════════════════════════════════════╡
-│ Copyright 2020 Justine Alexandra Roberts Tunney │
-│ │
-│ Permission to use, copy, modify, and/or distribute this software for │
-│ any purpose with or without fee is hereby granted, provided that the │
-│ above copyright notice and this permission notice appear in all copies. │
-│ │
-│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │
-│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │
-│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │
-│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │
-│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │
-│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │
-│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
-│ PERFORMANCE OF THIS SOFTWARE. │
-╚─────────────────────────────────────────────────────────────────────────────*/
-#include "libc/assert.h"
-#include "libc/calls/calls.h"
-#include "libc/calls/internal.h"
-#include "libc/dce.h"
-#include "libc/fmt/conv.h"
-#include "libc/fmt/itoa.h"
-#include "libc/nt/createfile.h"
-#include "libc/nt/enum/accessmask.h"
-#include "libc/nt/enum/creationdisposition.h"
-#include "libc/nt/enum/fileflagandattributes.h"
-#include "libc/nt/enum/filesharemode.h"
-#include "libc/str/path.h"
-#include "libc/str/str.h"
-#include "libc/sysv/consts/at.h"
-#include "libc/sysv/consts/o.h"
-
-static struct OpenAnon { int count; } g_openanon;
-
-static void openanon_genpath(const char *name, struct OpenAnon *state,
- char pathbuf[hasatleast PATH_MAX]) {
- char c;
- size_t i;
- char *p, *pe;
- p = stpcpy(pathbuf, kTmpPath);
- pe = pathbuf + PATH_MAX - 8 - 10 - 1 - 10 - 1;
- if (!name) name = "openanon";
- for (i = 0; p < pe; ++i) {
- if (!(c = name[i])) break;
- if (_isdirsep(c)) c = '_';
- *p++ = c;
- }
- *p++ = '.';
- p += uint64toarray_radix10(getpid(), p);
- *p++ = '.';
- p += uint64toarray_radix10(++state->count, p);
- *p = '\0';
- assert(p < pe);
-}
-
-static int openanon_impl(const char *name, unsigned flags,
- struct OpenAnon *state,
- char pathbuf[hasatleast PATH_MAX]) {
- int fd;
- openanon_genpath(name, state, pathbuf);
- flags |= O_RDWR | O_CREAT | O_EXCL | O_TRUNC;
- if (!IsWindows()) {
- if ((fd = sys_openat(AT_FDCWD, pathbuf, flags, 0600)) != -1) {
- unlink(pathbuf);
- }
- return fd;
- } else {
- if ((fd = __reservefd()) == -1) return -1;
- if ((g_fds.p[fd].handle = CreateFileA(
- pathbuf, kNtGenericRead | kNtGenericWrite, kNtFileShareExclusive,
- &kNtIsInheritable, kNtCreateAlways,
- (kNtFileAttributeNotContentIndexed | kNtFileAttributeNormal |
- kNtFileAttributeTemporary | kNtFileFlagDeleteOnClose),
- 0)) != -1) {
- g_fds.p[fd].kind = kFdFile;
- g_fds.p[fd].flags = flags;
- return fd;
- } else {
- __releasefd(fd);
- return __winerr();
- }
- }
-}
-
-/**
- * Creates anonymous file.
- *
- * @param name is purely informative
- * @param flags can have O_CLOEXEC
- * @return fd of file with no name, needing close(), or -1 w/ errno
- * @see memfd_create() if disk-paranoid
- * @see mkostempsm() for named files
- */
-int openanon(char *name, unsigned flags) {
- char pathbuf[PATH_MAX];
- return openanon_impl(name, flags, &g_openanon, pathbuf);
-}
diff --git a/libc/calls/openat-metal.c b/libc/calls/openat-metal.c
index 2d83914ca..352301f0e 100644
--- a/libc/calls/openat-metal.c
+++ b/libc/calls/openat-metal.c
@@ -29,12 +29,13 @@ int sys_openat_metal(int dirfd, const char *file, int flags, unsigned mode) {
struct MetalFile *state;
if (strcmp(file, "ape.com")) return enoent();
if (!weaken(calloc)) return enomem();
- if ((fd = __reservefd()) == -1) return -1;
+ if ((fd = __reservefd(-1)) == -1) return -1;
state = weaken(calloc)(1, sizeof(struct MetalFile));
state->base = (char *)_base;
state->size = _end - _base;
g_fds.p[fd].kind = kFdFile;
g_fds.p[fd].flags = flags;
+ g_fds.p[fd].mode = mode;
g_fds.p[fd].handle = (intptr_t)state;
return fd;
}
diff --git a/libc/calls/pipe-nt.c b/libc/calls/pipe-nt.c
index 2af1a78e9..ad967c610 100644
--- a/libc/calls/pipe-nt.c
+++ b/libc/calls/pipe-nt.c
@@ -20,31 +20,41 @@
#include "libc/nt/createfile.h"
#include "libc/nt/enum/accessmask.h"
#include "libc/nt/enum/creationdisposition.h"
+#include "libc/nt/enum/fileflagandattributes.h"
#include "libc/nt/ipc.h"
#include "libc/nt/runtime.h"
+#include "libc/sysv/consts/o.h"
#include "libc/sysv/errfuns.h"
textwindows int sys_pipe_nt(int pipefd[2], unsigned flags) {
+ uint32_t mode;
int64_t hin, hout;
int reader, writer;
char16_t pipename[64];
if (!pipefd) return efault();
CreatePipeName(pipename);
- if ((reader = __reservefd()) == -1) return -1;
- if ((writer = __reservefd()) == -1) {
+ if ((reader = __reservefd(-1)) == -1) return -1;
+ if ((writer = __reservefd(-1)) == -1) {
__releasefd(reader);
return -1;
}
- if ((hin = CreateNamedPipe(pipename, kNtPipeAccessInbound,
- kNtPipeWait | kNtPipeReadmodeByte, 1, 65536, 65536,
- 0, &kNtIsInheritable)) != -1) {
+ if (~flags & O_DIRECT) {
+ mode = kNtPipeTypeByte | kNtPipeReadmodeByte;
+ } else {
+ mode = kNtPipeTypeMessage | kNtPipeReadmodeMessage;
+ }
+ if ((hin = CreateNamedPipe(pipename,
+ kNtPipeAccessInbound | kNtFileFlagOverlapped, mode,
+ 1, 512, 512, 0, &kNtIsInheritable)) != -1) {
if ((hout = CreateFile(pipename, kNtGenericWrite, 0, &kNtIsInheritable,
- kNtOpenExisting, 0, 0)) != -1) {
+ kNtOpenExisting, kNtFileFlagOverlapped, 0)) != -1) {
g_fds.p[reader].kind = kFdFile;
g_fds.p[reader].flags = flags;
+ g_fds.p[reader].mode = 0010444;
g_fds.p[reader].handle = hin;
g_fds.p[writer].kind = kFdFile;
g_fds.p[writer].flags = flags;
+ g_fds.p[writer].mode = 0010222;
g_fds.p[writer].handle = hout;
pipefd[0] = reader;
pipefd[1] = writer;
diff --git a/libc/calls/pipe.c b/libc/calls/pipe.c
index 03a97f54b..e7ec101c0 100644
--- a/libc/calls/pipe.c
+++ b/libc/calls/pipe.c
@@ -18,6 +18,7 @@
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/calls/calls.h"
#include "libc/calls/internal.h"
+#include "libc/calls/strace.internal.h"
#include "libc/dce.h"
#include "libc/intrin/asan.internal.h"
#include "libc/sysv/errfuns.h"
@@ -31,10 +32,18 @@
* @see pipe2()
*/
int pipe(int pipefd[hasatleast 2]) {
- if (IsAsan() && !__asan_is_valid(pipefd, sizeof(int) * 2)) return efault();
- if (!IsWindows()) {
- return sys_pipe(pipefd);
+ int rc;
+ if (IsAsan() && !__asan_is_valid(pipefd, sizeof(int) * 2)) {
+ rc = efault();
+ } else if (!IsWindows()) {
+ rc = sys_pipe(pipefd);
} else {
- return sys_pipe_nt(pipefd, 0);
+ rc = sys_pipe_nt(pipefd, 0);
}
+ if (!rc) {
+ STRACE("pipe([{%d, %d}]) → %d% m", pipefd[0], pipefd[1], rc);
+ } else {
+ STRACE("pipe(%p) → %d% m", pipefd, rc);
+ }
+ return rc;
}
diff --git a/libc/calls/pipe2.c b/libc/calls/pipe2.c
index f782dd018..faa8e4d3e 100644
--- a/libc/calls/pipe2.c
+++ b/libc/calls/pipe2.c
@@ -17,6 +17,7 @@
│ PERFORMANCE OF THIS SOFTWARE. │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/calls/internal.h"
+#include "libc/calls/strace.internal.h"
#include "libc/dce.h"
#include "libc/intrin/asan.internal.h"
#include "libc/sysv/errfuns.h"
@@ -29,11 +30,20 @@
* @return 0 on success, or -1 w/ errno and pipefd isn't modified
*/
int pipe2(int pipefd[hasatleast 2], int flags) {
- if (!pipefd) return efault();
- if (IsAsan() && !__asan_is_valid(pipefd, sizeof(int) * 2)) return efault();
- if (!IsWindows()) {
- return sys_pipe2(pipefd, flags);
+ int rc;
+ if (!pipefd) {
+ rc = efault();
+ } else if (IsAsan() && !__asan_is_valid(pipefd, sizeof(int) * 2)) {
+ rc = efault();
+ } else if (!IsWindows()) {
+ rc = sys_pipe2(pipefd, flags);
} else {
- return sys_pipe_nt(pipefd, flags);
+ rc = sys_pipe_nt(pipefd, flags);
}
+ if (!rc) {
+ STRACE("pipe2([{%d, %d}], %#o) → %d% m", pipefd[0], pipefd[1], flags, rc);
+ } else {
+ STRACE("pipe2(%p, %#o) → %d% m", pipefd, flags, rc);
+ }
+ return rc;
}
diff --git a/libc/calls/printfds.c b/libc/calls/printfds.c
new file mode 100644
index 000000000..8b5d93ffd
--- /dev/null
+++ b/libc/calls/printfds.c
@@ -0,0 +1,58 @@
+/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
+│vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│
+╞══════════════════════════════════════════════════════════════════════════════╡
+│ Copyright 2022 Justine Alexandra Roberts Tunney │
+│ │
+│ Permission to use, copy, modify, and/or distribute this software for │
+│ any purpose with or without fee is hereby granted, provided that the │
+│ above copyright notice and this permission notice appear in all copies. │
+│ │
+│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │
+│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │
+│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │
+│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │
+│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │
+│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │
+│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
+│ PERFORMANCE OF THIS SOFTWARE. │
+╚─────────────────────────────────────────────────────────────────────────────*/
+#include "libc/calls/internal.h"
+#include "libc/intrin/kprintf.h"
+
+static const char *__fdkind2str(int x) {
+ switch (x) {
+ case kFdEmpty:
+ return "kFdEmpty";
+ case kFdFile:
+ return "kFdFile";
+ case kFdSocket:
+ return "kFdSocket";
+ case kFdProcess:
+ return "kFdProcess";
+ case kFdConsole:
+ return "kFdConsole";
+ case kFdSerial:
+ return "kFdSerial";
+ case kFdZip:
+ return "kFdZip";
+ case kFdEpoll:
+ return "kFdEpoll";
+ default:
+ return "kFdWut";
+ }
+}
+
+void __printfds(void) {
+ int i;
+ for (i = 0; i < g_fds.n; ++i) {
+ if (!g_fds.p[i].kind) continue;
+ kprintf("%3d %s", i, __fdkind2str(g_fds.p[i].kind));
+ if (g_fds.p[i].zombie) kprintf(" zombie");
+ if (g_fds.p[i].flags) kprintf(" flags=%#x", g_fds.p[i].flags);
+ if (g_fds.p[i].mode) kprintf(" mode=%#o", g_fds.p[i].mode);
+ if (g_fds.p[i].handle) kprintf(" handle=%ld", g_fds.p[i].handle);
+ if (g_fds.p[i].extra) kprintf(" extra=%ld", g_fds.p[i].extra);
+ if (g_fds.p[i].worker) kprintf(" worker=%p", g_fds.p[i].worker);
+ kprintf("%n", g_fds.p[i].zombie);
+ }
+}
diff --git a/libc/calls/raise.c b/libc/calls/raise.c
index d638f85f3..e73c58884 100644
--- a/libc/calls/raise.c
+++ b/libc/calls/raise.c
@@ -23,6 +23,7 @@
#include "libc/calls/strace.internal.h"
#include "libc/intrin/kprintf.h"
#include "libc/nt/console.h"
+#include "libc/nt/errors.h"
#include "libc/nt/process.h"
#include "libc/nt/runtime.h"
#include "libc/nt/synchronization.h"
@@ -65,7 +66,9 @@ int raise(int sig) {
// doesn't make any sense and it's so evil.
if (GenerateConsoleCtrlEvent(event, 0)) {
// XXX: we shouldn't need to sleep here ctrl-c is evil on nt
- SleepEx(100, false);
+ if (SleepEx(100, true) == kNtWaitIoCompletion) {
+ STRACE("IOCP TRIGGERED EINTR");
+ }
__sig_check(false);
rc = 0;
} else {
diff --git a/libc/calls/read-nt.c b/libc/calls/read-nt.c
index 03859fd13..ff8cb597a 100644
--- a/libc/calls/read-nt.c
+++ b/libc/calls/read-nt.c
@@ -20,30 +20,48 @@
#include "libc/bits/bits.h"
#include "libc/bits/weaken.h"
#include "libc/calls/internal.h"
+#include "libc/calls/sig.internal.h"
+#include "libc/calls/strace.internal.h"
#include "libc/calls/struct/iovec.h"
#include "libc/errno.h"
+#include "libc/intrin/kprintf.h"
#include "libc/limits.h"
+#include "libc/nt/enum/wait.h"
#include "libc/nt/errors.h"
+#include "libc/nt/files.h"
+#include "libc/nt/ipc.h"
#include "libc/nt/runtime.h"
#include "libc/nt/struct/overlapped.h"
+#include "libc/nt/synchronization.h"
+#include "libc/sock/internal.h"
#include "libc/sysv/errfuns.h"
static textwindows ssize_t sys_read_nt_impl(struct Fd *fd, void *data,
size_t size, ssize_t offset) {
- uint32_t got;
+ uint32_t err, got, avail;
struct NtOverlapped overlap;
- if (ReadFile(fd->handle, data, _clampio(size), &got,
- _offset2overlap(offset, &overlap))) {
- return got;
- } else if (
- // make sure read() returns 0 on broken pipe
- GetLastError() == kNtErrorBrokenPipe ||
- // make sure pread() returns 0 if we start reading after EOF
- GetLastError() == kNtErrorHandleEof) {
- return 0;
- } else {
- return __winerr();
+ if (fd->worker) {
+ for (;;) {
+ if (!PeekNamedPipe(fd->handle, 0, 0, 0, &avail, 0)) break;
+ if (avail) break;
+ if (SleepEx(__SIG_POLLING_INTERVAL_MS, true) == kNtWaitIoCompletion ||
+ _check_interrupts(true, g_fds.p)) {
+ return eintr();
+ }
+ }
}
+ if (ReadFile(fd->handle, data, _clampio(size), &got,
+ _offset2overlap(fd->handle, offset, &overlap))) {
+ return got;
+ }
+ err = GetLastError();
+ // make sure read() returns 0 on broken pipe
+ if (err == kNtErrorBrokenPipe) return 0;
+ // make sure read() returns 0 on closing named pipe
+ if (err == kNtErrorNoData) return 0;
+ // make sure pread() returns 0 if we start reading after EOF
+ if (err == kNtErrorHandleEof) return 0;
+ return __winerr();
}
textwindows ssize_t sys_read_nt(struct Fd *fd, const struct iovec *iov,
diff --git a/libc/calls/read.c b/libc/calls/read.c
index 51df598c5..f8ff698d4 100644
--- a/libc/calls/read.c
+++ b/libc/calls/read.c
@@ -23,6 +23,7 @@
#include "libc/calls/struct/iovec.h"
#include "libc/dce.h"
#include "libc/intrin/asan.internal.h"
+#include "libc/intrin/kprintf.h"
#include "libc/sock/internal.h"
#include "libc/sock/sock.h"
#include "libc/sysv/errfuns.h"
diff --git a/libc/calls/readv-nt.c b/libc/calls/readv-nt.c
index 1cc9d56c8..a9b55edd5 100644
--- a/libc/calls/readv-nt.c
+++ b/libc/calls/readv-nt.c
@@ -28,7 +28,7 @@ textwindows ssize_t sys_readv_nt(struct Fd *fd, const struct iovec *iov,
case kFdConsole:
return sys_read_nt(fd, iov, iovlen, -1);
case kFdSocket:
- return weaken(sys_recvfrom_nt)(fd, iov, iovlen, 0, NULL, 0);
+ return weaken(sys_recv_nt)(fd, iov, iovlen, 0);
default:
return ebadf();
}
diff --git a/libc/calls/readv-serial.c b/libc/calls/readv-serial.c
index f9b1d120b..15fde98be 100644
--- a/libc/calls/readv-serial.c
+++ b/libc/calls/readv-serial.c
@@ -37,7 +37,9 @@ static int GetFirstIov(struct iovec *iov, int iovlen) {
ssize_t sys_readv_serial(struct Fd *fd, const struct iovec *iov, int iovlen) {
size_t i;
if ((i = GetFirstIov(iov, iovlen)) != -1) {
- while (!IsDataAvailable(fd)) asm("pause");
+ while (!IsDataAvailable(fd)) {
+ __builtin_ia32_pause();
+ }
((char *)iov[i].iov_base)[0] = inb(fd->handle);
return 1;
} else {
diff --git a/libc/calls/releasefd.c b/libc/calls/releasefd.c
index 90e43ad57..58cd4bff6 100644
--- a/libc/calls/releasefd.c
+++ b/libc/calls/releasefd.c
@@ -17,16 +17,14 @@
│ PERFORMANCE OF THIS SOFTWARE. │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/calls/internal.h"
-#include "libc/intrin/cmpxchg.h"
-#include "libc/intrin/lockcmpxchg.h"
+#include "libc/intrin/spinlock.h"
+#include "libc/macros.internal.h"
void __releasefd(int fd) {
- int x;
- if (!__vforked && 0 <= fd && fd < g_fds.n) {
- bzero(g_fds.p + fd, sizeof(*g_fds.p));
- do {
- x = g_fds.f;
- if (fd >= x) break;
- } while (!_lockcmpxchg(&g_fds.f, x, fd));
+ if (0 <= fd && fd < g_fds.n) {
+ _spinlock(&__fds_lock);
+ g_fds.p[fd].kind = 0;
+ g_fds.f = MIN(fd, g_fds.f);
+ _spunlock(&__fds_lock);
}
}
diff --git a/libc/calls/reservefd.c b/libc/calls/reservefd.c
index edbaf99c6..e4461710b 100644
--- a/libc/calls/reservefd.c
+++ b/libc/calls/reservefd.c
@@ -16,24 +16,98 @@
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
│ PERFORMANCE OF THIS SOFTWARE. │
╚─────────────────────────────────────────────────────────────────────────────*/
+#include "libc/assert.h"
+#include "libc/bits/weaken.h"
+#include "libc/calls/calls.h"
#include "libc/calls/internal.h"
-#include "libc/intrin/cmpxchg.h"
+#include "libc/calls/strace.internal.h"
+#include "libc/intrin/kprintf.h"
+#include "libc/intrin/spinlock.h"
+#include "libc/macros.internal.h"
#include "libc/mem/mem.h"
+#include "libc/runtime/runtime.h"
+#include "libc/str/str.h"
#include "libc/sysv/errfuns.h"
+/**
+ * Grows file descriptor array memory if needed.
+ */
+int __ensurefds(int fd) {
+ size_t n1, n2;
+ struct Fd *p1, *p2;
+ _spinlock(&__fds_lock);
+ n1 = g_fds.n;
+ if (fd >= n1) {
+ STRACE("__ensurefds(%d) extending", fd);
+ if (weaken(malloc)) {
+ // TODO(jart): we need a semaphore for this
+ p1 = g_fds.p;
+ n2 = fd + (fd >> 1);
+ if ((p2 = weaken(malloc)(n2 * sizeof(*p1)))) {
+ memcpy(p2, p1, n1 * sizeof(*p1));
+ g_fds.p = p2;
+ g_fds.n = n2;
+ if (p1 != g_fds.__init_p) {
+ weaken(free)(p1);
+ }
+ } else {
+ fd = enomem();
+ }
+ } else {
+ fd = emfile();
+ }
+ }
+ _spunlock(&__fds_lock);
+ return fd;
+}
+
/**
* Finds open file descriptor slot.
*/
-int __reservefd(void) {
+int __reservefd(int start) {
int fd;
for (;;) {
- fd = g_fds.f;
- if (fd >= g_fds.n) {
- if (__ensurefds(fd) == -1) return -1;
- }
- _cmpxchg(&g_fds.f, fd, fd + 1);
- if (_cmpxchg(&g_fds.p[fd].kind, kFdEmpty, kFdReserved)) {
+ _spinlock(&__fds_lock);
+ fd = start < 0 ? g_fds.f : start;
+ while (fd < g_fds.n && g_fds.p[fd].kind) ++fd;
+ if (fd < g_fds.n) {
+ g_fds.f = fd + 1;
+ bzero(g_fds.p + fd, sizeof(*g_fds.p));
+ g_fds.p[fd].kind = kFdReserved;
+ _spunlock(&__fds_lock);
return fd;
+ } else {
+ _spunlock(&__fds_lock);
+ if (__ensurefds(fd) == -1) {
+ return -1;
+ }
}
}
}
+
+/**
+ * Closes non-stdio file descriptors to free dynamic memory.
+ */
+static void __freefds(void) {
+ int i;
+ STRACE("__freefds()");
+ for (i = 3; i < g_fds.n; ++i) {
+ if (g_fds.p[i].kind) {
+ close(i);
+ }
+ }
+ if (g_fds.p != g_fds.__init_p) {
+ memcpy(g_fds.__init_p, g_fds.p, sizeof(*g_fds.p) * 3);
+ weaken(free)(g_fds.p);
+ g_fds.p = g_fds.__init_p;
+ g_fds.n = ARRAYLEN(g_fds.__init_p);
+ }
+}
+
+static textstartup void __freefds_init(void) {
+ atexit(__freefds);
+}
+
+const void *const __freefds_ctor[] initarray = {
+ __freefds_init,
+};
diff --git a/libc/calls/setgid.c b/libc/calls/setgid.c
new file mode 100644
index 000000000..76a5d2989
--- /dev/null
+++ b/libc/calls/setgid.c
@@ -0,0 +1,32 @@
+/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
+│vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│
+╞══════════════════════════════════════════════════════════════════════════════╡
+│ Copyright 2022 Justine Alexandra Roberts Tunney │
+│ │
+│ Permission to use, copy, modify, and/or distribute this software for │
+│ any purpose with or without fee is hereby granted, provided that the │
+│ above copyright notice and this permission notice appear in all copies. │
+│ │
+│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │
+│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │
+│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │
+│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │
+│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │
+│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │
+│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
+│ PERFORMANCE OF THIS SOFTWARE. │
+╚─────────────────────────────────────────────────────────────────────────────*/
+#include "libc/calls/calls.h"
+#include "libc/calls/internal.h"
+#include "libc/calls/strace.internal.h"
+
+/**
+ * Sets effective group id of current process.
+ * @return 0 on success or -1 w/ errno
+ */
+int setgid(int gid) {
+ int rc;
+ rc = sys_setgid(gid);
+ STRACE("%s(%d) → %d% m", "setgid", gid);
+ return rc;
+}
diff --git a/libc/calls/setuid.c b/libc/calls/setuid.c
new file mode 100644
index 000000000..edf316f04
--- /dev/null
+++ b/libc/calls/setuid.c
@@ -0,0 +1,32 @@
+/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
+│vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│
+╞══════════════════════════════════════════════════════════════════════════════╡
+│ Copyright 2022 Justine Alexandra Roberts Tunney │
+│ │
+│ Permission to use, copy, modify, and/or distribute this software for │
+│ any purpose with or without fee is hereby granted, provided that the │
+│ above copyright notice and this permission notice appear in all copies. │
+│ │
+│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │
+│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │
+│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │
+│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │
+│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │
+│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │
+│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
+│ PERFORMANCE OF THIS SOFTWARE. │
+╚─────────────────────────────────────────────────────────────────────────────*/
+#include "libc/calls/calls.h"
+#include "libc/calls/internal.h"
+#include "libc/calls/strace.internal.h"
+
+/**
+ * Sets effective group id of current process.
+ * @return 0 on success or -1 w/ errno
+ */
+int setuid(int uid) {
+ int rc;
+ rc = sys_setuid(uid);
+ STRACE("%s(%d) → %d% m", "setuid", uid);
+ return rc;
+}
diff --git a/libc/calls/sig.c b/libc/calls/sig.c
index fc4ca7d1c..5412f0bf5 100644
--- a/libc/calls/sig.c
+++ b/libc/calls/sig.c
@@ -38,7 +38,7 @@ textwindows int __sig_mask(int how, const sigset_t *neu, sigset_t *old) {
int i;
uint64_t a, b;
if (how == SIG_BLOCK || how == SIG_UNBLOCK || how == SIG_SETMASK) {
- cthread_spinlock(&__sig_lock);
+ _spinlock(&__sig_lock);
if (old) {
*old = __sig.mask;
}
@@ -54,7 +54,7 @@ textwindows int __sig_mask(int how, const sigset_t *neu, sigset_t *old) {
}
__sig.mask.__bits[0] &= ~(SIGKILL | SIGSTOP);
}
- cthread_spunlock(&__sig_lock);
+ _spunlock(&__sig_lock);
return 0;
} else {
return einval();
diff --git a/libc/calls/sig2.c b/libc/calls/sig2.c
index 51062b4e9..fe380e8cc 100644
--- a/libc/calls/sig2.c
+++ b/libc/calls/sig2.c
@@ -64,7 +64,7 @@ static textwindows void __sig_free(struct Signal *mem) {
static textwindows struct Signal *__sig_remove(void) {
struct Signal *prev, *res;
if (__sig.queue) {
- cthread_spinlock(&__sig_lock);
+ _spinlock(&__sig_lock);
for (prev = 0, res = __sig.queue; res; prev = res, res = res->next) {
if (!sigismember(&__sig.mask, res->sig)) {
if (res == __sig.queue) {
@@ -78,7 +78,7 @@ static textwindows struct Signal *__sig_remove(void) {
STRACE("%G is masked", res->sig);
}
}
- cthread_spunlock(&__sig_lock);
+ _spunlock(&__sig_lock);
} else {
res = 0;
}
@@ -97,7 +97,7 @@ static textwindows bool __sig_deliver(bool restartable, int sig, int si_code,
STRACE("delivering %G", sig);
// enter the signal
- cthread_spinlock(&__sig_lock);
+ _spinlock(&__sig_lock);
rva = __sighandrvas[sig];
flags = __sighandflags[sig];
if ((~flags & SA_NODEFER) || (flags & SA_RESETHAND)) {
@@ -108,7 +108,7 @@ static textwindows bool __sig_deliver(bool restartable, int sig, int si_code,
// signal handler. in that case you must use SA_NODEFER.
__sighandrvas[sig] = (int32_t)(intptr_t)SIG_DFL;
}
- cthread_spunlock(&__sig_lock);
+ _spunlock(&__sig_lock);
// setup the somewhat expensive information args
// only if they're requested by the user in sigaction()
@@ -130,9 +130,9 @@ static textwindows bool __sig_deliver(bool restartable, int sig, int si_code,
// since sigaction() is @asyncsignalsafe we only restore it if the
// user didn't change it during the signal handler. we also don't
// need to do anything if this was a oneshot signal or nodefer.
- cthread_spinlock(&__sig_lock);
+ _spinlock(&__sig_lock);
_cmpxchg(__sighandrvas + sig, (int32_t)(intptr_t)SIG_DFL, rva);
- cthread_spunlock(&__sig_lock);
+ _spunlock(&__sig_lock);
}
if (!restartable) {
@@ -192,9 +192,9 @@ textwindows bool __sig_handle(bool restartable, int sig, int si_code,
textwindows int __sig_raise(int sig, int si_code) {
int rc;
int candeliver;
- cthread_spinlock(&__sig_lock);
+ _spinlock(&__sig_lock);
candeliver = !sigismember(&__sig.mask, sig);
- cthread_spunlock(&__sig_lock);
+ _spunlock(&__sig_lock);
switch (candeliver) {
case 1:
__sig_handle(false, sig, si_code, 0);
@@ -217,7 +217,7 @@ textwindows int __sig_add(int sig, int si_code) {
struct Signal *mem;
if (1 <= sig && sig <= NSIG) {
STRACE("enqueuing %G", sig);
- cthread_spinlock(&__sig_lock);
+ _spinlock(&__sig_lock);
if ((mem = __sig_alloc())) {
mem->sig = sig;
mem->si_code = si_code;
@@ -227,7 +227,7 @@ textwindows int __sig_add(int sig, int si_code) {
} else {
rc = enomem();
}
- cthread_spunlock(&__sig_lock);
+ _spunlock(&__sig_lock);
} else {
rc = einval();
}
diff --git a/libc/calls/sigaction.c b/libc/calls/sigaction.c
index c0150fc9d..11199fb0b 100644
--- a/libc/calls/sigaction.c
+++ b/libc/calls/sigaction.c
@@ -213,7 +213,7 @@ static int __sigaction(int sig, const struct sigaction *act,
rc = 0;
}
if (rc != -1 && !__vforked) {
- cthread_spinlock(&__sig_lock);
+ _spinlock(&__sig_lock);
if (oldact) {
oldrva = __sighandrvas[sig];
oldact->sa_sigaction = (sigaction_f)(
@@ -223,7 +223,7 @@ static int __sigaction(int sig, const struct sigaction *act,
__sighandrvas[sig] = rva;
__sighandflags[sig] = act->sa_flags;
}
- cthread_spunlock(&__sig_lock);
+ _spunlock(&__sig_lock);
}
return rc;
}
diff --git a/libc/calls/sigsuspend.c b/libc/calls/sigsuspend.c
index 6a3e85deb..7dc0c58bc 100644
--- a/libc/calls/sigsuspend.c
+++ b/libc/calls/sigsuspend.c
@@ -26,6 +26,7 @@
#include "libc/dce.h"
#include "libc/intrin/asan.internal.h"
#include "libc/log/backtrace.internal.h"
+#include "libc/nt/errors.h"
#include "libc/nt/synchronization.h"
#include "libc/sysv/errfuns.h"
@@ -76,7 +77,11 @@ int sigsuspend(const sigset_t *ignore) {
rc = eintr();
break;
}
- SleepEx(__SIG_POLLING_INTERVAL_MS, true);
+ if (SleepEx(__SIG_POLLING_INTERVAL_MS, true) == kNtWaitIoCompletion) {
+ STRACE("IOCP TRIGGERED EINTR");
+ rc = eintr();
+ break;
+ }
#ifdef SYSDEBUG
ms += __SIG_POLLING_INTERVAL_MS;
if (ms >= __SIG_LOGGING_INTERVAL_MS) {
diff --git a/libc/calls/wait4-nt.c b/libc/calls/wait4-nt.c
index ed82d83ad..c411018eb 100644
--- a/libc/calls/wait4-nt.c
+++ b/libc/calls/wait4-nt.c
@@ -67,7 +67,7 @@ static textwindows int sys_wait4_nt_impl(int pid, int *opt_out_wstatus,
if (!__isfdopen(pid) &&
(handle = OpenProcess(kNtSynchronize | kNtProcessQueryInformation,
true, pid))) {
- if ((pid = __reservefd()) != -1) {
+ if ((pid = __reservefd(-1)) != -1) {
g_fds.p[pid].kind = kFdProcess;
g_fds.p[pid].handle = handle;
g_fds.p[pid].flags = O_CLOEXEC;
@@ -111,6 +111,8 @@ static textwindows int sys_wait4_nt_impl(int pid, int *opt_out_wstatus,
}
if (opt_out_rusage) {
bzero(opt_out_rusage, sizeof(*opt_out_rusage));
+ bzero(&memcount, sizeof(memcount));
+ memcount.cb = sizeof(struct NtProcessMemoryCountersEx);
if (GetProcessMemoryInfo(handles[i], &memcount, sizeof(memcount))) {
opt_out_rusage->ru_maxrss = memcount.PeakWorkingSetSize / 1024;
opt_out_rusage->ru_majflt = memcount.PageFaultCount;
diff --git a/libc/calls/wincrash.c b/libc/calls/wincrash.c
index b2ee12d60..bd2487faf 100644
--- a/libc/calls/wincrash.c
+++ b/libc/calls/wincrash.c
@@ -18,6 +18,7 @@
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/calls/internal.h"
#include "libc/calls/sig.internal.h"
+#include "libc/calls/sigbits.h"
#include "libc/calls/strace.internal.h"
#include "libc/calls/typedef/sigaction_f.h"
#include "libc/calls/ucontext.h"
diff --git a/libc/calls/wincrash_init.S b/libc/calls/wincrash_init.S
index abd1e1322..68274ae48 100644
--- a/libc/calls/wincrash_init.S
+++ b/libc/calls/wincrash_init.S
@@ -19,6 +19,8 @@
#include "libc/macros.internal.h"
.init.start 300,_init_wincrash
+ mov __wincrashearly(%rip),%rcx
+ ntcall __imp_RemoveVectoredExceptionHandler
pushpop 1,%rcx
ezlea __wincrash_nt,dx
ntcall __imp_AddVectoredExceptionHandler
diff --git a/libc/calls/wincrashearly.c b/libc/calls/wincrashearly.c
new file mode 100644
index 000000000..f2da22651
--- /dev/null
+++ b/libc/calls/wincrashearly.c
@@ -0,0 +1,20 @@
+/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
+│vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│
+╞══════════════════════════════════════════════════════════════════════════════╡
+│ Copyright 2022 Justine Alexandra Roberts Tunney │
+│ │
+│ Permission to use, copy, modify, and/or distribute this software for │
+│ any purpose with or without fee is hereby granted, provided that the │
+│ above copyright notice and this permission notice appear in all copies. │
+│ │
+│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │
+│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │
+│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │
+│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │
+│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │
+│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │
+│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
+│ PERFORMANCE OF THIS SOFTWARE. │
+╚─────────────────────────────────────────────────────────────────────────────*/
+
+int64_t __wincrashearly;
diff --git a/libc/calls/write-nt.c b/libc/calls/write-nt.c
index a7b9f5a1d..839d4d956 100644
--- a/libc/calls/write-nt.c
+++ b/libc/calls/write-nt.c
@@ -23,12 +23,14 @@
#include "libc/calls/struct/iovec.h"
#include "libc/calls/struct/siginfo.h"
#include "libc/calls/typedef/sigaction_f.h"
+#include "libc/intrin/kprintf.h"
#include "libc/limits.h"
#include "libc/nt/errors.h"
#include "libc/nt/files.h"
#include "libc/nt/runtime.h"
#include "libc/nt/struct/overlapped.h"
#include "libc/runtime/internal.h"
+#include "libc/sock/internal.h"
#include "libc/str/str.h"
#include "libc/sysv/consts/sicode.h"
#include "libc/sysv/consts/sig.h"
@@ -36,17 +38,20 @@
static textwindows ssize_t sys_write_nt_impl(int fd, void *data, size_t size,
ssize_t offset) {
- uint32_t sent;
+ uint32_t err, sent;
struct NtOverlapped overlap;
if (WriteFile(g_fds.p[fd].handle, data, _clampio(size), &sent,
- _offset2overlap(offset, &overlap))) {
+ _offset2overlap(g_fds.p[fd].handle, offset, &overlap))) {
return sent;
- } else if (GetLastError() == kNtErrorBrokenPipe) {
+ }
+ err = GetLastError();
+ // make sure write() raises SIGPIPE on broken pipe
+ // make sure write() raises SIGPIPE on closing named pipe
+ if (err == kNtErrorBrokenPipe || err == kNtErrorNoData) {
__sig_raise(SIGPIPE, SI_KERNEL);
return epipe();
- } else {
- return __winerr();
}
+ return __winerr();
}
textwindows ssize_t sys_write_nt(int fd, const struct iovec *iov, size_t iovlen,
diff --git a/libc/calls/writev-nt.c b/libc/calls/writev-nt.c
index fec0c7d00..fb6231aa1 100644
--- a/libc/calls/writev-nt.c
+++ b/libc/calls/writev-nt.c
@@ -27,7 +27,7 @@ textwindows ssize_t sys_writev_nt(int fd, const struct iovec *iov, int iovlen) {
case kFdConsole:
return sys_write_nt(fd, iov, iovlen, -1);
case kFdSocket:
- return weaken(sys_sendto_nt)(fd, iov, iovlen, 0, NULL, 0);
+ return weaken(sys_send_nt)(fd, iov, iovlen, 0);
default:
return ebadf();
}
diff --git a/libc/calls/writev-serial.c b/libc/calls/writev-serial.c
index b3fc44b4c..0fc1e5aaf 100644
--- a/libc/calls/writev-serial.c
+++ b/libc/calls/writev-serial.c
@@ -24,7 +24,9 @@ ssize_t sys_writev_serial(struct Fd *fd, const struct iovec *iov, int iovlen) {
size_t i, j, wrote = 0;
for (i = 0; i < iovlen; ++i) {
for (j = 0; j < iov[i].iov_len; ++j) {
- while (!(inb(fd->handle + UART_LSR) & UART_TTYTXR)) asm("pause");
+ while (!(inb(fd->handle + UART_LSR) & UART_TTYTXR)) {
+ __builtin_ia32_pause();
+ }
outb(fd->handle, ((char *)iov[i].iov_base)[j]);
++wrote;
}
diff --git a/libc/fmt/fmt.h b/libc/fmt/fmt.h
index 4bbc8e145..3bc5b89c2 100644
--- a/libc/fmt/fmt.h
+++ b/libc/fmt/fmt.h
@@ -27,6 +27,7 @@ int vsscanf(const char *, const char *, va_list);
int vcscanf(int (*)(void *), int (*)(int, void *), void *, const char *,
va_list);
int strerror_r(int, char *, size_t) dontthrow nocallback;
+int strerror_wr(int, uint32_t, char *, size_t) dontthrow nocallback;
const char *strerror_short(int) nosideeffect;
const char *strerror_long(int) nosideeffect;
int __fmt(void *, void *, const char *, va_list) hidden;
diff --git a/libc/fmt/strerror_r.greg.c b/libc/fmt/strerror_r.greg.c
index b1195a3cd..22365785b 100644
--- a/libc/fmt/strerror_r.greg.c
+++ b/libc/fmt/strerror_r.greg.c
@@ -16,23 +16,8 @@
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
│ PERFORMANCE OF THIS SOFTWARE. │
╚─────────────────────────────────────────────────────────────────────────────*/
-#define ShouldUseMsabiAttribute() 1
-#include "libc/bits/safemacros.internal.h"
-#include "libc/dce.h"
-#include "libc/errno.h"
#include "libc/fmt/fmt.h"
-#include "libc/fmt/itoa.h"
-#include "libc/intrin/kprintf.h"
-#include "libc/log/libfatal.internal.h"
-#include "libc/macros.internal.h"
-#include "libc/nexgen32e/bsr.h"
-#include "libc/nt/enum/formatmessageflags.h"
-#include "libc/nt/enum/lang.h"
-#include "libc/nt/memory.h"
-#include "libc/nt/process.h"
#include "libc/nt/runtime.h"
-#include "libc/str/str.h"
-#include "libc/str/tpenc.h"
/**
* Converts errno value to string.
@@ -41,32 +26,5 @@
* @return 0 on success, or error code
*/
privileged int strerror_r(int err, char *buf, size_t size) {
- /* kprintf() weakly depends on this function */
- int c, n, winerr;
- char16_t winmsg[256];
- const char *sym, *msg;
- sym = firstnonnull(strerror_short(err), "EUNKNOWN");
- msg = firstnonnull(strerror_long(err), "No error information");
- if (IsTiny()) {
- if (!sym) sym = "EUNKNOWN";
- for (; (c = *sym++); --size)
- if (size > 1) *buf++ = c;
- if (size) *buf = 0;
- } else if (!IsWindows()) {
- ksnprintf(buf, size, "%s[%d][%s]", sym, err, msg);
- } else {
- winerr = __imp_GetLastError();
- if ((n = __imp_FormatMessageW(
- kNtFormatMessageFromSystem | kNtFormatMessageIgnoreInserts, 0,
- winerr, MAKELANGID(kNtLangNeutral, kNtSublangDefault), winmsg,
- ARRAYLEN(winmsg), 0))) {
- while ((n && winmsg[n - 1] <= ' ') || winmsg[n - 1] == '.') --n;
- ksnprintf(buf, size, "%s[%d][%s][%.*hs][%d]", sym, err, msg, n, winmsg,
- winerr);
- } else {
- ksnprintf(buf, size, "%s[%d][%s][%d]", sym, err, msg, winerr);
- }
- __imp_SetLastError(winerr);
- }
- return 0;
+ return strerror_wr(err, GetLastError(), buf, size);
}
diff --git a/libc/fmt/strerror_wr.greg.c b/libc/fmt/strerror_wr.greg.c
new file mode 100644
index 000000000..b4c795038
--- /dev/null
+++ b/libc/fmt/strerror_wr.greg.c
@@ -0,0 +1,62 @@
+/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
+│vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│
+╞══════════════════════════════════════════════════════════════════════════════╡
+│ Copyright 2022 Justine Alexandra Roberts Tunney │
+│ │
+│ Permission to use, copy, modify, and/or distribute this software for │
+│ any purpose with or without fee is hereby granted, provided that the │
+│ above copyright notice and this permission notice appear in all copies. │
+│ │
+│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │
+│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │
+│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │
+│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │
+│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │
+│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │
+│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
+│ PERFORMANCE OF THIS SOFTWARE. │
+╚─────────────────────────────────────────────────────────────────────────────*/
+#define ShouldUseMsabiAttribute() 1
+#include "libc/bits/safemacros.internal.h"
+#include "libc/dce.h"
+#include "libc/fmt/fmt.h"
+#include "libc/intrin/kprintf.h"
+#include "libc/macros.internal.h"
+#include "libc/nt/enum/formatmessageflags.h"
+#include "libc/nt/enum/lang.h"
+#include "libc/nt/process.h"
+
+/**
+ * Converts errno value to string with explicit windows errno too.
+ *
+ * @param err is error number or zero if unknown
+ * @return 0 on success, or error code
+ */
+privileged int strerror_wr(int err, uint32_t winerr, char *buf, size_t size) {
+ /* kprintf() weakly depends on this function */
+ int c, n;
+ char16_t winmsg[256];
+ const char *sym, *msg;
+ sym = firstnonnull(strerror_short(err), "EUNKNOWN");
+ msg = firstnonnull(strerror_long(err), "No error information");
+ if (IsTiny()) {
+ if (!sym) sym = "EUNKNOWN";
+ for (; (c = *sym++); --size)
+ if (size > 1) *buf++ = c;
+ if (size) *buf = 0;
+ } else if (!IsWindows()) {
+ ksnprintf(buf, size, "%s[%d][%s]", sym, err, msg);
+ } else {
+ if ((n = __imp_FormatMessageW(
+ kNtFormatMessageFromSystem | kNtFormatMessageIgnoreInserts, 0,
+ winerr, MAKELANGID(kNtLangNeutral, kNtSublangDefault), winmsg,
+ ARRAYLEN(winmsg), 0))) {
+ while ((n && winmsg[n - 1] <= ' ') || winmsg[n - 1] == '.') --n;
+ ksnprintf(buf, size, "%s[%d][%s][%.*hs][%d]", sym, err, msg, n, winmsg,
+ winerr);
+ } else {
+ ksnprintf(buf, size, "%s[%d][%s][%d]", sym, err, msg, winerr);
+ }
+ }
+ return 0;
+}
diff --git a/libc/intrin/asan.c b/libc/intrin/asan.c
index d9fe24aff..6fd7e194d 100644
--- a/libc/intrin/asan.c
+++ b/libc/intrin/asan.c
@@ -51,13 +51,13 @@
#include "libc/sysv/consts/nr.h"
#include "libc/sysv/consts/prot.h"
#include "libc/sysv/errfuns.h"
-#include "third_party/dlmalloc/dlmalloc.internal.h"
+#include "third_party/dlmalloc/dlmalloc.h"
STATIC_YOINK("_init_asan");
#define ASAN_MORGUE_ITEMS 512
#define ASAN_MORGUE_THRESHOLD 65536 // morgue memory O(ITEMS*THRESHOLD)
-#define ASAN_TRACE_ITEMS 16 // backtrace limit on malloc origin
+#define ASAN_TRACE_ITEMS 16 // backtrace limit on malloc origin
/**
* @fileoverview Cosmopolitan Address Sanitizer Runtime.
@@ -177,8 +177,7 @@ static uint64_t __asan_roundup2pow(uint64_t x) {
static char *__asan_utf8cpy(char *p, unsigned c) {
uint64_t z;
z = tpenc(c);
- do
- *p++ = z;
+ do *p++ = z;
while ((z >>= 8));
return p;
}
@@ -922,8 +921,7 @@ static void __asan_trace(struct AsanTrace *bt, const struct StackFrame *bp) {
if (!__asan_checka(SHADOW(bp), sizeof(*bp) >> 3).kind) {
addr = bp->addr;
if (addr == weakaddr("__gc") && weakaddr("__gc")) {
- do
- --gi;
+ do --gi;
while ((addr = garbage->p[gi].ret) == weakaddr("__gc"));
}
bt->p[i] = addr;
diff --git a/libc/intrin/assertfail.c b/libc/intrin/assertfail.c
index 6aaeec388..ba38f3dc5 100644
--- a/libc/intrin/assertfail.c
+++ b/libc/intrin/assertfail.c
@@ -18,6 +18,7 @@
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/assert.h"
#include "libc/bits/weaken.h"
+#include "libc/calls/strace.internal.h"
#include "libc/intrin/cmpxchg.h"
#include "libc/intrin/kprintf.h"
#include "libc/intrin/lockcmpxchg.h"
@@ -32,6 +33,8 @@ relegated wontreturn void __assert_fail(const char *expr, const char *file,
int line) {
int rc;
static bool noreentry;
+ __strace = 0;
+ g_ftrace = 0;
kprintf("%s:%d: assert(%s) failed%n", file, line, expr);
if (_lockcmpxchg(&noreentry, false, true)) {
if (weaken(__die)) {
diff --git a/libc/intrin/closehandle.greg.c b/libc/intrin/closehandle.greg.c
index a9d3f8600..fdf99152d 100644
--- a/libc/intrin/closehandle.greg.c
+++ b/libc/intrin/closehandle.greg.c
@@ -16,12 +16,14 @@
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
│ PERFORMANCE OF THIS SOFTWARE. │
╚─────────────────────────────────────────────────────────────────────────────*/
+#include "libc/bits/weaken.h"
#include "libc/calls/internal.h"
#include "libc/calls/strace.internal.h"
+#include "libc/log/log.h"
#include "libc/nt/runtime.h"
#include "libc/nt/thunk/msabi.h"
-extern typeof(CloseHandle) *const __imp_CloseHandle __msabi;
+__msabi extern typeof(CloseHandle) *const __imp_CloseHandle;
/**
* Closes an open object handle.
@@ -30,7 +32,10 @@ extern typeof(CloseHandle) *const __imp_CloseHandle __msabi;
textwindows bool32 CloseHandle(int64_t hObject) {
bool32 ok;
ok = __imp_CloseHandle(hObject);
- if (!ok) __winerr();
+ if (!ok) {
+ __winerr();
+ if (weaken(__die)) weaken(__die)();
+ }
STRACE("CloseHandle(%ld) → %hhhd% m", hObject, ok);
return ok;
}
diff --git a/libc/intrin/createdirectory.greg.c b/libc/intrin/createdirectory.greg.c
index d6d242f92..16252b79e 100644
--- a/libc/intrin/createdirectory.greg.c
+++ b/libc/intrin/createdirectory.greg.c
@@ -18,10 +18,11 @@
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/calls/internal.h"
#include "libc/calls/strace.internal.h"
+#include "libc/intrin/describeflags.internal.h"
#include "libc/nt/files.h"
#include "libc/nt/thunk/msabi.h"
-extern typeof(CreateDirectory) *const __imp_CreateDirectoryW __msabi;
+__msabi extern typeof(CreateDirectory) *const __imp_CreateDirectoryW;
/**
* Creates directory on the New Technology.
@@ -35,7 +36,7 @@ CreateDirectory(const char16_t *lpPathName,
bool32 ok;
ok = __imp_CreateDirectoryW(lpPathName, lpSecurityAttributes);
if (!ok) __winerr();
- STRACE("CreateDirectory(%#hs, %p) → %hhhd% m", lpPathName,
- lpSecurityAttributes, ok);
+ STRACE("CreateDirectory(%#hs, %s) → %hhhd% m", lpPathName,
+ DescribeNtSecurityAttributes(lpSecurityAttributes), ok);
return ok;
}
diff --git a/libc/intrin/createfile.greg.c b/libc/intrin/createfile.greg.c
index 215770db3..23755b1cc 100644
--- a/libc/intrin/createfile.greg.c
+++ b/libc/intrin/createfile.greg.c
@@ -20,27 +20,9 @@
#include "libc/calls/strace.internal.h"
#include "libc/intrin/describeflags.internal.h"
#include "libc/nt/createfile.h"
-#include "libc/nt/enum/creationdisposition.h"
#include "libc/nt/thunk/msabi.h"
-extern typeof(CreateFile) *const __imp_CreateFileW __msabi;
-
-static const char *DescribeDisposition(int x) {
- switch (x) {
- case kNtCreateNew:
- return "kNtCreateNew";
- case kNtCreateAlways:
- return "kNtCreateAlways";
- case kNtOpenExisting:
- return "kNtOpenExisting";
- case kNtOpenAlways:
- return "kNtOpenAlways";
- case kNtTruncateExisting:
- return "kNtTruncateExisting";
- default:
- return "wut";
- }
-}
+__msabi extern typeof(CreateFile) *const __imp_CreateFileW;
/**
* Opens file on the New Technology.
@@ -58,10 +40,11 @@ textwindows int64_t CreateFile(
opt_lpSecurityAttributes, dwCreationDisposition,
dwFlagsAndAttributes, opt_hTemplateFile);
if (hHandle == -1) __winerr();
- STRACE("CreateFile(%#hs, %s, %s, %p, %s, %s, %ld) → %ld% m", lpFileName,
+ STRACE("CreateFile(%#hs, %s, %s, %s, %s, %s, %ld) → %ld% m", lpFileName,
DescribeNtFileAccessFlags(dwDesiredAccess),
- DescribeNtFileShareFlags(dwShareMode), opt_lpSecurityAttributes,
- DescribeDisposition(dwCreationDisposition),
+ DescribeNtFileShareFlags(dwShareMode),
+ DescribeNtSecurityAttributes(opt_lpSecurityAttributes),
+ DescribeNtCreationDisposition(dwCreationDisposition),
DescribeNtFileFlagsAndAttributes(dwFlagsAndAttributes),
opt_hTemplateFile, hHandle);
return hHandle;
diff --git a/libc/intrin/createfilemapping.greg.c b/libc/intrin/createfilemapping.greg.c
index ff52dca24..9022f8f77 100644
--- a/libc/intrin/createfilemapping.greg.c
+++ b/libc/intrin/createfilemapping.greg.c
@@ -23,7 +23,7 @@
#include "libc/nt/struct/securityattributes.h"
#include "libc/nt/thunk/msabi.h"
-extern typeof(CreateFileMapping) *const __imp_CreateFileMappingW __msabi;
+__msabi extern typeof(CreateFileMapping) *const __imp_CreateFileMappingW;
/**
* Creates file mapping object on the New Technology.
@@ -43,7 +43,8 @@ textwindows int64_t CreateFileMapping(
flProtect, dwMaximumSizeHigh,
dwMaximumSizeLow, opt_lpName);
if (!hHandle) __winerr();
- STRACE("CreateFileMapping(%ld, %s, max:%'zu, name:%#hs) → %ld% m", opt_hFile,
+ STRACE("CreateFileMapping(%ld, %s, %s, %'zu, %#hs) → %ld% m", opt_hFile,
+ DescribeNtSecurityAttributes(opt_lpFileMappingAttributes),
DescribeNtPageFlags(flProtect),
(uint64_t)dwMaximumSizeHigh << 32 | dwMaximumSizeLow, opt_lpName,
hHandle);
diff --git a/libc/intrin/createfilemappingnuma.greg.c b/libc/intrin/createfilemappingnuma.greg.c
index 117b436ee..bdc005761 100644
--- a/libc/intrin/createfilemappingnuma.greg.c
+++ b/libc/intrin/createfilemappingnuma.greg.c
@@ -23,8 +23,8 @@
#include "libc/nt/memory.h"
#include "libc/nt/struct/securityattributes.h"
-extern typeof(CreateFileMappingNuma) *const
- __imp_CreateFileMappingNumaW __msabi;
+__msabi extern typeof(CreateFileMappingNuma) *const
+ __imp_CreateFileMappingNumaW;
/**
* Creates file mapping object on the New Technology.
@@ -44,8 +44,9 @@ textwindows int64_t CreateFileMappingNuma(
opt_hFile, opt_lpFileMappingAttributes, flProtect, dwMaximumSizeHigh,
dwMaximumSizeLow, opt_lpName, nndDesiredNumaNode);
if (!hHandle) __winerr();
- STRACE("CreateFileMappingNuma(%ld, %s, max:%'zu, name:%#hs) → %ld% m",
- opt_hFile, DescribeNtPageFlags(flProtect),
+ STRACE("CreateFileMappingNuma(%ld, %s, %s, %'zu, %#hs) → %ld% m", opt_hFile,
+ DescribeNtSecurityAttributes(opt_lpFileMappingAttributes),
+ DescribeNtPageFlags(flProtect),
(uint64_t)dwMaximumSizeHigh << 32 | dwMaximumSizeLow, opt_lpName,
hHandle);
return hHandle;
diff --git a/libc/intrin/createnamedpipe.greg.c b/libc/intrin/createnamedpipe.greg.c
index 31828bfc9..457ba3986 100644
--- a/libc/intrin/createnamedpipe.greg.c
+++ b/libc/intrin/createnamedpipe.greg.c
@@ -18,11 +18,12 @@
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/calls/internal.h"
#include "libc/calls/strace.internal.h"
+#include "libc/intrin/describeflags.internal.h"
#include "libc/nt/ipc.h"
#include "libc/nt/struct/securityattributes.h"
#include "libc/nt/thunk/msabi.h"
-extern typeof(CreateNamedPipe) *const __imp_CreateNamedPipeW __msabi;
+__msabi extern typeof(CreateNamedPipe) *const __imp_CreateNamedPipeW;
/**
* Creates pipe.
@@ -40,16 +41,10 @@ textwindows int64_t CreateNamedPipe(
nMaxInstances, nOutBufferSize, nInBufferSize,
nDefaultTimeOutMs, opt_lpSecurityAttributes);
if (hServer == -1) __winerr();
- STRACE("CreateNamedPipe(%#hs,"
- " dwOpenMode=%u,"
- " dwPipeMode=%u,"
- " nMaxInstances=%u,"
- " nOutBufferSize=%'u,"
- " nInBufferSize=%'u,"
- " nDefaultTimeOutMs=%'u,"
- " lpSecurity=%p) → "
- "%ld% m",
- lpName, dwOpenMode, dwPipeMode, nMaxInstances, nOutBufferSize,
- nInBufferSize, nDefaultTimeOutMs, opt_lpSecurityAttributes, hServer);
+ STRACE("CreateNamedPipe(%#hs, %s, %s, %u, %'u, %'u, %'u, %s) → %ld% m",
+ lpName, DescribeNtPipeOpenFlags(dwOpenMode),
+ DescribeNtPipeModeFlags(dwPipeMode), nMaxInstances, nOutBufferSize,
+ nInBufferSize, nDefaultTimeOutMs,
+ DescribeNtSecurityAttributes(opt_lpSecurityAttributes), hServer);
return hServer;
}
diff --git a/libc/intrin/createpipe.greg.c b/libc/intrin/createpipe.greg.c
index 007d36568..8eede07cc 100644
--- a/libc/intrin/createpipe.greg.c
+++ b/libc/intrin/createpipe.greg.c
@@ -18,11 +18,12 @@
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/calls/internal.h"
#include "libc/calls/strace.internal.h"
+#include "libc/intrin/describeflags.internal.h"
#include "libc/nt/ipc.h"
#include "libc/nt/struct/securityattributes.h"
#include "libc/nt/thunk/msabi.h"
-extern typeof(CreatePipe) *const __imp_CreatePipe __msabi;
+__msabi extern typeof(CreatePipe) *const __imp_CreatePipe;
/**
* Creates anonymous pipe.
@@ -35,7 +36,8 @@ textwindows bool32 CreatePipe(
ok = __imp_CreatePipe(out_hReadPipe, out_hWritePipe, opt_lpPipeAttributes,
nSize);
if (!ok) __winerr();
- STRACE("CreatePipe([%ld], [%ld], %p, %'zu) → %hhhd% m", *out_hReadPipe,
- *out_hWritePipe, opt_lpPipeAttributes, nSize, ok);
+ STRACE("CreatePipe([%ld], [%ld], %s, %'zu) → %hhhd% m", *out_hReadPipe,
+ *out_hWritePipe, DescribeNtSecurityAttributes(opt_lpPipeAttributes),
+ nSize, ok);
return ok;
}
diff --git a/libc/intrin/createprocess.greg.c b/libc/intrin/createprocess.greg.c
index 56b0679ea..36cf40d9b 100644
--- a/libc/intrin/createprocess.greg.c
+++ b/libc/intrin/createprocess.greg.c
@@ -18,10 +18,11 @@
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/calls/internal.h"
#include "libc/calls/strace.internal.h"
+#include "libc/intrin/describeflags.internal.h"
#include "libc/nt/process.h"
#include "libc/nt/thunk/msabi.h"
-extern typeof(CreateProcess) *const __imp_CreateProcessW __msabi;
+__msabi extern typeof(CreateProcess) *const __imp_CreateProcessW;
/**
* Creates process on the New Technology.
@@ -44,10 +45,11 @@ CreateProcess(const char16_t *opt_lpApplicationName, char16_t *lpCommandLine,
opt_out_lpProcessInformation);
if (!ok) __winerr();
STRACE(
- "CreateFile(%#hs, %#hs, %p, %p, %hhhd, %u, %p, %#hs, %p, %p) → %hhhd% m",
- opt_lpApplicationName, lpCommandLine, opt_lpProcessAttributes,
- opt_lpThreadAttributes, bInheritHandles, dwCreationFlags,
- opt_lpEnvironment, opt_lpCurrentDirectory, lpStartupInfo,
+ "CreateFile(%#hs, %#hs, %s, %s, %hhhd, %u, %p, %#hs, %p, %p) → %hhhd% m",
+ opt_lpApplicationName, lpCommandLine,
+ DescribeNtSecurityAttributes(opt_lpProcessAttributes),
+ DescribeNtSecurityAttributes(opt_lpThreadAttributes), bInheritHandles,
+ dwCreationFlags, opt_lpEnvironment, opt_lpCurrentDirectory, lpStartupInfo,
opt_out_lpProcessInformation, ok);
return ok;
}
diff --git a/libc/intrin/createthread.greg.c b/libc/intrin/createthread.greg.c
index 566491e69..2dab0bd54 100644
--- a/libc/intrin/createthread.greg.c
+++ b/libc/intrin/createthread.greg.c
@@ -18,10 +18,11 @@
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/calls/internal.h"
#include "libc/calls/strace.internal.h"
+#include "libc/intrin/describeflags.internal.h"
#include "libc/nt/struct/securityattributes.h"
#include "libc/nt/thread.h"
-extern typeof(CreateThread) *const __imp_CreateThread __msabi;
+__msabi extern typeof(CreateThread) *const __imp_CreateThread;
/**
* Opens file on the New Technology.
@@ -38,9 +39,8 @@ textwindows int64_t CreateThread(
hHandle = __imp_CreateThread(lpThreadAttributes, dwStackSize, lpStartAddress,
lpParameter, dwCreationFlags, opt_lpThreadId);
if (hHandle == -1) __winerr();
- STRACE("CreateThread(sec=%p, stack=%'zu, start=%p, param=%p, flags=%s, "
- "id=%p) → %ld% m",
- lpThreadAttributes, dwStackSize, lpStartAddress, lpParameter,
- dwCreationFlags, opt_lpThreadId, hHandle);
+ STRACE("CreateThread(%s, %'zu, %p, %p, %s, %p) → %ld% m",
+ DescribeNtSecurityAttributes(lpThreadAttributes), dwStackSize,
+ lpStartAddress, lpParameter, dwCreationFlags, opt_lpThreadId, hHandle);
return hHandle;
}
diff --git a/libc/intrin/deletefile.greg.c b/libc/intrin/deletefile.greg.c
index e28bc7c64..17ac4a829 100644
--- a/libc/intrin/deletefile.greg.c
+++ b/libc/intrin/deletefile.greg.c
@@ -22,7 +22,7 @@
#include "libc/nt/memory.h"
#include "libc/nt/thunk/msabi.h"
-extern typeof(DeleteFile) *const __imp_DeleteFileW __msabi;
+__msabi extern typeof(DeleteFile) *const __imp_DeleteFileW;
/**
* Deletes existing file.
diff --git a/libc/intrin/describeflags.internal.h b/libc/intrin/describeflags.internal.h
index 3beff09b8..cb9538119 100644
--- a/libc/intrin/describeflags.internal.h
+++ b/libc/intrin/describeflags.internal.h
@@ -2,6 +2,7 @@
#define COSMOPOLITAN_LIBC_INTRIN_DESCRIBEFLAGS_INTERNAL_H_
#if !(__ASSEMBLER__ + __LINKER__ + 0)
COSMOPOLITAN_C_START_
+#include "libc/nt/struct/securityattributes.h"
struct thatispacked DescribeFlags {
unsigned flag;
@@ -16,14 +17,19 @@ const char *DescribeProtFlags(int);
const char *DescribeRemapFlags(int);
const char *DescribeNtPageFlags(uint32_t);
+const char *DescribeNtStartFlags(uint32_t);
const char *DescribeNtFileMapFlags(uint32_t);
const char *DescribeNtFiletypeFlags(uint32_t);
-const char *DescribeNtFileFlagsAndAttributes(uint32_t);
+const char *DescribeNtPipeOpenFlags(uint32_t);
+const char *DescribeNtPipeModeFlags(uint32_t);
const char *DescribeNtFileShareFlags(uint32_t);
const char *DescribeNtFileAccessFlags(uint32_t);
const char *DescribeNtProcessAccessFlags(uint32_t);
+const char *DescribeNtCreationDisposition(uint32_t);
const char *DescribeNtConsoleModeInputFlags(uint32_t);
const char *DescribeNtConsoleModeOutputFlags(uint32_t);
+const char *DescribeNtFileFlagsAndAttributes(uint32_t);
+const char *DescribeNtSecurityAttributes(struct NtSecurityAttributes *);
COSMOPOLITAN_C_END_
#endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */
diff --git a/libc/intrin/describentcreationdisposition.greg.c b/libc/intrin/describentcreationdisposition.greg.c
new file mode 100644
index 000000000..3285578d0
--- /dev/null
+++ b/libc/intrin/describentcreationdisposition.greg.c
@@ -0,0 +1,37 @@
+/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
+│vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│
+╞══════════════════════════════════════════════════════════════════════════════╡
+│ Copyright 2022 Justine Alexandra Roberts Tunney │
+│ │
+│ Permission to use, copy, modify, and/or distribute this software for │
+│ any purpose with or without fee is hereby granted, provided that the │
+│ above copyright notice and this permission notice appear in all copies. │
+│ │
+│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │
+│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │
+│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │
+│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │
+│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │
+│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │
+│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
+│ PERFORMANCE OF THIS SOFTWARE. │
+╚─────────────────────────────────────────────────────────────────────────────*/
+#include "libc/intrin/describeflags.internal.h"
+#include "libc/nt/enum/creationdisposition.h"
+
+const char *DescribeNtCreationDisposition(uint32_t x) {
+ switch (x) {
+ case kNtCreateNew:
+ return "kNtCreateNew";
+ case kNtCreateAlways:
+ return "kNtCreateAlways";
+ case kNtOpenExisting:
+ return "kNtOpenExisting";
+ case kNtOpenAlways:
+ return "kNtOpenAlways";
+ case kNtTruncateExisting:
+ return "kNtTruncateExisting";
+ default:
+ return "wut";
+ }
+}
diff --git a/libc/intrin/describentfileaccessflags.greg.c b/libc/intrin/describentfileaccessflags.greg.c
index a6f93b5be..cd09bf6ab 100644
--- a/libc/intrin/describentfileaccessflags.greg.c
+++ b/libc/intrin/describentfileaccessflags.greg.c
@@ -64,7 +64,7 @@ static const struct DescribeFlags kFileAccessflags[] = {
};
const char *DescribeNtFileAccessFlags(uint32_t x) {
- static char ntfileaccessflags[256];
+ static char ntfileaccessflags[512];
return DescribeFlags(ntfileaccessflags, sizeof(ntfileaccessflags),
kFileAccessflags, ARRAYLEN(kFileAccessflags), "kNt", x);
}
diff --git a/libc/intrin/describentfileflagsandattributes.greg.c b/libc/intrin/describentfileflagsandattributes.greg.c
index d0ce0675c..9e6174477 100644
--- a/libc/intrin/describentfileflagsandattributes.greg.c
+++ b/libc/intrin/describentfileflagsandattributes.greg.c
@@ -17,8 +17,10 @@
│ PERFORMANCE OF THIS SOFTWARE. │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/intrin/describeflags.internal.h"
+#include "libc/intrin/kprintf.h"
#include "libc/macros.internal.h"
#include "libc/nt/enum/fileflagandattributes.h"
+#include "libc/runtime/runtime.h"
static const struct DescribeFlags kFileFlags[] = {
{kNtFileAttributeReadonly, "AttributeReadonly"}, //
diff --git a/libc/intrin/describentpipemodeflags.greg.c b/libc/intrin/describentpipemodeflags.greg.c
new file mode 100644
index 000000000..978935805
--- /dev/null
+++ b/libc/intrin/describentpipemodeflags.greg.c
@@ -0,0 +1,39 @@
+/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
+│vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│
+╞══════════════════════════════════════════════════════════════════════════════╡
+│ Copyright 2022 Justine Alexandra Roberts Tunney │
+│ │
+│ Permission to use, copy, modify, and/or distribute this software for │
+│ any purpose with or without fee is hereby granted, provided that the │
+│ above copyright notice and this permission notice appear in all copies. │
+│ │
+│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │
+│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │
+│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │
+│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │
+│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │
+│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │
+│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
+│ PERFORMANCE OF THIS SOFTWARE. │
+╚─────────────────────────────────────────────────────────────────────────────*/
+#include "libc/intrin/describeflags.internal.h"
+#include "libc/macros.internal.h"
+#include "libc/nt/enum/filemapflags.h"
+#include "libc/nt/ipc.h"
+
+static const struct DescribeFlags kPipeModeFlags[] = {
+ {kNtPipeNowait, "Nowait"}, // 0x0000000001
+ {kNtPipeReadmodeMessage, "ReadmodeMessage"}, // 0x0000000002
+ {kNtPipeTypeMessage, "TypeMessage"}, // 0x0000000004
+ {kNtPipeRejectRemoteClients, "RejectRemoteClients"}, // 0x0000000008
+ //{kNtPipeAcceptRemoteClients, "AcceptRemoteClients"}, // 0x00000000
+ //{kNtPipeReadmodeByte, "ReadmodeByte"}, // 0x00000000
+ //{kNtPipeWait, "Wait"}, // 0x00000000
+ //{kNtPipeTypeByte, "TypeByte"}, // 0x00000000
+};
+
+const char *DescribeNtPipeModeFlags(uint32_t x) {
+ static char pipemodeflags[64];
+ return DescribeFlags(pipemodeflags, sizeof(pipemodeflags), kPipeModeFlags,
+ ARRAYLEN(kPipeModeFlags), "kNtPipe", x);
+}
diff --git a/libc/intrin/describentpipeopenflags.greg.c b/libc/intrin/describentpipeopenflags.greg.c
new file mode 100644
index 000000000..7b6fb5c68
--- /dev/null
+++ b/libc/intrin/describentpipeopenflags.greg.c
@@ -0,0 +1,34 @@
+/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
+│vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│
+╞══════════════════════════════════════════════════════════════════════════════╡
+│ Copyright 2022 Justine Alexandra Roberts Tunney │
+│ │
+│ Permission to use, copy, modify, and/or distribute this software for │
+│ any purpose with or without fee is hereby granted, provided that the │
+│ above copyright notice and this permission notice appear in all copies. │
+│ │
+│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │
+│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │
+│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │
+│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │
+│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │
+│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │
+│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
+│ PERFORMANCE OF THIS SOFTWARE. │
+╚─────────────────────────────────────────────────────────────────────────────*/
+#include "libc/intrin/describeflags.internal.h"
+#include "libc/macros.internal.h"
+#include "libc/nt/enum/filemapflags.h"
+#include "libc/nt/ipc.h"
+
+static const struct DescribeFlags kPipeOpenFlags[] = {
+ {kNtPipeAccessDuplex, "Duplex"}, // 0x00000003
+ {kNtPipeAccessOutbound, "Outbound"}, // 0x00000002
+ {kNtPipeAccessInbound, "Inbound"}, // 0x00000001
+};
+
+const char *DescribeNtPipeOpenFlags(uint32_t x) {
+ static char pipeopenflags[64];
+ return DescribeFlags(pipeopenflags, sizeof(pipeopenflags), kPipeOpenFlags,
+ ARRAYLEN(kPipeOpenFlags), "kNtPipeAccess", x);
+}
diff --git a/libc/intrin/describentsecurityattributes.greg.c b/libc/intrin/describentsecurityattributes.greg.c
new file mode 100644
index 000000000..192fb72f0
--- /dev/null
+++ b/libc/intrin/describentsecurityattributes.greg.c
@@ -0,0 +1,26 @@
+/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
+│vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│
+╞══════════════════════════════════════════════════════════════════════════════╡
+│ Copyright 2022 Justine Alexandra Roberts Tunney │
+│ │
+│ Permission to use, copy, modify, and/or distribute this software for │
+│ any purpose with or without fee is hereby granted, provided that the │
+│ above copyright notice and this permission notice appear in all copies. │
+│ │
+│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │
+│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │
+│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │
+│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │
+│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │
+│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │
+│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
+│ PERFORMANCE OF THIS SOFTWARE. │
+╚─────────────────────────────────────────────────────────────────────────────*/
+#include "libc/calls/internal.h"
+#include "libc/intrin/describeflags.internal.h"
+#include "libc/nt/struct/securityattributes.h"
+
+const char *DescribeNtSecurityAttributes(struct NtSecurityAttributes *p) {
+ if (p == &kNtIsInheritable) return "&kNtIsInheritable";
+ return "0";
+}
diff --git a/libc/intrin/describentstartflags.greg.c b/libc/intrin/describentstartflags.greg.c
new file mode 100644
index 000000000..e7578a56c
--- /dev/null
+++ b/libc/intrin/describentstartflags.greg.c
@@ -0,0 +1,45 @@
+/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
+│vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│
+╞══════════════════════════════════════════════════════════════════════════════╡
+│ Copyright 2022 Justine Alexandra Roberts Tunney │
+│ │
+│ Permission to use, copy, modify, and/or distribute this software for │
+│ any purpose with or without fee is hereby granted, provided that the │
+│ above copyright notice and this permission notice appear in all copies. │
+│ │
+│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │
+│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │
+│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │
+│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │
+│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │
+│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │
+│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
+│ PERFORMANCE OF THIS SOFTWARE. │
+╚─────────────────────────────────────────────────────────────────────────────*/
+#include "libc/intrin/describeflags.internal.h"
+#include "libc/macros.internal.h"
+#include "libc/nt/enum/startf.h"
+#include "libc/sysv/consts/prot.h"
+
+static const struct DescribeFlags kNtStartFlags[] = {
+ {kNtStartfUseshowwindow, "Useshowwindow"}, //
+ {kNtStartfUsesize, "Usesize"}, //
+ {kNtStartfUseposition, "Useposition"}, //
+ {kNtStartfUsecountchars, "Usecountchars"}, //
+ {kNtStartfUsefillattribute, "Usefillattribute"}, //
+ {kNtStartfRunfullscreen, "Runfullscreen"}, //
+ {kNtStartfForceonfeedback, "Forceonfeedback"}, //
+ {kNtStartfForceofffeedback, "Forceofffeedback"}, //
+ {kNtStartfUsestdhandles, "Usestdhandles"}, //
+ {kNtStartfUsehotkey, "Usehotkey"}, //
+ {kNtStartfTitleislinkname, "Titleislinkname"}, //
+ {kNtStartfTitleisappid, "Titleisappid"}, //
+ {kNtStartfPreventpinning, "Preventpinning"}, //
+ {kNtStartfUntrustedsource, "Untrustedsource"}, //
+};
+
+const char *DescribeNtStartFlags(uint32_t x) {
+ static char startflags[128];
+ return DescribeFlags(startflags, sizeof(startflags), kNtStartFlags,
+ ARRAYLEN(kNtStartFlags), "kNtStartf", x);
+}
diff --git a/libc/intrin/deviceiocontrol.greg.c b/libc/intrin/deviceiocontrol.greg.c
index 3a471cdea..c9924e17f 100644
--- a/libc/intrin/deviceiocontrol.greg.c
+++ b/libc/intrin/deviceiocontrol.greg.c
@@ -22,7 +22,7 @@
#include "libc/nt/struct/overlapped.h"
#include "libc/nt/thunk/msabi.h"
-extern typeof(DeviceIoControl) *const __imp_DeviceIoControl __msabi;
+__msabi extern typeof(DeviceIoControl) *const __imp_DeviceIoControl;
/**
* Does device file stuff on the New Technology.
diff --git a/libc/intrin/dos2errno.greg.c b/libc/intrin/dos2errno.greg.c
index 3ae29744b..73c90b8a8 100644
--- a/libc/intrin/dos2errno.greg.c
+++ b/libc/intrin/dos2errno.greg.c
@@ -17,16 +17,10 @@
│ PERFORMANCE OF THIS SOFTWARE. │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/errno.h"
+#include "libc/intrin/dos2errno.internal.h"
#include "libc/nt/errors.h"
#include "libc/sock/sock.h"
-struct thatispacked Dos2Errno {
- uint16_t doscode;
- int32_t systemv;
-};
-
-extern const struct Dos2Errno kDos2Errno[];
-
/**
* Translates Windows error using superset of consts.sh.
*
diff --git a/libc/intrin/dos2errno.internal.h b/libc/intrin/dos2errno.internal.h
new file mode 100644
index 000000000..bc02eb705
--- /dev/null
+++ b/libc/intrin/dos2errno.internal.h
@@ -0,0 +1,15 @@
+#ifndef COSMOPOLITAN_LIBC_INTRIN_DOS2ERRNO_INTERNAL_H_
+#define COSMOPOLITAN_LIBC_INTRIN_DOS2ERRNO_INTERNAL_H_
+#if !(__ASSEMBLER__ + __LINKER__ + 0)
+COSMOPOLITAN_C_START_
+
+struct thatispacked Dos2Errno {
+ uint16_t doscode;
+ int32_t systemv;
+};
+
+extern const struct Dos2Errno kDos2Errno[];
+
+COSMOPOLITAN_C_END_
+#endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */
+#endif /* COSMOPOLITAN_LIBC_INTRIN_DOS2ERRNO_INTERNAL_H_ */
diff --git a/libc/intrin/findclose.greg.c b/libc/intrin/findclose.greg.c
new file mode 100644
index 000000000..b30f697b8
--- /dev/null
+++ b/libc/intrin/findclose.greg.c
@@ -0,0 +1,36 @@
+/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
+│vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│
+╞══════════════════════════════════════════════════════════════════════════════╡
+│ Copyright 2022 Justine Alexandra Roberts Tunney │
+│ │
+│ Permission to use, copy, modify, and/or distribute this software for │
+│ any purpose with or without fee is hereby granted, provided that the │
+│ above copyright notice and this permission notice appear in all copies. │
+│ │
+│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │
+│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │
+│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │
+│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │
+│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │
+│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │
+│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
+│ PERFORMANCE OF THIS SOFTWARE. │
+╚─────────────────────────────────────────────────────────────────────────────*/
+#include "libc/calls/internal.h"
+#include "libc/calls/strace.internal.h"
+#include "libc/nt/files.h"
+#include "libc/nt/thunk/msabi.h"
+
+__msabi extern typeof(FindClose) *const __imp_FindClose;
+
+/**
+ * Finds more files in directory.
+ * @note this wrapper takes care of ABI, STRACE(), and __winerr()
+ */
+textwindows bool32 FindClose(int64_t hFindFile) {
+ bool32 ok;
+ ok = __imp_FindClose(hFindFile);
+ if (!ok) __winerr();
+ STRACE("FindClose(%ld) → %hhhd% m", hFindFile, ok);
+ return ok;
+}
diff --git a/libc/intrin/findfirstfile.greg.c b/libc/intrin/findfirstfile.greg.c
index fc7446085..48a8e773a 100644
--- a/libc/intrin/findfirstfile.greg.c
+++ b/libc/intrin/findfirstfile.greg.c
@@ -24,7 +24,7 @@
#include "libc/nt/struct/win32finddata.h"
#include "libc/nt/thunk/msabi.h"
-extern typeof(FindFirstFile) *const __imp_FindFirstFileW __msabi;
+__msabi extern typeof(FindFirstFile) *const __imp_FindFirstFileW;
/**
* Finds first file in directory.
diff --git a/libc/intrin/findnextfile.greg.c b/libc/intrin/findnextfile.greg.c
index 15942fc28..9532ba07b 100644
--- a/libc/intrin/findnextfile.greg.c
+++ b/libc/intrin/findnextfile.greg.c
@@ -19,12 +19,14 @@
#include "libc/calls/internal.h"
#include "libc/calls/strace.internal.h"
#include "libc/intrin/describeflags.internal.h"
+#include "libc/nt/errors.h"
#include "libc/nt/files.h"
#include "libc/nt/memory.h"
+#include "libc/nt/runtime.h"
#include "libc/nt/struct/win32finddata.h"
#include "libc/nt/thunk/msabi.h"
-extern typeof(FindNextFile) *const __imp_FindNextFileW __msabi;
+__msabi extern typeof(FindNextFile) *const __imp_FindNextFileW;
/**
* Finds more files in directory.
@@ -45,8 +47,8 @@ textwindows bool32 FindNextFile(int64_t hFindFile,
DescribeNtFileFlagsAndAttributes(out_lpFindFileData->dwFileAttributes),
DescribeNtFiletypeFlags(out_lpFindFileData->dwFileType), ok);
} else {
- __winerr();
- STRACE("FindNextFile(%ld, [n/a]) → %hhhd% m", hFindFile, ok);
+ if (GetLastError() != kNtErrorNoMoreFiles) __winerr();
+ STRACE("FindNextFile(%ld) → %hhhd% m", hFindFile, ok);
}
return ok;
}
diff --git a/libc/intrin/flushfilebuffers.greg.c b/libc/intrin/flushfilebuffers.greg.c
index cb157ac02..28e7ecfa3 100644
--- a/libc/intrin/flushfilebuffers.greg.c
+++ b/libc/intrin/flushfilebuffers.greg.c
@@ -20,7 +20,7 @@
#include "libc/calls/strace.internal.h"
#include "libc/nt/files.h"
-extern typeof(FlushFileBuffers) *const __imp_FlushFileBuffers __msabi;
+__msabi extern typeof(FlushFileBuffers) *const __imp_FlushFileBuffers;
/**
* Flushes buffers of specified file to disk.
diff --git a/libc/intrin/flushviewoffile.greg.c b/libc/intrin/flushviewoffile.greg.c
index 82156af73..c90111e94 100644
--- a/libc/intrin/flushviewoffile.greg.c
+++ b/libc/intrin/flushviewoffile.greg.c
@@ -20,7 +20,7 @@
#include "libc/calls/strace.internal.h"
#include "libc/nt/memory.h"
-extern typeof(FlushViewOfFile) *const __imp_FlushViewOfFile __msabi;
+__msabi extern typeof(FlushViewOfFile) *const __imp_FlushViewOfFile;
/**
* Syncs memory created by MapViewOfFileEx().
diff --git a/libc/intrin/generateconsolectrlevent.greg.c b/libc/intrin/generateconsolectrlevent.greg.c
index 11b1861bf..de4b21306 100644
--- a/libc/intrin/generateconsolectrlevent.greg.c
+++ b/libc/intrin/generateconsolectrlevent.greg.c
@@ -21,8 +21,8 @@
#include "libc/nt/console.h"
#include "libc/nt/thunk/msabi.h"
-extern typeof(GenerateConsoleCtrlEvent) *const
- __imp_GenerateConsoleCtrlEvent __msabi;
+__msabi extern typeof(GenerateConsoleCtrlEvent) *const
+ __imp_GenerateConsoleCtrlEvent;
/**
* Sends signal to process group that shares console w/ calling process.
diff --git a/libc/intrin/getfileattributes.greg.c b/libc/intrin/getfileattributes.greg.c
index 77e5284f6..8bf406a31 100644
--- a/libc/intrin/getfileattributes.greg.c
+++ b/libc/intrin/getfileattributes.greg.c
@@ -23,7 +23,7 @@
#include "libc/nt/files.h"
#include "libc/nt/thunk/msabi.h"
-extern typeof(GetFileAttributes) *const __imp_GetFileAttributesW __msabi;
+__msabi extern typeof(GetFileAttributes) *const __imp_GetFileAttributesW;
/**
* Gets file info on the New Technology.
diff --git a/libc/intrin/intrin.mk b/libc/intrin/intrin.mk
index 7a39d1ed9..502d6a9ff 100644
--- a/libc/intrin/intrin.mk
+++ b/libc/intrin/intrin.mk
@@ -65,6 +65,7 @@ o/$(MODE)/libc/intrin/kprintf.greg.o: \
$(NO_MAGIC)
o/$(MODE)/libc/intrin/createfile.greg.o \
+o/$(MODE)/libc/intrin/reopenfile.greg.o \
o/$(MODE)/libc/intrin/deletefile.greg.o \
o/$(MODE)/libc/intrin/createpipe.greg.o \
o/$(MODE)/libc/intrin/closehandle.greg.o \
diff --git a/libc/intrin/kdos2errno.S b/libc/intrin/kdos2errno.S
index ae0e6a393..4fb064a93 100644
--- a/libc/intrin/kdos2errno.S
+++ b/libc/intrin/kdos2errno.S
@@ -29,6 +29,71 @@
.section .rodata
.underrun
kDos2Errno:
+// .e kNtErrorInvalidFunction,ENOSYS # in consts.sh
+// .e kNtErrorFileNotFound,ENOENT # in consts.sh
+// .e kNtErrorPathNotFound,ENOTDIR # in consts.sh
+// .e kNtErrorTooManyOpenFiles,EMFILE # in consts.sh
+// .e kNtErrorAccessDenied,EACCES # in consts.sh
+// .e kNtErrorInvalidHandle,EBADF # in consts.sh
+// .e kNtErrorInvalidAccess,EPERM # in consts.sh
+// .e kNtErrorSeek,ESPIPE # in consts.sh
+// .e kNtErrorNotDosDisk,ENOTBLK # in consts.sh
+// .e kNtErrorFileExists,EEXIST # in consts.sh
+// .e kNtErrorInvalidParameter,EINVAL # in consts.sh
+// .e kNtErrorOutofmemory,ENOMEM # in consts.sh
+// .e kNtErrorBrokenPipe,EPIPE # in consts.sh
+// .e kNtErrorWaitNoChildren,ECHILD # in consts.sh
+// .e kNtErrorPathBusy,ETXTBSY # in consts.sh
+// .e kNtErrorBusy,EBUSY # in consts.sh
+// .e kNtErrorAlreadyExists,EEXIST # in consts.sh
+// .e kNtErrorBadExeFormat,ENOEXEC # in consts.sh
+// .e kNtErrorFileTooLarge,EFBIG # in consts.sh
+// .e kNtErrorTooManyDescriptors,ENFILE # in consts.sh
+// .e kNtErrorDirectoryNotSupported,EISDIR # in consts.sh
+// .e kNtErrorInvalidAddress,EFAULT # in consts.sh
+// .e kNtErrorThreadNotInProcess,ESRCH # in consts.sh
+// .e kNtErrorNoMediaInDrive,ENXIO # in consts.sh
+// .e kNtErrorIoDevice,EIO # in consts.sh
+// .e kNtErrorSerialNoDevice,ENOTTY # in consts.sh
+// .e kNtErrorPossibleDeadlock,EDEADLK # in consts.sh
+// .e kNtErrorBadDevice,ENODEV # in consts.sh
+// .e kNtErrorInvalidCommandLine,E2BIG # in consts.sh
+// .e kNtErrorFileReadOnly,EROFS # in consts.sh
+// .e kNtErrorNoData,ENODATA # in consts.sh
+// .e WSAEPROCLIM,EPROCLIM # in consts.sh
+// .e WSAESHUTDOWN,ESHUTDOWN # in consts.sh
+// .e WSAEINPROGRESS,EINPROGRESS # in consts.sh
+// .e WSAENETDOWN,ENETDOWN # in consts.sh
+// .e WSAENETUNREACH,ENETUNREACH # in consts.sh
+// .e WSAENETRESET,ENETRESET # in consts.sh
+// .e WSAEUSERS,EUSERS # in consts.sh
+// .e WSAENOTSOCK,ENOTSOCK # in consts.sh
+// .e WSAEDESTADDRREQ,EDESTADDRREQ # in consts.sh
+// .e WSAEMSGSIZE,EMSGSIZE # in consts.sh
+// .e WSAEPROTOTYPE,EPROTOTYPE # in consts.sh
+// .e WSAENOPROTOOPT,ENOPROTOOPT # in consts.sh
+// .e WSAEPROTONOSUPPORT,EPROTONOSUPPORT # in consts.sh
+// .e WSAESOCKTNOSUPPORT,ESOCKTNOSUPPORT # in consts.sh
+// .e WSAEOPNOTSUPP,ENOTSUP # in consts.sh
+// .e WSAEOPNOTSUPP,EOPNOTSUPP # in consts.sh
+// .e WSAEPFNOSUPPORT,EPFNOSUPPORT # in consts.sh
+// .e WSAEAFNOSUPPORT,EAFNOSUPPORT # in consts.sh
+// .e WSAEADDRINUSE,EADDRINUSE # in consts.sh
+// .e WSAEADDRNOTAVAIL,EADDRNOTAVAIL # in consts.sh
+// .e WSAECONNABORTED,ECONNABORTED # in consts.sh
+// .e WSAECONNRESET,ECONNRESET # in consts.sh
+// .e WSAENOBUFS,ENOBUFS # in consts.sh
+// .e WSAEISCONN,EISCONN # in consts.sh
+// .e WSAENOTCONN,ENOTCONN # in consts.sh
+// .e WSAESHUTDOWN,ESHUTDOWN # in consts.sh
+// .e WSAETOOMANYREFS,ETOOMANYREFS # in consts.sh
+// .e WSAETIMEDOUT,ETIMEDOUT # in consts.sh
+// .e WSAECONNREFUSED,ECONNREFUSED # in consts.sh
+// .e WSAEHOSTDOWN,EHOSTDOWN # in consts.sh
+// .e WSAEHOSTUNREACH,EHOSTUNREACH # in consts.sh
+// .e WSAEALREADY,EALREADY # in consts.sh
+// .e WSAESTALE,ESTALE # in consts.sh
+// .e WSAEREMOTE,EREMOTE # in consts.sh
.e kNtErrorModNotFound,ENOSYS
.e kNtErrorBadCommand,EACCES
.e kNtErrorBadLength,EACCES
@@ -94,10 +159,8 @@ kDos2Errno:
.e WSAEACCES,EACCES
.e WSAEDISCON,EPIPE
.e WSAEFAULT,EFAULT
- .e WSAEINPROGRESS,EBUSY
.e WSAEINVAL,EINVAL
.e WSAEPROCLIM,ENOMEM
- .e WSAESHUTDOWN,EPIPE
.e WSANOTINITIALISED,ENETDOWN
.e WSASYSNOTREADY,ENETDOWN
.e WSAVERNOTSUPPORTED,ENOSYS
diff --git a/libc/calls/zygote.c b/libc/intrin/kntisinheritable.greg.c
similarity index 95%
rename from libc/calls/zygote.c
rename to libc/intrin/kntisinheritable.greg.c
index 10c0a7872..e4c19ceb6 100644
--- a/libc/calls/zygote.c
+++ b/libc/intrin/kntisinheritable.greg.c
@@ -1,7 +1,7 @@
/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
│vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│
╞══════════════════════════════════════════════════════════════════════════════╡
-│ Copyright 2020 Justine Alexandra Roberts Tunney │
+│ Copyright 2022 Justine Alexandra Roberts Tunney │
│ │
│ Permission to use, copy, modify, and/or distribute this software for │
│ any purpose with or without fee is hereby granted, provided that the │
@@ -16,7 +16,6 @@
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
│ PERFORMANCE OF THIS SOFTWARE. │
╚─────────────────────────────────────────────────────────────────────────────*/
-#include "libc/calls/internal.h"
#include "libc/nt/struct/securityattributes.h"
hidden const struct NtSecurityAttributes kNtIsInheritable = {
diff --git a/libc/intrin/kprintf.greg.c b/libc/intrin/kprintf.greg.c
index a41263249..634dcd7e8 100644
--- a/libc/intrin/kprintf.greg.c
+++ b/libc/intrin/kprintf.greg.c
@@ -37,6 +37,7 @@
#include "libc/nt/process.h"
#include "libc/nt/runtime.h"
#include "libc/nt/thunk/msabi.h"
+#include "libc/nt/winsock.h"
#include "libc/runtime/memtrack.internal.h"
#include "libc/runtime/runtime.h"
#include "libc/str/str.h"
@@ -438,8 +439,7 @@ privileged static size_t kformat(char *b, size_t n, const char *fmt, va_list va,
i = 0;
m = (1 << base) - 1;
if (hash && x) sign = hash;
- do
- z[i++ & 127] = abet[x & m];
+ do z[i++ & 127] = abet[x & m];
while ((x >>= base) || (pdot && i < prec));
goto EmitNumber;
@@ -487,17 +487,31 @@ privileged static size_t kformat(char *b, size_t n, const char *fmt, va_list va,
}
goto EmitChar;
- case 'm':
- if (!(x = errno) && sign == ' ' /* && */
- /* (!IsWindows() || !__imp_GetLastError()) */) {
+ case 'm': {
+ int unixerr;
+ uint32_t winerr;
+ unixerr = errno;
+ winerr = 0;
+ if (IsWindows()) {
+ if (type == 1 && weaken(WSAGetLastError)) {
+ winerr = weaken(WSAGetLastError)();
+ } else if (weaken(GetLastError)) {
+ winerr = weaken(GetLastError)();
+ }
+ }
+ if (!unixerr && sign == ' ') {
break;
- } else if (weaken(strerror_r) &&
- !weaken(strerror_r)(x, z, sizeof(z))) {
+ } else if (weaken(strerror_wr) &&
+ !weaken(strerror_wr)(unixerr, winerr, z, sizeof(z))) {
s = z;
+ type = 0;
goto FormatString;
} else {
+ type = 0;
+ x = unixerr;
goto FormatDecimal;
}
+ }
case 'G':
x = va_arg(va, int);
@@ -864,6 +878,13 @@ privileged void kvprintf(const char *fmt, va_list v) {
* - ` ` space leftpad if positive (aligns w/ negatives)
* - `#` represent value with literal syntax, e.g. 0x, 0b, quotes
*
+ * Error numbers:
+ *
+ * - `%m` formats error (if strerror_wr if is linked)
+ * - `%m` formats errno number (if strerror_wr isn't linked)
+ * - `% m` formats error with leading space if errno isn't zero
+ * - `%lm` means favor WSAGetLastError() over GetLastError() if linked
+ *
* @asyncsignalsafe
* @vforksafe
*/
diff --git a/libc/intrin/mapviewoffileex.greg.c b/libc/intrin/mapviewoffileex.greg.c
index e2affeffe..b4acc0f87 100644
--- a/libc/intrin/mapviewoffileex.greg.c
+++ b/libc/intrin/mapviewoffileex.greg.c
@@ -23,7 +23,7 @@
#include "libc/nt/enum/filemapflags.h"
#include "libc/nt/memory.h"
-extern typeof(MapViewOfFileEx) *const __imp_MapViewOfFileEx __msabi;
+__msabi extern typeof(MapViewOfFileEx) *const __imp_MapViewOfFileEx;
/**
* Maps view of file mapping into memory on the New Technology.
@@ -45,8 +45,8 @@ textwindows void *MapViewOfFileEx(int64_t hFileMappingObject,
hFileMappingObject, dwDesiredAccess, dwFileOffsetHigh, dwFileOffsetLow,
dwNumberOfBytesToMap, opt_lpDesiredBaseAddress);
if (!pStartingAddress) __winerr();
- STRACE("MapViewOfFileEx(%ld, %s, off:%'ld, size:%'zu, addr:%p) → %p% m",
- hFileMappingObject, DescribeNtFileMapFlags(dwDesiredAccess),
+ STRACE("MapViewOfFileEx(%ld, %s, %'ld, %'zu, %p) → %p% m", hFileMappingObject,
+ DescribeNtFileMapFlags(dwDesiredAccess),
(uint64_t)dwFileOffsetHigh << 32 | dwFileOffsetLow,
dwNumberOfBytesToMap, opt_lpDesiredBaseAddress, pStartingAddress);
return pStartingAddress;
diff --git a/libc/intrin/mapviewoffileexnuma.greg.c b/libc/intrin/mapviewoffileexnuma.greg.c
index b9bc50828..2ee0b2813 100644
--- a/libc/intrin/mapviewoffileexnuma.greg.c
+++ b/libc/intrin/mapviewoffileexnuma.greg.c
@@ -24,7 +24,7 @@
#include "libc/nt/enum/filemapflags.h"
#include "libc/nt/memory.h"
-extern typeof(MapViewOfFileExNuma) *const __imp_MapViewOfFileExNuma __msabi;
+__msabi extern typeof(MapViewOfFileExNuma) *const __imp_MapViewOfFileExNuma;
/**
* Maps view of file mapping into memory on the New Technology.
@@ -47,7 +47,7 @@ textwindows void *MapViewOfFileExNuma(int64_t hFileMappingObject,
hFileMappingObject, dwDesiredAccess, dwFileOffsetHigh, dwFileOffsetLow,
dwNumberOfBytesToMap, opt_lpDesiredBaseAddress, nndDesiredNumaNode);
if (!pStartingAddress) __winerr();
- STRACE("MapViewOfFileExNuma(%ld, %s, off:%'ld, size:%'zu, %p) → %p% m",
+ STRACE("MapViewOfFileExNuma(%ld, %s, %'ld, %'zu, %p) → %p% m",
hFileMappingObject, DescribeNtFileMapFlags(dwDesiredAccess),
(uint64_t)dwFileOffsetHigh << 32 | dwFileOffsetLow,
dwNumberOfBytesToMap, opt_lpDesiredBaseAddress, pStartingAddress);
diff --git a/libc/intrin/openprocess.greg.c b/libc/intrin/openprocess.greg.c
index 7326fc186..b38b03b5a 100644
--- a/libc/intrin/openprocess.greg.c
+++ b/libc/intrin/openprocess.greg.c
@@ -24,7 +24,7 @@
#include "libc/nt/struct/securityattributes.h"
#include "libc/nt/thunk/msabi.h"
-extern typeof(OpenProcess) *const __imp_OpenProcess __msabi;
+__msabi extern typeof(OpenProcess) *const __imp_OpenProcess;
/**
* Creates file mapping object on the New Technology.
diff --git a/libc/intrin/removedirectory.greg.c b/libc/intrin/removedirectory.greg.c
index e31134519..fdd9a6211 100644
--- a/libc/intrin/removedirectory.greg.c
+++ b/libc/intrin/removedirectory.greg.c
@@ -22,7 +22,7 @@
#include "libc/nt/memory.h"
#include "libc/nt/thunk/msabi.h"
-extern typeof(RemoveDirectory) *const __imp_RemoveDirectoryW __msabi;
+__msabi extern typeof(RemoveDirectory) *const __imp_RemoveDirectoryW;
/**
* Deletes existing empty directory.
diff --git a/libc/calls/ensurefds.c b/libc/intrin/reopenfile.greg.c
similarity index 62%
rename from libc/calls/ensurefds.c
rename to libc/intrin/reopenfile.greg.c
index a089c18d1..1462affc9 100644
--- a/libc/calls/ensurefds.c
+++ b/libc/intrin/reopenfile.greg.c
@@ -1,7 +1,7 @@
/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
│vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│
╞══════════════════════════════════════════════════════════════════════════════╡
-│ Copyright 2020 Justine Alexandra Roberts Tunney │
+│ Copyright 2022 Justine Alexandra Roberts Tunney │
│ │
│ Permission to use, copy, modify, and/or distribute this software for │
│ any purpose with or without fee is hereby granted, provided that the │
@@ -16,48 +16,29 @@
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
│ PERFORMANCE OF THIS SOFTWARE. │
╚─────────────────────────────────────────────────────────────────────────────*/
-#include "libc/assert.h"
-#include "libc/bits/weaken.h"
#include "libc/calls/internal.h"
-#include "libc/intrin/cmpxchg.h"
-#include "libc/mem/mem.h"
-#include "libc/str/str.h"
-#include "libc/sysv/errfuns.h"
+#include "libc/calls/strace.internal.h"
+#include "libc/intrin/describeflags.internal.h"
+#include "libc/nt/files.h"
+#include "libc/nt/thunk/msabi.h"
-static void __ensurefds_destroy(void) {
- weaken(free)(g_fds.p);
-}
+__msabi extern typeof(ReOpenFile) *const __imp_ReOpenFile;
-int __ensurefds(int fd) {
- size_t n1, n2;
- struct Fd *p1, *p2;
- for (;;) {
- p1 = g_fds.p;
- n1 = g_fds.n;
- if (fd < n1) return fd;
- if (weaken(malloc)) {
- n2 = MAX(fd + 1, n1 + (n1 << 1));
- if ((p2 = weaken(malloc)(n2 * sizeof(*p1)))) {
- memcpy(p2, p1, n1 * sizeof(*p1));
- bzero(p2 + n1, (n2 - n1) * sizeof(*p1));
- if (_cmpxchg(&g_fds.p, p1, p2)) {
- g_fds.n = n2;
- if (weaken(free)) {
- if (p1 == g_fds.__init_p) {
- atexit(__ensurefds_destroy);
- } else {
- weaken(free)(p1);
- }
- }
- return fd;
- } else if (weaken(free)) {
- weaken(free)(p2);
- }
- } else {
- return enomem();
- }
- } else {
- return emfile();
- }
- }
+/**
+ * Reopens file on the New Technology.
+ *
+ * @return handle, or -1 on failure
+ * @note this wrapper takes care of ABI, STRACE(), and __winerr()
+ */
+int64_t ReOpenFile(int64_t hOriginalFile, uint32_t dwDesiredAccess,
+ uint32_t dwShareMode, uint32_t dwFlagsAndAttributes) {
+ int64_t hHandle;
+ hHandle = __imp_ReOpenFile(hOriginalFile, dwDesiredAccess, dwShareMode,
+ dwFlagsAndAttributes);
+ if (hHandle == -1) __winerr();
+ STRACE("ReOpenFile(%ld, %s, %s, %s) → %ld% m", hOriginalFile,
+ DescribeNtFileAccessFlags(dwDesiredAccess),
+ DescribeNtFileShareFlags(dwShareMode),
+ DescribeNtFileFlagsAndAttributes(dwFlagsAndAttributes), hHandle);
+ return hHandle;
}
diff --git a/libc/intrin/restorewintty.greg.c b/libc/intrin/restorewintty.greg.c
index 456562e60..9c9b68ad8 100644
--- a/libc/intrin/restorewintty.greg.c
+++ b/libc/intrin/restorewintty.greg.c
@@ -16,6 +16,7 @@
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
│ PERFORMANCE OF THIS SOFTWARE. │
╚─────────────────────────────────────────────────────────────────────────────*/
+#include "libc/calls/strace.internal.h"
#include "libc/dce.h"
#include "libc/nt/console.h"
#include "libc/nt/process.h"
@@ -35,7 +36,9 @@ const char kConsoleHandles[3] = {
*/
noasan void __restorewintty(void) {
int i;
- if (IsWindows() && GetCurrentProcessId() == __winmainpid) {
+ if (!IsWindows()) return;
+ STRACE("__restorewintty()");
+ if (GetCurrentProcessId() == __winmainpid) {
for (i = 0; i < 3; ++i) {
SetConsoleMode(GetStdHandle(kConsoleHandles[i]), __ntconsolemode[i]);
}
diff --git a/libc/intrin/setcurrentdirectory.greg.c b/libc/intrin/setcurrentdirectory.greg.c
index 8f0595ba1..92d6107d1 100644
--- a/libc/intrin/setcurrentdirectory.greg.c
+++ b/libc/intrin/setcurrentdirectory.greg.c
@@ -22,7 +22,7 @@
#include "libc/nt/memory.h"
#include "libc/nt/thunk/msabi.h"
-extern typeof(SetCurrentDirectory) *const __imp_SetCurrentDirectoryW __msabi;
+__msabi extern typeof(SetCurrentDirectory) *const __imp_SetCurrentDirectoryW;
/**
* Sets current directory.
diff --git a/libc/intrin/spinlock.h b/libc/intrin/spinlock.h
index 56ce593c7..bcba6b5fd 100644
--- a/libc/intrin/spinlock.h
+++ b/libc/intrin/spinlock.h
@@ -1,22 +1,24 @@
#ifndef COSMOPOLITAN_LIBC_INTRIN_SPINLOCK_H_
#define COSMOPOLITAN_LIBC_INTRIN_SPINLOCK_H_
#if !(__ASSEMBLER__ + __LINKER__ + 0)
-COSMOPOLITAN_C_START_
+#if (__GNUC__ + 0) * 100 + (__GNUC_MINOR__ + 0) >= 401 && \
+ !defined(__STRICT_ANSI__)
-/* "Place each synchronization variable alone,
- separated by 128 bytes or in a separate cache line."
- ──Intel Optimization Manual §8.3.1 */
-struct cthread_spinlock_t {
- bool x;
- int owner;
- char __ignore[128 - 1 - 4];
-} forcealign(128);
+#define _spinlock(lock) \
+ do { \
+ for (;;) { \
+ typeof(*(lock)) x; \
+ __atomic_load(lock, &x, __ATOMIC_RELAXED); \
+ if (!x && !__sync_lock_test_and_set(lock, __ATOMIC_CONSUME)) { \
+ break; \
+ } else { \
+ __builtin_ia32_pause(); \
+ } \
+ } \
+ } while (0)
-typedef struct cthread_spinlock_t cthread_spinlock_t;
+#define _spunlock(lock) __sync_lock_release(lock)
-void cthread_spinlock(cthread_spinlock_t *) dontthrow;
-void cthread_spunlock(cthread_spinlock_t *) dontthrow;
-
-COSMOPOLITAN_C_END_
+#endif /* GNU 4.1+ */
#endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */
#endif /* COSMOPOLITAN_LIBC_INTRIN_SPINLOCK_H_ */
diff --git a/libc/intrin/terminateprocess.greg.c b/libc/intrin/terminateprocess.greg.c
index 258067779..e19c0464a 100644
--- a/libc/intrin/terminateprocess.greg.c
+++ b/libc/intrin/terminateprocess.greg.c
@@ -22,7 +22,7 @@
#include "libc/nt/runtime.h"
#include "libc/nt/thunk/msabi.h"
-extern typeof(TerminateProcess) *const __imp_TerminateProcess __msabi;
+__msabi extern typeof(TerminateProcess) *const __imp_TerminateProcess;
/**
* Terminates the specified process and all of its threads.
diff --git a/libc/intrin/unmapviewoffile.greg.c b/libc/intrin/unmapviewoffile.greg.c
index 210be00e7..33da0e10a 100644
--- a/libc/intrin/unmapviewoffile.greg.c
+++ b/libc/intrin/unmapviewoffile.greg.c
@@ -20,7 +20,7 @@
#include "libc/calls/strace.internal.h"
#include "libc/nt/memory.h"
-extern typeof(UnmapViewOfFile) *const __imp_UnmapViewOfFile __msabi;
+__msabi extern typeof(UnmapViewOfFile) *const __imp_UnmapViewOfFile;
/**
* Unmaps memory created by MapViewOfFileEx().
diff --git a/libc/intrin/virtualprotect.greg.c b/libc/intrin/virtualprotect.greg.c
index fc9cdd7f1..3f3529e74 100644
--- a/libc/intrin/virtualprotect.greg.c
+++ b/libc/intrin/virtualprotect.greg.c
@@ -22,7 +22,7 @@
#include "libc/log/libfatal.internal.h"
#include "libc/nt/memory.h"
-extern typeof(VirtualProtect) *const __imp_VirtualProtect __msabi;
+__msabi extern typeof(VirtualProtect) *const __imp_VirtualProtect;
/**
* Protects memory on the New Technology.
diff --git a/libc/log/backtrace2.c b/libc/log/backtrace2.c
index 675e3562e..0fea1c979 100644
--- a/libc/log/backtrace2.c
+++ b/libc/log/backtrace2.c
@@ -105,7 +105,7 @@ static int PrintBacktraceUsingAddr2line(int fd, const struct StackFrame *bp) {
argv[i++] = buf + j;
buf[j++] = '0';
buf[j++] = 'x';
- j += uint64toarray_radix16(addr - 1, buf + j) + 1;
+ j += uint64toarray_radix16(addr, buf + j) + 1;
}
argv[i++] = NULL;
sigemptyset(&chldmask);
diff --git a/libc/log/checkfail.c b/libc/log/checkfail.c
index 7d5d38f04..9709215d9 100644
--- a/libc/log/checkfail.c
+++ b/libc/log/checkfail.c
@@ -18,6 +18,7 @@
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/bits/safemacros.internal.h"
#include "libc/calls/calls.h"
+#include "libc/calls/strace.internal.h"
#include "libc/errno.h"
#include "libc/fmt/fmt.h"
#include "libc/log/check.h"
@@ -26,6 +27,7 @@
#include "libc/log/libfatal.internal.h"
#include "libc/log/log.h"
#include "libc/runtime/memtrack.internal.h"
+#include "libc/runtime/runtime.h"
/**
* Handles failure of CHECK_xx() macros.
@@ -39,6 +41,8 @@ relegated void __check_fail(const char *suffix, const char *opstr,
size_t i;
va_list va;
char hostname[32];
+ __strace = 0;
+ g_ftrace = 0;
e = errno;
p = __fatalbuf;
__start_fatal(file, line);
diff --git a/libc/log/malloc_stats.c b/libc/log/malloc_stats.c
index f57f049df..4a5612e99 100644
--- a/libc/log/malloc_stats.c
+++ b/libc/log/malloc_stats.c
@@ -17,12 +17,8 @@
│ PERFORMANCE OF THIS SOFTWARE. │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/mem/mem.h"
-#include "libc/stdio/stdio.h"
-#include "third_party/dlmalloc/dlmalloc.internal.h"
+#include "third_party/dlmalloc/dlmalloc.h"
void malloc_stats(void) {
- struct MallocStats res = dlmalloc_stats(g_dlmalloc);
- (fprintf)(stderr, "max system bytes = %'10zu\n", res.maxfp);
- (fprintf)(stderr, "system bytes = %'10zu\n", res.fp);
- (fprintf)(stderr, "in use bytes = %'10zu\n", res.used);
+ dlmalloc_stats();
}
diff --git a/libc/log/oncrash.c b/libc/log/oncrash.c
index 94e364b02..8a8d1e8f6 100644
--- a/libc/log/oncrash.c
+++ b/libc/log/oncrash.c
@@ -40,8 +40,8 @@
* @see libc/onkill.c
*/
-STATIC_YOINK("strerror_r"); /* for kprintf %m */
-STATIC_YOINK("strsignal"); /* for kprintf %G */
+STATIC_YOINK("strerror_wr"); /* for kprintf %m */
+STATIC_YOINK("strsignal"); /* for kprintf %G */
static const char kGregOrder[17] forcealign(1) = {
13, 11, 8, 14, 12, 9, 10, 15, 16, 0, 1, 2, 3, 4, 5, 6, 7,
diff --git a/test/libc/runtime/mremap_test.c b/libc/mem/mallinfo.c
similarity index 79%
rename from test/libc/runtime/mremap_test.c
rename to libc/mem/mallinfo.c
index 95425bd85..9259f8ff6 100644
--- a/test/libc/runtime/mremap_test.c
+++ b/libc/mem/mallinfo.c
@@ -1,7 +1,7 @@
/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
│vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│
╞══════════════════════════════════════════════════════════════════════════════╡
-│ Copyright 2021 Justine Alexandra Roberts Tunney │
+│ Copyright 2022 Justine Alexandra Roberts Tunney │
│ │
│ Permission to use, copy, modify, and/or distribute this software for │
│ any purpose with or without fee is hereby granted, provided that the │
@@ -17,17 +17,8 @@
│ PERFORMANCE OF THIS SOFTWARE. │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/mem/mem.h"
-#include "libc/runtime/gc.internal.h"
-#include "libc/runtime/runtime.h"
-#include "libc/testlib/testlib.h"
-#include "third_party/dlmalloc/dlmalloc.internal.h"
+#include "third_party/dlmalloc/dlmalloc.h"
-TEST(mremap, testMalloc) {
- int i;
- char *a, *b, *c, *d;
- ASSERT_NE(NULL, a = malloc(DEFAULT_MMAP_THRESHOLD));
- ASSERT_NE(NULL, b = mapanon(FRAMESIZE));
- ASSERT_NE(NULL, a = realloc(a, DEFAULT_MMAP_THRESHOLD * 2));
- munmap(b, FRAMESIZE);
- free(a);
+struct mallinfo mallinfo(void) {
+ return dlmallinfo();
}
diff --git a/libc/mem/malloc_inspect_all.c b/libc/mem/malloc_inspect_all.c
new file mode 100644
index 000000000..b7620023a
--- /dev/null
+++ b/libc/mem/malloc_inspect_all.c
@@ -0,0 +1,26 @@
+/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
+│vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│
+╞══════════════════════════════════════════════════════════════════════════════╡
+│ Copyright 2022 Justine Alexandra Roberts Tunney │
+│ │
+│ Permission to use, copy, modify, and/or distribute this software for │
+│ any purpose with or without fee is hereby granted, provided that the │
+│ above copyright notice and this permission notice appear in all copies. │
+│ │
+│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │
+│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │
+│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │
+│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │
+│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │
+│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │
+│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
+│ PERFORMANCE OF THIS SOFTWARE. │
+╚─────────────────────────────────────────────────────────────────────────────*/
+#include "libc/mem/mem.h"
+#include "third_party/dlmalloc/dlmalloc.h"
+
+void malloc_inspect_all(void (*handler)(void* start, void* end,
+ size_t used_bytes, void* callback_arg),
+ void* arg) {
+ dlmalloc_inspect_all(handler, arg);
+}
diff --git a/libc/mem/mem.h b/libc/mem/mem.h
index 613702a22..4535f6eb9 100644
--- a/libc/mem/mem.h
+++ b/libc/mem/mem.h
@@ -48,6 +48,7 @@ struct mallinfo {
size_t fordblks; /* total free space */
size_t keepcost; /* releasable (via malloc_trim) space */
};
+
struct mallinfo mallinfo(void);
void malloc_stats(void);
diff --git a/libc/nt/createfile.h b/libc/nt/createfile.h
index 98b7d11e1..9030fb3c4 100644
--- a/libc/nt/createfile.h
+++ b/libc/nt/createfile.h
@@ -18,6 +18,9 @@ int64_t CreateFileA(
uint32_t dwFlagsAndAttributes, /* libc/nt/enum/fileflagandattributes.h */
int64_t opt_hTemplateFile) paramsnonnull((1));
+int GetNtOpenFlags(int flags, int mode, uint32_t *out_perm, uint32_t *out_share,
+ uint32_t *out_disp, uint32_t *out_attr);
+
COSMOPOLITAN_C_END_
#endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */
#endif /* COSMOPOLITAN_LIBC_NT_CREATEFILE_H_ */
diff --git a/libc/nt/enum/sio.h b/libc/nt/enum/sio.h
index 9993718ba..b8984aeeb 100644
--- a/libc/nt/enum/sio.h
+++ b/libc/nt/enum/sio.h
@@ -1,7 +1,68 @@
#ifndef COSMOPOLITAN_LIBC_NT_ENUM_SIO_H_
#define COSMOPOLITAN_LIBC_NT_ENUM_SIO_H_
-#define kNtSioBspHandlePoll 0x4800001D
-#define kNtSioBaseHandle 0x48000022
+#define kNtSioAbsorbRtralert 0x98000005u
+#define kNtSioAcquirePortReservation 0x98000064u
+#define kNtSioAddressListChange 0x28000017u
+#define kNtSioAddressListQuery 0x48000016u
+#define kNtSioAddressListSort 0xc8000019u
+#define kNtSioApplyTransportSetting 0x98000013u
+#define kNtSioAssociateHandle 0x88000001u
+#define kNtSioAssociatePortReservation 0x98000066u
+#define kNtSioBaseHandle 0x48000022u
+#define kNtSioBspHandlePoll 0x4800001Du
+#define kNtSioDeletePeerTargetName 0x980000cbu
+#define kNtSioEnableCircularQueueing 0x28000002u
+#define kNtSioFindRoute 0x48000003u
+#define kNtSioFlush 0x28000004u
+#define kNtSioGetBroadcastAddress 0x48000005u
+#define kNtSioGetExtensionFunctionPointer 0xc8000006u
+#define kNtSioGetGroupQos 0xc8000008u
+#define kNtSioGetInterfaceList 0x4008747fu
+#define kNtSioGetMultipleExtensionFunctionPointer 0xc8000024u
+#define kNtSioGetQos 0xc8000007u
+#define kNtSioIndexAddMcast 0x9800000au
+#define kNtSioIndexBind 0x98000008u
+#define kNtSioIndexDelMcast 0x9800000bu
+#define kNtSioIndexMcastif 0x98000009u
+#define kNtSioKeepaliveVals 0x98000004u
+#define kNtSioLimitBroadcasts 0x98000007u
+#define kNtSioLoopbackFastPath 0x98000010u
+#define kNtSioMulticastScope 0x8800000au
+#define kNtSioMultipointLoopback 0x88000009u
+#define kNtSioQueryRssProcessorInfo 0x48000025u
+#define kNtSioQueryRssScalabilityInfo 0x580000d2u
+#define kNtSioQuerySecurity 0xd80000c9u
+#define kNtSioQueryTargetPnpHandle 0x48000018u
+#define kNtSioQueryTransportSetting 0x98000014u
+#define kNtSioQueryWfpAleEndpointHandle 0x580000cdu
+#define kNtSioQueryWfpConnectionRedirectContext 0x980000ddu
+#define kNtSioQueryWfpConnectionRedirectRecords 0x980000dcu
+#define kNtSioRcvall 0x98000001u
+#define kNtSioRcvallIf 0x9800000eu
+#define kNtSioRcvallIgmpmcast 0x98000003u
+#define kNtSioRcvallMcast 0x98000002u
+#define kNtSioRcvallMcastIf 0x9800000du
+#define kNtSioReleasePortReservation 0x98000065u
+#define kNtSioReserved1 0x8800001au
+#define kNtSioReserved2 0x88000021u
+#define kNtSioRoutingInterfaceChange 0x88000015u
+#define kNtSioRoutingInterfaceQuery 0xc8000014u
+#define kNtSioSetGroupQos 0x8800000cu
+#define kNtSioSetPeerTargetName 0x980000cau
+#define kNtSioSetPriorityHint 0x98000018u
+#define kNtSioSetQos 0x8800000bu
+#define kNtSioSetSecurity 0x980000c8u
+#define kNtSioSetWfpConnectionRedirectRecords 0x980000deu
+#define kNtSioSocketCloseNotify 0x9800000du
+#define kNtSioSocketUsageNotification 0x980000ccu
+#define kNtSioTcpInfo 0xd8000027u
+#define kNtSioTcpInitialRto 0x98000011u
+#define kNtSioTcpSetAckFrequency 0x98000017u
+#define kNtSioTcpSetIcw 0x98000016u
+#define kNtSioTranslateHandle 0xc800000du
+#define kNtSioUcastIf 0x98000006u
+#define kNtSioUdpConnreset 0x9800000cu
+#define kNtSioUdpNetreset 0x9800000fu
#endif /* COSMOPOLITAN_LIBC_NT_ENUM_SIO_H_ */
diff --git a/libc/nt/enum/wsa.h b/libc/nt/enum/wsa.h
new file mode 100644
index 000000000..869cbc966
--- /dev/null
+++ b/libc/nt/enum/wsa.h
@@ -0,0 +1,12 @@
+#ifndef COSMOPOLITAN_LIBC_NT_ENUM_WSA_H_
+#define COSMOPOLITAN_LIBC_NT_ENUM_WSA_H_
+#include "libc/nt/errors.h"
+
+#define kNtWsaInvalidHandle kNtErrorInvalidHandle
+#define kNtWsaNotEnoughMemory kNtErrorNotEnoughMemory
+#define kNtWsaInvalidParameter kNtErrorInvalidParameter
+#define kNtWsaIoPending kNtErrorIoPending
+#define kNtWsaIoIncomplete kNtErrorIoIncomplete
+#define kNtWsaOperationAborted kNtErrorOperationAborted
+
+#endif /* COSMOPOLITAN_LIBC_NT_ENUM_WSA_H_ */
diff --git a/libc/nt/enum/wsaid.h b/libc/nt/enum/wsaid.h
new file mode 100644
index 000000000..4fda23aa2
--- /dev/null
+++ b/libc/nt/enum/wsaid.h
@@ -0,0 +1,71 @@
+#ifndef COSMOPOLITAN_LIBC_NT_ENUM_WSAID_H_
+#define COSMOPOLITAN_LIBC_NT_ENUM_WSAID_H_
+#if !(__ASSEMBLER__ + __LINKER__ + 0)
+COSMOPOLITAN_C_START_
+
+#define WSAID_WSAPOLL \
+ { \
+ 0x18C76F85, 0xDC66, 0x4964, { \
+ 0x97, 0x2E, 0x23, 0xC2, 0x72, 0x38, 0x31, 0x2B \
+ } \
+ }
+
+#define WSAID_WSARECVMSG \
+ { \
+ 0xf689d7c8, 0x6f1f, 0x436b, { \
+ 0x8a, 0x53, 0xe5, 0x4f, 0xe3, 0x51, 0xc3, 0x22 \
+ } \
+ }
+
+#define WSAID_WSASENDMSG \
+ { \
+ 0xa441e712, 0x754f, 0x43ca, { \
+ 0x84, 0xa7, 0x0d, 0xee, 0x44, 0xcf, 0x60, 0x6d \
+ } \
+ }
+
+#define WSAID_CONNECTEX \
+ { \
+ 0x25a207b9, 0xddf3, 0x4660, { \
+ 0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e \
+ } \
+ }
+
+#define WSAID_ACCEPTEX \
+ { \
+ 0xb5367df1, 0xcbac, 0x11cf, { \
+ 0x95, 0xca, 0x00, 0x80, 0x5f, 0x48, 0xa1, 0x92 \
+ } \
+ }
+
+#define WSAID_GETACCEPTEXSOCKADDRS \
+ { \
+ 0xb5367df2, 0xcbac, 0x11cf, { \
+ 0x95, 0xca, 0x00, 0x80, 0x5f, 0x48, 0xa1, 0x92 \
+ } \
+ }
+
+#define WSAID_TRANSMITFILE \
+ { \
+ 0xb5367df0, 0xcbac, 0x11cf, { \
+ 0x95, 0xca, 0x00, 0x80, 0x5f, 0x48, 0xa1, 0x92 \
+ } \
+ }
+
+#define WSAID_TRANSMITPACKETS \
+ { \
+ 0xd9689da0, 0x1f90, 0x11d3, { \
+ 0x99, 0x71, 0x00, 0xc0, 0x4f, 0x68, 0xc8, 0x76 \
+ } \
+ }
+
+#define WSAID_DISCONNECTEX \
+ { \
+ 0x7fda2e11, 0x8630, 0x436f, { \
+ 0xa0, 0x31, 0xf5, 0x36, 0xa6, 0xee, 0xc1, 0x57 \
+ } \
+ }
+
+COSMOPOLITAN_C_END_
+#endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */
+#endif /* COSMOPOLITAN_LIBC_NT_ENUM_WSAID_H_ */
diff --git a/libc/nt/files.h b/libc/nt/files.h
index 5a8fc1aaa..0d84caace 100644
--- a/libc/nt/files.h
+++ b/libc/nt/files.h
@@ -179,7 +179,7 @@ int64_t FindFirstFileEx(const char16_t *lpFileName, int fInfoLevelId,
uint32_t dwAdditionalFlags);
bool32 FindNextFile(int64_t hFindFile,
struct NtWin32FindData *out_lpFindFileData);
-bool32 FindClose(int64_t inout_hFindFile);
+bool32 FindClose(int64_t hFindFile);
int64_t FindFirstVolume(char16_t *out_lpszVolumeName, uint32_t cchBufferLength);
bool32 FindNextVolume(int64_t inout_hFindVolume, char16_t *out_lpszVolumeName,
diff --git a/libc/nt/ipc.h b/libc/nt/ipc.h
index 2c79321c9..3fce3fd1e 100644
--- a/libc/nt/ipc.h
+++ b/libc/nt/ipc.h
@@ -43,11 +43,11 @@
#define kNtPipeRejectRemoteClients 0x00000008
/* CreateNamedPipe::nMaxInstances */
-#define NT_PIPE_UNLIMITED_INSTANCES 255
+#define kNtPipeUnlimitedInstances 255
-/* CreateNamedPipeInfo */
-#define PIPE_CLIENT_END 0x00000000
-#define PIPE_SERVER_END 0x00000001
+/* GetNamedPipeInfo */
+#define kNtPipeClientEnd 0x00000000
+#define kNtPipeServerEnd 0x00000001
#if !(__ASSEMBLER__ + __LINKER__ + 0)
COSMOPOLITAN_C_START_
diff --git a/libc/nt/kernel32/FindClose.s b/libc/nt/kernel32/FindClose.s
index e6dcc3acd..2a151dc83 100644
--- a/libc/nt/kernel32/FindClose.s
+++ b/libc/nt/kernel32/FindClose.s
@@ -2,7 +2,7 @@
.imp kernel32,__imp_FindClose,FindClose,0
.text.windows
-FindClose:
+__FindClose:
push %rbp
mov %rsp,%rbp
.profilable
@@ -11,5 +11,5 @@ FindClose:
call *__imp_FindClose(%rip)
leave
ret
- .endfn FindClose,globl
+ .endfn __FindClose,globl
.previous
diff --git a/libc/nt/kernel32/ReOpenFile.s b/libc/nt/kernel32/ReOpenFile.s
index 1d315dfe2..57e2e32df 100644
--- a/libc/nt/kernel32/ReOpenFile.s
+++ b/libc/nt/kernel32/ReOpenFile.s
@@ -2,11 +2,11 @@
.imp kernel32,__imp_ReOpenFile,ReOpenFile,0
.text.windows
-ReOpenFile:
+__ReOpenFile:
push %rbp
mov %rsp,%rbp
.profilable
mov __imp_ReOpenFile(%rip),%rax
jmp __sysv2nt
- .endfn ReOpenFile,globl
+ .endfn __ReOpenFile,globl
.previous
diff --git a/libc/nt/master.sh b/libc/nt/master.sh
index 456d91f94..c70c1754b 100755
--- a/libc/nt/master.sh
+++ b/libc/nt/master.sh
@@ -323,7 +323,6 @@ imp 'FindActCtxSectionStringA' FindActCtxSectionStringA kernel32 373
imp 'FindActCtxSectionStringWWorker' FindActCtxSectionStringWWorker kernel32 375
imp 'FindAtom' FindAtomW kernel32 377
imp 'FindAtomA' FindAtomA kernel32 376
-imp 'FindClose' FindClose kernel32 0 1
imp 'FindCloseChangeNotification' FindCloseChangeNotification kernel32 0
imp 'FindFirstChangeNotification' FindFirstChangeNotificationW kernel32 0
imp 'FindFirstChangeNotificationA' FindFirstChangeNotificationA kernel32 0
@@ -993,7 +992,6 @@ imp 'QuirkIsEnabledWorker' QuirkIsEnabledWorker kernel32 1119
imp 'RaiseException' RaiseException kernel32 0
imp 'RaiseFailFastException' RaiseFailFastException kernel32 0
imp 'RaiseInvalid16BitExeError' RaiseInvalid16BitExeError kernel32 1122
-imp 'ReOpenFile' ReOpenFile kernel32 0 4 # TODO(jart): 6.2 and higher
imp 'ReadConsole' ReadConsoleW kernel32 0 5
imp 'ReadConsoleA' ReadConsoleA kernel32 0 5
imp 'ReadConsoleInput' ReadConsoleInputW kernel32 0 4
@@ -1357,6 +1355,7 @@ imp '__CreateProcess' CreateProcessW kernel32 0 10
imp '__CreateThread' CreateThread kernel32 0 6
imp '__DeleteFile' DeleteFileW kernel32 0 1
imp '__DeviceIoControl' DeviceIoControl kernel32 0 8
+imp '__FindClose' FindClose kernel32 0 1
imp '__FindFirstFile' FindFirstFileW kernel32 0 2
imp '__FindNextFile' FindNextFileW kernel32 0 2
imp '__FlushFileBuffers' FlushFileBuffers kernel32 0 1
@@ -1366,6 +1365,7 @@ imp '__GetFileAttributes' GetFileAttributesW kernel32 0 1
imp '__MapViewOfFileEx' MapViewOfFileEx kernel32 0 6
imp '__MapViewOfFileExNuma' MapViewOfFileExNuma kernel32 0 7
imp '__OpenProcess' OpenProcess kernel32 0 3
+imp '__ReOpenFile' ReOpenFile kernel32 0 4 # TODO(jart): 6.2 and higher
imp '__RemoveDirectory' RemoveDirectoryW kernel32 0 1
imp '__SetCurrentDirectory' SetCurrentDirectoryW kernel32 0 1
imp '__TerminateProcess' TerminateProcess kernel32 0 2
@@ -3956,8 +3956,8 @@ imp 'sys_getprotobyname_nt' getprotobyname ws2_32 53
imp 'sys_getprotobynumber_nt' getprotobynumber ws2_32 54
imp 'sys_getservbyname_nt' getservbyname ws2_32 55
imp 'sys_getservbyport_nt' getservbyport ws2_32 56
-imp 'sys_recv_nt' recv ws2_32 16 4 # we're using WSARecvFrom()
-imp 'sys_send_nt' send ws2_32 19 4 # we're using WSASendTo()
+imp '__sys_recv_nt' recv ws2_32 16 4 # we're using WSARecvFrom()
+imp '__sys_send_nt' send ws2_32 19 4 # we're using WSASendTo()
# IPHLPAPI.DLL
#
diff --git a/libc/nt/winsock.h b/libc/nt/winsock.h
index 176c31180..8f819fbd7 100644
--- a/libc/nt/winsock.h
+++ b/libc/nt/winsock.h
@@ -43,12 +43,12 @@
│ cosmopolitan § new technology » winsock ─╬─│┼
╚────────────────────────────────────────────────────────────────────────────│*/
-#define kNtCompEqual 0
-#define kNtCompNotless 1
-
#define kNtWsaFlagOverlapped 0x01
#define kNtWsaFlagNoHandleInherit 0x80
+#define kNtCompEqual 0
+#define kNtCompNotless 1
+
#define kNtTfDisconnect 0x01
#define kNtTfReuseSocket 0x02
#define kNtTfWriteBehind 0x04
@@ -60,33 +60,6 @@
#define kNtSoUpdateAcceptContext 0x700B
#define kNtSoUpdateConnectContext 0x7010
-#define kNtSioAddressListChange 0x28000017u
-#define kNtSioAddressListQuery 0x48000016u
-#define kNtSioAddressListSort 0xC8000019u
-#define kNtSioAssociateHandle 0x88000001u
-#define kNtSioEnableCircularQueueing 0x28000002u
-#define kNtSioFindRoute 0x48000003u
-#define kNtSioFlush 0x28000004u
-#define kNtSioGetBroadcastAddress 0x48000005u
-#define kNtSioGetExtensionFunctionPointer 0xC8000006u
-#define kNtSioGetGroupQos 0xC8000008u
-#define kNtSioGetQos 0xC8000007u
-#define kNtSioMulticastScope 0x8800000Au
-#define kNtSioMultipointLoopback 0x88000009u
-#define kNtSioQueryRssProcessorInfo 0x48000025u
-#define kNtSioQueryTargetPnpHandle 0x48000018u
-#define kNtSioReserved1 0x8800001Au
-#define kNtSioReserved2 0x88000021u
-#define kNtSioRoutingInterfaceChange 0x88000015u
-#define kNtSioRoutingInterfaceQuery 0xC8000014u
-#define kNtSioSetGroupQos 0x8800000Cu
-#define kNtSioSetQos 0x8800000Bu
-#define kNtSioSocketCloseNotify 0x9800000Du
-#define kNtSioTranslateHandle 0xC800000Du
-#define kNtSioUdpConnreset 0x9800000Cu
-#define kNtSioUdpNetreset 0x9800000Fu
-#define kNtSioGetInterfaceList 0x4008747fu /* _IOR('t', 127, ULONG) */
-
#define kNtNspNotifyImmediately 0
#define kNtNspNotifyHwnd 1
#define kNtNspNotifyEvent 2
@@ -548,11 +521,6 @@ void GetAcceptExSockaddrs(
struct sockaddr **out_RemoteSockaddr /*[*RemoteSockaddrLength]*/,
int *out_RemoteSockaddrLength);
-bool32 ConnectEx(int64_t s, const struct sockaddr *name, int namelen,
- const void *opt_lpSendBuffer, uint32_t dwSendDataLength,
- uint32_t *out_lpdwBytesSent,
- struct NtOverlapped *inout_lpOverlapped);
-
bool32 DisconnectEx(int64_t s, struct NtOverlapped *inout_opt_lpOverlapped,
uint32_t dwFlags, uint32_t dwReserved);
diff --git a/libc/nt/ws2_32/recv.s b/libc/nt/ws2_32/recv.s
index 2d6527103..d13d28f26 100644
--- a/libc/nt/ws2_32/recv.s
+++ b/libc/nt/ws2_32/recv.s
@@ -2,11 +2,11 @@
.imp ws2_32,__imp_recv,recv,16
.text.windows
-sys_recv_nt:
+__sys_recv_nt:
push %rbp
mov %rsp,%rbp
.profilable
mov __imp_recv(%rip),%rax
jmp __sysv2nt
- .endfn sys_recv_nt,globl
+ .endfn __sys_recv_nt,globl
.previous
diff --git a/libc/nt/ws2_32/send.s b/libc/nt/ws2_32/send.s
index 1a0e846a2..6a9ef24cc 100644
--- a/libc/nt/ws2_32/send.s
+++ b/libc/nt/ws2_32/send.s
@@ -2,11 +2,11 @@
.imp ws2_32,__imp_send,send,19
.text.windows
-sys_send_nt:
+__sys_send_nt:
push %rbp
mov %rsp,%rbp
.profilable
mov __imp_send(%rip),%rax
jmp __sysv2nt
- .endfn sys_send_nt,globl
+ .endfn __sys_send_nt,globl
.previous
diff --git a/libc/rand/rand64.c b/libc/rand/rand64.c
index 87acdd1de..254b46c4f 100644
--- a/libc/rand/rand64.c
+++ b/libc/rand/rand64.c
@@ -31,7 +31,7 @@
extern int __pid;
static int thepid;
static uint128_t thepool;
-static cthread_spinlock_t rand64_lock;
+_Alignas(64) static char rand64_lock;
/**
* Returns nondeterministic random data.
@@ -54,7 +54,7 @@ static cthread_spinlock_t rand64_lock;
uint64_t rand64(void) {
void *p;
uint128_t s;
- cthread_spinlock(&rand64_lock);
+ _spinlock(&rand64_lock);
if (__pid == thepid) {
s = thepool; // normal path
} else {
@@ -75,6 +75,6 @@ uint64_t rand64(void) {
thepid = __pid;
}
thepool = (s *= 15750249268501108917ull); // lemur64
- cthread_spunlock(&rand64_lock);
+ _spunlock(&rand64_lock);
return s >> 64;
}
diff --git a/libc/runtime/clone.c b/libc/runtime/clone.c
index 7cff94134..b121faf67 100644
--- a/libc/runtime/clone.c
+++ b/libc/runtime/clone.c
@@ -97,7 +97,7 @@ privileged int clone(int (*f)(void *), void *stack, int flags, void *arg, ...) {
: "memory");
unreachable;
} else if (IsWindows()) {
- if ((tidfd = __reservefd()) == -1) return -1;
+ if ((tidfd = __reservefd(-1)) == -1) return -1;
if (flags == CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND) {
if ((hand = CreateThread(&kNtIsInheritable, 0, NT2SYSV(WinThreadMain),
&(struct WinThread){f, arg, stack}, 0, &tid))) {
diff --git a/libc/runtime/fork-nt.c b/libc/runtime/fork-nt.c
index 9d9ab6d90..6cca9b5ca 100644
--- a/libc/runtime/fork-nt.c
+++ b/libc/runtime/fork-nt.c
@@ -24,8 +24,13 @@
#include "libc/intrin/kprintf.h"
#include "libc/macros.internal.h"
#include "libc/mem/alloca.h"
+#include "libc/mem/mem.h"
#include "libc/nexgen32e/nt2sysv.h"
#include "libc/nt/console.h"
+#include "libc/nt/createfile.h"
+#include "libc/nt/enum/accessmask.h"
+#include "libc/nt/enum/creationdisposition.h"
+#include "libc/nt/enum/fileflagandattributes.h"
#include "libc/nt/enum/filemapflags.h"
#include "libc/nt/enum/pageflags.h"
#include "libc/nt/enum/processcreationflags.h"
@@ -39,6 +44,7 @@
#include "libc/runtime/directmap.internal.h"
#include "libc/runtime/memtrack.internal.h"
#include "libc/runtime/runtime.h"
+#include "libc/sock/ntstdin.internal.h"
#include "libc/sysv/consts/map.h"
#include "libc/sysv/consts/o.h"
#include "libc/sysv/consts/prot.h"
@@ -78,7 +84,7 @@ static inline textwindows ssize_t ForkIo(int64_t h, char *p, size_t n,
static dontinline textwindows bool ForkIo2(int64_t h, void *buf, size_t n,
bool32 (*fn)(), const char *sf) {
ssize_t rc = ForkIo(h, buf, n, fn);
- STRACE("%s(%ld, %'zu) → %'zd% m", sf, h, n, rc);
+ // STRACE("%s(%ld, %'zu) → %'zd% m", sf, h, n, rc);
return rc != -1;
}
@@ -90,25 +96,16 @@ static textwindows dontinline bool ReadAll(int64_t h, void *buf, size_t n) {
return ForkIo2(h, buf, n, ReadFile, "ReadFile");
}
-static textwindows int OnForkCrash(struct NtExceptionPointers *ep) {
- kprintf("error: fork() child crashed!%n"
- "\tExceptionCode = %#x%n"
- "\tRip = %x%n",
- ep->ExceptionRecord->ExceptionCode,
- ep->ContextRecord ? ep->ContextRecord->Rip : -1);
- ExitProcess(73);
-}
-
textwindows void WinMainForked(void) {
bool ok;
jmp_buf jb;
+ int64_t reader;
char *addr, *shad;
struct DirectMap dm;
uint64_t size, upsize;
- int64_t reader, writer;
+ int64_t savetsc, savebir;
struct MemoryInterval *maps;
char16_t fvar[21 + 1 + 21 + 1];
- int64_t oncrash, savetsc, savebir;
uint32_t i, varlen, oldprot, savepid;
long mapcount, mapcapacity, specialz;
extern uint64_t ts asm("kStartTsc");
@@ -119,44 +116,31 @@ textwindows void WinMainForked(void) {
if (!varlen || varlen >= ARRAYLEN(fvar)) return;
STRACE("WinMainForked()");
SetEnvironmentVariable(u"_FORK", NULL);
-#ifdef SYSDEBUG
- oncrash = AddVectoredExceptionHandler(1, NT2SYSV(OnForkCrash));
-#endif
- ParseInt(ParseInt(fvar, &reader), &writer);
- CloseHandle(writer);
+ ParseInt(fvar, &reader);
// read the cpu state from the parent process & plus
// read the list of mappings from the parent process
// this is stored in a special secretive memory map!
// read ExtendMemoryIntervals for further details :|
maps = (void *)kMemtrackStart;
- if (!ReadAll(reader, jb, sizeof(jb)) ||
- !ReadAll(reader, &mapcount, sizeof(_mmi.i)) ||
- !ReadAll(reader, &mapcapacity, sizeof(_mmi.n))) {
- ExitProcess(40);
- }
+ ReadAll(reader, jb, sizeof(jb));
+ ReadAll(reader, &mapcount, sizeof(_mmi.i));
+ ReadAll(reader, &mapcapacity, sizeof(_mmi.n));
specialz = ROUNDUP(mapcapacity * sizeof(_mmi.p[0]), kMemtrackGran);
- if (!MapViewOfFileEx(CreateFileMapping(-1, 0, kNtPageReadwrite,
- specialz >> 32, specialz, 0),
- kNtFileMapWrite, 0, 0, specialz, maps)) {
- ExitProcess(41);
- }
- if (!ReadAll(reader, maps, mapcount * sizeof(_mmi.p[0]))) {
- ExitProcess(42);
- }
+ MapViewOfFileEx(
+ CreateFileMapping(-1, 0, kNtPageReadwrite, specialz >> 32, specialz, 0),
+ kNtFileMapWrite, 0, 0, specialz, maps);
+ ReadAll(reader, maps, mapcount * sizeof(_mmi.p[0]));
if (IsAsan()) {
shad = (char *)(((intptr_t)maps >> 3) + 0x7fff8000);
size = ROUNDUP(specialz >> 3, FRAMESIZE);
MapViewOfFileEx(
CreateFileMapping(-1, 0, kNtPageReadwrite, size >> 32, size, 0),
kNtFileMapWrite, 0, 0, size, maps);
- if (!ReadAll(reader, shad, (mapcount * sizeof(_mmi.p[0])) >> 3)) {
- ExitProcess(43);
- }
+ ReadAll(reader, shad, (mapcount * sizeof(_mmi.p[0])) >> 3);
}
// read the heap mappings from the parent process
- // we can avoid copying via pipe for shared maps!
for (i = 0; i < mapcount; ++i) {
addr = (char *)((uint64_t)maps[i].x << 16);
size = maps[i].size;
@@ -164,22 +148,18 @@ textwindows void WinMainForked(void) {
upsize = ROUNDUP(size, FRAMESIZE);
// we don't need to close the map handle because sys_mmap_nt
// doesn't mark it inheritable across fork() for MAP_PRIVATE
- if (!(maps[i].h = CreateFileMapping(-1, 0, kNtPageExecuteReadwrite,
- upsize >> 32, upsize, 0)) ||
- !MapViewOfFileEx(maps[i].h, kNtFileMapWrite | kNtFileMapExecute, 0, 0,
- upsize, addr) ||
- !ReadAll(reader, addr, size)) {
- ExitProcess(44);
- }
+ maps[i].h = CreateFileMapping(-1, 0, kNtPageExecuteReadwrite,
+ upsize >> 32, upsize, 0);
+ MapViewOfFileEx(maps[i].h, kNtFileMapWrite | kNtFileMapExecute, 0, 0,
+ upsize, addr);
+ ReadAll(reader, addr, size);
} else {
// we can however safely inherit MAP_SHARED with zero copy
- if (!MapViewOfFileEx(maps[i].h,
- maps[i].readonlyfile
- ? kNtFileMapRead | kNtFileMapExecute
- : kNtFileMapWrite | kNtFileMapExecute,
- maps[i].offset >> 32, maps[i].offset, size, addr)) {
- ExitProcess(45);
- }
+ MapViewOfFileEx(maps[i].h,
+ maps[i].readonlyfile
+ ? kNtFileMapRead | kNtFileMapExecute
+ : kNtFileMapWrite | kNtFileMapExecute,
+ maps[i].offset >> 32, maps[i].offset, size, addr);
}
}
@@ -187,10 +167,8 @@ textwindows void WinMainForked(void) {
savepid = __pid;
savebir = __kbirth;
savetsc = ts;
- if (!ReadAll(reader, __data_start, __data_end - __data_start) ||
- !ReadAll(reader, __bss_start, __bss_end - __bss_start)) {
- ExitProcess(46);
- }
+ ReadAll(reader, __data_start, __data_end - __data_start);
+ ReadAll(reader, __bss_start, __bss_end - __bss_start);
__pid = savepid;
__kbirth = savebir;
ts = savetsc;
@@ -203,21 +181,26 @@ textwindows void WinMainForked(void) {
__prot2nt(maps[i].prot, maps[i].iscow), &oldprot);
}
- // we're all done reading!
- if (!CloseHandle(reader)) {
- ExitProcess(47);
- }
+ // mitosis complete
+ CloseHandle(reader);
- // clean up, restore state, and jump back into function below
-#ifdef SYSDEBUG
- RemoveVectoredExceptionHandler(oncrash);
-#endif
+ // rewrap the stdin named pipe hack
+ // since the handles closed on fork
+ if (weaken(ForkNtStdinWorker)) weaken(ForkNtStdinWorker)();
+ struct Fds *fds = VEIL("r", &g_fds);
+ fds->__init_p[0].handle = GetStdHandle(kNtStdInputHandle); // just in case
+ fds->__init_p[1].handle = GetStdHandle(kNtStdOutputHandle); // just in case
+ fds->__init_p[2].handle = GetStdHandle(kNtStdErrorHandle); // just in case
+
+ // restore the crash reporting stuff
if (weaken(__wincrash_nt)) {
AddVectoredExceptionHandler(1, (void *)weaken(__wincrash_nt));
}
if (weaken(__onntconsoleevent_nt)) {
SetConsoleCtrlHandler(weaken(__onntconsoleevent_nt), 1);
}
+
+ // jump back into function below
longjmp(jb, 1);
}
@@ -225,23 +208,29 @@ textwindows int sys_fork_nt(void) {
bool ok;
jmp_buf jb;
char **args, **args2;
+ char16_t pipename[64];
int64_t reader, writer;
- int i, n, rc, pid, untrackpid;
+ int i, n, pid, untrackpid, rc = -1;
char *p, forkvar[6 + 21 + 1 + 21 + 1];
struct NtStartupInfo startinfo;
struct NtProcessInformation procinfo;
- if ((pid = untrackpid = __reservefd()) == -1) return -1;
if (!setjmp(jb)) {
- if (CreatePipe(&reader, &writer, &kNtIsInheritable, 0)) {
+ pid = untrackpid = __reservefd(-1);
+ reader = CreateNamedPipe(CreatePipeName(pipename),
+ kNtPipeAccessInbound | kNtFileFlagOverlapped,
+ kNtPipeTypeMessage | kNtPipeReadmodeMessage, 1,
+ 65536, 65536, 0, &kNtIsInheritable);
+ writer = CreateFile(pipename, kNtGenericWrite, 0, 0, kNtOpenExisting,
+ kNtFileFlagOverlapped, 0);
+ if (pid != -1 && reader != -1 && writer != -1) {
p = stpcpy(forkvar, "_FORK=");
- p += uint64toarray_radix10(reader, p), *p++ = ' ';
- p += uint64toarray_radix10(writer, p);
+ p += uint64toarray_radix10(reader, p);
bzero(&startinfo, sizeof(startinfo));
startinfo.cb = sizeof(struct NtStartupInfo);
startinfo.dwFlags = kNtStartfUsestdhandles;
- startinfo.hStdInput = g_fds.p[0].handle;
- startinfo.hStdOutput = g_fds.p[1].handle;
- startinfo.hStdError = g_fds.p[2].handle;
+ startinfo.hStdInput = __getfdhandleactual(0);
+ startinfo.hStdOutput = __getfdhandleactual(1);
+ startinfo.hStdError = __getfdhandleactual(2);
args = __argv;
#ifdef SYSDEBUG
// If --strace was passed to this program, then propagate it the
@@ -258,7 +247,6 @@ textwindows int sys_fork_nt(void) {
if (ntspawn(GetProgramExecutableName(), args, environ, forkvar,
&kNtIsInheritable, NULL, true, 0, NULL, &startinfo,
&procinfo) != -1) {
- CloseHandle(reader);
CloseHandle(procinfo.hThread);
ok = WriteAll(writer, jb, sizeof(jb)) &&
WriteAll(writer, &_mmi.i, sizeof(_mmi.i)) &&
@@ -277,12 +265,10 @@ textwindows int sys_fork_nt(void) {
if (ok) ok = WriteAll(writer, __data_start, __data_end - __data_start);
if (ok) ok = WriteAll(writer, __bss_start, __bss_end - __bss_start);
if (ok) {
- if (!CloseHandle(writer)) {
- ok = false;
- }
+ if (!CloseHandle(writer)) ok = false;
+ writer = -1;
}
if (ok) {
- // XXX: this should be tracked in a separate data structure
g_fds.p[pid].kind = kFdProcess;
g_fds.p[pid].handle = procinfo.hProcess;
g_fds.p[pid].flags = O_CLOEXEC;
@@ -290,19 +276,13 @@ textwindows int sys_fork_nt(void) {
untrackpid = -1;
rc = pid;
} else {
- rc = __winerr();
TerminateProcess(procinfo.hProcess, 127);
CloseHandle(procinfo.hProcess);
}
- } else {
- CloseHandle(writer);
- rc = -1;
}
- } else {
- STRACE("CreatePipe() failed %m");
- rc = -1;
- CloseHandle(writer);
}
+ if (reader != -1) CloseHandle(reader);
+ if (writer != -1) CloseHandle(writer);
} else {
rc = 0;
}
diff --git a/libc/runtime/getdosargv.c b/libc/runtime/getdosargv.c
index 2c747a5ed..ff97895fb 100644
--- a/libc/runtime/getdosargv.c
+++ b/libc/runtime/getdosargv.c
@@ -18,6 +18,7 @@
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/bits/bits.h"
#include "libc/bits/safemacros.internal.h"
+#include "libc/nt/thunk/msabi.h"
#include "libc/runtime/internal.h"
#include "libc/str/str.h"
#include "libc/str/tpenc.h"
diff --git a/libc/runtime/getinterpreterexecutablename.c b/libc/runtime/getinterpreterexecutablename.c
index ce3f750ef..695c3f46c 100644
--- a/libc/runtime/getinterpreterexecutablename.c
+++ b/libc/runtime/getinterpreterexecutablename.c
@@ -19,7 +19,6 @@
#include "libc/calls/calls.h"
#include "libc/calls/internal.h"
#include "libc/dce.h"
-#include "libc/intrin/kprintf.h"
#include "libc/macros.internal.h"
#include "libc/runtime/runtime.h"
#include "libc/sysv/consts/at.h"
diff --git a/libc/runtime/getsymboltable.c b/libc/runtime/getsymboltable.c
index 797a50f2c..43ff79632 100644
--- a/libc/runtime/getsymboltable.c
+++ b/libc/runtime/getsymboltable.c
@@ -20,7 +20,6 @@
#include "libc/bits/bits.h"
#include "libc/bits/weaken.h"
#include "libc/calls/strace.internal.h"
-#include "libc/intrin/kprintf.h"
#include "libc/macros.internal.h"
#include "libc/runtime/runtime.h"
#include "libc/runtime/symbols.internal.h"
diff --git a/libc/runtime/interceptflag.greg.c b/libc/runtime/interceptflag.greg.c
index 334ab871a..026ea4dbe 100644
--- a/libc/runtime/interceptflag.greg.c
+++ b/libc/runtime/interceptflag.greg.c
@@ -16,7 +16,6 @@
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
│ PERFORMANCE OF THIS SOFTWARE. │
╚─────────────────────────────────────────────────────────────────────────────*/
-#include "libc/intrin/kprintf.h"
#include "libc/log/libfatal.internal.h"
#include "libc/runtime/internal.h"
#include "libc/str/str.h"
diff --git a/libc/runtime/internal.h b/libc/runtime/internal.h
index d5b9375b3..ea3209ac7 100644
--- a/libc/runtime/internal.h
+++ b/libc/runtime/internal.h
@@ -25,9 +25,9 @@ void __stack_chk_fail(void) wontreturn relegated;
void __stack_chk_fail_local(void) wontreturn relegated hidden;
void _jmpstack(void *, void *, ...) hidden wontreturn;
long _setstack(void *, void *, ...) hidden;
-int GetDosArgv(const char16_t *, char *, size_t, char **, size_t) hidden;
+int GetDosArgv(const char16_t *, char *, size_t, char **, size_t);
Elf64_Ehdr *MapElfRead(const char *, struct MappedFile *) hidden;
-int GetDosEnviron(const char16_t *, char *, size_t, char **, size_t) hidden;
+int GetDosEnviron(const char16_t *, char *, size_t, char **, size_t);
bool __intercept_flag(int *, char *[], const char *);
COSMOPOLITAN_C_END_
diff --git a/libc/runtime/mapanon.c b/libc/runtime/mapanon.c
index f59f46585..6b31c87b6 100644
--- a/libc/runtime/mapanon.c
+++ b/libc/runtime/mapanon.c
@@ -59,6 +59,8 @@ noasan void *mapanon(size_t size) {
/* asan runtime depends on this function */
void *m;
m = mmap(0, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- if (m == MAP_FAILED && weaken(__oom_hook)) weaken(__oom_hook)(size);
+ if (m == MAP_FAILED && weaken(__oom_hook)) {
+ weaken(__oom_hook)(size);
+ }
return m;
}
diff --git a/libc/runtime/memtrack.internal.h b/libc/runtime/memtrack.internal.h
index 112318974..233a2926c 100644
--- a/libc/runtime/memtrack.internal.h
+++ b/libc/runtime/memtrack.internal.h
@@ -66,7 +66,7 @@ int UntrackMemoryIntervals(void *, size_t) hidden;
(-0x800000000000 <= (intptr_t)(p) && (intptr_t)(p) <= 0x7fffffffffff)
forceinline pureconst bool IsLegalSize(size_t n) {
- return n <= 0xffffffffffff;
+ return n <= 0x7fffffffffff;
}
forceinline pureconst bool IsAutoFrame(int x) {
diff --git a/libc/runtime/metalprintf.greg.c b/libc/runtime/metalprintf.greg.c
index 93781a6c4..4da5570fd 100644
--- a/libc/runtime/metalprintf.greg.c
+++ b/libc/runtime/metalprintf.greg.c
@@ -23,7 +23,7 @@
#define PUTC(C) \
do { \
while (!(inb(0x3F8 + UART_LSR) & UART_TTYTXR)) { \
- asm("pause"); \
+ __builtin_ia32_pause(); \
} \
outb(0x3F8, C); \
} while (0)
diff --git a/libc/runtime/mmap.c b/libc/runtime/mmap.c
index 1fdf7f8fe..bc7e69259 100644
--- a/libc/runtime/mmap.c
+++ b/libc/runtime/mmap.c
@@ -26,10 +26,14 @@
#include "libc/errno.h"
#include "libc/intrin/asan.internal.h"
#include "libc/intrin/describeflags.internal.h"
+#include "libc/intrin/kprintf.h"
#include "libc/log/backtrace.internal.h"
#include "libc/log/libfatal.internal.h"
#include "libc/log/log.h"
#include "libc/macros.internal.h"
+#include "libc/nt/process.h"
+#include "libc/nt/runtime.h"
+#include "libc/nt/struct/processmemorycounters.h"
#include "libc/rand/rand.h"
#include "libc/runtime/directmap.internal.h"
#include "libc/runtime/internal.h"
@@ -49,13 +53,10 @@
#define FRAME(x) ((int)((intptr_t)(x) >> 16))
static wontreturn void OnUnrecoverableMmapError(const char *s) {
- if (IsTiny()) {
- unreachable;
- } else {
- STRACE("%s %m", s);
- __restorewintty();
- _Exit(199);
- }
+ if (weaken(__die)) weaken(__die)();
+ STRACE("%s %m", s);
+ __restorewintty();
+ _Exit(199);
}
noasan static bool IsMapped(char *p, size_t n) {
@@ -101,9 +102,12 @@ noasan static bool Automap(int n, int *res) {
if (*res + n <= FRAME(kAutomapStart + (kAutomapStart - 1))) {
return true;
} else {
+ STRACE("mmap(%.12p, %p) ENOMEM (automap interval exhausted)", ADDR(*res),
+ ADDR(n + 1));
return false;
}
} else {
+ STRACE("mmap(%.12p, %p) ENOMEM (automap failed)", ADDR(*res), ADDR(n + 1));
return false;
}
}
@@ -149,12 +153,14 @@ static noasan void *MapMemory(void *addr, size_t size, int prot, int flags,
static textwindows dontinline noasan void *MapMemories(char *addr, size_t size,
int prot, int flags,
int fd, int64_t off,
- int f, int x, size_t n) {
+ int f, int x, int n) {
+ size_t i, m;
int64_t oi, sz;
struct DirectMap dm;
bool iscow, readonlyfile;
- size_t i, m = (n - 1) * FRAMESIZE;
- assert(m < size && m + FRAMESIZE >= size);
+ m = (size_t)(n - 1) << 16;
+ assert(m < size);
+ assert(m + FRAMESIZE >= size);
oi = fd == -1 ? 0 : off + m;
sz = size - m;
dm = sys_mmap(addr + m, sz, prot, f, fd, oi);
@@ -214,42 +220,43 @@ static textwindows dontinline noasan void *MapMemories(char *addr, size_t size,
*/
noasan void *mmap(void *addr, size_t size, int prot, int flags, int fd,
int64_t off) {
+ STRACE("mmap(%p, %'zu, %s, %s, %d, %'ld) → ...", addr, size,
+ DescribeProtFlags(prot), DescribeMapFlags(flags), fd, off);
void *res;
char *p = addr;
struct DirectMap dm;
int a, b, i, f, m, n, x;
- if (!IsTiny() && UNLIKELY(!size)) {
+ if (UNLIKELY(!size)) {
STRACE("size=0");
res = VIP(einval());
- } else if (!IsTiny() && UNLIKELY(!IsLegalSize(size))) {
+ } else if (UNLIKELY(!IsLegalSize(size))) {
STRACE("size isn't 48-bit");
res = VIP(einval());
- } else if (!IsTiny() && UNLIKELY(!IsLegalPointer(p))) {
+ } else if (UNLIKELY(!IsLegalPointer(p))) {
STRACE("p isn't 48-bit");
res = VIP(einval());
- } else if (!IsTiny() && UNLIKELY(!ALIGNED(p))) {
+ } else if (UNLIKELY(!ALIGNED(p))) {
STRACE("p isn't 64kb aligned");
res = VIP(einval());
- } else if (!IsTiny() && UNLIKELY(fd < -1)) {
+ } else if (UNLIKELY(fd < -1)) {
STRACE("mmap(%.12p, %'zu, fd=%d) EBADF", p, size, fd);
res = VIP(ebadf());
- } else if (!IsTiny() && UNLIKELY(!((fd != -1) ^ !!(flags & MAP_ANONYMOUS)))) {
+ } else if (UNLIKELY(!((fd != -1) ^ !!(flags & MAP_ANONYMOUS)))) {
STRACE("fd anonymous mismatch");
res = VIP(einval());
- } else if (!IsTiny() &&
- UNLIKELY(!(!!(flags & MAP_PRIVATE) ^ !!(flags & MAP_SHARED)))) {
+ } else if (UNLIKELY(!(!!(flags & MAP_PRIVATE) ^ !!(flags & MAP_SHARED)))) {
STRACE("MAP_SHARED ^ MAP_PRIVATE");
res = VIP(einval());
- } else if (!IsTiny() && UNLIKELY(off < 0)) {
+ } else if (UNLIKELY(off < 0)) {
STRACE("neg off");
res = VIP(einval());
- } else if (!IsTiny() && UNLIKELY(INT64_MAX - size < off)) {
+ } else if (UNLIKELY(INT64_MAX - size < off)) {
STRACE("too large");
res = VIP(einval());
- } else if (!IsTiny() && UNLIKELY(!ALIGNED(off))) {
+ } else if (UNLIKELY(!ALIGNED(off))) {
STRACE("p isn't 64kb aligned");
res = VIP(einval());
- } else if (!IsTiny() && (flags & MAP_FIXED_NOREPLACE) && IsMapped(p, size)) {
+ } else if ((flags & MAP_FIXED_NOREPLACE) && IsMapped(p, size)) {
#ifdef SYSDEBUG
if (OverlapsImageSpace(p, size)) {
STRACE("overlaps image");
@@ -258,7 +265,7 @@ noasan void *mmap(void *addr, size_t size, int prot, int flags, int fd,
}
#endif
res = VIP(efault());
- } else if (!IsTiny() && __isfdkind(fd, kFdZip)) {
+ } else if (__isfdkind(fd, kFdZip)) {
STRACE("fd is zipos handle");
res = VIP(einval());
} else {
@@ -268,7 +275,8 @@ noasan void *mmap(void *addr, size_t size, int prot, int flags, int fd,
prot |= PROT_WRITE; /* kludge */
}
}
- n = FRAME(size) + !!(size & (FRAMESIZE - 1));
+ n = (int)(size >> 16) + !!(size & (FRAMESIZE - 1));
+ assert(n > 0);
f = (flags & ~MAP_FIXED_NOREPLACE) | MAP_FIXED;
if (flags & MAP_FIXED) {
x = FRAME(p);
diff --git a/libc/runtime/mprotect.greg.c b/libc/runtime/mprotect.greg.c
index 4f7b8c08a..0215270ad 100644
--- a/libc/runtime/mprotect.greg.c
+++ b/libc/runtime/mprotect.greg.c
@@ -32,7 +32,7 @@
#include "libc/sysv/consts/prot.h"
#include "libc/sysv/errfuns.h"
-extern typeof(VirtualProtect) *const __imp_VirtualProtect __msabi;
+__msabi extern typeof(VirtualProtect) *const __imp_VirtualProtect;
#define ADDR(x) ((char *)((int64_t)((uint64_t)(x) << 32) >> 16))
diff --git a/libc/runtime/mremap.c b/libc/runtime/mremap.c
index 3e319723d..e09c323c3 100644
--- a/libc/runtime/mremap.c
+++ b/libc/runtime/mremap.c
@@ -68,6 +68,10 @@ static bool MustMoveMap(intptr_t y, size_t j) {
* @param q is new address
*/
void *mremap(void *p, size_t n, size_t m, int f, ... /* void *q */) {
+ enosys();
+ return MAP_FAILED;
+
+#if 0
va_list va;
void *res, *q;
if (f & MREMAP_FIXED) {
@@ -83,7 +87,6 @@ void *mremap(void *p, size_t n, size_t m, int f, ... /* void *q */) {
DescribeRemapFlags(f), q, res);
return res;
-#if 0
// TODO(jart): perhaps some day?
// probably not a big perf gain at this point :|
size_t i, j, k;
diff --git a/libc/runtime/msync-nt.c b/libc/runtime/msync-nt.c
index 9f774178f..55fe5895f 100644
--- a/libc/runtime/msync-nt.c
+++ b/libc/runtime/msync-nt.c
@@ -17,7 +17,6 @@
│ PERFORMANCE OF THIS SOFTWARE. │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/calls/internal.h"
-#include "libc/intrin/kprintf.h"
#include "libc/macros.internal.h"
#include "libc/nt/files.h"
#include "libc/nt/memory.h"
diff --git a/libc/runtime/printargs.c b/libc/runtime/printargs.c
index 945390cc9..5503211ee 100644
--- a/libc/runtime/printargs.c
+++ b/libc/runtime/printargs.c
@@ -20,8 +20,15 @@
#include "libc/calls/strace.internal.h"
#include "libc/calls/struct/sigset.h"
#include "libc/dce.h"
+#include "libc/intrin/describeflags.internal.h"
#include "libc/intrin/kprintf.h"
#include "libc/macros.internal.h"
+#include "libc/nt/enum/startf.h"
+#include "libc/nt/runtime.h"
+#include "libc/nt/startupinfo.h"
+#include "libc/nt/struct/ldrdatatableentry.h"
+#include "libc/nt/struct/startupinfo.h"
+#include "libc/nt/struct/teb.h"
#include "libc/runtime/runtime.h"
#include "libc/runtime/stack.h"
#include "libc/sock/internal.h"
@@ -85,7 +92,7 @@ static const struct AuxiliaryValue *DescribeAuxv(unsigned long x) {
return NULL;
}
-textstartup void __printargs(void) {
+noasan textstartup void __printargs(void) {
#ifdef SYSDEBUG
int st;
long key;
@@ -100,16 +107,19 @@ textstartup void __printargs(void) {
st = __strace;
__strace = 0;
+ PRINT("");
PRINT("ARGUMENTS (%p)", __argv);
for (i = 0; i < __argc; ++i) {
PRINT(" ☼ %s", __argv[i]);
}
+ PRINT("");
PRINT("ENVIRONMENT (%p)", __envp);
for (env = __envp; *env; ++env) {
PRINT(" ☼ %s", *env);
}
+ PRINT("");
PRINT("AUXILIARY (%p)", __auxv);
for (auxp = __auxv; *auxp; auxp += 2) {
if ((auxinfo = DescribeAuxv(auxp[0]))) {
@@ -120,19 +130,21 @@ textstartup void __printargs(void) {
}
}
+ PRINT("");
PRINT("SPECIALS");
- PRINT(" ☼ %30s = %#s", "kTmpPath", kTmpPath);
- PRINT(" ☼ %30s = %#s", "kNtSystemDirectory", kNtSystemDirectory);
- PRINT(" ☼ %30s = %#s", "kNtWindowsDirectory", kNtWindowsDirectory);
- PRINT(" ☼ %30s = %#s", "program_executable_name", GetProgramExecutableName());
- PRINT(" ☼ %30s = %#s", "GetInterpreterExecutableName()",
+ PRINT(" ☼ %s = %#s", "kTmpPath", kTmpPath);
+ PRINT(" ☼ %s = %#s", "kNtSystemDirectory", kNtSystemDirectory);
+ PRINT(" ☼ %s = %#s", "kNtWindowsDirectory", kNtWindowsDirectory);
+ PRINT(" ☼ %s = %#s", "program_executable_name", GetProgramExecutableName());
+ PRINT(" ☼ %s = %#s", "GetInterpreterExecutableName()",
GetInterpreterExecutableName(path, sizeof(path)));
- PRINT(" ☼ %30s = %p", "RSP", __builtin_frame_address(0));
- PRINT(" ☼ %30s = %p", "GetStackAddr()", GetStackAddr(0));
- PRINT(" ☼ %30s = %p", "GetStaticStackAddr(0)", GetStaticStackAddr(0));
- PRINT(" ☼ %30s = %p", "GetStackSize()", GetStackSize());
+ PRINT(" ☼ %s = %p", "RSP", __builtin_frame_address(0));
+ PRINT(" ☼ %s = %p", "GetStackAddr()", GetStackAddr(0));
+ PRINT(" ☼ %s = %p", "GetStaticStackAddr(0)", GetStaticStackAddr(0));
+ PRINT(" ☼ %s = %p", "GetStackSize()", GetStackSize());
if (!IsWindows()) {
+ PRINT("");
PRINT("OPEN FILE DESCRIPTORS");
for (i = 0; i < ARRAYLEN(pfds); ++i) {
pfds[i].fd = i;
@@ -148,6 +160,7 @@ textstartup void __printargs(void) {
}
if (!sigprocmask(SIG_BLOCK, 0, &ss) && (ss.__bits[0] || ss.__bits[1])) {
+ PRINT("");
PRINT("BLOCKED SIGNALS {%#lx, %#lx}", ss.__bits[0], ss.__bits[1]);
for (i = 0; i < 32; ++i) {
if (ss.__bits[0] & (1u << i)) {
@@ -156,6 +169,77 @@ textstartup void __printargs(void) {
}
}
+ if (IsWindows()) {
+ struct NtStartupInfo startinfo;
+ GetStartupInfo(&startinfo);
+
+ PRINT("");
+ PRINT("GETSTARTUPINFO");
+ if (startinfo.lpDesktop)
+ PRINT(" ☼ %s = %#!hs", "lpDesktop", startinfo.lpDesktop);
+ if (startinfo.lpTitle) PRINT(" ☼ %s = %#!hs", "lpTitle", startinfo.lpTitle);
+ if (startinfo.dwX) PRINT(" ☼ %s = %u", "dwX", startinfo.dwX);
+ if (startinfo.dwY) PRINT(" ☼ %s = %u", "dwY", startinfo.dwY);
+ if (startinfo.dwXSize) PRINT(" ☼ %s = %u", "dwXSize", startinfo.dwXSize);
+ if (startinfo.dwYSize) PRINT(" ☼ %s = %u", "dwYSize", startinfo.dwYSize);
+ if (startinfo.dwXCountChars)
+ PRINT(" ☼ %s = %u", "dwXCountChars", startinfo.dwXCountChars);
+ if (startinfo.dwYCountChars)
+ PRINT(" ☼ %s = %u", "dwYCountChars", startinfo.dwYCountChars);
+ if (startinfo.dwFillAttribute)
+ PRINT(" ☼ %s = %u", "dwFillAttribute", startinfo.dwFillAttribute);
+ if (startinfo.dwFlags)
+ PRINT(" ☼ %s = %s", "dwFlags", DescribeNtStartFlags(startinfo.dwFlags));
+ if (startinfo.wShowWindow)
+ PRINT(" ☼ %s = %hu", "wShowWindow", startinfo.wShowWindow);
+ if (startinfo.cbReserved2)
+ PRINT(" ☼ %s = %hu", "cbReserved2", startinfo.cbReserved2);
+ if (startinfo.hStdInput)
+ PRINT(" ☼ %s = %ld", "hStdInput", startinfo.hStdInput);
+ if (startinfo.hStdOutput)
+ PRINT(" ☼ %s = %ld", "hStdOutput", startinfo.hStdOutput);
+ if (startinfo.hStdError)
+ PRINT(" ☼ %s = %ld", "hStdError", startinfo.hStdError);
+
+ PRINT("");
+ PRINT("STANDARD HANDLES");
+ PRINT(" ☼ %s = %ld", "GetStdHandle(kNtStdInputHandle)",
+ GetStdHandle(kNtStdInputHandle));
+ PRINT(" ☼ %s = %ld", "GetStdHandle(kNtStdOutputHandle)",
+ GetStdHandle(kNtStdOutputHandle));
+ PRINT(" ☼ %s = %ld", "GetStdHandle(kNtStdErrorHandle)",
+ GetStdHandle(kNtStdErrorHandle));
+
+ PRINT("");
+ PRINT("TEB");
+ PRINT(" ☼ gs:0x%02x %s = %p", 0x00, "NtGetSeh()", _NtGetSeh());
+ PRINT(" ☼ gs:0x%02x %s = %p", 0x08, "NtGetStackHigh()", _NtGetStackHigh());
+ PRINT(" ☼ gs:0x%02x %s = %p", 0x10, "NtGetStackLow()", _NtGetStackLow());
+ PRINT(" ☼ gs:0x%02x %s = %p", 0x18, "_NtGetSubsystemTib()",
+ _NtGetSubsystemTib());
+ PRINT(" ☼ gs:0x%02x %s = %p", 0x20, "NtGetFib()", _NtGetFib());
+ PRINT(" ☼ gs:0x%02x %s = %p", 0x30, "NtGetTeb()", NtGetTeb());
+ PRINT(" ☼ gs:0x%02x %s = %p", 0x38, "NtGetEnv()", _NtGetEnv());
+ PRINT(" ☼ gs:0x%02x %s = %p", 0x40, "NtGetPid()", NtGetPid());
+ PRINT(" ☼ gs:0x%02x %s = %p", 0x48, "NtGetTid()", NtGetTid());
+ PRINT(" ☼ gs:0x%02x %s = %p", 0x50, "NtGetRpc()", _NtGetRpc());
+ PRINT(" ☼ gs:0x%02x %s = %p", 0x58, "NtGetTls()", _NtGetTls());
+ PRINT(" ☼ gs:0x%02x %s = %p", 0x60, "NtGetPeb()", NtGetPeb());
+ PRINT(" ☼ gs:0x%02x %s = %p", 0x68, "NtGetErr()", NtGetErr());
+
+ PRINT("");
+ PRINT("DEPENDENCIES");
+ struct NtLinkedList *head = &NtGetPeb()->Ldr->InLoadOrderModuleList;
+ struct NtLinkedList *ldr = head->Next;
+ do {
+ const struct NtLdrDataTableEntry *dll =
+ (const struct NtLdrDataTableEntry *)ldr;
+ PRINT(" ☼ %.*!hs\t\t%'zu bytes", dll->FullDllName.Length,
+ dll->FullDllName.Data, dll->SizeOfImage);
+ } while ((ldr = ldr->Next) && ldr != head);
+ }
+
+ PRINT("");
__strace = st;
#endif
}
diff --git a/libc/runtime/runtime.mk b/libc/runtime/runtime.mk
index 6afc6be9a..429f76ddb 100644
--- a/libc/runtime/runtime.mk
+++ b/libc/runtime/runtime.mk
@@ -57,6 +57,7 @@ $(LIBC_RUNTIME_A).pkg: \
$(LIBC_RUNTIME_A_OBJS) \
$(foreach x,$(LIBC_RUNTIME_A_DIRECTDEPS),$($(x)_A).pkg)
+o/$(MODE)/libc/runtime/fork-nt.o \
o/$(MODE)/libc/runtime/printf.o \
o/$(MODE)/libc/runtime/abort-nt.o \
o/$(MODE)/libc/runtime/printmemoryintervals.o \
@@ -80,6 +81,7 @@ o/$(MODE)/libc/runtime/stackchkfail.o \
o/$(MODE)/libc/runtime/stackchkfaillocal.o \
o/$(MODE)/libc/runtime/winmain.greg.o: \
OVERRIDE_CFLAGS += \
+ -ffreestanding \
$(NO_MAGIC)
# must use alloca()
@@ -87,9 +89,6 @@ o/$(MODE)/libc/runtime/winmain.greg.o: \
o/$(MODE)/libc/runtime/fork-nt.o: \
OVERRIDE_CPPFLAGS += \
-DSTACK_FRAME_UNLIMITED
-o/$(MODE)/libc/runtime/fork-nt.o: \
- OVERRIDE_CFLAGS += \
- $(NO_MAGIC)
o/$(MODE)/libc/runtime/printf.o \
o/$(MODE)/libc/runtime/memtrack.o \
diff --git a/libc/runtime/winmain.greg.c b/libc/runtime/winmain.greg.c
index 1c612adce..5c3078c57 100644
--- a/libc/runtime/winmain.greg.c
+++ b/libc/runtime/winmain.greg.c
@@ -29,6 +29,7 @@
#include "libc/log/libfatal.internal.h"
#include "libc/macros.internal.h"
#include "libc/nexgen32e/bsr.h"
+#include "libc/nexgen32e/nt2sysv.h"
#include "libc/nexgen32e/rdtsc.h"
#include "libc/nt/console.h"
#include "libc/nt/enum/consolemodeflags.h"
@@ -42,7 +43,10 @@
#include "libc/nt/pedef.internal.h"
#include "libc/nt/process.h"
#include "libc/nt/runtime.h"
+#include "libc/nt/signals.h"
+#include "libc/nt/struct/ntexceptionpointers.h"
#include "libc/nt/struct/teb.h"
+#include "libc/nt/synchronization.h"
#include "libc/nt/thunk/msabi.h"
#include "libc/runtime/directmap.internal.h"
#include "libc/runtime/internal.h"
@@ -53,9 +57,9 @@
#include "libc/str/utf16.h"
#if IsTiny()
-extern typeof(CreateFileMapping) *const __imp_CreateFileMappingW __msabi;
-extern typeof(MapViewOfFileEx) *const __imp_MapViewOfFileEx __msabi;
-extern typeof(VirtualProtect) *const __imp_VirtualProtect __msabi;
+__msabi extern typeof(CreateFileMapping) *const __imp_CreateFileMappingW;
+__msabi extern typeof(MapViewOfFileEx) *const __imp_MapViewOfFileEx;
+__msabi extern typeof(VirtualProtect) *const __imp_VirtualProtect;
#define CreateFileMapping __imp_CreateFileMappingW
#define MapViewOfFileEx __imp_MapViewOfFileEx
#define VirtualProtect __imp_VirtualProtect
@@ -84,6 +88,7 @@ struct WinArgs {
extern int __pid;
extern bool __nomultics;
extern uint32_t __winmainpid;
+extern int64_t __wincrashearly;
extern const char kConsoleHandles[3];
static const short kConsoleModes[3] = {
@@ -115,8 +120,31 @@ forceinline void MakeLongDoubleLongAgain(void) {
asm volatile("fldcw\t%0" : /* no outputs */ : "m"(x87cw));
}
-static noasan textwindows wontreturn noinstrument void WinMainNew(
- const char16_t *cmdline) {
+__msabi static textwindows int WinCrashEarly(struct NtExceptionPointers *ep) {
+ uint32_t wrote;
+ char buf[64], *p = buf;
+ *p++ = 'c';
+ *p++ = 'r';
+ *p++ = 'a';
+ *p++ = 's';
+ *p++ = 'h';
+ *p++ = ' ';
+ *p++ = '0';
+ *p++ = 'x';
+ p = __fixcpy(p, ep->ExceptionRecord->ExceptionCode, 32);
+ *p++ = ' ';
+ *p++ = 'r';
+ *p++ = 'i';
+ *p++ = 'p';
+ *p++ = ' ';
+ p = __fixcpy(p, ep->ContextRecord ? ep->ContextRecord->Rip : -1, 32);
+ *p++ = '\r';
+ *p++ = '\n';
+ WriteFile(GetStdHandle(kNtStdErrorHandle), buf, p - buf, &wrote, 0);
+ ExitProcess(200);
+}
+
+__msabi static textwindows wontreturn void WinMainNew(const char16_t *cmdline) {
bool32 rc;
int64_t h, hand;
uint32_t oldprot;
@@ -218,10 +246,8 @@ static noasan textwindows wontreturn noinstrument void WinMainNew(
*
* @param hInstance call GetModuleHandle(NULL) from main if you need it
*/
-noasan textwindows noinstrument int64_t WinMain(int64_t hInstance,
- int64_t hPrevInstance,
- const char *lpCmdLine,
- int nCmdShow) {
+__msabi textwindows int64_t WinMain(int64_t hInstance, int64_t hPrevInstance,
+ const char *lpCmdLine, int nCmdShow) {
const char16_t *cmdline;
extern char os asm("__hostos");
extern uint64_t ts asm("kStartTsc");
@@ -229,6 +255,7 @@ noasan textwindows noinstrument int64_t WinMain(int64_t hInstance,
ts = rdtsc();
__nomultics = true;
__pid = GetCurrentProcessId();
+ __wincrashearly = AddVectoredExceptionHandler(1, (void *)WinCrashEarly);
cmdline = GetCommandLine();
#ifdef SYSDEBUG
/* sloppy flag-only check for early initialization */
diff --git a/libc/sock/accept-nt.c b/libc/sock/accept-nt.c
index 8ac2cd7c9..d0bd84f4d 100644
--- a/libc/sock/accept-nt.c
+++ b/libc/sock/accept-nt.c
@@ -17,6 +17,8 @@
│ PERFORMANCE OF THIS SOFTWARE. │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/calls/internal.h"
+#include "libc/calls/sig.internal.h"
+#include "libc/intrin/kprintf.h"
#include "libc/mem/mem.h"
#include "libc/nt/files.h"
#include "libc/nt/struct/pollfd.h"
@@ -38,9 +40,13 @@ textwindows int sys_accept_nt(struct Fd *fd, void *addr, uint32_t *addrsize,
int client, oflags;
struct SockFd *sockfd, *sockfd2;
sockfd = (struct SockFd *)fd->extra;
+ if (_check_interrupts(true, g_fds.p)) return eintr();
for (;;) {
- if (!WSAPoll(&(struct sys_pollfd_nt){fd->handle, POLLIN}, 1, 1000))
+ if (!WSAPoll(&(struct sys_pollfd_nt){fd->handle, POLLIN}, 1,
+ __SIG_POLLING_INTERVAL_MS)) {
+ if (_check_interrupts(true, g_fds.p)) return eintr();
continue;
+ }
if ((h = WSAAccept(fd->handle, addr, (int32_t *)addrsize, 0, 0)) != -1) {
oflags = 0;
if (flags & SOCK_CLOEXEC) oflags |= O_CLOEXEC;
@@ -48,13 +54,13 @@ textwindows int sys_accept_nt(struct Fd *fd, void *addr, uint32_t *addrsize,
if ((!(flags & SOCK_NONBLOCK) ||
__sys_ioctlsocket_nt(h, FIONBIO, (uint32_t[]){1}) != -1) &&
(sockfd2 = calloc(1, sizeof(struct SockFd)))) {
- if ((client = __reservefd()) != -1) {
+ if ((client = __reservefd(-1)) != -1) {
sockfd2->family = sockfd->family;
sockfd2->type = sockfd->type;
sockfd2->protocol = sockfd->protocol;
- sockfd2->event = WSACreateEvent();
g_fds.p[client].kind = kFdSocket;
g_fds.p[client].flags = oflags;
+ g_fds.p[client].mode = 0140666;
g_fds.p[client].handle = h;
g_fds.p[client].extra = (uintptr_t)sockfd2;
return client;
diff --git a/libc/sock/accept4.c b/libc/sock/accept4.c
index 85c871898..92b407f6a 100644
--- a/libc/sock/accept4.c
+++ b/libc/sock/accept4.c
@@ -49,7 +49,7 @@ int accept4(int fd, void *out_addr, uint32_t *inout_addrsize, int flags) {
} else {
rc = ebadf();
}
- STRACE("accept4(%d, [%s]) -> %d% m", fd,
+ STRACE("accept4(%d, [%s]) -> %d% lm", fd,
__describe_sockaddr(out_addr, inout_addrsize ? *inout_addrsize : 0),
rc);
return rc;
diff --git a/libc/sock/basesocket.c b/libc/sock/basesocket.c
new file mode 100644
index 000000000..cf82d7660
--- /dev/null
+++ b/libc/sock/basesocket.c
@@ -0,0 +1,61 @@
+/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
+│vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│
+╞══════════════════════════════════════════════════════════════════════════════╡
+│ Copyright 2022 Justine Alexandra Roberts Tunney │
+│ │
+│ Permission to use, copy, modify, and/or distribute this software for │
+│ any purpose with or without fee is hereby granted, provided that the │
+│ above copyright notice and this permission notice appear in all copies. │
+│ │
+│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │
+│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │
+│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │
+│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │
+│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │
+│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │
+│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
+│ PERFORMANCE OF THIS SOFTWARE. │
+╚─────────────────────────────────────────────────────────────────────────────*/
+#include "libc/nt/enum/sio.h"
+#include "libc/nt/errors.h"
+#include "libc/nt/winsock.h"
+#include "libc/sock/internal.h"
+#include "libc/sock/sock.h"
+
+static textwindows int64_t GetNtBspSocket(int64_t socket, uint32_t ioctl) {
+ uint32_t bytes;
+ int64_t bsp_socket;
+ if (WSAIoctl(socket, ioctl, NULL, 0, &bsp_socket, sizeof(bsp_socket), &bytes,
+ NULL, NULL) != -1) {
+ return bsp_socket;
+ } else {
+ return -1;
+ }
+}
+
+textwindows int64_t GetNtBaseSocket(int64_t socket) {
+ int64_t base_socket;
+ for (;;) {
+ base_socket = GetNtBspSocket(socket, kNtSioBaseHandle);
+ if (base_socket != -1) return base_socket;
+ if (WSAGetLastError() == WSAENOTSOCK) return __winsockerr();
+ /*
+ * Even though Microsoft documentation clearly states that Layered
+ * Spyware Providers must never ever intercept the SIO_BASE_HANDLE
+ * ioctl, Komodia LSPs (that Lenovo got sued for preinstalling) do
+ * so anyway in order to redirect decrypted https requests through
+ * some foreign proxy and inject ads which breaks high-performance
+ * network event io. However it doesn't handle SIO_BSP_HANDLE_POLL
+ * which will at least let us obtain the socket associated with the
+ * next winsock protocol chain entry. If this succeeds, loop around
+ * and call SIO_BASE_HANDLE again with the returned BSP socket, to
+ * make sure we unwrap all layers and retrieve the real base socket.
+ */
+ base_socket = GetNtBspSocket(socket, kNtSioBspHandlePoll);
+ if (base_socket != -1 && base_socket != socket) {
+ socket = base_socket;
+ } else {
+ return __winsockerr();
+ }
+ }
+}
diff --git a/libc/sock/bind.c b/libc/sock/bind.c
index 2188084db..a269b21a0 100644
--- a/libc/sock/bind.c
+++ b/libc/sock/bind.c
@@ -62,6 +62,6 @@ int bind(int fd, const void *addr, uint32_t addrsize) {
} else {
rc = einval();
}
- STRACE("bind(%d, %s) -> %d% m", fd, __describe_sockaddr(addr, addrsize), rc);
+ STRACE("bind(%d, %s) -> %d% lm", fd, __describe_sockaddr(addr, addrsize), rc);
return rc;
}
diff --git a/libc/sock/closesocket-nt.c b/libc/sock/closesocket-nt.c
index b87e62542..baf2352ef 100644
--- a/libc/sock/closesocket-nt.c
+++ b/libc/sock/closesocket-nt.c
@@ -27,7 +27,6 @@
textwindows int sys_closesocket_nt(struct Fd *fd) {
struct SockFd *sockfd;
sockfd = (struct SockFd *)fd->extra;
- WSACloseEvent(sockfd->event);
free(sockfd);
if (__sys_closesocket_nt(fd->handle) != -1) {
return 0;
diff --git a/libc/sock/connect.c b/libc/sock/connect.c
index d47a11325..814cc18d4 100644
--- a/libc/sock/connect.c
+++ b/libc/sock/connect.c
@@ -48,7 +48,7 @@ int connect(int fd, const void *addr, uint32_t addrsize) {
} else {
rc = efault();
}
- STRACE("connect(%d, %s) -> %d% m", fd, __describe_sockaddr(addr, addrsize),
+ STRACE("connect(%d, %s) -> %d% lm", fd, __describe_sockaddr(addr, addrsize),
rc);
return rc;
}
diff --git a/libc/sock/dupsockfd.c b/libc/sock/dupsockfd.c
index f688c4d42..f665d56e6 100644
--- a/libc/sock/dupsockfd.c
+++ b/libc/sock/dupsockfd.c
@@ -26,7 +26,6 @@ textwindows struct SockFd *_dupsockfd(struct SockFd *sockfd) {
newsf->family = sockfd->family;
newsf->type = sockfd->type;
newsf->protocol = sockfd->protocol;
- newsf->event = WSACreateEvent();
}
return newsf;
}
diff --git a/libc/sock/epoll.c b/libc/sock/epoll.c
index c325c7e89..99e508734 100644
--- a/libc/sock/epoll.c
+++ b/libc/sock/epoll.c
@@ -1324,7 +1324,7 @@ static textwindows dontinline int sys_epoll_create1_nt(uint32_t flags) {
struct PortState *port_state;
struct TsTreeNode *tree_node;
if (wepoll_init() < 0) return -1;
- if ((fd = __reservefd()) == -1) return -1;
+ if ((fd = __reservefd(-1)) == -1) return -1;
port_state = port_new(&ephnd);
if (!port_state) {
__releasefd(fd);
@@ -1341,6 +1341,7 @@ static textwindows dontinline int sys_epoll_create1_nt(uint32_t flags) {
g_fds.p[fd].kind = kFdEpoll;
g_fds.p[fd].handle = ephnd;
g_fds.p[fd].flags = flags;
+ g_fds.p[fd].mode = 0140666;
return fd;
}
diff --git a/libc/sock/getpeername.c b/libc/sock/getpeername.c
index 0fbf32df7..677a2e5cf 100644
--- a/libc/sock/getpeername.c
+++ b/libc/sock/getpeername.c
@@ -43,7 +43,7 @@ int getpeername(int fd, void *out_addr, uint32_t *out_addrsize) {
} else {
rc = ebadf();
}
- STRACE("getpeername(%d, [%s]) -> %d% m", fd,
+ STRACE("getpeername(%d, [%s]) -> %d% lm", fd,
__describe_sockaddr(out_addr, out_addrsize ? *out_addrsize : 0), rc);
return rc;
}
diff --git a/libc/sock/getsockname.c b/libc/sock/getsockname.c
index fac44ca1a..3def705a9 100644
--- a/libc/sock/getsockname.c
+++ b/libc/sock/getsockname.c
@@ -43,7 +43,7 @@ int getsockname(int fd, void *out_addr, uint32_t *out_addrsize) {
} else {
rc = ebadf();
}
- STRACE("getsockname(%d, [%s]) -> %d% m", fd,
+ STRACE("getsockname(%d, [%s]) -> %d% lm", fd,
__describe_sockaddr(out_addr, out_addrsize ? *out_addrsize : 0), rc);
return rc;
}
diff --git a/libc/sock/getsockopt.c b/libc/sock/getsockopt.c
index bf327fa10..067c05267 100644
--- a/libc/sock/getsockopt.c
+++ b/libc/sock/getsockopt.c
@@ -48,7 +48,7 @@ int getsockopt(int fd, int level, int optname, void *out_opt_optval,
} else {
rc = ebadf();
}
- STRACE("getsockopt(%d, %#x, %#x, %p, %p) → %d% m", fd, level, optname,
+ STRACE("getsockopt(%d, %#x, %#x, %p, %p) → %d% lm", fd, level, optname,
out_opt_optval, out_optlen, rc);
return rc;
}
diff --git a/libc/sock/internal.h b/libc/sock/internal.h
index 6401fea14..3f54c86d4 100644
--- a/libc/sock/internal.h
+++ b/libc/sock/internal.h
@@ -1,6 +1,7 @@
#ifndef COSMOPOLITAN_LIBC_SOCK_INTERNAL_H_
#define COSMOPOLITAN_LIBC_SOCK_INTERNAL_H_
#include "libc/calls/internal.h"
+#include "libc/nt/struct/overlapped.h"
#include "libc/nt/thunk/msabi.h"
#include "libc/nt/winsock.h"
#include "libc/sock/select.h"
@@ -54,23 +55,26 @@ struct sockaddr_un_bsd {
/* ------------------------------------------------------------------------------------*/
+#define SOCKFD_OVERLAP_BUFSIZ 128
+
struct SockFd {
int family;
int type;
int protocol;
- int64_t event;
- bool32 (*AcceptEx)(int64_t sListenSocket, int64_t sAcceptSocket,
- void *out_lpOutputBuffer /*[recvlen+local+remoteaddrlen]*/,
- uint32_t dwReceiveDataLength,
- uint32_t dwLocalAddressLength,
- uint32_t dwRemoteAddressLength,
- uint32_t *out_lpdwBytesReceived,
- struct NtOverlapped *inout_lpOverlapped) __msabi;
+ bool32 (*__msabi ConnectEx)(int64_t s, const struct sockaddr *name,
+ int namelen, const void *opt_lpSendBuffer,
+ uint32_t dwSendDataLength,
+ uint32_t *out_lpdwBytesSent,
+ struct NtOverlapped *inout_lpOverlapped);
+ bool32 (*__msabi AcceptEx)(
+ int64_t sListenSocket, int64_t sAcceptSocket,
+ void *out_lpOutputBuffer /*[recvlen+local+remoteaddrlen]*/,
+ uint32_t dwReceiveDataLength, uint32_t dwLocalAddressLength,
+ uint32_t dwRemoteAddressLength, uint32_t *out_lpdwBytesReceived,
+ struct NtOverlapped *inout_lpOverlapped);
};
-hidden extern int64_t __iocp;
-
-errno_t __dos2errno(uint32_t);
+errno_t __dos2errno(uint32_t) hidden;
void _firewall(const void *, uint32_t) hidden;
@@ -109,7 +113,7 @@ int32_t sys_epoll_ctl(int32_t, int32_t, int32_t, void *) hidden;
int32_t sys_epoll_wait(int32_t, void *, int32_t, int32_t) hidden;
int sys_poll_metal(struct pollfd *, size_t, unsigned);
-int sys_poll_nt(struct pollfd *, uint64_t, uint64_t) hidden;
+int sys_poll_nt(struct pollfd *, uint64_t, uint64_t *) hidden;
int sys_getsockopt_nt(struct Fd *, int, int, void *, uint32_t *) hidden;
int sys_getsockname_nt(struct Fd *, void *, uint32_t *) hidden;
int sys_getpeername_nt(struct Fd *, void *, uint32_t *) hidden;
@@ -128,6 +132,8 @@ int sys_select_nt(int, fd_set *, fd_set *, fd_set *, struct timeval *) hidden;
int sys_shutdown_nt(struct Fd *, int) hidden;
int sys_setsockopt_nt(struct Fd *, int, int, const void *, uint32_t) hidden;
+ssize_t sys_send_nt(int, const struct iovec *, size_t, uint32_t) hidden;
+ssize_t sys_recv_nt(struct Fd *, const struct iovec *, size_t, uint32_t) hidden;
size_t __iovec2nt(struct NtIovec[hasatleast 16], const struct iovec *,
size_t) hidden;
ssize_t sys_sendto_nt(int, const struct iovec *, size_t, uint32_t, void *,
@@ -140,7 +146,7 @@ int64_t __winsockerr(void) nocallback hidden;
int __fixupnewsockfd(int, int) hidden;
int64_t __winsockblock(int64_t, unsigned, int64_t) hidden;
struct SockFd *_dupsockfd(struct SockFd *) hidden;
-
+int64_t GetNtBaseSocket(int64_t) hidden;
int sys_close_epoll(int) hidden;
/**
diff --git a/libc/sock/kntwsadata.c b/libc/sock/kntwsadata.c
index 74fb4144b..6396f4eab 100644
--- a/libc/sock/kntwsadata.c
+++ b/libc/sock/kntwsadata.c
@@ -17,6 +17,7 @@
│ PERFORMANCE OF THIS SOFTWARE. │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/bits/weaken.h"
+#include "libc/calls/calls.h"
#include "libc/calls/strace.internal.h"
#include "libc/dce.h"
#include "libc/mem/mem.h"
@@ -37,17 +38,16 @@
hidden struct NtWsaData kNtWsaData;
static textwindows void WinSockCleanup(void) {
- size_t i;
- STRACE("WSACleanup()");
- WSACleanup();
- for (i = 0; i < g_fds.n; ++i) {
+ int i, rc;
+ STRACE("WinSockCleanup()");
+ for (i = g_fds.n; i--;) {
if (g_fds.p[i].kind == kFdSocket) {
- if (weaken(free)) {
- weaken(free)((struct SockFd *)g_fds.p[i].extra);
- g_fds.p[i].extra = 0;
- }
+ close(i);
}
}
+ // TODO(jart): Check WSACleanup() result code
+ rc = WSACleanup();
+ STRACE("WSACleanup() → %d% lm", rc);
}
textwindows noasan void WinSockInit(void) {
diff --git a/libc/sock/listen.c b/libc/sock/listen.c
index a3500345f..c452a2ae9 100644
--- a/libc/sock/listen.c
+++ b/libc/sock/listen.c
@@ -43,6 +43,6 @@ int listen(int fd, int backlog) {
} else {
rc = ebadf();
}
- STRACE("listen(%d, %d) → %d% m", fd, backlog, rc);
+ STRACE("listen(%d, %d) → %d% lm", fd, backlog, rc);
return rc;
}
diff --git a/libc/sock/ntstdin.greg.c b/libc/sock/ntstdin.greg.c
new file mode 100644
index 000000000..6253b69d3
--- /dev/null
+++ b/libc/sock/ntstdin.greg.c
@@ -0,0 +1,158 @@
+/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
+│vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│
+╞══════════════════════════════════════════════════════════════════════════════╡
+│ Copyright 2022 Justine Alexandra Roberts Tunney │
+│ │
+│ Permission to use, copy, modify, and/or distribute this software for │
+│ any purpose with or without fee is hereby granted, provided that the │
+│ above copyright notice and this permission notice appear in all copies. │
+│ │
+│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │
+│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │
+│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │
+│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │
+│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │
+│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │
+│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
+│ PERFORMANCE OF THIS SOFTWARE. │
+╚─────────────────────────────────────────────────────────────────────────────*/
+#define ShouldUseMsabiAttribute() 1
+#include "libc/assert.h"
+#include "libc/calls/calls.h"
+#include "libc/calls/internal.h"
+#include "libc/calls/strace.internal.h"
+#include "libc/intrin/kprintf.h"
+#include "libc/mem/mem.h"
+#include "libc/nexgen32e/nt2sysv.h"
+#include "libc/nt/createfile.h"
+#include "libc/nt/enum/accessmask.h"
+#include "libc/nt/enum/creationdisposition.h"
+#include "libc/nt/enum/fileflagandattributes.h"
+#include "libc/nt/errors.h"
+#include "libc/nt/events.h"
+#include "libc/nt/ipc.h"
+#include "libc/nt/runtime.h"
+#include "libc/nt/synchronization.h"
+#include "libc/nt/thread.h"
+#include "libc/nt/thunk/msabi.h"
+#include "libc/sock/ntstdin.internal.h"
+
+/**
+ * @fileoverview Pollable Standard Input for the New Technology.
+ */
+
+__msabi extern typeof(CloseHandle) *const __imp_CloseHandle;
+
+static textwindows bool IsEof(bool ok, uint32_t got) {
+ return (ok && !got) || (!ok && (__imp_GetLastError() == kNtErrorHandleEof ||
+ __imp_GetLastError() == kNtErrorBrokenPipe));
+}
+
+static textwindows uint32_t StdinWorkerThread(void *arg) {
+ char buf[512];
+ bool32 ok = true;
+ uint32_t i, rc, got, err, wrote;
+ struct NtStdinWorker w, *wp = arg;
+ STRACE("StdinWorkerThread(%ld → %ld → %ld) pid %d tid %d", wp->reader,
+ wp->writer, wp->consumer, getpid(), gettid());
+ __sync_lock_release(&wp->sync);
+ w = *wp;
+ do {
+ ok = __imp_ReadFile(w.reader, buf, sizeof(buf), &got, 0);
+ /* When writing to a non-blocking, byte-mode pipe handle with
+ insufficient buffer space, WriteFile returns TRUE with
+ *lpNumberOfBytesWritten < nNumberOfBytesToWrite.
+ ──Quoth MSDN WriteFile() */
+ for (i = 0; ok && i < got; i += wrote) {
+ ok = __imp_WriteFile(w.writer, buf + i, got - i, &wrote, 0);
+ }
+ } while (ok && got);
+ if (!ok) {
+ err = __imp_GetLastError();
+ if (err == kNtErrorHandleEof || err == kNtErrorBrokenPipe ||
+ err == kNtErrorNoData) {
+ ok = true;
+ }
+ }
+ STRACE("StdinWorkerThread(%ld → %ld → %ld) → %hhhd %d", w.reader, w.writer,
+ w.consumer, __imp_GetLastError());
+ return !ok;
+}
+
+/**
+ * Converts read-only file descriptor to pollable named pipe.
+ *
+ * @param fd is open file descriptor to convert
+ * @return new object on success, or 0 w/ errno
+ */
+textwindows struct NtStdinWorker *NewNtStdinWorker(int fd) {
+ struct NtStdinWorker *w;
+ STRACE("LaunchNtStdinWorker(%d) pid %d tid %d", fd, getpid(), gettid());
+ assert(!g_fds.p[fd].worker);
+ assert(__isfdopen(fd));
+ if (!(w = calloc(1, sizeof(struct NtStdinWorker)))) return 0;
+ w->refs = 1;
+ w->sync = 1;
+ w->reader = g_fds.p[fd].handle;
+ if ((w->consumer = CreateNamedPipe(
+ CreatePipeName(w->name),
+ kNtPipeAccessInbound | kNtFileFlagOverlapped,
+ kNtPipeTypeByte | kNtPipeReadmodeByte | kNtPipeRejectRemoteClients,
+ 1, 512, 512, 0, 0)) != -1) {
+ if ((w->writer = CreateFile(w->name, kNtGenericWrite, 0, 0, kNtOpenExisting,
+ kNtFileFlagOverlapped, 0)) != -1) {
+ if ((w->worker = CreateThread(0, 0, NT2SYSV(StdinWorkerThread), w, 0,
+ &w->tid)) != -1) {
+ while (__sync_lock_test_and_set(&w->sync, __ATOMIC_CONSUME)) {
+ __builtin_ia32_pause();
+ }
+ g_fds.p[fd].handle = w->consumer;
+ g_fds.p[fd].worker = w;
+ return w;
+ }
+ CloseHandle(w->writer);
+ }
+ CloseHandle(w->consumer);
+ }
+ free(w);
+ return w;
+}
+
+/**
+ * References stdin worker on the New Technology.
+ * @param w is non-null worker object
+ * @return worker object for new fd
+ */
+textwindows struct NtStdinWorker *RefNtStdinWorker(struct NtStdinWorker *w) {
+ __atomic_fetch_add(&w->refs, 1, __ATOMIC_RELAXED);
+ return w;
+}
+
+/**
+ * Dereferences stdin worker on the New Technology.
+ * @param w is non-null worker object
+ * @return true if ok otherwise false
+ */
+textwindows bool UnrefNtStdinWorker(struct NtStdinWorker *w) {
+ bool ok = true;
+ if (__atomic_sub_fetch(&w->refs, 1, __ATOMIC_SEQ_CST)) return true;
+ // w->consumer is freed by close_nt()
+ if (!CloseHandle(w->writer)) ok = false;
+ if (!CloseHandle(w->reader)) ok = false;
+ if (!CloseHandle(w->worker)) ok = false;
+ free(w);
+ return ok;
+}
+
+/**
+ * Runs post fork for stdin workers on the New Technology.
+ */
+textwindows void ForkNtStdinWorker(void) {
+ for (int i = 0; i < g_fds.n; ++i) {
+ if (g_fds.p[i].kind && g_fds.p[i].worker) {
+ g_fds.p[i].handle = g_fds.p[i].worker->reader;
+ free(g_fds.p[i].worker);
+ g_fds.p[i].worker = 0;
+ }
+ }
+}
diff --git a/libc/sock/ntstdin.internal.h b/libc/sock/ntstdin.internal.h
new file mode 100644
index 000000000..4afd2d023
--- /dev/null
+++ b/libc/sock/ntstdin.internal.h
@@ -0,0 +1,24 @@
+#ifndef COSMOPOLITAN_LIBC_SOCK_NTSTDIN_INTERNAL_H_
+#define COSMOPOLITAN_LIBC_SOCK_NTSTDIN_INTERNAL_H_
+#if !(__ASSEMBLER__ + __LINKER__ + 0)
+COSMOPOLITAN_C_START_
+
+struct NtStdinWorker { /* non-inherited */
+ volatile char sync; /* spin sync start */
+ int refs; /* reference count */
+ uint32_t tid; /* of the worker */
+ int64_t reader; /* the real handle */
+ int64_t writer; /* for the worker */
+ int64_t worker; /* thread handle */
+ int64_t consumer; /* same as Fd::handle */
+ char16_t name[64]; /* for named pipe */
+};
+
+struct NtStdinWorker *NewNtStdinWorker(int) hidden;
+struct NtStdinWorker *RefNtStdinWorker(struct NtStdinWorker *) hidden;
+bool UnrefNtStdinWorker(struct NtStdinWorker *) hidden;
+void ForkNtStdinWorker(void) hidden;
+
+COSMOPOLITAN_C_END_
+#endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */
+#endif /* COSMOPOLITAN_LIBC_SOCK_NTSTDIN_INTERNAL_H_ */
diff --git a/libc/sock/poll-metal.c b/libc/sock/poll-metal.c
index 4eb946176..60c77a423 100644
--- a/libc/sock/poll-metal.c
+++ b/libc/sock/poll-metal.c
@@ -73,7 +73,7 @@ int sys_poll_metal(struct pollfd *fds, size_t nfds, unsigned timeout_ms) {
if (rc || !blocking || unsignedsubtract(rdtsc(), start) >= timeout) {
break;
} else {
- asm("pause");
+ __builtin_ia32_pause();
}
}
return rc;
diff --git a/libc/sock/poll-nt.c b/libc/sock/poll-nt.c
index ee9b39a80..c0718242c 100644
--- a/libc/sock/poll-nt.c
+++ b/libc/sock/poll-nt.c
@@ -18,42 +18,189 @@
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/bits/bits.h"
#include "libc/bits/weaken.h"
+#include "libc/calls/calls.h"
#include "libc/calls/internal.h"
#include "libc/calls/sig.internal.h"
+#include "libc/calls/sigbits.h"
+#include "libc/calls/strace.internal.h"
#include "libc/calls/struct/sigaction.h"
+#include "libc/intrin/kprintf.h"
+#include "libc/intrin/spinlock.h"
#include "libc/macros.internal.h"
+#include "libc/mem/mem.h"
+#include "libc/nt/errors.h"
+#include "libc/nt/ipc.h"
+#include "libc/nt/runtime.h"
#include "libc/nt/struct/pollfd.h"
+#include "libc/nt/synchronization.h"
#include "libc/nt/winsock.h"
#include "libc/sock/internal.h"
+#include "libc/sock/ntstdin.internal.h"
#include "libc/sock/yoink.inc"
#include "libc/sysv/consts/poll.h"
+#include "libc/sysv/consts/sig.h"
#include "libc/sysv/errfuns.h"
-textwindows int sys_poll_nt(struct pollfd *fds, uint64_t nfds, uint64_t ms) {
- int i, got, waitfor;
- struct sys_pollfd_nt ntfds[64];
- if (nfds >= ARRAYLEN(ntfds)) return einval();
- for (i = 0; i < nfds; ++i) {
- if (fds[i].fd >= 0) {
- if (!__isfdkind(fds[i].fd, kFdSocket)) return enotsock();
- ntfds[i].handle = g_fds.p[fds[i].fd].handle;
- ntfds[i].events = fds[i].events & (POLLPRI | POLLIN | POLLOUT);
+#undef STRACE // too verbosen
+#define STRACE(...) // but don't want to delete
+
+_Alignas(64) static char poll_lock;
+
+/**
+ * Polls on the New Technology.
+ *
+ * This function is used to implement poll() and select(). You may poll
+ * on both sockets and files at the same time. We also poll for signals
+ * while poll is polling.
+ */
+textwindows int sys_poll_nt(struct pollfd *fds, uint64_t nfds, uint64_t *ms) {
+ bool ok;
+ uint32_t avail;
+ struct sys_pollfd_nt pipefds[8];
+ struct sys_pollfd_nt sockfds[64];
+ int pipeindices[ARRAYLEN(pipefds)];
+ int sockindices[ARRAYLEN(sockfds)];
+ int i, sn, pn, failed, gotpipes, gotsocks, waitfor;
+
+ // check for interrupts early before doing work
+ if (_check_interrupts(false, g_fds.p)) return eintr();
+
+ // do the planning
+ // we need to read static variables
+ // we might need to spawn threads and open pipes
+ _spinlock(&poll_lock);
+ for (failed = sn = pn = i = 0; i < nfds; ++i) {
+ if (fds[i].fd < 0) continue;
+ if (__isfdopen(fds[i].fd)) {
+ if (__isfdkind(fds[i].fd, kFdSocket)) {
+ if (sn < ARRAYLEN(sockfds)) {
+ sockindices[sn] = i;
+ sockfds[sn].handle = g_fds.p[fds[i].fd].handle;
+ sockfds[sn].events = fds[i].events & (POLLPRI | POLLIN | POLLOUT);
+ sn += 1;
+ } else {
+ // too many socket fds
+ failed = enomem();
+ break;
+ }
+ } else if (fds[i].events & POLLIN) {
+ if (!g_fds.p[fds[i].fd].worker) {
+ if (!(g_fds.p[fds[i].fd].worker = NewNtStdinWorker(fds[i].fd))) {
+ // failed to launch stdin worker
+ failed = -1;
+ break;
+ }
+ }
+ if (pn < ARRAYLEN(pipefds)) {
+ pipeindices[pn] = i;
+ pipefds[pn].handle = g_fds.p[fds[i].fd].handle;
+ pipefds[pn].events = fds[i].events & (POLLPRI | POLLIN | POLLOUT);
+ pn += 1;
+ } else {
+ // too many non-socket fds
+ failed = enomem();
+ break;
+ }
+ } else {
+ // non-sock w/o pollin
+ failed = enotsock();
+ break;
+ }
} else {
- ntfds[i].handle = -1;
- ntfds[i].events = POLLIN;
+ // non-open file descriptor
+ failed = einval();
+ break;
}
}
+ _spunlock(&poll_lock);
+ if (failed) {
+ // failed to create a polling solution
+ return failed;
+ }
+
+ // perform the i/o and sleeping and looping
for (;;) {
- if (_check_interrupts(false, g_fds.p)) return eintr();
- waitfor = MIN(__SIG_POLLING_INTERVAL_MS, ms); /* for ctrl+c */
- if ((got = WSAPoll(ntfds, nfds, waitfor)) != -1) {
- if (!got && (ms -= waitfor) > 0) continue;
- for (i = 0; i < nfds; ++i) {
- fds[i].revents = ntfds[i].handle < 0 ? 0 : ntfds[i].revents;
+ // see if input is available on non-sockets
+ for (gotpipes = i = 0; i < pn; ++i) {
+ ok = PeekNamedPipe(pipefds[i].handle, 0, 0, 0, &avail, 0);
+ STRACE("PeekNamedPipe(%ld, 0, 0, 0, [%'u], 0) → %hhhd% m",
+ pipefds[i].handle, avail, ok);
+ if (ok) {
+ if (avail) {
+ pipefds[i].revents = POLLIN;
+ gotpipes += 1;
+ } else {
+ pipefds[i].revents = 0;
+ }
+ } else {
+ pipefds[i].revents = POLLERR;
+ gotpipes += 1;
}
- return got;
+ }
+ // if we haven't found any good results yet then here we
+ // compute a small time slice we don't mind sleeping for
+ waitfor = gotpipes ? 0 : MIN(__SIG_POLLING_INTERVAL_MS, *ms);
+ if (sn) {
+ // we need to poll the socket handles separately because
+ // microsoft certainly loves to challenge us with coding
+ // please note that winsock will fail if we pass zero fd
+ STRACE("WSAPoll(%p, %u, %'d) out of %'lu", sockfds, sn, waitfor, *ms);
+ if ((gotsocks = WSAPoll(sockfds, sn, waitfor)) == -1) {
+ return __winsockerr();
+ }
+ *ms -= waitfor;
} else {
- return __winsockerr();
+ gotsocks = 0;
+ if (!gotpipes && waitfor) {
+ // if we've only got pipes and none of them are ready
+ // then we'll just explicitly sleep for the time left
+ STRACE("SleepEx(%'d, false) out of %'lu", waitfor, *ms);
+ if (SleepEx(waitfor, true) == kNtWaitIoCompletion) {
+ STRACE("IOCP TRIGGERED EINTR");
+ return eintr();
+ }
+ *ms -= waitfor;
+ }
+ }
+ // we gave all the sockets and all the named pipes a shot
+ // if we found anything at all then it's time to end work
+ if (gotpipes || gotsocks || *ms <= 0) {
+ break;
+ }
+ // otherwise loop limitlessly for timeout to elapse while
+ // checking for signal delivery interrupts, along the way
+ if (_check_interrupts(false, g_fds.p)) {
+ return eintr();
+ }
+ }
+
+ // we got some
+ // assemble the result
+ for (i = 0; i < pn; ++i) {
+ fds[pipeindices[i]].revents =
+ pipefds[i].handle < 0 ? 0 : pipefds[i].revents;
+ }
+ for (i = 0; i < sn; ++i) {
+ fds[sockindices[i]].revents =
+ sockfds[i].handle < 0 ? 0 : sockfds[i].revents;
+ }
+ return gotpipes + gotsocks;
+}
+
+static textexit void __freefds_workers(void) {
+ int i;
+ STRACE("__freefds_workers()");
+ for (i = g_fds.n; i--;) {
+ if (g_fds.p[i].kind && g_fds.p[i].worker) {
+ close(i);
}
}
}
+
+static textstartup void __freefds_workers_init(void) {
+ atexit(__freefds_workers);
+}
+
+const void *const __freefds_workers_ctor[] initarray = {
+ __freefds_workers_init,
+};
diff --git a/libc/sock/poll.c b/libc/sock/poll.c
index 4e27a000d..d2eed6ecd 100644
--- a/libc/sock/poll.c
+++ b/libc/sock/poll.c
@@ -28,19 +28,20 @@
/**
* Waits for something to happen on multiple file descriptors at once.
*
- * @param fds[𝑖].fd should have been created with SOCK_NONBLOCK passed
- * to socket() or accept4()
- * @param fds[𝑖].events flags can have POLL{IN,OUT,PRI}
+ * @param fds[𝑖].fd should be a socket, input pipe, or conosle input
+ * @param fds[𝑖].events flags can have POLLIN, POLLOUT, and POLLPRI
* @param timeout_ms if 0 means don't wait and -1 means wait forever
* @return number of items fds whose revents field has been set to
* nonzero to describe its events, or -1 w/ errno
* @return fds[𝑖].revents flags can have:
* (fds[𝑖].events & POLL{IN,OUT,PRI,HUP,ERR,NVAL})
* @asyncsignalsafe
+ * @threadsafe
* @norestart
*/
-int poll(struct pollfd *fds, uint64_t nfds, int32_t timeout_ms) {
+int poll(struct pollfd *fds, size_t nfds, int timeout_ms) {
int rc;
+ uint64_t millis;
if (IsAsan() && !__asan_is_valid(fds, nfds * sizeof(struct pollfd))) {
rc = efault();
} else if (!IsWindows()) {
@@ -50,8 +51,9 @@ int poll(struct pollfd *fds, uint64_t nfds, int32_t timeout_ms) {
rc = sys_poll_metal(fds, nfds, timeout_ms);
}
} else {
- rc = sys_poll_nt(fds, nfds, timeout_ms);
+ millis = timeout_ms;
+ rc = sys_poll_nt(fds, nfds, &millis);
}
- STRACE("poll(%p, %'lu, %'d) → %d% m", fds, nfds, timeout_ms, rc);
+ STRACE("poll(%p, %'lu, %'d) → %d% lm", fds, nfds, timeout_ms, rc);
return rc;
}
diff --git a/libc/sock/recv-nt.c b/libc/sock/recv-nt.c
new file mode 100644
index 000000000..651075102
--- /dev/null
+++ b/libc/sock/recv-nt.c
@@ -0,0 +1,90 @@
+/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
+│vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│
+╞══════════════════════════════════════════════════════════════════════════════╡
+│ Copyright 2020 Justine Alexandra Roberts Tunney │
+│ │
+│ Permission to use, copy, modify, and/or distribute this software for │
+│ any purpose with or without fee is hereby granted, provided that the │
+│ above copyright notice and this permission notice appear in all copies. │
+│ │
+│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │
+│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │
+│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │
+│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │
+│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │
+│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │
+│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
+│ PERFORMANCE OF THIS SOFTWARE. │
+╚─────────────────────────────────────────────────────────────────────────────*/
+#include "libc/assert.h"
+#include "libc/bits/weaken.h"
+#include "libc/calls/internal.h"
+#include "libc/calls/sig.internal.h"
+#include "libc/calls/strace.internal.h"
+#include "libc/intrin/kprintf.h"
+#include "libc/log/backtrace.internal.h"
+#include "libc/nt/enum/wait.h"
+#include "libc/nt/errors.h"
+#include "libc/nt/struct/overlapped.h"
+#include "libc/nt/winsock.h"
+#include "libc/sock/internal.h"
+#include "libc/sock/yoink.inc"
+#include "libc/sysv/errfuns.h"
+
+/**
+ * Performs stream socket receive on New Technology.
+ *
+ * @param fd must be a socket
+ * @return number of bytes received, or -1 w/ errno
+ */
+textwindows ssize_t sys_recv_nt(struct Fd *fd, const struct iovec *iov,
+ size_t iovlen, uint32_t flags) {
+ ssize_t rc;
+ uint32_t i, got = 0;
+ struct NtIovec iovnt[16];
+ struct NtOverlapped overlapped = {.hEvent = WSACreateEvent()};
+
+ if (_check_interrupts(true, g_fds.p)) return eintr();
+
+ if (!WSARecv(fd->handle, iovnt, __iovec2nt(iovnt, iov, iovlen), &got, &flags,
+ &overlapped, NULL)) {
+ rc = got;
+ goto Finished;
+ }
+
+ if (WSAGetLastError() != kNtErrorIoPending) {
+ STRACE("WSARecv failed %lm");
+ rc = __winsockerr();
+ goto Finished;
+ }
+
+ for (;;) {
+ i = WSAWaitForMultipleEvents(1, &overlapped.hEvent, true,
+ __SIG_POLLING_INTERVAL_MS, true);
+ if (i == kNtWaitFailed) {
+ STRACE("WSAWaitForMultipleEvents failed %lm");
+ rc = __winsockerr();
+ goto Finished;
+ } else if (i == kNtWaitTimeout) {
+ if (_check_interrupts(true, g_fds.p)) {
+ rc = eintr();
+ goto Finished;
+ }
+ } else if (i == kNtWaitIoCompletion) {
+ STRACE("IOCP TRIGGERED EINTR");
+ } else {
+ break;
+ }
+ }
+
+ if (!WSAGetOverlappedResult(fd->handle, &overlapped, &got, false, &flags)) {
+ STRACE("WSAGetOverlappedResult failed %lm");
+ rc = __winsockerr();
+ goto Finished;
+ }
+
+ rc = got;
+Finished:
+ WSACloseEvent(overlapped.hEvent);
+ return rc;
+}
diff --git a/libc/sock/recv.c b/libc/sock/recv.c
index 8030bfc2a..5136f3f2b 100644
--- a/libc/sock/recv.c
+++ b/libc/sock/recv.c
@@ -16,7 +16,13 @@
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
│ PERFORMANCE OF THIS SOFTWARE. │
╚─────────────────────────────────────────────────────────────────────────────*/
+#include "libc/calls/internal.h"
+#include "libc/calls/strace.internal.h"
+#include "libc/dce.h"
+#include "libc/intrin/asan.internal.h"
+#include "libc/sock/internal.h"
#include "libc/sock/sock.h"
+#include "libc/sysv/errfuns.h"
/**
* Receives data from network socket.
@@ -32,5 +38,27 @@
* @restartable (unless SO_RCVTIMEO)
*/
ssize_t recv(int fd, void *buf, size_t size, int flags) {
- return recvfrom(fd, buf, size, flags, NULL, 0);
+ ssize_t rc, got;
+ if (IsAsan() && !__asan_is_valid(buf, size)) {
+ rc = efault();
+ } else if (!IsWindows()) {
+ rc = sys_recvfrom(fd, buf, size, flags, 0, 0);
+ } else if (__isfdopen(fd)) {
+ if (__isfdkind(fd, kFdSocket)) {
+ rc = sys_recv_nt(&g_fds.p[fd], (struct iovec[]){{buf, size}}, 1, flags);
+ } else if (__isfdkind(fd, kFdFile)) {
+ if (flags) {
+ rc = einval();
+ } else {
+ rc = sys_read_nt(&g_fds.p[fd], (struct iovec[]){{buf, size}}, 1, -1);
+ }
+ } else {
+ rc = enotsock();
+ }
+ } else {
+ rc = ebadf();
+ }
+ STRACE("recv(%d, [%#.*hhs%s], %'zu, %#x) → %'ld% lm", fd, MAX(0, MIN(40, rc)),
+ buf, rc > 40 ? "..." : "", size, flags);
+ return rc;
}
diff --git a/libc/sock/recvfrom-nt.c b/libc/sock/recvfrom-nt.c
index ac1f8a732..5edc144a5 100644
--- a/libc/sock/recvfrom-nt.c
+++ b/libc/sock/recvfrom-nt.c
@@ -1,5 +1,5 @@
-/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
-│vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│
+/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8
+-*-│ │vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│
╞══════════════════════════════════════════════════════════════════════════════╡
│ Copyright 2020 Justine Alexandra Roberts Tunney │
│ │
@@ -16,13 +16,17 @@
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
│ PERFORMANCE OF THIS SOFTWARE. │
╚─────────────────────────────────────────────────────────────────────────────*/
-#include "libc/assert.h"
-#include "libc/calls/internal.h"
+#include "libc/calls/sig.internal.h"
+#include "libc/calls/strace.internal.h"
+#include "libc/nt/enum/wait.h"
+#include "libc/nt/errors.h"
+#include "libc/nt/struct/overlapped.h"
+#include "libc/nt/winsock.h"
#include "libc/sock/internal.h"
-#include "libc/sock/yoink.inc"
+#include "libc/sysv/errfuns.h"
/**
- * Performs recv(), recvfrom(), or readv() on Windows NT.
+ * Performs datagram receive on New Technology.
*
* @param fd must be a socket
* @return number of bytes received, or -1 w/ errno
@@ -31,14 +35,53 @@ textwindows ssize_t sys_recvfrom_nt(struct Fd *fd, const struct iovec *iov,
size_t iovlen, uint32_t flags,
void *opt_out_srcaddr,
uint32_t *opt_inout_srcaddrsize) {
- uint32_t got;
+ ssize_t rc;
+ uint32_t i, got = 0;
struct NtIovec iovnt[16];
- got = 0;
- if (WSARecvFrom(fd->handle, iovnt, __iovec2nt(iovnt, iov, iovlen), &got,
- &flags, opt_out_srcaddr, opt_inout_srcaddrsize, NULL,
- NULL) != -1) {
- return got;
- } else {
- return __winsockerr();
+ struct NtOverlapped overlapped = {.hEvent = WSACreateEvent()};
+
+ if (_check_interrupts(true, g_fds.p)) return eintr();
+
+ if (!WSARecvFrom(fd->handle, iovnt, __iovec2nt(iovnt, iov, iovlen), &got,
+ &flags, opt_out_srcaddr, opt_inout_srcaddrsize, &overlapped,
+ NULL)) {
+ rc = got;
+ goto Finished;
}
+
+ if (WSAGetLastError() != kNtErrorIoPending) {
+ STRACE("WSARecvFrom failed %lm");
+ rc = __winsockerr();
+ goto Finished;
+ }
+
+ for (;;) {
+ i = WSAWaitForMultipleEvents(1, &overlapped.hEvent, true,
+ __SIG_POLLING_INTERVAL_MS, true);
+ if (i == kNtWaitFailed) {
+ STRACE("WSAWaitForMultipleEvents failed %lm");
+ rc = __winsockerr();
+ goto Finished;
+ } else if (i == kNtWaitTimeout) {
+ if (_check_interrupts(true, g_fds.p)) {
+ rc = eintr();
+ goto Finished;
+ }
+ } else if (i == kNtWaitIoCompletion) {
+ STRACE("IOCP TRIGGERED EINTR");
+ } else {
+ break;
+ }
+ }
+
+ if (!WSAGetOverlappedResult(fd->handle, &overlapped, &got, false, &flags)) {
+ STRACE("WSAGetOverlappedResult failed %lm");
+ rc = __winsockerr();
+ goto Finished;
+ }
+
+ rc = got;
+Finished:
+ WSACloseEvent(overlapped.hEvent);
+ return rc;
}
diff --git a/libc/sock/recvfrom.c b/libc/sock/recvfrom.c
index bca04cb11..f93c649cf 100644
--- a/libc/sock/recvfrom.c
+++ b/libc/sock/recvfrom.c
@@ -51,8 +51,6 @@ ssize_t recvfrom(int fd, void *buf, size_t size, uint32_t flags,
(opt_out_srcaddr &&
!__asan_is_valid(opt_out_srcaddr, *opt_inout_srcaddrsize)))) {
rc = efault();
- } else if (IsWindows() && _check_interrupts(false, g_fds.p)) {
- rc = eintr();
} else if (!IsWindows()) {
got = sys_recvfrom(fd, buf, size, flags, opt_out_srcaddr,
opt_inout_srcaddrsize);
@@ -60,22 +58,23 @@ ssize_t recvfrom(int fd, void *buf, size_t size, uint32_t flags,
sockaddr2linux(opt_out_srcaddr);
}
rc = got;
- } else {
- if (__isfdopen(fd)) {
- if (__isfdkind(fd, kFdSocket)) {
- rc = sys_recvfrom_nt(&g_fds.p[fd], (struct iovec[]){{buf, size}}, 1,
- flags, opt_out_srcaddr, opt_inout_srcaddrsize);
- } else if (__isfdkind(fd, kFdFile) && !opt_out_srcaddr) { /* socketpair */
- if (flags) rc = einval();
- rc = sys_read_nt(&g_fds.p[fd], (struct iovec[]){{buf, size}}, 1, -1);
+ } else if (__isfdopen(fd)) {
+ if (__isfdkind(fd, kFdSocket)) {
+ rc = sys_recvfrom_nt(&g_fds.p[fd], (struct iovec[]){{buf, size}}, 1,
+ flags, opt_out_srcaddr, opt_inout_srcaddrsize);
+ } else if (__isfdkind(fd, kFdFile) && !opt_out_srcaddr) { /* socketpair */
+ if (flags) {
+ rc = einval();
} else {
- rc = enotsock();
+ rc = sys_read_nt(&g_fds.p[fd], (struct iovec[]){{buf, size}}, 1, -1);
}
} else {
- rc = ebadf();
+ rc = enotsock();
}
+ } else {
+ rc = ebadf();
}
- STRACE("recvfrom(%d, %#.*hhs, %'zu, %#x) → %'ld% m", fd, size, buf, size,
- flags, rc);
+ STRACE("recvfrom(%d, [%#.*hhs%s], %'zu, %#x) → %'ld% lm", fd,
+ MAX(0, MIN(40, rc)), buf, rc > 40 ? "..." : "", size, flags, rc);
return rc;
}
diff --git a/libc/sock/select-nt.c b/libc/sock/select-nt.c
index a1caf6a5c..c2080efad 100644
--- a/libc/sock/select-nt.c
+++ b/libc/sock/select-nt.c
@@ -16,117 +16,66 @@
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
│ PERFORMANCE OF THIS SOFTWARE. │
╚─────────────────────────────────────────────────────────────────────────────*/
-#include "libc/bits/popcnt.h"
-#include "libc/calls/calls.h"
-#include "libc/calls/internal.h"
+#include "libc/calls/struct/timeval.h"
#include "libc/macros.internal.h"
-#include "libc/mem/mem.h"
-#include "libc/nt/winsock.h"
#include "libc/sock/internal.h"
-#include "libc/sock/yoink.inc"
+#include "libc/sock/select.h"
+#include "libc/sock/sock.h"
+#include "libc/sysv/consts/poll.h"
#include "libc/sysv/errfuns.h"
-static int GetFdsPopcnt(int nfds, fd_set *fds) {
- int i, n = 0;
- if (fds) {
- for (i = 0; i < nfds; ++i) {
- n += popcnt(fds->fds_bits[i]);
- }
- }
- return n;
-}
-
-static int FindFdByHandle(int nfds, int64_t h) {
- int i, n;
- n = MIN(nfds << 3, g_fds.n);
- for (i = 0; i < n; ++i) {
- if (h == g_fds.p[i].handle && g_fds.p[i].kind != kFdEmpty) {
- return i;
- }
- }
- return -1;
-}
-
-static struct NtFdSet *FdSetToNtFdSet(int nfds, fd_set *fds) {
- int i, j, k, n, m, fd;
- struct NtFdSet *ntfds;
- if (fds && (n = GetFdsPopcnt(nfds, fds))) {
- m = MIN(n, ARRAYLEN(ntfds->fd_array));
- ntfds = malloc(sizeof(struct NtFdSet));
- for (k = i = 0; i < nfds; ++i) {
- if (fds->fds_bits[i]) {
- for (j = 0; j < 64 && k < m; ++j) {
- if ((fds->fds_bits[i] & (1ul << j)) && i * 8 + j < g_fds.n) {
- ntfds->fd_array[k++] = g_fds.p[i * 8 + j].handle;
- }
- }
- }
- }
- ntfds->fd_count = m;
- return ntfds;
- } else {
- return NULL;
- }
-}
-
-static void NtFdSetToFdSet(int nfds, fd_set *fds, struct NtFdSet *ntfds) {
- int i, fd;
- if (ntfds) {
- for (i = 0; i < nfds; ++i) {
- fds->fds_bits[i] = 0;
- }
- for (i = 0; i < ntfds->fd_count; ++i) {
- if ((fd = FindFdByHandle(nfds, ntfds->fd_array[i])) != -1) {
- fds->fds_bits[fd >> 3] |= 1ul << (fd & 7);
- }
- }
- }
-}
-
-static struct NtTimeval *TimevalToNtTimeval(struct timeval *tv,
- struct NtTimeval *nttv) {
- if (tv) {
- nttv->tv_sec = tv->tv_sec;
- nttv->tv_usec = tv->tv_usec;
- return nttv;
- } else {
- return NULL;
- }
-}
-
int sys_select_nt(int nfds, fd_set *readfds, fd_set *writefds,
fd_set *exceptfds, struct timeval *timeout) {
- int n, rc;
- struct timespec req, rem;
- struct NtTimeval nttimeout, *nttimeoutp;
- struct NtFdSet *ntreadfds, *ntwritefds, *ntexceptfds;
- if (readfds || writefds || exceptfds) {
- nfds = MIN(ARRAYLEN(readfds->fds_bits), ROUNDUP(nfds, 8)) >> 3;
- ntreadfds = FdSetToNtFdSet(nfds, readfds);
- ntwritefds = FdSetToNtFdSet(nfds, writefds);
- ntexceptfds = FdSetToNtFdSet(nfds, exceptfds);
- nttimeoutp = TimevalToNtTimeval(timeout, &nttimeout);
- if ((rc = __sys_select_nt(0, ntreadfds, ntwritefds, ntexceptfds,
- nttimeoutp)) != -1) {
- NtFdSetToFdSet(nfds, readfds, ntreadfds);
- NtFdSetToFdSet(nfds, writefds, ntwritefds);
- NtFdSetToFdSet(nfds, exceptfds, ntexceptfds);
- } else {
- __winsockerr();
+ uint64_t millis;
+ int i, pfds, events, fdcount;
+ struct pollfd fds[64];
+
+ // check for interrupts early before doing work
+ if (_check_interrupts(false, g_fds.p)) return eintr();
+
+ // convert bitsets to pollfd
+ for (pfds = i = 0; i < nfds; ++i) {
+ events = 0;
+ if (readfds && FD_ISSET(i, readfds)) events |= POLLIN;
+ if (writefds && FD_ISSET(i, writefds)) events |= POLLOUT;
+ if (exceptfds && FD_ISSET(i, exceptfds)) events |= POLLERR;
+ if (events) {
+ if (pfds < ARRAYLEN(fds)) {
+ fds[pfds].fd = i;
+ fds[pfds].events = events;
+ fds[pfds].revents = 0;
+ pfds += 1;
+ } else {
+ return enomem();
+ }
}
- free(ntreadfds);
- free(ntwritefds);
- free(ntexceptfds);
- } else if (timeout) {
- req.tv_sec = timeout->tv_sec;
- req.tv_nsec = timeout->tv_usec * 1000;
- rem.tv_sec = 0;
- rem.tv_nsec = 0;
- rc = sys_nanosleep_nt(&req, &rem);
- timeout->tv_sec = rem.tv_sec;
- timeout->tv_usec = rem.tv_nsec / 1000;
- } else {
- rc = pause();
}
- return rc;
+
+ // convert the wait time to a word
+ if (!timeout || __builtin_add_overflow(timeout->tv_sec,
+ timeout->tv_usec / 1000, &millis)) {
+ millis = -1;
+ }
+
+ // call our nt poll implementation
+ fdcount = sys_poll_nt(fds, pfds, &millis);
+ if (fdcount == -1) return -1;
+
+ // convert pollfd back to bitsets
+ if (readfds) FD_ZERO(readfds);
+ if (writefds) FD_ZERO(writefds);
+ if (exceptfds) FD_ZERO(exceptfds);
+ for (i = 0; i < fdcount; ++i) {
+ if (fds[i].revents & POLLIN) FD_SET(fds[i].fd, readfds);
+ if (fds[i].revents & POLLOUT) FD_SET(fds[i].fd, writefds);
+ if (fds[i].revents & (POLLERR | POLLNVAL)) FD_SET(fds[i].fd, exceptfds);
+ }
+
+ // store remaining time back in caller's timeval
+ if (timeout) {
+ timeout->tv_sec = millis / 1000;
+ timeout->tv_usec = millis % 1000 * 1000;
+ }
+
+ return fdcount;
}
diff --git a/libc/sock/select.c b/libc/sock/select.c
index 887872fcd..9e4f147d0 100644
--- a/libc/sock/select.c
+++ b/libc/sock/select.c
@@ -19,7 +19,7 @@
#include "libc/calls/struct/timeval.h"
#include "libc/dce.h"
#include "libc/sock/internal.h"
-#include "libc/sock/sock.h"
+#include "libc/sock/select.h"
/**
* Does what poll() does except with a complicated bitset API.
diff --git a/libc/sock/send-nt.c b/libc/sock/send-nt.c
new file mode 100644
index 000000000..52b7e81d2
--- /dev/null
+++ b/libc/sock/send-nt.c
@@ -0,0 +1,89 @@
+/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
+│vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│
+╞══════════════════════════════════════════════════════════════════════════════╡
+│ Copyright 2020 Justine Alexandra Roberts Tunney │
+│ │
+│ Permission to use, copy, modify, and/or distribute this software for │
+│ any purpose with or without fee is hereby granted, provided that the │
+│ above copyright notice and this permission notice appear in all copies. │
+│ │
+│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │
+│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │
+│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │
+│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │
+│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │
+│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │
+│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
+│ PERFORMANCE OF THIS SOFTWARE. │
+╚─────────────────────────────────────────────────────────────────────────────*/
+#include "libc/assert.h"
+#include "libc/calls/calls.h"
+#include "libc/calls/internal.h"
+#include "libc/calls/sig.internal.h"
+#include "libc/calls/strace.internal.h"
+#include "libc/nt/enum/wait.h"
+#include "libc/nt/errors.h"
+#include "libc/nt/winsock.h"
+#include "libc/sock/internal.h"
+#include "libc/sock/yoink.inc"
+#include "libc/sysv/consts/fileno.h"
+#include "libc/sysv/errfuns.h"
+
+/**
+ * Performs stream socket send on the New Technology.
+ *
+ * @param fd must be a socket
+ * @return number of bytes handed off, or -1 w/ errno
+ */
+textwindows ssize_t sys_send_nt(int fd, const struct iovec *iov, size_t iovlen,
+ uint32_t flags) {
+ ssize_t rc;
+ uint32_t i, sent = 0;
+ struct NtIovec iovnt[16];
+ struct NtOverlapped overlapped = {.hEvent = WSACreateEvent()};
+
+ if (_check_interrupts(true, g_fds.p)) return eintr();
+
+ if (!WSASend(g_fds.p[fd].handle, iovnt, __iovec2nt(iovnt, iov, iovlen), &sent,
+ flags, &overlapped, NULL)) {
+ rc = sent;
+ goto Finished;
+ }
+
+ if (WSAGetLastError() != kNtErrorIoPending) {
+ STRACE("WSASend failed %lm");
+ rc = __winsockerr();
+ goto Finished;
+ }
+
+ for (;;) {
+ i = WSAWaitForMultipleEvents(1, &overlapped.hEvent, true,
+ __SIG_POLLING_INTERVAL_MS, true);
+ if (i == kNtWaitFailed) {
+ STRACE("WSAWaitForMultipleEvents failed %lm");
+ rc = __winsockerr();
+ goto Finished;
+ } else if (i == kNtWaitTimeout) {
+ if (_check_interrupts(true, g_fds.p)) {
+ rc = eintr();
+ goto Finished;
+ }
+ } else if (i == kNtWaitIoCompletion) {
+ STRACE("IOCP TRIGGERED EINTR");
+ } else {
+ break;
+ }
+ }
+
+ if (!WSAGetOverlappedResult(g_fds.p[fd].handle, &overlapped, &sent, false,
+ &flags)) {
+ STRACE("WSAGetOverlappedResult failed %lm");
+ rc = __winsockerr();
+ goto Finished;
+ }
+
+ rc = sent;
+Finished:
+ WSACloseEvent(overlapped.hEvent);
+ return rc;
+}
diff --git a/libc/sock/send.c b/libc/sock/send.c
index 9f71edbef..0fbb19cad 100644
--- a/libc/sock/send.c
+++ b/libc/sock/send.c
@@ -16,7 +16,15 @@
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
│ PERFORMANCE OF THIS SOFTWARE. │
╚─────────────────────────────────────────────────────────────────────────────*/
+#include "libc/calls/internal.h"
+#include "libc/calls/strace.internal.h"
+#include "libc/calls/struct/iovec.h"
+#include "libc/dce.h"
+#include "libc/intrin/asan.internal.h"
+#include "libc/macros.internal.h"
+#include "libc/sock/internal.h"
#include "libc/sock/sock.h"
+#include "libc/sysv/errfuns.h"
/**
* Sends data to network socket.
@@ -32,5 +40,27 @@
* @restartable (unless SO_RCVTIMEO)
*/
ssize_t send(int fd, const void *buf, size_t size, int flags) {
- return sendto(fd, buf, size, flags, NULL, 0);
+ ssize_t rc;
+ if (IsAsan() && !__asan_is_valid(buf, size)) {
+ rc = efault();
+ } else if (!IsWindows()) {
+ rc = sys_sendto(fd, buf, size, flags, 0, 0);
+ } else if (__isfdopen(fd)) {
+ if (__isfdkind(fd, kFdSocket)) {
+ rc = sys_send_nt(fd, (struct iovec[]){{buf, size}}, 1, flags);
+ } else if (__isfdkind(fd, kFdFile)) {
+ if (flags) {
+ rc = einval();
+ } else {
+ rc = sys_write_nt(fd, (struct iovec[]){{buf, size}}, 1, -1);
+ }
+ } else {
+ rc = enotsock();
+ }
+ } else {
+ rc = ebadf();
+ }
+ STRACE("send(%d, %#.*hhs%s, %'zu, %#x) → %'ld% lm", fd, MAX(0, MIN(40, rc)),
+ buf, rc > 40 ? "..." : "", size, flags, rc);
+ return rc;
}
diff --git a/libc/sock/sendto-nt.c b/libc/sock/sendto-nt.c
index b9dc47316..5bdd3b19d 100644
--- a/libc/sock/sendto-nt.c
+++ b/libc/sock/sendto-nt.c
@@ -16,16 +16,17 @@
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
│ PERFORMANCE OF THIS SOFTWARE. │
╚─────────────────────────────────────────────────────────────────────────────*/
-#include "libc/assert.h"
-#include "libc/calls/calls.h"
-#include "libc/calls/internal.h"
+#include "libc/calls/sig.internal.h"
+#include "libc/calls/strace.internal.h"
+#include "libc/nt/enum/wait.h"
+#include "libc/nt/errors.h"
+#include "libc/nt/struct/overlapped.h"
#include "libc/nt/winsock.h"
#include "libc/sock/internal.h"
-#include "libc/sock/yoink.inc"
-#include "libc/sysv/consts/fileno.h"
+#include "libc/sysv/errfuns.h"
/**
- * Performs send(), sendto(), or writev() on Windows NT.
+ * Performs datagram socket send on the New Technology.
*
* @param fd must be a socket
* @return number of bytes handed off, or -1 w/ errno
@@ -33,12 +34,53 @@
textwindows ssize_t sys_sendto_nt(int fd, const struct iovec *iov,
size_t iovlen, uint32_t flags,
void *opt_in_addr, uint32_t in_addrsize) {
- uint32_t sent;
+ ssize_t rc;
+ uint32_t i, sent = 0;
struct NtIovec iovnt[16];
- if (WSASendTo(g_fds.p[fd].handle, iovnt, __iovec2nt(iovnt, iov, iovlen),
- &sent, flags, opt_in_addr, in_addrsize, NULL, NULL) != -1) {
- return sent;
- } else {
- return __winsockerr();
+ struct NtOverlapped overlapped = {.hEvent = WSACreateEvent()};
+
+ if (_check_interrupts(true, g_fds.p)) return eintr();
+
+ if (!WSASendTo(g_fds.p[fd].handle, iovnt, __iovec2nt(iovnt, iov, iovlen),
+ &sent, flags, opt_in_addr, in_addrsize, &overlapped, NULL)) {
+ rc = sent;
+ goto Finished;
}
+
+ if (WSAGetLastError() != kNtErrorIoPending) {
+ STRACE("WSASendTo failed %lm");
+ rc = __winsockerr();
+ goto Finished;
+ }
+
+ for (;;) {
+ i = WSAWaitForMultipleEvents(1, &overlapped.hEvent, true,
+ __SIG_POLLING_INTERVAL_MS, true);
+ if (i == kNtWaitFailed) {
+ STRACE("WSAWaitForMultipleEvents failed %lm");
+ rc = __winsockerr();
+ goto Finished;
+ } else if (i == kNtWaitTimeout) {
+ if (_check_interrupts(true, g_fds.p)) {
+ rc = eintr();
+ goto Finished;
+ }
+ } else if (i == kNtWaitIoCompletion) {
+ STRACE("IOCP TRIGGERED EINTR");
+ } else {
+ break;
+ }
+ }
+
+ if (!WSAGetOverlappedResult(g_fds.p[fd].handle, &overlapped, &sent, false,
+ &flags)) {
+ STRACE("WSAGetOverlappedResult failed %lm");
+ rc = __winsockerr();
+ goto Finished;
+ }
+
+ rc = sent;
+Finished:
+ WSACloseEvent(overlapped.hEvent);
+ return rc;
}
diff --git a/libc/sock/sendto.c b/libc/sock/sendto.c
index 1ef1c9cbd..2bb77564f 100644
--- a/libc/sock/sendto.c
+++ b/libc/sock/sendto.c
@@ -18,9 +18,11 @@
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/assert.h"
#include "libc/calls/internal.h"
+#include "libc/calls/strace.internal.h"
#include "libc/calls/struct/iovec.h"
#include "libc/dce.h"
#include "libc/intrin/asan.internal.h"
+#include "libc/macros.internal.h"
#include "libc/sock/internal.h"
#include "libc/sock/sock.h"
#include "libc/str/str.h"
@@ -49,36 +51,44 @@
*/
ssize_t sendto(int fd, const void *buf, size_t size, uint32_t flags,
const void *opt_addr, uint32_t addrsize) {
+ ssize_t rc;
+ char addr2[sizeof(struct sockaddr_un_bsd)];
if (IsAsan() && (!__asan_is_valid(buf, size) ||
(opt_addr && !__asan_is_valid(opt_addr, addrsize)))) {
- return efault();
- }
- _firewall(opt_addr, addrsize);
- if (!IsWindows()) {
- if (!IsBsd() || !opt_addr) {
- return sys_sendto(fd, buf, size, flags, opt_addr, addrsize);
- } else {
- char addr2[sizeof(
- struct sockaddr_un_bsd)]; /* sockaddr_un_bsd is the largest */
- if (addrsize > sizeof(addr2)) return einval();
- memcpy(&addr2, opt_addr, addrsize);
- sockaddr2bsd(&addr2[0]);
- return sys_sendto(fd, buf, size, flags, &addr2[0], addrsize);
- }
+ rc = efault();
} else {
- if (__isfdopen(fd)) {
- if (__isfdkind(fd, kFdSocket)) {
- return sys_sendto_nt(fd, (struct iovec[]){{buf, size}}, 1, flags,
- opt_addr, addrsize);
- } else if (__isfdkind(fd, kFdFile)) { /* e.g. socketpair() */
- if (flags) return einval();
- if (opt_addr) return eisconn();
- return sys_write_nt(fd, (struct iovec[]){{buf, size}}, 1, -1);
+ _firewall(opt_addr, addrsize);
+ if (!IsWindows()) {
+ if (!IsBsd() || !opt_addr) {
+ rc = sys_sendto(fd, buf, size, flags, opt_addr, addrsize);
+ } else if (addrsize > sizeof(addr2)) {
+ rc = einval();
} else {
- return enotsock();
+ memcpy(&addr2, opt_addr, addrsize);
+ sockaddr2bsd(&addr2[0]);
+ rc = sys_sendto(fd, buf, size, flags, &addr2[0], addrsize);
+ }
+ } else if (__isfdopen(fd)) {
+ if (__isfdkind(fd, kFdSocket)) {
+ rc = sys_sendto_nt(fd, (struct iovec[]){{buf, size}}, 1, flags,
+ opt_addr, addrsize);
+ } else if (__isfdkind(fd, kFdFile)) {
+ if (flags) {
+ rc = einval();
+ } else if (opt_addr) {
+ rc = eisconn();
+ } else {
+ rc = sys_write_nt(fd, (struct iovec[]){{buf, size}}, 1, -1);
+ }
+ } else {
+ rc = enotsock();
}
} else {
- return ebadf();
+ rc = ebadf();
}
}
+ STRACE("sendto(%d, %#.*hhs%s, %'zu, %#x, %p, %u) → %'ld% lm", fd,
+ MAX(0, MIN(40, rc)), buf, rc > 40 ? "..." : "", size, flags, opt_addr,
+ addrsize, rc);
+ return rc;
}
diff --git a/libc/sock/setsockopt.c b/libc/sock/setsockopt.c
index 7c56a1b9c..a9e831b48 100644
--- a/libc/sock/setsockopt.c
+++ b/libc/sock/setsockopt.c
@@ -73,7 +73,7 @@ int setsockopt(int fd, int level, int optname, const void *optval,
} else {
rc = ebadf();
}
- STRACE("setsockopt(%d, %#x, %#x, %p, %'u) → %d% m", fd, level, optname,
+ STRACE("setsockopt(%d, %#x, %#x, %p, %'u) → %d% lm", fd, level, optname,
optval, optlen, rc);
return rc;
}
diff --git a/libc/sock/shutdown.c b/libc/sock/shutdown.c
index 7c9f3df8c..47b0ade0f 100644
--- a/libc/sock/shutdown.c
+++ b/libc/sock/shutdown.c
@@ -40,6 +40,6 @@ int shutdown(int fd, int how) {
} else {
rc = ebadf();
}
- STRACE("shutdown(%d, %d) -> %d% m", fd, how, rc);
+ STRACE("shutdown(%d, %d) -> %d% lm", fd, how, rc);
return rc;
}
diff --git a/libc/sock/sock.mk b/libc/sock/sock.mk
index ec58139a3..e8d40fa58 100644
--- a/libc/sock/sock.mk
+++ b/libc/sock/sock.mk
@@ -53,6 +53,11 @@ $(LIBC_SOCK_A).pkg: \
$(LIBC_SOCK_A_OBJS) \
$(foreach x,$(LIBC_SOCK_A_DIRECTDEPS),$($(x)_A).pkg)
+o/$(MODE)/libc/sock/ntstdin.greg.o: \
+ OVERRIDE_COPTS += \
+ -ffreestanding \
+ $(NO_MAGIC)
+
LIBC_SOCK_LIBS = $(foreach x,$(LIBC_SOCK_ARTIFACTS),$($(x)))
LIBC_SOCK_SRCS = $(foreach x,$(LIBC_SOCK_ARTIFACTS),$($(x)_SRCS))
LIBC_SOCK_HDRS = $(foreach x,$(LIBC_SOCK_ARTIFACTS),$($(x)_HDRS))
diff --git a/libc/sock/socket-nt.c b/libc/sock/socket-nt.c
index 4a7497c36..e919f017a 100644
--- a/libc/sock/socket-nt.c
+++ b/libc/sock/socket-nt.c
@@ -18,6 +18,7 @@
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/calls/internal.h"
#include "libc/mem/mem.h"
+#include "libc/nt/enum/fileflagandattributes.h"
#include "libc/nt/iphlpapi.h"
#include "libc/nt/winsock.h"
#include "libc/sock/internal.h"
@@ -42,9 +43,10 @@ textwindows int sys_socket_nt(int family, int type, int protocol) {
int64_t h;
struct SockFd *sockfd;
int fd, oflags, truetype;
- if ((fd = __reservefd()) == -1) return -1;
+ if ((fd = __reservefd(-1)) == -1) return -1;
truetype = type & ~(SOCK_CLOEXEC | SOCK_NONBLOCK);
- if ((h = WSASocket(family, truetype, protocol, NULL, 0, 0)) != -1) {
+ if ((h = WSASocket(family, truetype, protocol, NULL, 0,
+ kNtWsaFlagOverlapped)) != -1) {
oflags = 0;
if (type & SOCK_CLOEXEC) oflags |= O_CLOEXEC;
if (type & SOCK_NONBLOCK) oflags |= O_NONBLOCK;
@@ -59,9 +61,9 @@ textwindows int sys_socket_nt(int family, int type, int protocol) {
sockfd->family = family;
sockfd->type = truetype;
sockfd->protocol = protocol;
- sockfd->event = WSACreateEvent();
g_fds.p[fd].kind = kFdSocket;
g_fds.p[fd].flags = oflags;
+ g_fds.p[fd].mode = 0140666;
g_fds.p[fd].handle = h;
g_fds.p[fd].extra = (uintptr_t)sockfd;
return fd;
diff --git a/libc/sock/socket.c b/libc/sock/socket.c
index b90436571..1e6952f45 100644
--- a/libc/sock/socket.c
+++ b/libc/sock/socket.c
@@ -46,7 +46,7 @@ int socket(int family, int type, int protocol) {
} else {
rc = sys_socket_nt(family, type, protocol);
}
- STRACE("socket(%s, %s, %s) → %d% m", __describe_socket_family(family),
+ STRACE("socket(%s, %s, %s) → %d% lm", __describe_socket_family(family),
__describe_socket_type(type), __describe_socket_protocol(protocol),
rc);
return rc;
diff --git a/libc/sock/socketpair-nt.c b/libc/sock/socketpair-nt.c
index a4b7ac1f0..fb71e728f 100644
--- a/libc/sock/socketpair-nt.c
+++ b/libc/sock/socketpair-nt.c
@@ -19,6 +19,7 @@
#include "libc/nt/createfile.h"
#include "libc/nt/enum/accessmask.h"
#include "libc/nt/enum/creationdisposition.h"
+#include "libc/nt/enum/fileflagandattributes.h"
#include "libc/nt/ipc.h"
#include "libc/nt/runtime.h"
#include "libc/sock/internal.h"
@@ -42,34 +43,32 @@ textwindows int sys_socketpair_nt(int family, int type, int proto, int sv[2]) {
if (type & SOCK_CLOEXEC) oflags |= O_CLOEXEC;
type &= ~SOCK_CLOEXEC;
- mode = kNtPipeWait;
if (type == SOCK_STREAM) {
- mode |= kNtPipeReadmodeByte | kNtPipeTypeByte;
+ mode = kNtPipeTypeByte | kNtPipeReadmodeByte;
} else if ((type == SOCK_DGRAM) || (type == SOCK_SEQPACKET)) {
- mode |= kNtPipeReadmodeMessage | kNtPipeTypeMessage;
+ mode = kNtPipeTypeMessage | kNtPipeReadmodeMessage;
} else {
return eopnotsupp();
}
CreatePipeName(pipename);
- if ((reader = __reservefd()) == -1) return -1;
- if ((writer = __reservefd()) == -1) {
+ if ((reader = __reservefd(-1)) == -1) return -1;
+ if ((writer = __reservefd(-1)) == -1) {
__releasefd(reader);
return -1;
}
- if ((hpipe = CreateNamedPipe(pipename, kNtPipeAccessDuplex, mode, 1, 65536,
- 65536, 0, &kNtIsInheritable)) == -1) {
+ if ((hpipe = CreateNamedPipe(
+ pipename, kNtPipeAccessDuplex | kNtFileFlagOverlapped, mode, 1,
+ 65536, 65536, 0, &kNtIsInheritable)) == -1) {
__releasefd(writer);
__releasefd(reader);
return -1;
}
- h1 = CreateFile(pipename, kNtGenericWrite | kNtGenericRead,
- 0, // Not shared
- &kNtIsInheritable, kNtOpenExisting, 0, 0);
+ h1 = CreateFile(pipename, kNtGenericWrite | kNtGenericRead, 0,
+ &kNtIsInheritable, kNtOpenExisting, kNtFileFlagOverlapped, 0);
if (h1 == -1) {
CloseHandle(hpipe);
- __winerr();
__releasefd(writer);
__releasefd(reader);
return -1;
@@ -77,10 +76,12 @@ textwindows int sys_socketpair_nt(int family, int type, int proto, int sv[2]) {
g_fds.p[reader].kind = kFdFile;
g_fds.p[reader].flags = oflags;
+ g_fds.p[reader].mode = 0140444;
g_fds.p[reader].handle = hpipe;
g_fds.p[writer].kind = kFdFile;
g_fds.p[writer].flags = oflags;
+ g_fds.p[writer].mode = 0140222;
g_fds.p[writer].handle = h1;
sv[0] = reader;
diff --git a/libc/sock/winsockblock.c b/libc/sock/winsockblock.c
index 5bdb44613..9ccbe4b3e 100644
--- a/libc/sock/winsockblock.c
+++ b/libc/sock/winsockblock.c
@@ -30,6 +30,11 @@ textwindows int64_t __winsockblock(int64_t fh, unsigned eventbit, int64_t rc) {
if (WSAGetLastError() != EWOULDBLOCK) return __winsockerr();
eh = WSACreateEvent();
bzero(&ev, sizeof(ev));
+ /* The proper way to reset the state of an event object used with the
+ WSAEventSelect function is to pass the handle of the event object
+ to the WSAEnumNetworkEvents function in the hEventObject parameter.
+ This will reset the event object and adjust the status of active FD
+ events on the socket in an atomic fashion. -- MSDN */
if (WSAEventSelect(fh, eh, 1u << eventbit) != -1 &&
WSAEnumNetworkEvents(fh, eh, &ev) != -1) {
if (!ev.iErrorCode[eventbit]) {
diff --git a/libc/sock/yoink.inc b/libc/sock/yoink.inc
index f17e7eb2f..4a210880d 100644
--- a/libc/sock/yoink.inc
+++ b/libc/sock/yoink.inc
@@ -1,4 +1,5 @@
STATIC_YOINK("kNtWsaData"); // for winmain
+STATIC_YOINK("WSAGetLastError"); // for kprintf
STATIC_YOINK("sys_closesocket_nt"); // for close
-STATIC_YOINK("sys_recvfrom_nt"); // for readv
-STATIC_YOINK("sys_sendto_nt"); // for writev
+STATIC_YOINK("sys_recv_nt"); // for readv
+STATIC_YOINK("sys_send_nt"); // for writev
diff --git a/libc/stdio/appendr.c b/libc/stdio/appendr.c
index 7fb25e265..b29f0e067 100644
--- a/libc/stdio/appendr.c
+++ b/libc/stdio/appendr.c
@@ -57,8 +57,8 @@ ssize_t appendr(char **b, size_t i) {
n = ROUNDUP(i + 1, 8) + W;
if (n > z.n || bsrl(n) < bsrl(z.n)) {
if ((p = realloc(p, n))) {
- n = malloc_usable_size(p);
- assert(!(n & (W - 1)));
+ z.n = malloc_usable_size(p);
+ assert(!(z.n & (W - 1)));
*b = p;
} else {
return -1;
@@ -69,7 +69,7 @@ ssize_t appendr(char **b, size_t i) {
} else {
p[i] = 0;
}
- *(size_t *)(p + n - W) =
+ *(size_t *)(p + z.n - W) =
i | (!IsTiny() && W == 8 ? (size_t)APPEND_COOKIE << 48 : 0);
}
return i;
diff --git a/libc/stdio/tmpfile.c b/libc/stdio/tmpfile.c
index ca3184590..67e54721a 100644
--- a/libc/stdio/tmpfile.c
+++ b/libc/stdio/tmpfile.c
@@ -19,7 +19,6 @@
#include "libc/bits/safemacros.internal.h"
#include "libc/calls/calls.h"
#include "libc/fmt/fmt.h"
-#include "libc/intrin/kprintf.h"
#include "libc/macros.internal.h"
#include "libc/runtime/runtime.h"
#include "libc/stdio/stdio.h"
diff --git a/libc/sysv/calls/geteuid.s b/libc/sysv/calls/geteuid.s
deleted file mode 100644
index 070feb0b9..000000000
--- a/libc/sysv/calls/geteuid.s
+++ /dev/null
@@ -1,2 +0,0 @@
-.include "o/libc/sysv/macros.internal.inc"
-.scall geteuid,0xfff019019201906b,globl
diff --git a/libc/sysv/calls/recv.s b/libc/sysv/calls/recv.s
deleted file mode 100644
index 5d163d2bd..000000000
--- a/libc/sysv/calls/recv.s
+++ /dev/null
@@ -1,2 +0,0 @@
-.include "o/libc/sysv/macros.internal.inc"
-.scall recv,0xffffff066fffffff,globl
diff --git a/libc/sysv/calls/sendmmsg.s b/libc/sysv/calls/sendmmsg.s
deleted file mode 100644
index 8996506aa..000000000
--- a/libc/sysv/calls/sendmmsg.s
+++ /dev/null
@@ -1,2 +0,0 @@
-.include "o/libc/sysv/macros.internal.inc"
-.scall sendmmsg,0x1dcffffffffff133,globl
diff --git a/libc/sysv/calls/setgid.s b/libc/sysv/calls/setgid.s
deleted file mode 100644
index df63895fe..000000000
--- a/libc/sysv/calls/setgid.s
+++ /dev/null
@@ -1,2 +0,0 @@
-.include "o/libc/sysv/macros.internal.inc"
-.scall setgid,0x0b50b50b520b506a,globl
diff --git a/libc/sysv/calls/setuid.s b/libc/sysv/calls/setuid.s
deleted file mode 100644
index 7e25e3fb5..000000000
--- a/libc/sysv/calls/setuid.s
+++ /dev/null
@@ -1,2 +0,0 @@
-.include "o/libc/sysv/macros.internal.inc"
-.scall setuid,0x0170170172017069,globl
diff --git a/libc/sysv/calls/sys_epoll_create.s b/libc/sysv/calls/sys_epoll_create.s
index 1c2455e18..865111423 100644
--- a/libc/sysv/calls/sys_epoll_create.s
+++ b/libc/sysv/calls/sys_epoll_create.s
@@ -1,2 +1,2 @@
.include "o/libc/sysv/macros.internal.inc"
-.scall sys_epoll_create,0xfffffffffffff0d5,globl
+.scall sys_epoll_create,0xfffffffffffff0d5,globl,hidden
diff --git a/libc/sysv/calls/sys_epoll_create1.s b/libc/sysv/calls/sys_epoll_create1.s
index ff749f988..f6fdbbb9f 100644
--- a/libc/sysv/calls/sys_epoll_create1.s
+++ b/libc/sysv/calls/sys_epoll_create1.s
@@ -1,2 +1,2 @@
.include "o/libc/sysv/macros.internal.inc"
-.scall sys_epoll_create1,0xfffffffffffff123,globl
+.scall sys_epoll_create1,0xfffffffffffff123,globl,hidden
diff --git a/libc/sysv/calls/sys_epoll_ctl.s b/libc/sysv/calls/sys_epoll_ctl.s
index f6de57e4a..76c23cd1d 100644
--- a/libc/sysv/calls/sys_epoll_ctl.s
+++ b/libc/sysv/calls/sys_epoll_ctl.s
@@ -1,2 +1,2 @@
.include "o/libc/sysv/macros.internal.inc"
-.scall sys_epoll_ctl,0xfffffffffffff0e9,globl
+.scall sys_epoll_ctl,0xfffffffffffff0e9,globl,hidden
diff --git a/libc/sysv/calls/sys_epoll_wait.s b/libc/sysv/calls/sys_epoll_wait.s
index 2c97356a3..f38c97423 100644
--- a/libc/sysv/calls/sys_epoll_wait.s
+++ b/libc/sysv/calls/sys_epoll_wait.s
@@ -1,2 +1,2 @@
.include "o/libc/sysv/macros.internal.inc"
-.scall sys_epoll_wait,0xfffffffffffff0e8,globl
+.scall sys_epoll_wait,0xfffffffffffff0e8,globl,hidden
diff --git a/libc/sysv/calls/sys_geteuid.s b/libc/sysv/calls/sys_geteuid.s
new file mode 100644
index 000000000..ed2898bac
--- /dev/null
+++ b/libc/sysv/calls/sys_geteuid.s
@@ -0,0 +1,2 @@
+.include "o/libc/sysv/macros.internal.inc"
+.scall sys_geteuid,0xfff019019201906b,globl,hidden
diff --git a/libc/sysv/calls/sys_mount.s b/libc/sysv/calls/sys_mount.s
index ef58d044d..27393826d 100644
--- a/libc/sysv/calls/sys_mount.s
+++ b/libc/sysv/calls/sys_mount.s
@@ -1,2 +1,2 @@
.include "o/libc/sysv/macros.internal.inc"
-.scall sys_mount,0x19a01501520a70a5,globl
+.scall sys_mount,0x19a01501520a70a5,globl,hidden
diff --git a/libc/sysv/calls/sys_setgid.s b/libc/sysv/calls/sys_setgid.s
new file mode 100644
index 000000000..71c2a9d6d
--- /dev/null
+++ b/libc/sysv/calls/sys_setgid.s
@@ -0,0 +1,2 @@
+.include "o/libc/sysv/macros.internal.inc"
+.scall sys_setgid,0x0b50b50b520b506a,globl,hidden
diff --git a/libc/sysv/calls/sys_setuid.s b/libc/sysv/calls/sys_setuid.s
new file mode 100644
index 000000000..6b4afc2c2
--- /dev/null
+++ b/libc/sysv/calls/sys_setuid.s
@@ -0,0 +1,2 @@
+.include "o/libc/sysv/macros.internal.inc"
+.scall sys_setuid,0x0170170172017069,globl,hidden
diff --git a/libc/sysv/calls/sys_sigqueue.s b/libc/sysv/calls/sys_sigqueue.s
index 5faa160bf..31271d5ae 100644
--- a/libc/sysv/calls/sys_sigqueue.s
+++ b/libc/sysv/calls/sys_sigqueue.s
@@ -1,2 +1,2 @@
.include "o/libc/sysv/macros.internal.inc"
-.scall sys_sigqueue,0xffffff1c8fffffff,globl
+.scall sys_sigqueue,0xffffff1c8fffffff,globl,hidden
diff --git a/libc/sysv/calls/sys_sigqueueinfo.s b/libc/sysv/calls/sys_sigqueueinfo.s
index 4e521ca9f..ce3920e10 100644
--- a/libc/sysv/calls/sys_sigqueueinfo.s
+++ b/libc/sysv/calls/sys_sigqueueinfo.s
@@ -1,2 +1,2 @@
.include "o/libc/sysv/macros.internal.inc"
-.scall sys_sigqueueinfo,0x0f5ffffffffff081,globl
+.scall sys_sigqueueinfo,0x0f5ffffffffff081,globl,hidden
diff --git a/libc/sysv/calls/sys_umask.s b/libc/sysv/calls/sys_umask.s
index 1ccadeb66..17a2e9774 100644
--- a/libc/sysv/calls/sys_umask.s
+++ b/libc/sysv/calls/sys_umask.s
@@ -1,2 +1,2 @@
.include "o/libc/sysv/macros.internal.inc"
-.scall sys_umask,0x03c03c03c203c05f,globl
+.scall sys_umask,0x03c03c03c203c05f,globl,hidden
diff --git a/libc/sysv/consts.sh b/libc/sysv/consts.sh
index d75678793..dfe9822fd 100755
--- a/libc/sysv/consts.sh
+++ b/libc/sysv/consts.sh
@@ -127,7 +127,7 @@ syscon errno ENONET 64 0 0 0 0 0 # unilateral; raised by accept(
syscon errno ERESTART 85 -1 -1 -1 -3 0 # should only be seen in ptrace()
syscon errno ENOSR 63 98 0 90 90 0 # out of streams resources; something like EAGAIN; it's in POSIX; maybe some commercial UNIX returns it with openat, putmsg, putpmsg, posix_openpt, ioctl, open
syscon errno ENOSTR 60 99 0 0 91 0 # not a stream; returned by getmsg, putmsg, putpmsg, getpmsg
-syscon errno ENODATA 61 96 0 0 89 0 # no data available; barely in posix; returned by ioctl
+syscon errno ENODATA 61 96 0 0 89 232 # no message is available in xsi stream or named pipe is being closed; no data available; barely in posix; returned by ioctl; very close in spirit to EPIPE?
syscon errno EMULTIHOP 72 95 90 0 94 0 # barely in posix
syscon errno ENOLINK 67 97 91 0 95 0 # barely in posix
syscon errno ENOMEDIUM 123 0 0 85 0 0 # not posix; not documented
@@ -244,18 +244,18 @@ syscon compat MAP_32BIT 0x40 0 0x080000 0 0 0 # iffy
# madvise() flags
#
# group name GNU/Systemd XNU's Not UNIX! FreeBSD OpenBSD NetBSD The New Technology Commentary
-syscon madv MADV_NORMAL 0 0 0 0 0 0x00000080 # consensus & kNtFileAttributeNormal
-syscon compat POSIX_FADV_NORMAL 0 0 0 0 0 0x00000080 # consensus & kNtFileAttributeNormal
-syscon compat POSIX_MADV_NORMAL 0 0 0 0 0 0x00000080 # consensus & kNtFileAttributeNormal
+syscon madv MADV_NORMAL 0 0 0 0 0 0 # consensus
+syscon compat POSIX_FADV_NORMAL 0 0 0 0 0 0 # consensus
+syscon compat POSIX_MADV_NORMAL 0 0 0 0 0 0 # consensus
syscon madv MADV_DONTNEED 4 4 4 4 4 127 # TODO(jart): weird nt decommit thing?
syscon compat POSIX_MADV_DONTNEED 4 4 4 4 4 127 # unix consensus
syscon compat POSIX_FADV_DONTNEED 4 127 4 4 4 127 # unix consensus
-syscon madv MADV_RANDOM 1 1 1 1 1 0x10000000 # unix consensus & kNtFileFlagRandomAccess
-syscon compat POSIX_MADV_RANDOM 1 1 1 1 1 0x10000000 # unix consensus & kNtFileFlagRandomAccess
-syscon compat POSIX_FADV_RANDOM 1 127 1 1 1 0x10000000 # unix consensus & kNtFileFlagRandomAccess
-syscon madv MADV_SEQUENTIAL 2 2 2 2 2 0x8000000 # unix consensus & kNtFileFlagSequentialScan
-syscon compat POSIX_MADV_SEQUENTIAL 2 2 2 2 2 0x8000000 # unix consensus
-syscon compat POSIX_FADV_SEQUENTIAL 2 127 2 2 2 0x8000000 # TODO(jart): double check xnu
+syscon madv MADV_RANDOM 1 1 1 1 1 1 # unix consensus
+syscon compat POSIX_MADV_RANDOM 1 1 1 1 1 1 # unix consensus
+syscon compat POSIX_FADV_RANDOM 1 127 1 1 1 1 # unix consensus
+syscon madv MADV_SEQUENTIAL 2 2 2 2 2 2 # unix consensus
+syscon compat POSIX_MADV_SEQUENTIAL 2 2 2 2 2 2 # unix consensus
+syscon compat POSIX_FADV_SEQUENTIAL 2 127 2 2 2 2 # TODO(jart): double check xnu
syscon madv MADV_WILLNEED 3 3 3 3 3 3 # unix consensus (faked on NT)
syscon compat POSIX_MADV_WILLNEED 3 3 3 3 3 3 # unix consensus
syscon compat POSIX_FADV_WILLNEED 3 127 3 3 3 3 # TODO(jart): double check xnu
diff --git a/libc/sysv/consts/ENODATA.S b/libc/sysv/consts/ENODATA.S
index e9b938bbb..0fd7a9733 100644
--- a/libc/sysv/consts/ENODATA.S
+++ b/libc/sysv/consts/ENODATA.S
@@ -1,2 +1,2 @@
#include "libc/sysv/consts/syscon.internal.h"
-.syscon errno,ENODATA,61,96,0,0,89,0
+.syscon errno,ENODATA,61,96,0,0,89,232
diff --git a/libc/sysv/consts/MADV_NORMAL.S b/libc/sysv/consts/MADV_NORMAL.S
index ec8f1ef00..c38bc7dae 100644
--- a/libc/sysv/consts/MADV_NORMAL.S
+++ b/libc/sysv/consts/MADV_NORMAL.S
@@ -1,2 +1,2 @@
#include "libc/sysv/consts/syscon.internal.h"
-.syscon madv,MADV_NORMAL,0,0,0,0,0,0x00000080
+.syscon madv,MADV_NORMAL,0,0,0,0,0,0
diff --git a/libc/sysv/consts/MADV_RANDOM.S b/libc/sysv/consts/MADV_RANDOM.S
index dfeafff6f..e7c7b43c6 100644
--- a/libc/sysv/consts/MADV_RANDOM.S
+++ b/libc/sysv/consts/MADV_RANDOM.S
@@ -1,2 +1,2 @@
#include "libc/sysv/consts/syscon.internal.h"
-.syscon madv,MADV_RANDOM,1,1,1,1,1,0x10000000
+.syscon madv,MADV_RANDOM,1,1,1,1,1,1
diff --git a/libc/sysv/consts/MADV_SEQUENTIAL.S b/libc/sysv/consts/MADV_SEQUENTIAL.S
index b40e06f26..8b54e4626 100644
--- a/libc/sysv/consts/MADV_SEQUENTIAL.S
+++ b/libc/sysv/consts/MADV_SEQUENTIAL.S
@@ -1,2 +1,2 @@
#include "libc/sysv/consts/syscon.internal.h"
-.syscon madv,MADV_SEQUENTIAL,2,2,2,2,2,0x8000000
+.syscon madv,MADV_SEQUENTIAL,2,2,2,2,2,2
diff --git a/libc/sysv/consts/POSIX_FADV_NORMAL.S b/libc/sysv/consts/POSIX_FADV_NORMAL.S
index 02d3dc2b5..99b3e2bda 100644
--- a/libc/sysv/consts/POSIX_FADV_NORMAL.S
+++ b/libc/sysv/consts/POSIX_FADV_NORMAL.S
@@ -1,2 +1,2 @@
#include "libc/sysv/consts/syscon.internal.h"
-.syscon compat,POSIX_FADV_NORMAL,0,0,0,0,0,0x00000080
+.syscon compat,POSIX_FADV_NORMAL,0,0,0,0,0,0
diff --git a/libc/sysv/consts/POSIX_FADV_RANDOM.S b/libc/sysv/consts/POSIX_FADV_RANDOM.S
index 978a1cf76..5c197c3bc 100644
--- a/libc/sysv/consts/POSIX_FADV_RANDOM.S
+++ b/libc/sysv/consts/POSIX_FADV_RANDOM.S
@@ -1,2 +1,2 @@
#include "libc/sysv/consts/syscon.internal.h"
-.syscon compat,POSIX_FADV_RANDOM,1,127,1,1,1,0x10000000
+.syscon compat,POSIX_FADV_RANDOM,1,127,1,1,1,1
diff --git a/libc/sysv/consts/POSIX_FADV_SEQUENTIAL.S b/libc/sysv/consts/POSIX_FADV_SEQUENTIAL.S
index 5f743db4d..a8dfb1688 100644
--- a/libc/sysv/consts/POSIX_FADV_SEQUENTIAL.S
+++ b/libc/sysv/consts/POSIX_FADV_SEQUENTIAL.S
@@ -1,2 +1,2 @@
#include "libc/sysv/consts/syscon.internal.h"
-.syscon compat,POSIX_FADV_SEQUENTIAL,2,127,2,2,2,0x8000000
+.syscon compat,POSIX_FADV_SEQUENTIAL,2,127,2,2,2,2
diff --git a/libc/sysv/consts/POSIX_MADV_NORMAL.S b/libc/sysv/consts/POSIX_MADV_NORMAL.S
index eaf9dba1c..5bf67af47 100644
--- a/libc/sysv/consts/POSIX_MADV_NORMAL.S
+++ b/libc/sysv/consts/POSIX_MADV_NORMAL.S
@@ -1,2 +1,2 @@
#include "libc/sysv/consts/syscon.internal.h"
-.syscon compat,POSIX_MADV_NORMAL,0,0,0,0,0,0x00000080
+.syscon compat,POSIX_MADV_NORMAL,0,0,0,0,0,0
diff --git a/libc/sysv/consts/POSIX_MADV_RANDOM.S b/libc/sysv/consts/POSIX_MADV_RANDOM.S
index b1b800e01..5e4976e8e 100644
--- a/libc/sysv/consts/POSIX_MADV_RANDOM.S
+++ b/libc/sysv/consts/POSIX_MADV_RANDOM.S
@@ -1,2 +1,2 @@
#include "libc/sysv/consts/syscon.internal.h"
-.syscon compat,POSIX_MADV_RANDOM,1,1,1,1,1,0x10000000
+.syscon compat,POSIX_MADV_RANDOM,1,1,1,1,1,1
diff --git a/libc/sysv/consts/POSIX_MADV_SEQUENTIAL.S b/libc/sysv/consts/POSIX_MADV_SEQUENTIAL.S
index 6aa32f454..0c6183349 100644
--- a/libc/sysv/consts/POSIX_MADV_SEQUENTIAL.S
+++ b/libc/sysv/consts/POSIX_MADV_SEQUENTIAL.S
@@ -1,2 +1,2 @@
#include "libc/sysv/consts/syscon.internal.h"
-.syscon compat,POSIX_MADV_SEQUENTIAL,2,2,2,2,2,0x8000000
+.syscon compat,POSIX_MADV_SEQUENTIAL,2,2,2,2,2,2
diff --git a/libc/sysv/consts/madv.h b/libc/sysv/consts/madv.h
index cb81cb926..d03b6053e 100644
--- a/libc/sysv/consts/madv.h
+++ b/libc/sysv/consts/madv.h
@@ -1,24 +1,6 @@
#ifndef COSMOPOLITAN_LIBC_SYSV_CONSTS_MADV_H_
#define COSMOPOLITAN_LIBC_SYSV_CONSTS_MADV_H_
#include "libc/runtime/symbolic.h"
-
-#define MADV_DODUMP SYMBOLIC(MADV_DODUMP)
-#define MADV_DOFORK SYMBOLIC(MADV_DOFORK)
-#define MADV_DONTDUMP SYMBOLIC(MADV_DONTDUMP)
-#define MADV_DONTFORK SYMBOLIC(MADV_DONTFORK)
-#define MADV_DONTNEED SYMBOLIC(MADV_DONTNEED)
-#define MADV_FREE SYMBOLIC(MADV_FREE)
-#define MADV_HUGEPAGE SYMBOLIC(MADV_HUGEPAGE)
-#define MADV_HWPOISON SYMBOLIC(MADV_HWPOISON)
-#define MADV_MERGEABLE SYMBOLIC(MADV_MERGEABLE)
-#define MADV_NOHUGEPAGE SYMBOLIC(MADV_NOHUGEPAGE)
-#define MADV_NORMAL SYMBOLIC(MADV_NORMAL)
-#define MADV_RANDOM SYMBOLIC(MADV_RANDOM)
-#define MADV_REMOVE SYMBOLIC(MADV_REMOVE)
-#define MADV_SEQUENTIAL SYMBOLIC(MADV_SEQUENTIAL)
-#define MADV_UNMERGEABLE SYMBOLIC(MADV_UNMERGEABLE)
-#define MADV_WILLNEED SYMBOLIC(MADV_WILLNEED)
-
#if !(__ASSEMBLER__ + __LINKER__ + 0)
COSMOPOLITAN_C_START_
@@ -41,4 +23,23 @@ extern const unsigned MADV_WILLNEED;
COSMOPOLITAN_C_END_
#endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */
+
+#define MADV_NORMAL LITERALLY(0)
+#define MADV_RANDOM LITERALLY(1)
+#define MADV_SEQUENTIAL LITERALLY(2)
+#define MADV_WILLNEED LITERALLY(3)
+
+#define MADV_DODUMP SYMBOLIC(MADV_DODUMP)
+#define MADV_DOFORK SYMBOLIC(MADV_DOFORK)
+#define MADV_DONTDUMP SYMBOLIC(MADV_DONTDUMP)
+#define MADV_DONTFORK SYMBOLIC(MADV_DONTFORK)
+#define MADV_DONTNEED SYMBOLIC(MADV_DONTNEED)
+#define MADV_FREE SYMBOLIC(MADV_FREE)
+#define MADV_HUGEPAGE SYMBOLIC(MADV_HUGEPAGE)
+#define MADV_HWPOISON SYMBOLIC(MADV_HWPOISON)
+#define MADV_MERGEABLE SYMBOLIC(MADV_MERGEABLE)
+#define MADV_NOHUGEPAGE SYMBOLIC(MADV_NOHUGEPAGE)
+#define MADV_REMOVE SYMBOLIC(MADV_REMOVE)
+#define MADV_UNMERGEABLE SYMBOLIC(MADV_UNMERGEABLE)
+
#endif /* COSMOPOLITAN_LIBC_SYSV_CONSTS_MADV_H_ */
diff --git a/libc/sysv/syscalls.sh b/libc/sysv/syscalls.sh
index 840a736c9..c0a2b0f20 100755
--- a/libc/sysv/syscalls.sh
+++ b/libc/sysv/syscalls.sh
@@ -134,7 +134,7 @@ scall sys_fchmod 0x07c07c07c207c05b globl hidden
scall sys_chown 0x010010010201005c globl hidden # impl. w/ fchownat() @asyncsignalsafe
scall sys_fchown 0x07b07b07b207b05d globl hidden # @asyncsignalsafe
scall sys_lchown 0x1130fe0fe216c05e globl hidden # impl. w/ fchownat()
-scall sys_umask 0x03c03c03c203c05f globl
+scall sys_umask 0x03c03c03c203c05f globl hidden
scall sys_gettimeofday 0x1a20430742074060 globl hidden # xnu esi/edx=0
scall sys_getrlimit 0x0c20c20c220c2061 globl hidden
scall __sys_getrusage 0x1bd0130752075062 globl hidden
@@ -150,14 +150,14 @@ scall sys_setsid 0x0930930932093070 globl hidden
scall sys_getsid 0x11e0ff136213607c globl hidden
scall sys_getpgid 0x0cf0cf0cf2097079 globl hidden
scall sys_setpgid 0x052052052205206d globl hidden
-scall geteuid 0xfff019019201906b globl
+scall sys_geteuid 0xfff019019201906b globl hidden
scall getegid 0xfff02b02b202b06c globl
scall getgroups 0x04f04f04f204f073 globl
scall setgroups 0x0500500502050074 globl
scall setreuid 0x07e07e07e207e071 globl
scall setregid 0x07f07f07f207f072 globl
-scall setuid 0x0170170172017069 globl
-scall setgid 0x0b50b50b520b506a globl
+scall sys_setuid 0x0170170172017069 globl hidden
+scall sys_setgid 0x0b50b50b520b506a globl hidden
scall sys_setresuid 0xfff11a137ffff075 globl hidden # polyfilled for xnu
scall sys_setresgid 0xfff11c138ffff077 globl hidden # polyfilled for xnu
scall getresuid 0xfff119168ffff076 globl # semantics aren't well-defined
@@ -182,7 +182,7 @@ scall chroot 0x03d03d03d203d0a1 globl
scall sys_sync 0xfff02402420240a2 globl hidden
scall acct 0x03303303320330a3 globl
scall settimeofday 0x1a304407a207a0a4 globl
-scall sys_mount 0x19a01501520a70a5 globl
+scall sys_mount 0x19a01501520a70a5 globl hidden
scall sys_unmount 0x016016016209f0a6 globl hidden # umount2() on linux
scall umount2 0x016016016209f0a6 globl hidden # unmount() on bsd
scall sys_reboot 0x0d003703720370a9 globl hidden # two arguments b/c netbsd/sparc lool
@@ -192,8 +192,8 @@ scall setfsgid 0xfffffffffffff07b globl
scall capget 0xfffffffffffff07d globl
scall capset 0xfffffffffffff07e globl
scall sigtimedwait 0xffffff159ffff080 globl
-scall sys_sigqueue 0xffffff1c8fffffff globl
-scall sys_sigqueueinfo 0x0f5ffffffffff081 globl # a.k.a. rt_sigqueueinfo on linux
+scall sys_sigqueue 0xffffff1c8fffffff globl hidden
+scall sys_sigqueueinfo 0x0f5ffffffffff081 globl hidden # a.k.a. rt_sigqueueinfo on linux
scall personality 0xfffffffffffff087 globl
scall ustat 0xfffffffffffff088 globl
scall sysfs 0xfffffffffffff08b globl
@@ -243,9 +243,9 @@ scall io_getevents 0xfffffffffffff0d0 globl
scall io_submit 0xfffffffffffff0d1 globl
scall io_cancel 0xfffffffffffff0d2 globl
scall lookup_dcookie 0xfffffffffffff0d4 globl
-scall sys_epoll_create 0xfffffffffffff0d5 globl
-scall sys_epoll_wait 0xfffffffffffff0e8 globl
-scall sys_epoll_ctl 0xfffffffffffff0e9 globl
+scall sys_epoll_create 0xfffffffffffff0d5 globl hidden
+scall sys_epoll_wait 0xfffffffffffff0e8 globl hidden
+scall sys_epoll_ctl 0xfffffffffffff0e9 globl hidden
scall getdents 0x18606311020c40d9 globl hidden # four args b/c xnu, getdirentries on xnu, 32-bit on xnu/freebsd, a.k.a. getdents64 on linux, 64-bit on openbsd
scall set_tid_address 0xfffffffffffff0da globl
scall restart_syscall 0xfffffffffffff0db globl
@@ -318,7 +318,7 @@ scall __sys_accept4 0xfff05d21dffff120 globl hidden # Linux 2.6.28+
scall __sys_dup3 0x1c6066fffffff124 globl hidden # Linux 2.6.27+
scall __sys_pipe2 0x1c506521effff125 globl hidden # Linux 2.6.27+
scall epoll_pwait 0xfffffffffffff119 globl
-scall sys_epoll_create1 0xfffffffffffff123 globl
+scall sys_epoll_create1 0xfffffffffffff123 globl hidden
scall perf_event_open 0xfffffffffffff12a globl
scall inotify_init1 0xfffffffffffff126 globl
scall rt_tgsigqueueinfo 0xfffffffffffff129 globl
@@ -338,7 +338,7 @@ scall name_to_handle_at 0xfffffffffffff12f globl
scall open_by_handle_at 0xfffffffffffff130 globl
scall clock_adjtime 0xfffffffffffff131 globl
scall syncfs 0xfffffffffffff132 globl
-scall sendmmsg 0x1dcffffffffff133 globl
+#scall sendmmsg 0x1dcffffffffff133 globl
scall setns 0xfffffffffffff134 globl
scall getcpu 0xfffffffffffff135 globl
scall process_vm_readv 0xfffffffffffff136 globl
@@ -724,7 +724,7 @@ scall rctl_get_limits 0xffffff20ffffffff globl
scall rctl_get_racct 0xffffff20dfffffff globl
scall rctl_get_rules 0xffffff20efffffff globl
scall rctl_remove_rule 0xffffff211fffffff globl
-scall recv 0xffffff066fffffff globl
+#scall recv 0xffffff066fffffff globl
scall rfork 0xffffff0fbfffffff globl
scall rtprio 0xffffff0a6fffffff globl
scall rtprio_thread 0xffffff1d2fffffff globl
diff --git a/libc/testlib/quota.c b/libc/testlib/quota.c
index da71fa5c9..7bfbf6243 100644
--- a/libc/testlib/quota.c
+++ b/libc/testlib/quota.c
@@ -21,6 +21,7 @@
#include "libc/calls/sigbits.h"
#include "libc/errno.h"
#include "libc/intrin/kprintf.h"
+#include "libc/log/backtrace.internal.h"
#include "libc/log/internal.h"
#include "libc/log/libfatal.internal.h"
#include "libc/log/log.h"
@@ -29,7 +30,7 @@
#include "libc/stdio/stdio.h"
#include "libc/str/str.h"
#include "libc/testlib/testlib.h"
-#include "third_party/dlmalloc/dlmalloc.internal.h"
+#include "third_party/dlmalloc/dlmalloc.h"
static noasan relegated uint64_t CountMappedBytes(void) {
size_t i;
@@ -64,21 +65,11 @@ static relegated void OnXfsz(int sig) {
relegated void __oom_hook(size_t request) {
int e;
uint64_t toto, newlim;
- struct MallocStats stats;
__restore_tty(2);
e = errno;
toto = CountMappedBytes();
- stats = dlmalloc_stats(g_dlmalloc);
kprintf("\n\nWE REQUIRE MORE VESPENE GAS");
if (e != ENOMEM) kprintf(" (%s)", strerror(e));
- kprintf("\n"
- "mmap last request = %'ld\n"
- "mmapped system bytes = %'ld\n"
- "malloc max system bytes = %'ld\n"
- "malloc system bytes = %'ld\n"
- "malloc in use bytes = %'ld\n"
- "\n",
- request, toto, stats.maxfp, stats.fp, stats.used);
if (IsRunningUnderMake()) {
newlim = toto + request;
newlim += newlim >> 1;
diff --git a/libc/testlib/testmain.c b/libc/testlib/testmain.c
index ae0dc2f69..25115534c 100644
--- a/libc/testlib/testmain.c
+++ b/libc/testlib/testmain.c
@@ -37,7 +37,7 @@
#include "libc/sysv/consts/exit.h"
#include "libc/sysv/consts/sig.h"
#include "libc/testlib/testlib.h"
-#include "third_party/dlmalloc/dlmalloc.internal.h"
+#include "third_party/dlmalloc/dlmalloc.h"
#include "third_party/getopt/getopt.h"
#define USAGE \
diff --git a/libc/thread/sem.c b/libc/thread/sem.c
index b4c98b2ad..e1b5bfa81 100644
--- a/libc/thread/sem.c
+++ b/libc/thread/sem.c
@@ -27,7 +27,7 @@
static void pause(int attempt) {
if (attempt < 16) {
for (int i = 0; i < (1 << attempt); ++i) {
- asm("pause");
+ __builtin_ia32_pause();
}
} else {
cthread_yield();
diff --git a/libc/type2str.h b/libc/type2str.h
new file mode 100644
index 000000000..ad48948b3
--- /dev/null
+++ b/libc/type2str.h
@@ -0,0 +1,44 @@
+#ifndef COSMOPOLITAN_LIBC_TYPE2STR_H_
+#define COSMOPOLITAN_LIBC_TYPE2STR_H_
+#if __STDC_VERSION__ + 0 >= 201112
+/* clang-format off */
+
+#define _TYPE2STR(X) \
+ _Generic(X, \
+ _Bool: "_Bool", \
+ signed char: "signed char", \
+ unsigned char: "unsigned char", \
+ char: "char", \
+ short: "short", \
+ unsigned short: "unsigned short", \
+ int: "int", \
+ unsigned: "unsigned", \
+ long: "long", \
+ unsigned long: "unsigned long", \
+ long long: "long long", \
+ unsigned long long: "unsigned long long", \
+ float: "float", \
+ double: "double", \
+ long double: "long double")
+
+#define _PRINTF_GENERIC(X, D, U) \
+ _Generic(X, \
+ _Bool: "hhh" U, \
+ signed char: "hh" D, \
+ unsigned char: "hh" U, \
+ char: "hh" D, \
+ short: "h" D, \
+ unsigned short: "h" U, \
+ int: D, \
+ unsigned: U, \
+ long: "l" D, \
+ unsigned long: "l" U, \
+ long long: "ll" D, \
+ unsigned long long: "ll" U, \
+ float: "f", \
+ double: "f", \
+ long double: "Lf")
+
+/* clang-format on */
+#endif /* C11 */
+#endif /* COSMOPOLITAN_LIBC_TYPE2STR_H_ */
diff --git a/libc/x/xbarf.c b/libc/x/xbarf.c
index 0162a812a..9041dca8d 100644
--- a/libc/x/xbarf.c
+++ b/libc/x/xbarf.c
@@ -26,7 +26,7 @@
* Writes data to file.
*
* @param size can be -1 to strlen(data)
- * @return if failed, -1 w/ errno
+ * @return 0 on success or -1 w/ errno
* @note this is uninterruptible
*/
int xbarf(const char *path, const void *data, size_t size) {
diff --git a/libc/zipos/get.c b/libc/zipos/get.c
index 1c54f96f4..c92d238a4 100644
--- a/libc/zipos/get.c
+++ b/libc/zipos/get.c
@@ -87,14 +87,11 @@ struct Zipos *__zipos_get(void) {
STRACE("__zipos_get(%#s)", program_executable_name);
} else {
munmap(map, size);
- kprintf("__zipos_get(%#s) → eocd not found%n",
- program_executable_name);
STRACE("__zipos_get(%#s) → eocd not found", program_executable_name);
}
}
close(fd);
} else {
- kprintf("__zipos_get(%#s) → open failed %m%n", program_executable_name);
STRACE("__zipos_get(%#s) → open failed %m", program_executable_name);
}
once = true;
diff --git a/libc/zipos/open.c b/libc/zipos/open.c
index 4798b6216..9e8fcf4d9 100644
--- a/libc/zipos/open.c
+++ b/libc/zipos/open.c
@@ -124,12 +124,13 @@ static int __zipos_load(struct Zipos *zipos, size_t cf, unsigned flags,
h->mem = NULL;
}
if (h->mem) {
- if ((fd = IsWindows() ? __reservefd() : dup(2)) != -1) {
+ if ((fd = IsWindows() ? __reservefd(-1) : dup(2)) != -1) {
if (__ensurefds(fd) != -1) {
h->handle = g_fds.p[fd].handle;
g_fds.p[fd].kind = kFdZip;
g_fds.p[fd].handle = (intptr_t)h;
g_fds.p[fd].flags = flags | O_CLOEXEC;
+ g_fds.p[fd].mode = mode;
return fd;
}
close(fd);
diff --git a/test/libc/calls/fcntl_test.c b/test/libc/calls/fcntl_test.c
index 5ebf46bdf..3c9ee8a41 100644
--- a/test/libc/calls/fcntl_test.c
+++ b/test/libc/calls/fcntl_test.c
@@ -17,6 +17,7 @@
│ PERFORMANCE OF THIS SOFTWARE. │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/calls/calls.h"
+#include "libc/dce.h"
#include "libc/fmt/fmt.h"
#include "libc/log/check.h"
#include "libc/macros.internal.h"
@@ -40,9 +41,12 @@ TEST(fcntl_getfl, testRemembersAccessMode) {
}
TEST(fcntl_setfl, testChangeAppendStatus) {
+ if (IsWindows()) {
+ // no obivous way to do fcntl(fd, F_SETFL, O_APPEND)
+ return;
+ }
int fd;
char buf[8] = {0};
- if (IsWindows()) return; /* doesn't appear possible on windows */
ASSERT_NE(-1, (fd = open("foo", O_CREAT | O_RDWR, 0644)));
EXPECT_EQ(3, write(fd, "foo", 3));
EXPECT_NE(-1, lseek(fd, 0, SEEK_SET));
diff --git a/test/libc/calls/open_test.c b/test/libc/calls/open_test.c
new file mode 100644
index 000000000..7bf2fc618
--- /dev/null
+++ b/test/libc/calls/open_test.c
@@ -0,0 +1,59 @@
+/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
+│vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│
+╞══════════════════════════════════════════════════════════════════════════════╡
+│ Copyright 2020 Justine Alexandra Roberts Tunney │
+│ │
+│ Permission to use, copy, modify, and/or distribute this software for │
+│ any purpose with or without fee is hereby granted, provided that the │
+│ above copyright notice and this permission notice appear in all copies. │
+│ │
+│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │
+│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │
+│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │
+│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │
+│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │
+│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │
+│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
+│ PERFORMANCE OF THIS SOFTWARE. │
+╚─────────────────────────────────────────────────────────────────────────────*/
+#include "libc/sysv/consts/o.h"
+#include "libc/testlib/testlib.h"
+#include "libc/x/x.h"
+
+char testlib_enable_tmp_setup_teardown;
+
+TEST(open, testOpenExistingForWriteOnly_seeksToStart) {
+ char buf[8] = {0};
+ ASSERT_SYS(0, 0, xbarf("hello.txt", "hello", -1));
+ ASSERT_SYS(0, 3, open("hello.txt", O_WRONLY));
+ EXPECT_SYS(0, 1, write(3, "H", 1));
+ EXPECT_SYS(0, 0, close(3));
+ ASSERT_SYS(0, 3, open("hello.txt", O_RDONLY));
+ EXPECT_SYS(0, 5, read(3, buf, 7));
+ EXPECT_STREQ("Hello", buf);
+ EXPECT_SYS(0, 0, close(3));
+}
+
+TEST(open, testOpenExistingForReadWrite_seeksToStart) {
+ char buf[8] = {0};
+ ASSERT_SYS(0, 0, xbarf("hello.txt", "hello", -1));
+ ASSERT_SYS(0, 3, open("hello.txt", O_RDWR));
+ EXPECT_SYS(0, 1, write(3, "H", 1));
+ EXPECT_SYS(0, 0, close(3));
+ ASSERT_SYS(0, 3, open("hello.txt", O_RDONLY));
+ EXPECT_SYS(0, 5, read(3, buf, 7));
+ EXPECT_STREQ("Hello", buf);
+ EXPECT_SYS(0, 0, close(3));
+}
+
+TEST(open, testOpenExistingForAppendWriteOnly_seeksToEnd) {
+ char buf[8] = {0};
+ ASSERT_SYS(0, 0, xbarf("hello.txt", "hell", -1));
+ ASSERT_SYS(0, 3, open("hello.txt", O_WRONLY | O_APPEND));
+ EXPECT_SYS(0, 1, write(3, "o", 1));
+ EXPECT_SYS(0, 0, close(3));
+ ASSERT_SYS(0, 3, open("hello.txt", O_RDONLY));
+ EXPECT_SYS(0, 5, read(3, buf, 7));
+ EXPECT_STREQ("hello", buf);
+ EXPECT_SYS(0, 0, close(3));
+}
diff --git a/test/libc/mem/malloc_test.c b/test/libc/mem/malloc_test.c
index b598989f9..767467b28 100644
--- a/test/libc/mem/malloc_test.c
+++ b/test/libc/mem/malloc_test.c
@@ -20,6 +20,8 @@
#include "libc/bits/safemacros.internal.h"
#include "libc/calls/calls.h"
#include "libc/calls/struct/stat.h"
+#include "libc/dce.h"
+#include "libc/intrin/kprintf.h"
#include "libc/macros.internal.h"
#include "libc/mem/mem.h"
#include "libc/rand/rand.h"
@@ -37,6 +39,11 @@
#define N 1024
#define M 20
+void SetUp(void) {
+ // TODO(jart): what is wrong?
+ if (IsWindows()) exit(0);
+}
+
TEST(malloc, zeroMeansOne) {
ASSERT_GE(malloc_usable_size(gc(malloc(0))), 1);
}
@@ -85,9 +92,9 @@ TEST(malloc, test) {
if (fds[k] == -1) {
ASSERT_NE(-1, (fds[k] = open(program_invocation_name, O_RDONLY)));
ASSERT_NE(-1, fstat(fds[k], &st));
- ASSERT_NE(MAP_FAILED,
- (maps[k] = mmap(NULL, (mapsizes[k] = st.st_size), PROT_READ,
- MAP_SHARED, fds[k], 0)));
+ mapsizes[k] = st.st_size;
+ ASSERT_NE(MAP_FAILED, (maps[k] = mmap(NULL, mapsizes[k], PROT_READ,
+ MAP_SHARED, fds[k], 0)));
} else {
ASSERT_NE(-1, munmap(maps[k], mapsizes[k]));
ASSERT_NE(-1, close(fds[k]));
diff --git a/test/libc/rand/rand64_test.c b/test/libc/rand/rand64_test.c
index 38a0a1fcf..d71e88e7d 100644
--- a/test/libc/rand/rand64_test.c
+++ b/test/libc/rand/rand64_test.c
@@ -30,7 +30,7 @@
#include "libc/sysv/consts/sig.h"
#include "libc/testlib/testlib.h"
-#define THREADS 4
+#define THREADS 8
#define ENTRIES 256
volatile bool ready;
@@ -42,7 +42,9 @@ void OnChld(int sig) {
int Thrasher(void *arg) {
int i, id = (intptr_t)arg;
- while (!ready) asm("pause");
+ while (!ready) {
+ __builtin_ia32_pause();
+ }
for (i = 0; i < ENTRIES; ++i) {
A[id * ENTRIES + i] = rand64();
}
diff --git a/test/libc/runtime/clone_test.c b/test/libc/runtime/clone_test.c
index 73ea0a031..6754115ec 100644
--- a/test/libc/runtime/clone_test.c
+++ b/test/libc/runtime/clone_test.c
@@ -43,6 +43,6 @@ TEST(clone, test) {
EXPECT_NE(-1, (tid = clone(thread, stack + FRAMESIZE,
CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
0, &ptid, &tls, &ctid)));
- while ((nowl() - t) < 1 && !x) asm("pause");
+ while ((nowl() - t) < 1 && !x) __builtin_ia32_pause();
ASSERT_EQ(42, x);
}
diff --git a/test/libc/sock/poll_test.c b/test/libc/sock/poll_test.c
index 1ae1c8018..9a6271de1 100644
--- a/test/libc/sock/poll_test.c
+++ b/test/libc/sock/poll_test.c
@@ -16,16 +16,25 @@
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
│ PERFORMANCE OF THIS SOFTWARE. │
╚─────────────────────────────────────────────────────────────────────────────*/
+#include "libc/calls/calls.h"
+#include "libc/calls/sigbits.h"
+#include "libc/dce.h"
+#include "libc/intrin/kprintf.h"
#include "libc/log/libfatal.internal.h"
+#include "libc/nexgen32e/rdtsc.h"
+#include "libc/nexgen32e/rdtscp.h"
+#include "libc/nt/synchronization.h"
#include "libc/runtime/gc.internal.h"
#include "libc/sock/sock.h"
#include "libc/sysv/consts/af.h"
#include "libc/sysv/consts/inaddr.h"
#include "libc/sysv/consts/ipproto.h"
#include "libc/sysv/consts/poll.h"
+#include "libc/sysv/consts/sig.h"
#include "libc/sysv/consts/sock.h"
#include "libc/testlib/testlib.h"
#include "libc/x/x.h"
+#include "third_party/chibicc/test/test.h"
#include "tool/decode/lib/flagger.h"
#include "tool/decode/lib/pollnames.h"
@@ -36,6 +45,10 @@ dontdiscard char *FormatPollFd(struct pollfd p[2]) {
p[1].fd, gc(RecreateFlags(kPollNames, p[1].revents)));
}
+TEST(poll, allZero_doesNothing_exceptValidateAndCheckForSignals) {
+ EXPECT_SYS(0, 0, poll(0, 0, 0));
+}
+
TEST(poll, testNegativeOneFd_isIgnored) {
ASSERT_SYS(0, 3, socket(AF_INET, SOCK_STREAM, IPPROTO_TCP));
struct sockaddr_in addr = {AF_INET, 0, {htonl(INADDR_LOOPBACK)}};
@@ -48,3 +61,79 @@ TEST(poll, testNegativeOneFd_isIgnored) {
gc(FormatPollFd(&fds[0])));
ASSERT_SYS(0, 0, close(3));
}
+
+TEST(poll, pipe_noInput) {
+ // we can't test stdin here since
+ // we can't assume it isn't /dev/null
+ // since nil is always pollin as eof
+ int pipefds[2];
+ EXPECT_SYS(0, 0, pipe(pipefds));
+ struct pollfd fds[] = {{pipefds[0], POLLIN}};
+ EXPECT_SYS(0, 0, poll(fds, 1, 0));
+ EXPECT_EQ(0, fds[0].revents);
+ EXPECT_SYS(0, 0, close(pipefds[0]));
+ EXPECT_SYS(0, 0, close(pipefds[1]));
+}
+
+TEST(poll, pipe_hasInputFromSameProcess) {
+ char buf[2];
+ int pipefds[2];
+ EXPECT_SYS(0, 0, pipe(pipefds));
+ struct pollfd fds[] = {{pipefds[0], POLLIN}};
+ EXPECT_SYS(0, 2, write(pipefds[1], "hi", 2));
+ EXPECT_SYS(0, 1, poll(fds, 1, 1000)); // flake nt!
+ EXPECT_EQ(POLLIN, fds[0].revents);
+ EXPECT_SYS(0, 2, read(pipefds[0], buf, 2));
+ EXPECT_SYS(0, 0, poll(fds, 1, 0));
+ EXPECT_SYS(0, 0, close(pipefds[0]));
+ EXPECT_SYS(0, 0, close(pipefds[1]));
+}
+
+TEST(poll, pipe_hasInput) {
+ char buf[2];
+ sigset_t chldmask, savemask;
+ int ws, pid, sync[2], pipefds[2];
+ EXPECT_EQ(0, sigemptyset(&chldmask));
+ EXPECT_EQ(0, sigaddset(&chldmask, SIGCHLD));
+ EXPECT_EQ(0, sigprocmask(SIG_BLOCK, &chldmask, &savemask));
+ EXPECT_SYS(0, 0, pipe(pipefds));
+ EXPECT_NE(-1, (pid = fork()));
+ if (!pid) {
+ EXPECT_SYS(0, 0, close(pipefds[0]));
+ EXPECT_SYS(0, 2, write(pipefds[1], "hi", 2));
+ EXPECT_SYS(0, 2, write(pipefds[1], "hi", 2));
+ EXPECT_SYS(0, 0, close(pipefds[1]));
+ _Exit(0);
+ }
+ EXPECT_SYS(0, 0, close(pipefds[1]));
+ EXPECT_SYS(0, 2, read(pipefds[0], buf, 2));
+ struct pollfd fds[] = {{pipefds[0], POLLIN}};
+ EXPECT_SYS(0, 1, poll(fds, 1, -1));
+ EXPECT_EQ(POLLIN, fds[0].revents & POLLIN);
+ EXPECT_SYS(0, 2, read(pipefds[0], buf, 2));
+ EXPECT_SYS(0, 0, close(pipefds[0]));
+ ASSERT_NE(-1, wait(&ws));
+ EXPECT_TRUE(WIFEXITED(ws));
+ EXPECT_EQ(0, WEXITSTATUS(ws));
+ EXPECT_EQ(0, sigprocmask(SIG_SETMASK, &savemask, 0));
+}
+
+#if 0
+TEST(poll, emptyFds_becomesSleep) {
+ // timing tests w/o mocks are always the hardest
+ int64_t a, b, c, p, i = 0;
+ do {
+ if (++i == 5) {
+ kprintf("too much cpu churn%n");
+ return;
+ }
+ p = TSC_AUX_CORE(rdpid());
+ a = rdtsc();
+ EXPECT_SYS(0, 0, poll(0, 0, 5));
+ b = rdtsc();
+ EXPECT_SYS(0, 0, poll(0, 0, 50));
+ c = rdtsc();
+ } while (TSC_AUX_CORE(rdpid()) != p);
+ EXPECT_LT((b - a) * 2, c - b);
+}
+#endif
diff --git a/test/libc/sock/select_test.c b/test/libc/sock/select_test.c
index bedaffb28..b46e657d3 100644
--- a/test/libc/sock/select_test.c
+++ b/test/libc/sock/select_test.c
@@ -16,17 +16,37 @@
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
│ PERFORMANCE OF THIS SOFTWARE. │
╚─────────────────────────────────────────────────────────────────────────────*/
+#include "libc/calls/calls.h"
#include "libc/calls/struct/timeval.h"
#include "libc/sock/select.h"
#include "libc/sock/sock.h"
#include "libc/testlib/testlib.h"
#include "libc/time/time.h"
-TEST(select, allZero) {
- // todo: figure out how to test block until signal w/ select
- // EXPECT_SYS(0, 0, select(0, 0, 0, 0, 0));
+// TEST(select, allZero) {
+// // todo: figure out how to test block until signal w/ select
+// EXPECT_SYS(0, 0, select(0, 0, 0, 0, 0));
+// }
+
+TEST(select, pipe_hasInputFromSameProcess) {
+ fd_set rfds;
+ char buf[2];
+ int pipefds[2];
+ struct timeval tv = {.tv_usec = 100 * 1000};
+ EXPECT_SYS(0, 0, pipe(pipefds));
+ FD_ZERO(&rfds);
+ FD_SET(pipefds[0], &rfds);
+ EXPECT_SYS(0, 2, write(pipefds[1], "hi", 2));
+ EXPECT_SYS(0, 1, select(pipefds[0] + 1, &rfds, 0, 0, &tv));
+ EXPECT_TRUE(FD_ISSET(pipefds[0], &rfds));
+ EXPECT_SYS(0, 2, read(pipefds[0], buf, 2));
+ EXPECT_SYS(0, 0, select(pipefds[0] + 1, &rfds, 0, 0, &tv));
+ EXPECT_TRUE(!FD_ISSET(pipefds[0], &rfds));
+ EXPECT_SYS(0, 0, close(pipefds[0]));
+ EXPECT_SYS(0, 0, close(pipefds[1]));
}
+#if 0 // flaky
TEST(select, testSleep) {
int64_t e;
long double n;
@@ -40,3 +60,4 @@ TEST(select, testSleep) {
EXPECT_EQ(0, t.tv_usec);
}
}
+#endif
diff --git a/test/libc/stdio/fwrite_test.c b/test/libc/stdio/fwrite_test.c
index 6bb2f6402..4d8005799 100644
--- a/test/libc/stdio/fwrite_test.c
+++ b/test/libc/stdio/fwrite_test.c
@@ -29,8 +29,6 @@
#include "libc/testlib/testlib.h"
#include "libc/time/time.h"
-/* TODO(jart): O_APPEND on Windows */
-
#define PATH "hog"
FILE *f;
diff --git a/test/libc/stdio/vappendf_test.c b/test/libc/stdio/vappendf_test.c
index 6535ee66d..8489e3e5e 100644
--- a/test/libc/stdio/vappendf_test.c
+++ b/test/libc/stdio/vappendf_test.c
@@ -17,10 +17,15 @@
│ PERFORMANCE OF THIS SOFTWARE. │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/bits/bits.h"
+#include "libc/intrin/kprintf.h"
#include "libc/stdio/append.internal.h"
#include "libc/testlib/ezbench.h"
#include "libc/testlib/testlib.h"
+static void PrintMemory(void *p) {
+ kprintf("%#.*hhs%n", malloc_usable_size(p), p);
+}
+
TEST(vappendf, test) {
char *b = 0;
ASSERT_NE(-1, appendf(&b, "hello "));
@@ -137,6 +142,7 @@ TEST(appendr, testExtend_zeroFills) {
TEST(appendr, testAbsent_allocatesNul) {
char *b = 0;
ASSERT_NE(-1, appendr(&b, 0));
+ ASSERT_BINEQ(u" ", b);
EXPECT_EQ(0, appendz(b).i);
ASSERT_BINEQ(u" ", b);
free(b);
diff --git a/test/tool/plinko/plinko_test.c b/test/tool/plinko/plinko_test.c
index 2be11ee2d..4b95011b0 100644
--- a/test/tool/plinko/plinko_test.c
+++ b/test/tool/plinko/plinko_test.c
@@ -20,10 +20,13 @@
#include "libc/calls/sigbits.h"
#include "libc/calls/struct/sigaction.h"
#include "libc/errno.h"
+#include "libc/intrin/kprintf.h"
#include "libc/macros.internal.h"
#include "libc/mem/io.h"
#include "libc/mem/mem.h"
#include "libc/runtime/runtime.h"
+#include "libc/stdio/stdio.h"
+#include "libc/str/str.h"
#include "libc/sysv/consts/o.h"
#include "libc/sysv/consts/sig.h"
#include "libc/testlib/testlib.h"
@@ -61,8 +64,9 @@ void SetUpOnce(void) {
}
TEST(plinko, worksOrPrintsNiceError) {
- ssize_t rc;
- char buf[16], drain[64];
+ size_t n;
+ ssize_t rc, got;
+ char buf[1024], drain[64];
sigset_t chldmask, savemask;
int i, pid, fdin, wstatus, pfds[2][2];
struct sigaction ignore, saveint, savequit, savepipe;
@@ -100,7 +104,8 @@ TEST(plinko, worksOrPrintsNiceError) {
EXPECT_NE(-1, close(fdin));
}
EXPECT_NE(-1, close(pfds[0][1]));
- EXPECT_NE(-1, read(pfds[1][0], buf, sizeof(buf) - 1));
+ EXPECT_NE(-1, (got = read(pfds[1][0], buf, sizeof(buf) - 1)));
+ EXPECT_NE(0, got);
while (read(pfds[1][0], drain, sizeof(drain)) > 0) donothing;
EXPECT_NE(-1, close(pfds[1][0]));
EXPECT_NE(-1, waitpid(pid, &wstatus, 0));
@@ -115,4 +120,7 @@ TEST(plinko, worksOrPrintsNiceError) {
EXPECT_EQ(0, sigaction(SIGQUIT, &savequit, 0));
EXPECT_EQ(0, sigaction(SIGPIPE, &savepipe, 0));
EXPECT_EQ(0, sigprocmask(SIG_SETMASK, &savemask, 0));
+ if (g_testlib_failed) {
+ kprintf("note: got the following in pipe: %s%n", buf);
+ }
}
diff --git a/third_party/dlmalloc/COPYING b/third_party/dlmalloc/COPYING
deleted file mode 100644
index 3ce09cea1..000000000
--- a/third_party/dlmalloc/COPYING
+++ /dev/null
@@ -1,10 +0,0 @@
-/ Since dlmalloc is public domain, we intend to keep it that way. To the
-/ extent possible under law, Justine Tunney has waived all copyright and
-/ related or neighboring rights to her /third_party/dlmalloc changes, as
-/ it is written in the following disclaimers:
-/ • unlicense.org
-/ • creativecommons.org/publicdomain/zero/1.0/
-
-.ident "\n
-dlmalloc (Public Domain CC0)
-Credit: Doug Lea
"
diff --git a/third_party/dlmalloc/README b/third_party/dlmalloc/README
index 6c542bd1a..690efa141 100644
--- a/third_party/dlmalloc/README
+++ b/third_party/dlmalloc/README
@@ -1,10 +1,9 @@
-
This is a version (aka dlmalloc) of malloc/free/realloc written by
Doug Lea and released to the public domain, as explained at
http://creativecommons.org/publicdomain/zero/1.0/ Send questions,
comments, complaints, performance data, etc to dl@cs.oswego.edu
- Version 2.8.6 Wed Aug 29 06:57:58 2012 Doug Lea
+* Version 2.8.6 Wed Aug 29 06:57:58 2012 Doug Lea
Note: There may be an updated version of this malloc obtainable at
ftp://gee.cs.oswego.edu/pub/misc/malloc.c
Check before installing!
@@ -41,7 +40,9 @@
(e.g. 2.7.2) supporting these.)
Alignment: 8 bytes (minimum)
- Is set to 16 for NexGen32e.
+ This suffices for nearly all current machines and C compilers.
+ However, you can define MALLOC_ALIGNMENT to be wider than this
+ if necessary (up to 128bytes), at the expense of using more space.
Minimum overhead per allocated chunk: 4 or 8 bytes (if 4byte sizes)
8 or 16 bytes (if 8byte sizes)
@@ -98,7 +99,7 @@
If you don't like either of these options, you can define
CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything
else. And if if you are sure that your program using malloc has
- no errors or vulnerabilities, you can define TRUSTWORTHY to 1,
+ no errors or vulnerabilities, you can define INSECURE to 1,
which might (or might not) provide a small performance improvement.
It is also possible to limit the maximum total allocatable
@@ -182,6 +183,371 @@
For a longer but out of date high-level description, see
http://gee.cs.oswego.edu/dl/html/malloc.html
+ ----------------------- Chunk representations ------------------------
+
+ (The following includes lightly edited explanations by Colin Plumb.)
+
+ The malloc_chunk declaration below is misleading (but accurate and
+ necessary). It declares a "view" into memory allowing access to
+ necessary fields at known offsets from a given base.
+
+ Chunks of memory are maintained using a `boundary tag' method as
+ originally described by Knuth. (See the paper by Paul Wilson
+ ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such
+ techniques.) Sizes of free chunks are stored both in the front of
+ each chunk and at the end. This makes consolidating fragmented
+ chunks into bigger chunks fast. The head fields also hold bits
+ representing whether chunks are free or in use.
+
+ Here are some pictures to make it clearer. They are "exploded" to
+ show that the state of a chunk can be thought of as extending from
+ the high 31 bits of the head field of its header through the
+ prev_foot and PINUSE_BIT bit of the following chunk header.
+
+ A chunk that's in use looks like:
+
+ chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Size of previous chunk (if P = 0) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
+ | Size of this chunk 1| +-+
+ mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | |
+ +- -+
+ | |
+ +- -+
+ | :
+ +- size - sizeof(size_t) available payload bytes -+
+ : |
+ chunk-> +- -+
+ | |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|
+ | Size of next chunk (may or may not be in use) | +-+
+ mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ And if it's free, it looks like this:
+
+ chunk-> +- -+
+ | User payload (must be in use, or we would have merged!) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
+ | Size of this chunk 0| +-+
+ mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Next pointer |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Prev pointer |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | :
+ +- size - sizeof(struct chunk) unused bytes -+
+ : |
+ chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Size of this chunk |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|
+ | Size of next chunk (must be in use, or we would have merged)| +-+
+ mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | :
+ +- User payload -+
+ : |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |0|
+ +-+
+ Note that since we always merge adjacent free chunks, the chunks
+ adjacent to a free chunk must be in use.
+
+ Given a pointer to a chunk (which can be derived trivially from the
+ payload pointer) we can, in O(1) time, find out whether the adjacent
+ chunks are free, and if so, unlink them from the lists that they
+ are on and merge them with the current chunk.
+
+ Chunks always begin on even word boundaries, so the mem portion
+ (which is returned to the user) is also on an even word boundary, and
+ thus at least double-word aligned.
+
+ The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
+ chunk size (which is always a multiple of two words), is an in-use
+ bit for the *previous* chunk. If that bit is *clear*, then the
+ word before the current chunk size contains the previous chunk
+ size, and can be used to find the front of the previous chunk.
+ The very first chunk allocated always has this bit set, preventing
+ access to non-existent (or non-owned) memory. If pinuse is set for
+ any given chunk, then you CANNOT determine the size of the
+ previous chunk, and might even get a memory addressing fault when
+ trying to do so.
+
+ The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
+ the chunk size redundantly records whether the current chunk is
+ inuse (unless the chunk is mmapped). This redundancy enables usage
+ checks within free and realloc, and reduces indirection when freeing
+ and consolidating chunks.
+
+ Each freshly allocated chunk must have both cinuse and pinuse set.
+ That is, each allocated chunk borders either a previously allocated
+ and still in-use chunk, or the base of its memory arena. This is
+ ensured by making all allocations from the `lowest' part of any
+ found chunk. Further, no free chunk physically borders another one,
+ so each free chunk is known to be preceded and followed by either
+ inuse chunks or the ends of memory.
+
+ Note that the `foot' of the current chunk is actually represented
+ as the prev_foot of the NEXT chunk. This makes it easier to
+ deal with alignments etc but can be very confusing when trying
+ to extend or adapt this code.
+
+ The exceptions to all this are
+
+ 1. The special chunk `top' is the top-most available chunk (i.e.,
+ the one bordering the end of available memory). It is treated
+ specially. Top is never included in any bin, is used only if
+ no other chunk is available, and is released back to the
+ system if it is very large (see M_TRIM_THRESHOLD). In effect,
+ the top chunk is treated as larger (and thus less well
+ fitting) than any other available chunk. The top chunk
+ doesn't update its trailing size field since there is no next
+ contiguous chunk that would have to index off it. However,
+ space is still allocated for it (TOP_FOOT_SIZE) to enable
+ separation or merging when space is extended.
+
+ 3. Chunks allocated via mmap, have both cinuse and pinuse bits
+ cleared in their head fields. Because they are allocated
+ one-by-one, each must carry its own prev_foot field, which is
+ also used to hold the offset this chunk has within its mmapped
+ region, which is needed to preserve alignment. Each mmapped
+ chunk is trailed by the first two fields of a fake next-chunk
+ for sake of usage checks.
+
+ ---------------------- Overlaid data structures -----------------------
+
+ When chunks are not in use, they are treated as nodes of either
+ lists or trees.
+
+ "Small" chunks are stored in circular doubly-linked lists, and look
+ like this:
+
+ chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Size of previous chunk |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ `head:' | Size of chunk, in bytes |P|
+ mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Forward pointer to next chunk in list |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Back pointer to previous chunk in list |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Unused space (may be 0 bytes long) .
+ . .
+ . |
+nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ `foot:' | Size of chunk, in bytes |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ Larger chunks are kept in a form of bitwise digital trees (aka
+ tries) keyed on chunksizes. Because malloc_tree_chunks are only for
+ free chunks greater than 256 bytes, their size doesn't impose any
+ constraints on user chunk sizes. Each node looks like:
+
+ chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Size of previous chunk |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ `head:' | Size of chunk, in bytes |P|
+ mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Forward pointer to next chunk of same size |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Back pointer to previous chunk of same size |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Pointer to left child (child[0]) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Pointer to right child (child[1]) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Pointer to parent |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | bin index of this chunk |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Unused space .
+ . |
+nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ `foot:' | Size of chunk, in bytes |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ Each tree holding treenodes is a tree of unique chunk sizes. Chunks
+ of the same size are arranged in a circularly-linked list, with only
+ the oldest chunk (the next to be used, in our FIFO ordering)
+ actually in the tree. (Tree members are distinguished by a non-null
+ parent pointer.) If a chunk with the same size an an existing node
+ is inserted, it is linked off the existing node using pointers that
+ work in the same way as fd/bk pointers of small chunks.
+
+ Each tree contains a power of 2 sized range of chunk sizes (the
+ smallest is 0x100 <= x < 0x180), which is is divided in half at each
+ tree level, with the chunks in the smaller half of the range (0x100
+ <= x < 0x140 for the top nose) in the left subtree and the larger
+ half (0x140 <= x < 0x180) in the right subtree. This is, of course,
+ done by inspecting individual bits.
+
+ Using these rules, each node's left subtree contains all smaller
+ sizes than its right subtree. However, the node at the root of each
+ subtree has no particular ordering relationship to either. (The
+ dividing line between the subtree sizes is based on trie relation.)
+ If we remove the last chunk of a given size from the interior of the
+ tree, we need to replace it with a leaf node. The tree ordering
+ rules permit a node to be replaced by any leaf below it.
+
+ The smallest chunk in a tree (a common operation in a best-fit
+ allocator) can be found by walking a path to the leftmost leaf in
+ the tree. Unlike a usual binary tree, where we follow left child
+ pointers until we reach a null, here we follow the right child
+ pointer any time the left one is null, until we reach a leaf with
+ both child pointers null. The smallest chunk in the tree will be
+ somewhere along that path.
+
+ The worst case number of steps to add, find, or remove a node is
+ bounded by the number of bits differentiating chunks within
+ bins. Under current bin calculations, this ranges from 6 up to 21
+ (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
+ is of course much better.
+
+ ----------------------------- Segments --------------------------------
+
+ Each malloc space may include non-contiguous segments, held in a
+ list headed by an embedded malloc_segment record representing the
+ top-most space. Segments also include flags holding properties of
+ the space. Large chunks that are directly allocated by mmap are not
+ included in this list. They are instead independently created and
+ destroyed without otherwise keeping track of them.
+
+ Segment management mainly comes into play for spaces allocated by
+ MMAP. Any call to MMAP might or might not return memory that is
+ adjacent to an existing segment. MORECORE normally contiguously
+ extends the current space, so this space is almost always adjacent,
+ which is simpler and faster to deal with. (This is why MORECORE is
+ used preferentially to MMAP when both are available -- see
+ sys_alloc.) When allocating using MMAP, we don't use any of the
+ hinting mechanisms (inconsistently) supported in various
+ implementations of unix mmap, or distinguish reserving from
+ committing memory. Instead, we just ask for space, and exploit
+ contiguity when we get it. It is probably possible to do
+ better than this on some systems, but no general scheme seems
+ to be significantly better.
+
+ Management entails a simpler variant of the consolidation scheme
+ used for chunks to reduce fragmentation -- new adjacent memory is
+ normally prepended or appended to an existing segment. However,
+ there are limitations compared to chunk consolidation that mostly
+ reflect the fact that segment processing is relatively infrequent
+ (occurring only when getting memory from system) and that we
+ don't expect to have huge numbers of segments:
+
+ * Segments are not indexed, so traversal requires linear scans. (It
+ would be possible to index these, but is not worth the extra
+ overhead and complexity for most programs on most platforms.)
+ * New segments are only appended to old ones when holding top-most
+ memory; if they cannot be prepended to others, they are held in
+ different segments.
+
+ Except for the top-most segment of an mstate, each segment record
+ is kept at the tail of its segment. Segments are added by pushing
+ segment records onto the list headed by &mstate.seg for the
+ containing mstate.
+
+ Segment flags control allocation/merge/deallocation policies:
+ * If EXTERN_BIT set, then we did not allocate this segment,
+ and so should not try to deallocate or merge with others.
+ (This currently holds only for the initial segment passed
+ into create_mspace_with_base.)
+ * If USE_MMAP_BIT set, the segment may be merged with
+ other surrounding mmapped segments and trimmed/de-allocated
+ using munmap.
+ * If neither bit is set, then the segment was obtained using
+ MORECORE so can be merged with surrounding MORECORE'd segments
+ and deallocated/trimmed using MORECORE with negative arguments.
+
+ ---------------------------- malloc_state -----------------------------
+
+ A malloc_state holds all of the bookkeeping for a space.
+ The main fields are:
+
+ Top
+ The topmost chunk of the currently active segment. Its size is
+ cached in topsize. The actual size of topmost space is
+ topsize+TOP_FOOT_SIZE, which includes space reserved for adding
+ fenceposts and segment records if necessary when getting more
+ space from the system. The size at which to autotrim top is
+ cached from mparams in trim_check, except that it is disabled if
+ an autotrim fails.
+
+ Designated victim (dv)
+ This is the preferred chunk for servicing small requests that
+ don't have exact fits. It is normally the chunk split off most
+ recently to service another small request. Its size is cached in
+ dvsize. The link fields of this chunk are not maintained since it
+ is not kept in a bin.
+
+ SmallBins
+ An array of bin headers for free chunks. These bins hold chunks
+ with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
+ chunks of all the same size, spaced 8 bytes apart. To simplify
+ use in double-linked lists, each bin header acts as a malloc_chunk
+ pointing to the real first node, if it exists (else pointing to
+ itself). This avoids special-casing for headers. But to avoid
+ waste, we allocate only the fd/bk pointers of bins, and then use
+ repositioning tricks to treat these as the fields of a chunk.
+
+ TreeBins
+ Treebins are pointers to the roots of trees holding a range of
+ sizes. There are 2 equally spaced treebins for each power of two
+ from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything
+ larger.
+
+ Bin maps
+ There is one bit map for small bins ("smallmap") and one for
+ treebins ("treemap). Each bin sets its bit when non-empty, and
+ clears the bit when empty. Bit operations are then used to avoid
+ bin-by-bin searching -- nearly all "search" is done without ever
+ looking at bins that won't be selected. The bit maps
+ conservatively use 32 bits per map word, even if on 64bit system.
+ For a good description of some of the bit-based techniques used
+ here, see Henry S. Warren Jr's book "Hacker's Delight" (and
+ supplement at http://hackersdelight.org/). Many of these are
+ intended to reduce the branchiness of paths through malloc etc, as
+ well as to reduce the number of memory locations read or written.
+
+ Segments
+ A list of segments headed by an embedded malloc_segment record
+ representing the initial space.
+
+ Address check support
+ The least_addr field is the least address ever obtained from
+ MORECORE or MMAP. Attempted frees and reallocs of any address less
+ than this are trapped (unless INSECURE is defined).
+
+ Magic tag
+ A cross-check field that should always hold same value as mparams.magic.
+
+ Max allowed footprint
+ The maximum allowed bytes to allocate from system (zero means no limit)
+
+ Flags
+ Bits recording whether to use MMAP, locks, or contiguous MORECORE
+
+ Statistics
+ Each space keeps track of current and maximum system memory
+ obtained via MORECORE or MMAP.
+
+ Trim support
+ Fields holding the amount of unused topmost memory that should trigger
+ trimming, and a counter to force periodic scanning to release unused
+ non-topmost segments.
+
+ Locking
+ If USE_LOCKS is defined, the "mutex" lock is acquired and released
+ around every public call using this mspace.
+
+ Extension support
+ A void* pointer and a size_t field that can be used to help implement
+ extensions to this malloc.
+
+////////////////////////////////////////////////////////////////////////////////
+
* MSPACES
If MSPACES is defined, then in addition to malloc, free, etc.,
this file also defines mspace_malloc, mspace_free, etc. These
@@ -213,12 +579,12 @@
indicating its originating mspace, and frees are directed to their
originating spaces. Normally, this requires use of locks.
- ───────────────────────── Compile-time options ───────────────────────────
+ ------------------------- Compile-time options ---------------------------
Be careful in setting #define values for numerical constants of type
size_t. On some systems, literal values are not automatically extended
to size_t precision unless they are explicitly casted. You can also
-use the symbolic values SIZE_MAX, SIZE_T_ONE, etc below.
+use the symbolic values MAX_SIZE_T, SIZE_T_ONE, etc below.
WIN32 default: defined if _WIN32 defined
Defining WIN32 sets up defaults for MS environment and compilers.
@@ -287,7 +653,7 @@ FOOTERS default: 0
information in the footers of allocated chunks. This adds
space and time overhead.
-TRUSTWORTHY default: 0
+INSECURE default: 0
If true, omit checks for usage errors and heap space overwrites.
USE_DL_PREFIX default: NOT defined
@@ -301,7 +667,7 @@ MALLOC_INSPECT_ALL default: NOT defined
functions is otherwise restricted, you probably do not want to
include them in secure implementations.
-MALLOC_ABORT default: defined as abort()
+ABORT default: defined as abort()
Defines how to abort on failed checks. On most systems, a failed
check cannot die with an "assert" or even print an informative
message, because the underlying print routines in turn call malloc,
@@ -389,6 +755,10 @@ HAVE_MMAP default: 1 (true)
able to unmap memory that may have be allocated using multiple calls
to MMAP, so long as they are adjacent.
+HAVE_MREMAP default: 1 on linux, else 0
+ If true realloc() uses mremap() to re-allocate large blocks and
+ extend or shrink allocation spaces.
+
MMAP_CLEARS default: 1 except on WINCE.
True if mmap clears memory so calloc doesn't need to. This is true
for standard unix mmap using /dev/zero and on WIN32 except for WINCE.
@@ -408,6 +778,10 @@ malloc_getpagesize default: derive from system includes, or 4096.
if WIN32, where page size is determined using getSystemInfo during
initialization.
+USE_DEV_RANDOM default: 0 (i.e., not used)
+ Causes malloc to use /dev/random to initialize secure magic seed for
+ stamping footers. Otherwise, the current time is used.
+
NO_MALLINFO default: 0
If defined, don't compile "mallinfo". This can be a simple way
of dealing with mismatches between system declarations and
@@ -466,7 +840,7 @@ DEFAULT_TRIM_THRESHOLD default: 2MB
and released in ways that can reuse each other's storage, perhaps
mixed with phases where there are no such chunks at all. The trim
value must be greater than page size to have any useful effect. To
- disable trimming completely, you can set to SIZE_MAX. Note that the trick
+ disable trimming completely, you can set to MAX_SIZE_T. Note that the trick
some people use of mallocing a huge space and then freeing it at
program startup, in an attempt to reserve system memory, doesn't
have the intended effect under automatic trimming, since that memory
@@ -494,7 +868,7 @@ DEFAULT_MMAP_THRESHOLD default: 256K
nearly always outweigh disadvantages for "large" chunks, but the
value of "large" may vary across systems. The default is an
empirically derived value that works well in most systems. You can
- disable mmap by setting to SIZE_MAX.
+ disable mmap by setting to MAX_SIZE_T.
MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP
The number of consolidated frees between checks to release
@@ -507,13 +881,100 @@ MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP
consolidation. The best value for this parameter is a compromise
between slowing down frees with relatively costly checks that
rarely trigger versus holding on to unused memory. To effectively
- disable, set to SIZE_MAX. This may lead to a very slight speed
+ disable, set to MAX_SIZE_T. This may lead to a very slight speed
improvement at the expense of carrying around more memory.
-────────────────────────────────────────────────────────────────────────────────
+ Guidelines for creating a custom version of MORECORE:
+ * For best performance, MORECORE should allocate in multiples of pagesize.
+ * MORECORE may allocate more memory than requested. (Or even less,
+ but this will usually result in a malloc failure.)
+ * MORECORE must not allocate memory when given argument zero, but
+ instead return one past the end address of memory from previous
+ nonzero call.
+ * For best performance, consecutive calls to MORECORE with positive
+ arguments should return increasing addresses, indicating that
+ space has been contiguously extended.
+ * Even though consecutive calls to MORECORE need not return contiguous
+ addresses, it must be OK for malloc'ed chunks to span multiple
+ regions in those cases where they do happen to be contiguous.
+ * MORECORE need not handle negative arguments -- it may instead
+ just return MFAIL when given negative arguments.
+ Negative arguments are always multiples of pagesize. MORECORE
+ must not misinterpret negative args as large positive unsigned
+ args. You can suppress all such calls from even occurring by defining
+ MORECORE_CANNOT_TRIM,
+
+ As an example alternative MORECORE, here is a custom allocator
+ kindly contributed for pre-OSX macOS. It uses virtually but not
+ necessarily physically contiguous non-paged memory (locked in,
+ present and won't get swapped out). You can use it by uncommenting
+ this section, adding some #includes, and setting up the appropriate
+ defines above:
+
+ #define MORECORE osMoreCore
+
+ There is also a shutdown routine that should somehow be called for
+ cleanup upon program exit.
+
+ #define MAX_POOL_ENTRIES 100
+ #define MINIMUM_MORECORE_SIZE (64 * 1024U)
+ static int next_os_pool;
+ void *our_os_pools[MAX_POOL_ENTRIES];
+
+ void *osMoreCore(int size)
+ {
+ void *ptr = 0;
+ static void *sbrk_top = 0;
+
+ if (size > 0)
+ {
+ if (size < MINIMUM_MORECORE_SIZE)
+ size = MINIMUM_MORECORE_SIZE;
+ if (CurrentExecutionLevel() == kTaskLevel)
+ ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
+ if (ptr == 0)
+ {
+ return (void *) MFAIL;
+ }
+ // save ptrs so they can be freed during cleanup
+ our_os_pools[next_os_pool] = ptr;
+ next_os_pool++;
+ ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
+ sbrk_top = (char *) ptr + size;
+ return ptr;
+ }
+ else if (size < 0)
+ {
+ // we don't currently support shrink behavior
+ return (void *) MFAIL;
+ }
+ else
+ {
+ return sbrk_top;
+ }
+ }
+
+ // cleanup any allocated memory pools
+ // called as last thing before shutting down driver
+
+ void osCleanupMem(void)
+ {
+ void **ptr;
+
+ for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
+ if (*ptr)
+ {
+ PoolDeallocate(*ptr);
+ *ptr = 0;
+ }
+ }
+
+*/
+
+
+/* -----------------------------------------------------------------------
History:
-
v2.8.6 Wed Aug 29 06:57:58 2012 Doug Lea
* fix bad comparison in dlposix_memalign
* don't reuse adjusted asize in sys_alloc
@@ -521,7 +982,7 @@ History:
* reduce compiler warnings -- thanks to all who reported/suggested these
v2.8.5 Sun May 22 10:26:02 2011 Doug Lea (dl at gee)
- * Always perform unlink checks unless TRUSTWORTHY
+ * Always perform unlink checks unless INSECURE
* Add posix_memalign.
* Improve realloc to expand in more cases; expose realloc_in_place.
Thanks to Peter Buhr for the suggestion.
@@ -728,94 +1189,3 @@ History:
Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu)
* Based loosely on libg++-1.2X malloc. (It retains some of the overall
structure of old version, but most details differ.)
-
-/* ──────────────────── Alternative MORECORE functions ─────────────────── */
-
-/*
- Guidelines for creating a custom version of MORECORE:
-
- * For best performance, MORECORE should allocate in multiples of pagesize.
- * MORECORE may allocate more memory than requested. (Or even less,
- but this will usually result in a malloc failure.)
- * MORECORE must not allocate memory when given argument zero, but
- instead return one past the end address of memory from previous
- nonzero call.
- * For best performance, consecutive calls to MORECORE with positive
- arguments should return increasing addresses, indicating that
- space has been contiguously extended.
- * Even though consecutive calls to MORECORE need not return contiguous
- addresses, it must be OK for malloc'ed chunks to span multiple
- regions in those cases where they do happen to be contiguous.
- * MORECORE need not handle negative arguments -- it may instead
- just return MFAIL when given negative arguments.
- Negative arguments are always multiples of pagesize. MORECORE
- must not misinterpret negative args as large positive unsigned
- args. You can suppress all such calls from even occurring by defining
- MORECORE_CANNOT_TRIM,
-
- As an example alternative MORECORE, here is a custom allocator
- kindly contributed for pre-OSX macOS. It uses virtually but not
- necessarily physically contiguous non-paged memory (locked in,
- present and won't get swapped out). You can use it by uncommenting
- this section, adding some #includes, and setting up the appropriate
- defines above:
-
- #define MORECORE osMoreCore
-
- There is also a shutdown routine that should somehow be called for
- cleanup upon program exit.
-
- #define MAX_POOL_ENTRIES 100
- #define MINIMUM_MORECORE_SIZE (64 * 1024U)
- static int next_os_pool;
- void *our_os_pools[MAX_POOL_ENTRIES];
-
- void *osMoreCore(int size)
- {
- void *ptr = 0;
- static void *sbrk_top = 0;
-
- if (size > 0)
- {
- if (size < MINIMUM_MORECORE_SIZE)
- size = MINIMUM_MORECORE_SIZE;
- if (CurrentExecutionLevel() == kTaskLevel)
- ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
- if (ptr == 0)
- {
- return (void *) MFAIL;
- }
- // save ptrs so they can be freed during cleanup
- our_os_pools[next_os_pool] = ptr;
- next_os_pool++;
- ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
- sbrk_top = (char *) ptr + size;
- return ptr;
- }
- else if (size < 0)
- {
- // we don't currently support shrink behavior
- return (void *) MFAIL;
- }
- else
- {
- return sbrk_top;
- }
- }
-
- // cleanup any allocated memory pools
- // called as last thing before shutting down driver
-
- void osCleanupMem(void)
- {
- void **ptr;
-
- for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
- if (*ptr)
- {
- PoolDeallocate(*ptr);
- *ptr = 0;
- }
- }
-
-*/
diff --git a/third_party/dlmalloc/README.cosmo b/third_party/dlmalloc/README.cosmo
index 95e599734..d364ed7b6 100644
--- a/third_party/dlmalloc/README.cosmo
+++ b/third_party/dlmalloc/README.cosmo
@@ -1,21 +1,13 @@
-ORIGIN
+DESCRIPTION
- http://gee.cs.oswego.edu/
+ malloc-2.8.6
+ written by Doug Lea
+
+LICENSE
+
+ http://creativecommons.org/publicdomain/zero/1.0/
LOCAL CHANGES
- Numerous local changes were made while vendoring Doug Lee's original
- dlmalloc sources. Those changes basically boil down to:
-
- 1. Fewer #ifdefs
- 2. More modules (so linker can do a better job)
- 3. Delete code we don't need (cf. Knight Capital)
- 4. Readability / stylistic consistency
-
- Since we haven't made any genuine improvements to Doug Lee's legendary
- allocator, we feel this folder faithfully presents his intended work, in
- harmony with Cosmopolitan conventions.
-
- The only deleted code we're sure has compelling merit is the mspace
- functionality. If we ever need memory pools, they might be more
- appropriately vendored under //third_party/dlmalloc_mspace.
+ - Introduce __oom_hook()
+ - Favor pause (rather than sched_yield) for spin locks
diff --git a/third_party/dlmalloc/bulk_free.c b/third_party/dlmalloc/bulk_free.c
deleted file mode 100644
index 54fa98926..000000000
--- a/third_party/dlmalloc/bulk_free.c
+++ /dev/null
@@ -1,61 +0,0 @@
-#include "libc/mem/mem.h"
-#include "third_party/dlmalloc/dlmalloc.internal.h"
-
-/**
- * Frees and clears (sets to NULL) each non-null pointer in the given
- * array. This is twice as fast as freeing them one-by-one. If footers
- * are used, pointers that have been allocated in different mspaces are
- * not freed or cleared, and the count of all such pointers is returned.
- * For large arrays of pointers with poor locality, it may be worthwhile
- * to sort this array before calling bulk_free.
- */
-size_t dlbulk_free(void *array[], size_t nelem) {
- void **a, **b, *mem, **fence;
- struct MallocChunk *p, *next;
- size_t psize, newsize, unfreed;
- /*
- * Try to free all pointers in the given array. Note: this could be
- * made faster, by delaying consolidation, at the price of disabling
- * some user integrity checks, We still optimize some consolidations
- * by combining adjacent chunks before freeing, which will occur often
- * if allocated with ialloc or the array is sorted.
- */
- unfreed = 0;
- if (!PREACTION(g_dlmalloc)) {
- a;
- fence = &(array[nelem]);
- for (a = array; a != fence; ++a) {
- mem = *a;
- if (mem != 0) {
- p = mem2chunk(AddressDeathAction(mem));
- psize = chunksize(p);
-#if FOOTERS
- if (get_mstate_for(p) != g_dlmalloc) {
- ++unfreed;
- continue;
- }
-#endif
- check_inuse_chunk(g_dlmalloc, p);
- *a = 0;
- if (RTCHECK(ok_address(g_dlmalloc, p) && ok_inuse(p))) {
- b = a + 1; /* try to merge with next chunk */
- next = next_chunk(p);
- if (b != fence && *b == chunk2mem(next)) {
- newsize = chunksize(next) + psize;
- set_inuse(g_dlmalloc, p, newsize);
- *b = chunk2mem(p);
- } else
- dlmalloc_dispose_chunk(g_dlmalloc, p, psize);
- } else {
- CORRUPTION_ERROR_ACTION(g_dlmalloc);
- break;
- }
- }
- }
- if (should_trim(g_dlmalloc, g_dlmalloc->topsize)) {
- dlmalloc_sys_trim(g_dlmalloc, 0);
- }
- POSTACTION(g_dlmalloc);
- }
- return unfreed;
-}
diff --git a/third_party/dlmalloc/dlcalloc.c b/third_party/dlmalloc/dlcalloc.c
deleted file mode 100644
index ea84df994..000000000
--- a/third_party/dlmalloc/dlcalloc.c
+++ /dev/null
@@ -1,13 +0,0 @@
-#include "libc/str/str.h"
-#include "third_party/dlmalloc/dlmalloc.internal.h"
-
-void *dlcalloc(size_t n_elements, size_t elem_size) {
- void *mem;
- size_t req;
- if (__builtin_mul_overflow(n_elements, elem_size, &req)) req = -1;
- mem = dlmalloc(req);
- if (mem && calloc_must_clear(mem2chunk(mem))) {
- bzero(mem, req);
- }
- return mem;
-}
diff --git a/third_party/dlmalloc/dlindependent_calloc.c b/third_party/dlmalloc/dlindependent_calloc.c
deleted file mode 100644
index a847335b0..000000000
--- a/third_party/dlmalloc/dlindependent_calloc.c
+++ /dev/null
@@ -1,227 +0,0 @@
-#include "libc/mem/mem.h"
-#include "libc/str/str.h"
-#include "third_party/dlmalloc/dlmalloc.internal.h"
-
-/*
- Common support for independent_X routines, handling
- all of the combinations that can result.
- The opts arg has:
- bit 0 set if all elements are same size (using sizes[0])
- bit 1 set if elements should be zeroed
-*/
-static void **ialloc(mstate m, size_t n_elements, size_t *sizes, int opts,
- void *chunks[]) {
- size_t element_size; /* chunksize of each element, if all same */
- size_t contents_size; /* total size of elements */
- size_t array_size; /* request size of pointer array */
- void *mem; /* malloced aggregate space */
- mchunkptr p; /* corresponding chunk */
- size_t remainder_size; /* remaining bytes while splitting */
- void **marray; /* either "chunks" or malloced ptr array */
- mchunkptr array_chunk; /* chunk for malloced ptr array */
- flag_t was_enabled; /* to disable mmap */
- size_t size;
- size_t i;
-
- ensure_initialization();
- /* compute array length, if needed */
- if (chunks != 0) {
- if (n_elements == 0) return chunks; /* nothing to do */
- marray = chunks;
- array_size = 0;
- } else {
- /* if empty req, must still return chunk representing empty array */
- if (n_elements == 0) return (void **)dlmalloc(0);
- marray = 0;
- array_size = request2size(n_elements * (sizeof(void *)));
- }
-
- /* compute total element size */
- if (opts & 0x1) { /* all-same-size */
- element_size = request2size(*sizes);
- contents_size = n_elements * element_size;
- } else { /* add up all the sizes */
- element_size = 0;
- contents_size = 0;
- for (i = 0; i != n_elements; ++i) contents_size += request2size(sizes[i]);
- }
-
- size = contents_size + array_size;
-
- /*
- Allocate the aggregate chunk. First disable direct-mmapping so
- malloc won't use it, since we would not be able to later
- free/realloc space internal to a segregated mmap region.
- */
- was_enabled = use_mmap(m);
- disable_mmap(m);
- mem = dlmalloc(size - CHUNK_OVERHEAD);
- if (was_enabled) enable_mmap(m);
- if (mem == 0) return 0;
-
- if (PREACTION(m)) return 0;
- p = mem2chunk(mem);
- remainder_size = chunksize(p);
-
- assert(!is_mmapped(p));
-
- if (opts & 0x2) { /* optionally clear the elements */
- bzero((size_t *)mem, remainder_size - SIZE_T_SIZE - array_size);
- }
-
- /* If not provided, allocate the pointer array as final part of chunk */
- if (marray == 0) {
- size_t array_chunk_size;
- array_chunk = chunk_plus_offset(p, contents_size);
- array_chunk_size = remainder_size - contents_size;
- marray = AddressBirthAction((void **)(chunk2mem(array_chunk)));
- set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
- remainder_size = contents_size;
- }
-
- /* split out elements */
- for (i = 0;; ++i) {
- marray[i] = AddressBirthAction(chunk2mem(p));
- if (i != n_elements - 1) {
- if (element_size != 0)
- size = element_size;
- else
- size = request2size(sizes[i]);
- remainder_size -= size;
- set_size_and_pinuse_of_inuse_chunk(m, p, size);
- p = chunk_plus_offset(p, size);
- } else { /* the final element absorbs any overallocation slop */
- set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
- break;
- }
- }
-
-#ifdef DEBUG
- if (marray != chunks) {
- /* final element must have exactly exhausted chunk */
- if (element_size != 0) {
- assert(remainder_size == element_size);
- } else {
- assert(remainder_size == request2size(sizes[i]));
- }
- check_inuse_chunk(m, mem2chunk(marray));
- }
- for (i = 0; i != n_elements; ++i) {
- check_inuse_chunk(m, mem2chunk(marray[i]));
- }
-#endif /* IsModeDbg() */
-
- POSTACTION(m);
- return marray;
-}
-
-/**
- * independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
- *
- * independent_calloc is similar to calloc, but instead of returning a
- * single cleared space, it returns an array of pointers to n_elements
- * independent elements that can hold contents of size elem_size, each
- * of which starts out cleared, and can be independently freed,
- * realloc'ed etc. The elements are guaranteed to be adjacently
- * allocated (this is not guaranteed to occur with multiple callocs or
- * mallocs), which may also improve cache locality in some applications.
- *
- * The "chunks" argument is optional (i.e., may be null, which is
- * probably the most typical usage). If it is null, the returned array
- * is itself dynamically allocated and should also be freed when it is
- * no longer needed. Otherwise, the chunks array must be of at least
- * n_elements in length. It is filled in with the pointers to the
- * chunks.
- *
- * In either case, independent_calloc returns this pointer array, or
- * null if the allocation failed. * If n_elements is zero and "chunks"
- * is null, it returns a chunk representing an array with zero elements
- * (which should be freed if not wanted).
- *
- * Each element must be freed when it is no longer needed. This can be
- * done all at once using bulk_free.
- *
- * independent_calloc simplifies and speeds up implementations of many
- * kinds of pools. * It may also be useful when constructing large data
- * structures that initially have a fixed number of fixed-sized nodes,
- * but the number is not known at compile time, and some of the nodes
- * may later need to be freed. For example:
- *
- * struct Node { int item; struct Node* next; };
- * struct Node* build_list() {
- * struct Node **pool;
- * int n = read_number_of_nodes_needed();
- * if (n <= 0) return 0;
- * pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0);
- * if (pool == 0) __die();
- * // organize into a linked list...
- * struct Node* first = pool[0];
- * for (i = 0; i < n-1; ++i)
- * pool[i]->next = pool[i+1];
- * free(pool); * // Can now free the array (or not, if it is needed later)
- * return first;
- * }
- */
-void **dlindependent_calloc(size_t n_elements, size_t elem_size,
- void *chunks[]) {
- size_t sz = elem_size; /* serves as 1-element array */
- return ialloc(g_dlmalloc, n_elements, &sz, 3, chunks);
-}
-
-/**
- * independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
- *
- * independent_comalloc allocates, all at once, a set of n_elements
- * chunks with sizes indicated in the "sizes" array. It returns an array
- * of pointers to these elements, each of which can be independently
- * freed, realloc'ed etc. The elements are guaranteed to be adjacently
- * allocated (this is not guaranteed to occur with multiple callocs or
- * mallocs), which may also improve cache locality in some applications.
- *
- * The "chunks" argument is optional (i.e., may be null). If it is null
- * the returned array is itself dynamically allocated and should also
- * be freed when it is no longer needed. Otherwise, the chunks array
- * must be of at least n_elements in length. It is filled in with the
- * pointers to the chunks.
- *
- * In either case, independent_comalloc returns this pointer array, or
- * null if the allocation failed. If n_elements is zero and chunks is
- * null, it returns a chunk representing an array with zero elements
- * (which should be freed if not wanted).
- *
- * Each element must be freed when it is no longer needed. This can be
- * done all at once using bulk_free.
- *
- * independent_comallac differs from independent_calloc in that each
- * element may have a different size, and also that it does not
- * automatically clear elements.
- *
- * independent_comalloc can be used to speed up allocation in cases
- * where several structs or objects must always be allocated at the
- * same time. For example:
- *
- * struct Head { ... }
- * struct Foot { ... }
- * void send_message(char* msg) {
- * int msglen = strlen(msg);
- * size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
- * void* chunks[3];
- * if (independent_comalloc(3, sizes, chunks) == 0) __die();
- * struct Head* head = (struct Head*)(chunks[0]);
- * char* body = (char*)(chunks[1]);
- * struct Foot* foot = (struct Foot*)(chunks[2]);
- * // ...
- * }
- *
- * In general though, independent_comalloc is worth using only for
- * larger values of n_elements. For small values, you probably won't
- * detect enough difference from series of malloc calls to bother.
- *
- * Overuse of independent_comalloc can increase overall memory usage,
- * since it cannot reuse existing noncontiguous small chunks that might
- * be available for some of the elements.
- */
-void **dlindependent_comalloc(size_t n_elements, size_t sizes[],
- void *chunks[]) {
- return ialloc(g_dlmalloc, n_elements, sizes, 0, chunks);
-}
diff --git a/third_party/dlmalloc/dlmalloc-debug.c b/third_party/dlmalloc/dlmalloc-debug.c
deleted file mode 100644
index bd4209506..000000000
--- a/third_party/dlmalloc/dlmalloc-debug.c
+++ /dev/null
@@ -1,247 +0,0 @@
-#include "third_party/dlmalloc/dlmalloc.internal.h"
-
-/* Check properties of any chunk, whether free, inuse, mmapped etc */
-forceinline void do_check_any_chunk(mstate m, mchunkptr p) {
- assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
- assert(ok_address(m, p));
-}
-
-/* Check properties of top chunk */
-void do_check_top_chunk(mstate m, mchunkptr p) {
- msegmentptr sp = segment_holding(m, (char*)p);
- size_t sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! */
- assert(sp != 0);
- assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
- assert(ok_address(m, p));
- assert(sz == m->topsize);
- assert(sz > 0);
- assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);
- assert(pinuse(p));
- assert(!pinuse(chunk_plus_offset(p, sz)));
-}
-
-/* Check properties of (inuse) mmapped chunks */
-void do_check_mmapped_chunk(mstate m, mchunkptr p) {
- size_t sz = chunksize(p);
- size_t len = (sz + (p->prev_foot) + MMAP_FOOT_PAD);
- assert(is_mmapped(p));
- assert(use_mmap(m));
- assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
- assert(ok_address(m, p));
- assert(!is_small(sz));
- assert((len & (g_mparams.page_size - SIZE_T_ONE)) == 0);
- assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
- assert(chunk_plus_offset(p, sz + SIZE_T_SIZE)->head == 0);
-}
-
-/* Check properties of inuse chunks */
-void do_check_inuse_chunk(mstate m, mchunkptr p) {
- do_check_any_chunk(m, p);
- assert(is_inuse(p));
- assert(next_pinuse(p));
- /* If not pinuse and not mmapped, previous chunk has OK offset */
- assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
- if (is_mmapped(p)) do_check_mmapped_chunk(m, p);
-}
-
-/* Check properties of free chunks */
-void do_check_free_chunk(mstate m, mchunkptr p) {
- size_t sz = chunksize(p);
- mchunkptr next = chunk_plus_offset(p, sz);
- do_check_any_chunk(m, p);
- assert(!is_inuse(p));
- assert(!next_pinuse(p));
- assert(!is_mmapped(p));
- if (p != m->dv && p != m->top) {
- if (sz >= MIN_CHUNK_SIZE) {
- assert((sz & CHUNK_ALIGN_MASK) == 0);
- assert(is_aligned(chunk2mem(p)));
- assert(next->prev_foot == sz);
- assert(pinuse(p));
- assert(next == m->top || is_inuse(next));
- assert(p->fd->bk == p);
- assert(p->bk->fd == p);
- } else /* markers are always of size SIZE_T_SIZE */
- assert(sz == SIZE_T_SIZE);
- }
-}
-
-/* Check properties of malloced chunks at the point they are malloced */
-void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
- if (mem != 0) {
- mchunkptr p = mem2chunk(mem);
- size_t sz = p->head & ~INUSE_BITS;
- do_check_inuse_chunk(m, p);
- assert((sz & CHUNK_ALIGN_MASK) == 0);
- assert(sz >= MIN_CHUNK_SIZE);
- assert(sz >= s);
- /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
- assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
- }
-}
-
-/* Check a tree and its subtrees. */
-static void do_check_tree(mstate m, tchunkptr t) {
- tchunkptr head = 0;
- tchunkptr u = t;
- bindex_t tindex = t->index;
- size_t tsize = chunksize(t);
- bindex_t idx;
- compute_tree_index(tsize, idx);
- assert(tindex == idx);
- assert(tsize >= MIN_LARGE_SIZE);
- assert(tsize >= minsize_for_tree_index(idx));
- assert((idx == NTREEBINS - 1) || (tsize < minsize_for_tree_index((idx + 1))));
-
- do { /* traverse through chain of same-sized nodes */
- do_check_any_chunk(m, ((mchunkptr)u));
- assert(u->index == tindex);
- assert(chunksize(u) == tsize);
- assert(!is_inuse(u));
- assert(!next_pinuse(u));
- assert(u->fd->bk == u);
- assert(u->bk->fd == u);
- if (u->parent == 0) {
- assert(u->child[0] == 0);
- assert(u->child[1] == 0);
- } else {
- assert(head == 0); /* only one node on chain has parent */
- head = u;
- assert(u->parent != u);
- assert(u->parent->child[0] == u || u->parent->child[1] == u ||
- *((tbinptr*)(u->parent)) == u);
- if (u->child[0] != 0) {
- assert(u->child[0]->parent == u);
- assert(u->child[0] != u);
- do_check_tree(m, u->child[0]);
- }
- if (u->child[1] != 0) {
- assert(u->child[1]->parent == u);
- assert(u->child[1] != u);
- do_check_tree(m, u->child[1]);
- }
- if (u->child[0] != 0 && u->child[1] != 0) {
- assert(chunksize(u->child[0]) < chunksize(u->child[1]));
- }
- }
- u = u->fd;
- } while (u != t);
- assert(head != 0);
-}
-
-/* Check all the chunks in a treebin. */
-static void do_check_treebin(mstate m, bindex_t i) {
- tbinptr* tb = treebin_at(m, i);
- tchunkptr t = *tb;
- int empty = (m->treemap & (1U << i)) == 0;
- if (t == 0) assert(empty);
- if (!empty) do_check_tree(m, t);
-}
-
-/* Check all the chunks in a smallbin. */
-static void do_check_smallbin(mstate m, bindex_t i) {
- sbinptr b = smallbin_at(m, i);
- mchunkptr p = b->bk;
- unsigned int empty = (m->smallmap & (1U << i)) == 0;
- if (p == b) assert(empty);
- if (!empty) {
- for (; p != b; p = p->bk) {
- size_t size = chunksize(p);
- mchunkptr q;
- /* each chunk claims to be free */
- do_check_free_chunk(m, p);
- /* chunk belongs in bin */
- assert(small_index(size) == i);
- assert(p->bk == b || chunksize(p->bk) == chunksize(p));
- /* chunk is followed by an inuse chunk */
- q = next_chunk(p);
- if (q->head != FENCEPOST_HEAD) do_check_inuse_chunk(m, q);
- }
- }
-}
-
-/* Find x in a bin. Used in other check functions. */
-static int bin_find(mstate m, mchunkptr x) {
- size_t size = chunksize(x);
- if (is_small(size)) {
- bindex_t sidx = small_index(size);
- sbinptr b = smallbin_at(m, sidx);
- if (smallmap_is_marked(m, sidx)) {
- mchunkptr p = b;
- do {
- if (p == x) return 1;
- } while ((p = p->fd) != b);
- }
- } else {
- bindex_t tidx;
- compute_tree_index(size, tidx);
- if (treemap_is_marked(m, tidx)) {
- tchunkptr t = *treebin_at(m, tidx);
- size_t sizebits = size << leftshift_for_tree_index(tidx);
- while (t != 0 && chunksize(t) != size) {
- t = t->child[(sizebits >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1];
- sizebits <<= 1;
- }
- if (t != 0) {
- tchunkptr u = t;
- do {
- if (u == (tchunkptr)x) return 1;
- } while ((u = u->fd) != t);
- }
- }
- }
- return 0;
-}
-
-/* Traverse each chunk and check it; return total */
-static size_t traverse_and_check(mstate m) {
- size_t sum = 0;
- if (is_initialized(m)) {
- msegmentptr s = &m->seg;
- sum += m->topsize + TOP_FOOT_SIZE;
- while (s != 0) {
- mchunkptr q = align_as_chunk(s->base);
- mchunkptr lastq = 0;
- assert(pinuse(q));
- while (segment_holds(s, q) && q != m->top && q->head != FENCEPOST_HEAD) {
- sum += chunksize(q);
- if (is_inuse(q)) {
- assert(!bin_find(m, q));
- do_check_inuse_chunk(m, q);
- } else {
- assert(q == m->dv || bin_find(m, q));
- assert(lastq == 0 || is_inuse(lastq)); /* Not 2 consecutive free */
- do_check_free_chunk(m, q);
- }
- lastq = q;
- q = next_chunk(q);
- }
- s = s->next;
- }
- }
- return sum;
-}
-
-/* Check all properties of MallocState. */
-void do_check_malloc_state(mstate m) {
- bindex_t i;
- size_t total;
- /* check bins */
- for (i = 0; i < NSMALLBINS; ++i) do_check_smallbin(m, i);
- for (i = 0; i < NTREEBINS; ++i) do_check_treebin(m, i);
- if (m->dvsize != 0) { /* check dv chunk */
- do_check_any_chunk(m, m->dv);
- assert(m->dvsize == chunksize(m->dv));
- assert(m->dvsize >= MIN_CHUNK_SIZE);
- assert(bin_find(m, m->dv) == 0);
- }
- if (m->top != 0) { /* check top chunk */
- do_check_top_chunk(m, m->top);
- /*assert(m->topsize == chunksize(m->top)); redundant */
- assert(m->topsize > 0);
- assert(bin_find(m, m->top) == 0);
- }
- total = traverse_and_check(m);
- assert(total <= m->footprint);
- assert(m->footprint <= m->max_footprint);
-}
diff --git a/third_party/dlmalloc/dlmalloc.c b/third_party/dlmalloc/dlmalloc.c
index e60252ea9..4cb486612 100644
--- a/third_party/dlmalloc/dlmalloc.c
+++ b/third_party/dlmalloc/dlmalloc.c
@@ -1,181 +1,2797 @@
-#include "libc/assert.h"
-#include "libc/bits/initializer.internal.h"
-#include "libc/bits/safemacros.internal.h"
-#include "libc/bits/weaken.h"
-#include "libc/calls/calls.h"
-#include "libc/calls/internal.h"
-#include "libc/calls/struct/sysinfo.h"
+#include "libc/nexgen32e/rdtsc.h"
#include "libc/dce.h"
-#include "libc/fmt/conv.h"
-#include "libc/intrin/asan.internal.h"
-#include "libc/limits.h"
-#include "libc/log/backtrace.internal.h"
-#include "libc/log/libfatal.internal.h"
-#include "libc/macros.internal.h"
-#include "libc/mem/mem.h"
-#include "libc/nt/systeminfo.h"
-#include "libc/runtime/memtrack.internal.h"
-#include "libc/runtime/runtime.h"
-#include "libc/str/str.h"
-#include "libc/sysv/consts/fileno.h"
+#include "libc/assert.h"
+#include "libc/rand/rand.h"
+#include "libc/runtime/sysconf.h"
#include "libc/sysv/consts/map.h"
#include "libc/sysv/consts/prot.h"
-#include "libc/sysv/errfuns.h"
-#include "third_party/dlmalloc/dlmalloc.internal.h"
+#include "libc/runtime/runtime.h"
+#include "libc/errno.h"
+#include "libc/errno.h"
+#include "libc/stdio/stdio.h"
+#include "libc/intrin/kprintf.h"
+#include "third_party/dlmalloc/vespene.internal.h"
+#include "libc/calls/calls.h"
+#include "libc/calls/calls.h"
+#include "libc/runtime/runtime.h"
+#include "libc/bits/weaken.h"
+#include "libc/intrin/kprintf.h"
+#include "libc/mem/mem.h"
+// clang-format off
-STATIC_YOINK("_init_dlmalloc");
+#define FOOTERS 0
+#define MSPACES 0
-#define OOM_WARNING "warning: running out of physical memory\n"
-#define is_global(M) ((M) == g_dlmalloc)
+#define HAVE_MMAP 1
+#define HAVE_MREMAP 0
+#define HAVE_MORECORE 0
+#define USE_LOCKS 1
+#define MORECORE_CONTIGUOUS 0
+#define MALLOC_INSPECT_ALL 1
-hidden struct MallocState g_dlmalloc[1];
-hidden struct MallocParams g_mparams;
+#if IsTiny()
+#define INSECURE 1
+#define PROCEED_ON_ERROR 1
+#define ABORT_ON_ASSERT_FAILURE 0
+#endif
+
+#if IsModeDbg()
+#define DEBUG 1
+#endif
+
+#define LACKS_UNISTD_H
+#define LACKS_FCNTL_H
+#define LACKS_SYS_PARAM_H
+#define LACKS_SYS_MMAN_H
+#define LACKS_STRINGS_H
+#define LACKS_STRING_H
+#define LACKS_SYS_TYPES_H
+#define LACKS_ERRNO_H
+#define LACKS_STDLIB_H
+#define LACKS_SCHED_H
+#define LACKS_TIME_H
+
+/* Version identifier to allow people to support multiple versions */
+#ifndef DLMALLOC_VERSION
+#define DLMALLOC_VERSION 20806
+#endif /* DLMALLOC_VERSION */
+
+#ifndef DLMALLOC_EXPORT
+#define DLMALLOC_EXPORT extern
+#endif
+
+/* The maximum possible size_t value has all bits set */
+#define MAX_SIZE_T (~(size_t)0)
+
+#ifndef USE_LOCKS /* ensure true if spin or recursive locks set */
+#define USE_LOCKS ((defined(USE_SPIN_LOCKS) && USE_SPIN_LOCKS != 0) || \
+ (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0))
+#endif /* USE_LOCKS */
+
+#if USE_LOCKS /* Spin locks for gcc >= 4.1, older gcc on x86, MSC >= 1310 */
+#if ((defined(__GNUC__) && \
+ ((__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) || \
+ defined(__i386__) || defined(__x86_64__))) || \
+ (defined(_MSC_VER) && _MSC_VER>=1310))
+#ifndef USE_SPIN_LOCKS
+#define USE_SPIN_LOCKS 1
+#endif /* USE_SPIN_LOCKS */
+#elif USE_SPIN_LOCKS
+#error "USE_SPIN_LOCKS defined without implementation"
+#endif /* ... locks available... */
+#elif !defined(USE_SPIN_LOCKS)
+#define USE_SPIN_LOCKS 0
+#endif /* USE_LOCKS */
+
+#ifndef ONLY_MSPACES
+#define ONLY_MSPACES 0
+#endif /* ONLY_MSPACES */
+#ifndef MSPACES
+#if ONLY_MSPACES
+#define MSPACES 1
+#else /* ONLY_MSPACES */
+#define MSPACES 0
+#endif /* ONLY_MSPACES */
+#endif /* MSPACES */
+#ifndef MALLOC_ALIGNMENT
+#define MALLOC_ALIGNMENT ((size_t)(2 * sizeof(void *)))
+#endif /* MALLOC_ALIGNMENT */
+#ifndef FOOTERS
+#define FOOTERS 0
+#endif /* FOOTERS */
+#ifndef ABORT
+#define ABORT abort()
+#endif /* ABORT */
+#ifndef ABORT_ON_ASSERT_FAILURE
+#define ABORT_ON_ASSERT_FAILURE 1
+#endif /* ABORT_ON_ASSERT_FAILURE */
+#ifndef PROCEED_ON_ERROR
+#define PROCEED_ON_ERROR 0
+#endif /* PROCEED_ON_ERROR */
+
+#ifndef INSECURE
+#define INSECURE 0
+#endif /* INSECURE */
+#ifndef MALLOC_INSPECT_ALL
+#define MALLOC_INSPECT_ALL 0
+#endif /* MALLOC_INSPECT_ALL */
+#ifndef HAVE_MMAP
+#define HAVE_MMAP 1
+#endif /* HAVE_MMAP */
+#ifndef MMAP_CLEARS
+#define MMAP_CLEARS 1
+#endif /* MMAP_CLEARS */
+#ifndef HAVE_MREMAP
+#ifdef linux
+#define HAVE_MREMAP 1
+#define _GNU_SOURCE /* Turns on mremap() definition */
+#else /* linux */
+#define HAVE_MREMAP 0
+#endif /* linux */
+#endif /* HAVE_MREMAP */
+#ifndef MALLOC_FAILURE_ACTION
+#define MALLOC_FAILURE_ACTION errno = ENOMEM;
+#endif /* MALLOC_FAILURE_ACTION */
+#ifndef HAVE_MORECORE
+#if ONLY_MSPACES
+#define HAVE_MORECORE 0
+#else /* ONLY_MSPACES */
+#define HAVE_MORECORE 1
+#endif /* ONLY_MSPACES */
+#endif /* HAVE_MORECORE */
+#if !HAVE_MORECORE
+#define MORECORE_CONTIGUOUS 0
+#else /* !HAVE_MORECORE */
+#define MORECORE_DEFAULT sbrk
+#ifndef MORECORE_CONTIGUOUS
+#define MORECORE_CONTIGUOUS 1
+#endif /* MORECORE_CONTIGUOUS */
+#endif /* HAVE_MORECORE */
+#ifndef DEFAULT_GRANULARITY
+#if (MORECORE_CONTIGUOUS || defined(WIN32))
+#define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */
+#else /* MORECORE_CONTIGUOUS */
+#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
+#endif /* MORECORE_CONTIGUOUS */
+#endif /* DEFAULT_GRANULARITY */
+#ifndef DEFAULT_TRIM_THRESHOLD
+#ifndef MORECORE_CANNOT_TRIM
+#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
+#else /* MORECORE_CANNOT_TRIM */
+#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
+#endif /* MORECORE_CANNOT_TRIM */
+#endif /* DEFAULT_TRIM_THRESHOLD */
+#ifndef DEFAULT_MMAP_THRESHOLD
+#if HAVE_MMAP
+#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
+#else /* HAVE_MMAP */
+#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
+#endif /* HAVE_MMAP */
+#endif /* DEFAULT_MMAP_THRESHOLD */
+#ifndef MAX_RELEASE_CHECK_RATE
+#if HAVE_MMAP
+#define MAX_RELEASE_CHECK_RATE 4095
+#else
+#define MAX_RELEASE_CHECK_RATE MAX_SIZE_T
+#endif /* HAVE_MMAP */
+#endif /* MAX_RELEASE_CHECK_RATE */
+#ifndef USE_BUILTIN_FFS
+#define USE_BUILTIN_FFS 0
+#endif /* USE_BUILTIN_FFS */
+#ifndef USE_DEV_RANDOM
+#define USE_DEV_RANDOM 0
+#endif /* USE_DEV_RANDOM */
+#ifndef NO_MALLINFO
+#define NO_MALLINFO 0
+#endif /* NO_MALLINFO */
+#ifndef MALLINFO_FIELD_TYPE
+#define MALLINFO_FIELD_TYPE size_t
+#endif /* MALLINFO_FIELD_TYPE */
+#ifndef NO_MALLOC_STATS
+#define NO_MALLOC_STATS 0
+#endif /* NO_MALLOC_STATS */
+#ifndef NO_SEGMENT_TRAVERSAL
+#define NO_SEGMENT_TRAVERSAL 0
+#endif /* NO_SEGMENT_TRAVERSAL */
+
+/*
+ mallopt tuning options. SVID/XPG defines four standard parameter
+ numbers for mallopt, normally defined in malloc.h. None of these
+ are used in this malloc, so setting them has no effect. But this
+ malloc does support the following options.
+*/
+
+#define M_TRIM_THRESHOLD (-1)
+#define M_GRANULARITY (-2)
+#define M_MMAP_THRESHOLD (-3)
+
+/* ------------------------ Mallinfo declarations ------------------------ */
+
+/*
+ Try to persuade compilers to inline. The most critical functions for
+ inlining are defined as macros, so these aren't used for them.
+*/
+
+#define FORCEINLINE forceinline
+#define NOINLINE dontinline
+
+#if !ONLY_MSPACES
+
+/* ------------------- Declarations of public routines ------------------- */
+
+/*
+ malloc(size_t n)
+ Returns a pointer to a newly allocated chunk of at least n bytes, or
+ null if no space is available, in which case errno is set to ENOMEM
+ on ANSI C systems.
+
+ If n is zero, malloc returns a minimum-sized chunk. (The minimum
+ size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
+ systems.) Note that size_t is an unsigned type, so calls with
+ arguments that would be negative if signed are interpreted as
+ requests for huge amounts of space, which will often fail. The
+ maximum supported value of n differs across systems, but is in all
+ cases less than the maximum representable value of a size_t.
+*/
+DLMALLOC_EXPORT void* dlmalloc(size_t);
+
+/*
+ free(void* p)
+ Releases the chunk of memory pointed to by p, that had been previously
+ allocated using malloc or a related routine such as realloc.
+ It has no effect if p is null. If p was not malloced or already
+ freed, free(p) will by default cause the current program to abort.
+*/
+DLMALLOC_EXPORT void dlfree(void*);
+
+/*
+ calloc(size_t n_elements, size_t element_size);
+ Returns a pointer to n_elements * element_size bytes, with all locations
+ set to zero.
+*/
+DLMALLOC_EXPORT void* dlcalloc(size_t, size_t);
+
+/*
+ realloc(void* p, size_t n)
+ Returns a pointer to a chunk of size n that contains the same data
+ as does chunk p up to the minimum of (n, p's size) bytes, or null
+ if no space is available.
+
+ The returned pointer may or may not be the same as p. The algorithm
+ prefers extending p in most cases when possible, otherwise it
+ employs the equivalent of a malloc-copy-free sequence.
+
+ If p is null, realloc is equivalent to malloc.
+
+ If space is not available, realloc returns null, errno is set (if on
+ ANSI) and p is NOT freed.
+
+ if n is for fewer bytes than already held by p, the newly unused
+ space is lopped off and freed if possible. realloc with a size
+ argument of zero (re)allocates a minimum-sized chunk.
+
+ The old unix realloc convention of allowing the last-free'd chunk
+ to be used as an argument to realloc is not supported.
+*/
+DLMALLOC_EXPORT void* dlrealloc(void*, size_t);
+
+/*
+ realloc_in_place(void* p, size_t n)
+ Resizes the space allocated for p to size n, only if this can be
+ done without moving p (i.e., only if there is adjacent space
+ available if n is greater than p's current allocated size, or n is
+ less than or equal to p's size). This may be used instead of plain
+ realloc if an alternative allocation strategy is needed upon failure
+ to expand space; for example, reallocation of a buffer that must be
+ memory-aligned or cleared. You can use realloc_in_place to trigger
+ these alternatives only when needed.
+
+ Returns p if successful; otherwise null.
+*/
+DLMALLOC_EXPORT void* dlrealloc_in_place(void*, size_t);
+
+/*
+ memalign(size_t alignment, size_t n);
+ Returns a pointer to a newly allocated chunk of n bytes, aligned
+ in accord with the alignment argument.
+
+ The alignment argument should be a power of two. If the argument is
+ not a power of two, the nearest greater power is used.
+ 8-byte alignment is guaranteed by normal malloc calls, so don't
+ bother calling memalign with an argument of 8 or less.
+
+ Overreliance on memalign is a sure way to fragment space.
+*/
+DLMALLOC_EXPORT void* dlmemalign(size_t, size_t);
+
+/*
+ int posix_memalign(void** pp, size_t alignment, size_t n);
+ Allocates a chunk of n bytes, aligned in accord with the alignment
+ argument. Differs from memalign only in that it (1) assigns the
+ allocated memory to *pp rather than returning it, (2) fails and
+ returns EINVAL if the alignment is not a power of two (3) fails and
+ returns ENOMEM if memory cannot be allocated.
+*/
+DLMALLOC_EXPORT int dlposix_memalign(void**, size_t, size_t);
+
+/*
+ valloc(size_t n);
+ Equivalent to memalign(pagesize, n), where pagesize is the page
+ size of the system. If the pagesize is unknown, 4096 is used.
+*/
+DLMALLOC_EXPORT void* dlvalloc(size_t);
+
+/*
+ mallopt(int parameter_number, int parameter_value)
+ Sets tunable parameters The format is to provide a
+ (parameter-number, parameter-value) pair. mallopt then sets the
+ corresponding parameter to the argument value if it can (i.e., so
+ long as the value is meaningful), and returns 1 if successful else
+ 0. To workaround the fact that mallopt is specified to use int,
+ not size_t parameters, the value -1 is specially treated as the
+ maximum unsigned size_t value.
+
+ SVID/XPG/ANSI defines four standard param numbers for mallopt,
+ normally defined in malloc.h. None of these are use in this malloc,
+ so setting them has no effect. But this malloc also supports other
+ options in mallopt. See below for details. Briefly, supported
+ parameters are as follows (listed defaults are for "typical"
+ configurations).
+
+ Symbol param # default allowed param values
+ M_TRIM_THRESHOLD -1 2*1024*1024 any (-1 disables)
+ M_GRANULARITY -2 page size any power of 2 >= page size
+ M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support)
+*/
+DLMALLOC_EXPORT int dlmallopt(int, int);
+
+/*
+ malloc_footprint();
+ Returns the number of bytes obtained from the system. The total
+ number of bytes allocated by malloc, realloc etc., is less than this
+ value. Unlike mallinfo, this function returns only a precomputed
+ result, so can be called frequently to monitor memory consumption.
+ Even if locks are otherwise defined, this function does not use them,
+ so results might not be up to date.
+*/
+DLMALLOC_EXPORT size_t dlmalloc_footprint(void);
+
+/*
+ malloc_max_footprint();
+ Returns the maximum number of bytes obtained from the system. This
+ value will be greater than current footprint if deallocated space
+ has been reclaimed by the system. The peak number of bytes allocated
+ by malloc, realloc etc., is less than this value. Unlike mallinfo,
+ this function returns only a precomputed result, so can be called
+ frequently to monitor memory consumption. Even if locks are
+ otherwise defined, this function does not use them, so results might
+ not be up to date.
+*/
+DLMALLOC_EXPORT size_t dlmalloc_max_footprint(void);
+
+/*
+ malloc_footprint_limit();
+ Returns the number of bytes that the heap is allowed to obtain from
+ the system, returning the last value returned by
+ malloc_set_footprint_limit, or the maximum size_t value if
+ never set. The returned value reflects a permission. There is no
+ guarantee that this number of bytes can actually be obtained from
+ the system.
+*/
+DLMALLOC_EXPORT size_t dlmalloc_footprint_limit();
+
+/*
+ malloc_set_footprint_limit();
+ Sets the maximum number of bytes to obtain from the system, causing
+ failure returns from malloc and related functions upon attempts to
+ exceed this value. The argument value may be subject to page
+ rounding to an enforceable limit; this actual value is returned.
+ Using an argument of the maximum possible size_t effectively
+ disables checks. If the argument is less than or equal to the
+ current malloc_footprint, then all future allocations that require
+ additional system memory will fail. However, invocation cannot
+ retroactively deallocate existing used memory.
+*/
+DLMALLOC_EXPORT size_t dlmalloc_set_footprint_limit(size_t bytes);
+
+#if MALLOC_INSPECT_ALL
+/*
+ malloc_inspect_all(void(*handler)(void *start,
+ void *end,
+ size_t used_bytes,
+ void* callback_arg),
+ void* arg);
+ Traverses the heap and calls the given handler for each managed
+ region, skipping all bytes that are (or may be) used for bookkeeping
+ purposes. Traversal does not include include chunks that have been
+ directly memory mapped. Each reported region begins at the start
+ address, and continues up to but not including the end address. The
+ first used_bytes of the region contain allocated data. If
+ used_bytes is zero, the region is unallocated. The handler is
+ invoked with the given callback argument. If locks are defined, they
+ are held during the entire traversal. It is a bad idea to invoke
+ other malloc functions from within the handler.
+
+ For example, to count the number of in-use chunks with size greater
+ than 1000, you could write:
+ static int count = 0;
+ void count_chunks(void* start, void* end, size_t used, void* arg) {
+ if (used >= 1000) ++count;
+ }
+ then:
+ malloc_inspect_all(count_chunks, NULL);
+
+ malloc_inspect_all is compiled only if MALLOC_INSPECT_ALL is defined.
+*/
+DLMALLOC_EXPORT void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, void*),
+ void* arg);
+
+#endif /* MALLOC_INSPECT_ALL */
+
+/*
+ mallinfo()
+ Returns (by copy) a struct containing various summary statistics:
+
+ arena: current total non-mmapped bytes allocated from system
+ ordblks: the number of free chunks
+ smblks: always zero.
+ hblks: current number of mmapped regions
+ hblkhd: total bytes held in mmapped regions
+ usmblks: the maximum total allocated space. This will be greater
+ than current total if trimming has occurred.
+ fsmblks: always zero
+ uordblks: current total allocated space (normal or mmapped)
+ fordblks: total free space
+ keepcost: the maximum number of bytes that could ideally be released
+ back to system via malloc_trim. ("ideally" means that
+ it ignores page restrictions etc.)
+
+ Because these fields are ints, but internal bookkeeping may
+ be kept as longs, the reported values may wrap around zero and
+ thus be inaccurate.
+*/
+DLMALLOC_EXPORT struct mallinfo dlmallinfo(void);
+
+/*
+ independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
+
+ independent_calloc is similar to calloc, but instead of returning a
+ single cleared space, it returns an array of pointers to n_elements
+ independent elements that can hold contents of size elem_size, each
+ of which starts out cleared, and can be independently freed,
+ realloc'ed etc. The elements are guaranteed to be adjacently
+ allocated (this is not guaranteed to occur with multiple callocs or
+ mallocs), which may also improve cache locality in some
+ applications.
+
+ The "chunks" argument is optional (i.e., may be null, which is
+ probably the most typical usage). If it is null, the returned array
+ is itself dynamically allocated and should also be freed when it is
+ no longer needed. Otherwise, the chunks array must be of at least
+ n_elements in length. It is filled in with the pointers to the
+ chunks.
+
+ In either case, independent_calloc returns this pointer array, or
+ null if the allocation failed. If n_elements is zero and "chunks"
+ is null, it returns a chunk representing an array with zero elements
+ (which should be freed if not wanted).
+
+ Each element must be freed when it is no longer needed. This can be
+ done all at once using bulk_free.
+
+ independent_calloc simplifies and speeds up implementations of many
+ kinds of pools. It may also be useful when constructing large data
+ structures that initially have a fixed number of fixed-sized nodes,
+ but the number is not known at compile time, and some of the nodes
+ may later need to be freed. For example:
+
+ struct Node { int item; struct Node* next; };
+
+ struct Node* build_list() {
+ struct Node** pool;
+ int n = read_number_of_nodes_needed();
+ if (n <= 0) return 0;
+ pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0);
+ if (pool == 0) die();
+ // organize into a linked list...
+ struct Node* first = pool[0];
+ for (i = 0; i < n-1; ++i)
+ pool[i]->next = pool[i+1];
+ free(pool); // Can now free the array (or not, if it is needed later)
+ return first;
+ }
+*/
+DLMALLOC_EXPORT void** dlindependent_calloc(size_t, size_t, void**);
+
+/*
+ independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
+
+ independent_comalloc allocates, all at once, a set of n_elements
+ chunks with sizes indicated in the "sizes" array. It returns
+ an array of pointers to these elements, each of which can be
+ independently freed, realloc'ed etc. The elements are guaranteed to
+ be adjacently allocated (this is not guaranteed to occur with
+ multiple callocs or mallocs), which may also improve cache locality
+ in some applications.
+
+ The "chunks" argument is optional (i.e., may be null). If it is null
+ the returned array is itself dynamically allocated and should also
+ be freed when it is no longer needed. Otherwise, the chunks array
+ must be of at least n_elements in length. It is filled in with the
+ pointers to the chunks.
+
+ In either case, independent_comalloc returns this pointer array, or
+ null if the allocation failed. If n_elements is zero and chunks is
+ null, it returns a chunk representing an array with zero elements
+ (which should be freed if not wanted).
+
+ Each element must be freed when it is no longer needed. This can be
+ done all at once using bulk_free.
+
+ independent_comallac differs from independent_calloc in that each
+ element may have a different size, and also that it does not
+ automatically clear elements.
+
+ independent_comalloc can be used to speed up allocation in cases
+ where several structs or objects must always be allocated at the
+ same time. For example:
+
+ struct Head { ... }
+ struct Foot { ... }
+
+ void send_message(char* msg) {
+ int msglen = strlen(msg);
+ size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
+ void* chunks[3];
+ if (independent_comalloc(3, sizes, chunks) == 0)
+ die();
+ struct Head* head = (struct Head*)(chunks[0]);
+ char* body = (char*)(chunks[1]);
+ struct Foot* foot = (struct Foot*)(chunks[2]);
+ // ...
+ }
+
+ In general though, independent_comalloc is worth using only for
+ larger values of n_elements. For small values, you probably won't
+ detect enough difference from series of malloc calls to bother.
+
+ Overuse of independent_comalloc can increase overall memory usage,
+ since it cannot reuse existing noncontiguous small chunks that
+ might be available for some of the elements.
+*/
+DLMALLOC_EXPORT void** dlindependent_comalloc(size_t, size_t*, void**);
+
+/*
+ bulk_free(void* array[], size_t n_elements)
+ Frees and clears (sets to null) each non-null pointer in the given
+ array. This is likely to be faster than freeing them one-by-one.
+ If footers are used, pointers that have been allocated in different
+ mspaces are not freed or cleared, and the count of all such pointers
+ is returned. For large arrays of pointers with poor locality, it
+ may be worthwhile to sort this array before calling bulk_free.
+*/
+DLMALLOC_EXPORT size_t dlbulk_free(void**, size_t n_elements);
+
+/*
+ pvalloc(size_t n);
+ Equivalent to valloc(minimum-page-that-holds(n)), that is,
+ round up n to nearest pagesize.
+ */
+DLMALLOC_EXPORT void* dlpvalloc(size_t);
+
+/*
+ malloc_trim(size_t pad);
+
+ If possible, gives memory back to the system (via negative arguments
+ to sbrk) if there is unused memory at the `high' end of the malloc
+ pool or in unused MMAP segments. You can call this after freeing
+ large blocks of memory to potentially reduce the system-level memory
+ requirements of a program. However, it cannot guarantee to reduce
+ memory. Under some allocation patterns, some large free blocks of
+ memory will be locked between two used chunks, so they cannot be
+ given back to the system.
+
+ The `pad' argument to malloc_trim represents the amount of free
+ trailing space to leave untrimmed. If this argument is zero, only
+ the minimum amount of memory to maintain internal data structures
+ will be left. Non-zero arguments can be supplied to maintain enough
+ trailing space to service future expected allocations without having
+ to re-obtain memory from the system.
+
+ Malloc_trim returns 1 if it actually released any memory, else 0.
+*/
+DLMALLOC_EXPORT int dlmalloc_trim(size_t);
+
+/*
+ malloc_stats();
+ Prints on stderr the amount of space obtained from the system (both
+ via sbrk and mmap), the maximum amount (which may be more than
+ current if malloc_trim and/or munmap got called), and the current
+ number of bytes allocated via malloc (or realloc, etc) but not yet
+ freed. Note that this is the number of bytes allocated, not the
+ number requested. It will be larger than the number requested
+ because of alignment and bookkeeping overhead. Because it includes
+ alignment wastage as being in use, this figure may be greater than
+ zero even when no user-level chunks are allocated.
+
+ The reported current and maximum system memory can be inaccurate if
+ a program makes other calls to system memory allocation functions
+ (normally sbrk) outside of malloc.
+
+ malloc_stats prints only the most commonly interesting statistics.
+ More information can be obtained by calling mallinfo.
+*/
+DLMALLOC_EXPORT void dlmalloc_stats(void);
+
+/*
+ malloc_usable_size(void* p);
+
+ Returns the number of bytes you can actually use in
+ an allocated chunk, which may be more than you requested (although
+ often not) due to alignment and minimum size constraints.
+ You can use this many bytes without worrying about
+ overwriting other allocated objects. This is not a particularly great
+ programming practice. malloc_usable_size can be more useful in
+ debugging and assertions, for example:
+
+ p = malloc(n);
+ assert(malloc_usable_size(p) >= 256);
+*/
+size_t dlmalloc_usable_size(void*);
+
+#endif /* ONLY_MSPACES */
+
+#if MSPACES
+
+/*
+ mspace is an opaque type representing an independent
+ region of space that supports mspace_malloc, etc.
+*/
+typedef void* mspace;
+
+/*
+ create_mspace creates and returns a new independent space with the
+ given initial capacity, or, if 0, the default granularity size. It
+ returns null if there is no system memory available to create the
+ space. If argument locked is non-zero, the space uses a separate
+ lock to control access. The capacity of the space will grow
+ dynamically as needed to service mspace_malloc requests. You can
+ control the sizes of incremental increases of this space by
+ compiling with a different DEFAULT_GRANULARITY or dynamically
+ setting with mallopt(M_GRANULARITY, value).
+*/
+DLMALLOC_EXPORT mspace create_mspace(size_t capacity, int locked);
+
+/*
+ destroy_mspace destroys the given space, and attempts to return all
+ of its memory back to the system, returning the total number of
+ bytes freed. After destruction, the results of access to all memory
+ used by the space become undefined.
+*/
+DLMALLOC_EXPORT size_t destroy_mspace(mspace msp);
+
+/*
+ create_mspace_with_base uses the memory supplied as the initial base
+ of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
+ space is used for bookkeeping, so the capacity must be at least this
+ large. (Otherwise 0 is returned.) When this initial space is
+ exhausted, additional memory will be obtained from the system.
+ Destroying this space will deallocate all additionally allocated
+ space (if possible) but not the initial base.
+*/
+DLMALLOC_EXPORT mspace create_mspace_with_base(void* base, size_t capacity, int locked);
+
+/*
+ mspace_track_large_chunks controls whether requests for large chunks
+ are allocated in their own untracked mmapped regions, separate from
+ others in this mspace. By default large chunks are not tracked,
+ which reduces fragmentation. However, such chunks are not
+ necessarily released to the system upon destroy_mspace. Enabling
+ tracking by setting to true may increase fragmentation, but avoids
+ leakage when relying on destroy_mspace to release all memory
+ allocated using this space. The function returns the previous
+ setting.
+*/
+DLMALLOC_EXPORT int mspace_track_large_chunks(mspace msp, int enable);
+
+
+/*
+ mspace_malloc behaves as malloc, but operates within
+ the given space.
+*/
+DLMALLOC_EXPORT void* mspace_malloc(mspace msp, size_t bytes);
+
+/*
+ mspace_free behaves as free, but operates within
+ the given space.
+
+ If compiled with FOOTERS==1, mspace_free is not actually needed.
+ free may be called instead of mspace_free because freed chunks from
+ any space are handled by their originating spaces.
+*/
+DLMALLOC_EXPORT void mspace_free(mspace msp, void* mem);
+
+/*
+ mspace_realloc behaves as realloc, but operates within
+ the given space.
+
+ If compiled with FOOTERS==1, mspace_realloc is not actually
+ needed. realloc may be called instead of mspace_realloc because
+ realloced chunks from any space are handled by their originating
+ spaces.
+*/
+DLMALLOC_EXPORT void* mspace_realloc(mspace msp, void* mem, size_t newsize);
+
+/*
+ mspace_calloc behaves as calloc, but operates within
+ the given space.
+*/
+DLMALLOC_EXPORT void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
+
+/*
+ mspace_memalign behaves as memalign, but operates within
+ the given space.
+*/
+DLMALLOC_EXPORT void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
+
+/*
+ mspace_independent_calloc behaves as independent_calloc, but
+ operates within the given space.
+*/
+DLMALLOC_EXPORT void** mspace_independent_calloc(mspace msp, size_t n_elements,
+ size_t elem_size, void* chunks[]);
+
+/*
+ mspace_independent_comalloc behaves as independent_comalloc, but
+ operates within the given space.
+*/
+DLMALLOC_EXPORT void** mspace_independent_comalloc(mspace msp, size_t n_elements,
+ size_t sizes[], void* chunks[]);
+
+/*
+ mspace_footprint() returns the number of bytes obtained from the
+ system for this space.
+*/
+DLMALLOC_EXPORT size_t mspace_footprint(mspace msp);
+
+/*
+ mspace_max_footprint() returns the peak number of bytes obtained from the
+ system for this space.
+*/
+DLMALLOC_EXPORT size_t mspace_max_footprint(mspace msp);
+
+
+#if !NO_MALLINFO
+/*
+ mspace_mallinfo behaves as mallinfo, but reports properties of
+ the given space.
+*/
+DLMALLOC_EXPORT struct mallinfo mspace_mallinfo(mspace msp);
+#endif /* NO_MALLINFO */
+
+/*
+ malloc_usable_size(void* p) behaves the same as malloc_usable_size;
+*/
+DLMALLOC_EXPORT size_t mspace_usable_size(const void* mem);
+
+/*
+ mspace_malloc_stats behaves as malloc_stats, but reports
+ properties of the given space.
+*/
+DLMALLOC_EXPORT void mspace_malloc_stats(mspace msp);
+
+/*
+ mspace_trim behaves as malloc_trim, but
+ operates within the given space.
+*/
+DLMALLOC_EXPORT int mspace_trim(mspace msp, size_t pad);
+
+/*
+ An alias for mallopt.
+*/
+DLMALLOC_EXPORT int mspace_mallopt(int, int);
+
+#endif /* MSPACES */
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif /* __cplusplus */
+
+/*
+ ========================================================================
+ To make a fully customizable malloc.h header file, cut everything
+ above this line, put into file malloc.h, edit to suit, and #include it
+ on the next line, as well as in programs that use this malloc.
+ ========================================================================
+*/
+
+/* #include "malloc.h" */
+
+/*------------------------------ internal #includes ---------------------- */
+
+#ifdef _MSC_VER
+#pragma warning( disable : 4146 ) /* no "unsigned" warnings */
+#endif /* _MSC_VER */
+#if !NO_MALLOC_STATS
+#endif /* NO_MALLOC_STATS */
+#ifndef LACKS_ERRNO_H
+#include /* for MALLOC_FAILURE_ACTION */
+#endif /* LACKS_ERRNO_H */
+#ifdef DEBUG
+#if ABORT_ON_ASSERT_FAILURE
+#undef assert
+#define assert(x) if(!(x)) ABORT
+#else /* ABORT_ON_ASSERT_FAILURE */
+#include
+#endif /* ABORT_ON_ASSERT_FAILURE */
+#else /* DEBUG */
+#ifndef assert
+#define assert(x)
+#endif
+#define DEBUG 0
+#endif /* DEBUG */
+#if !defined(WIN32) && !defined(LACKS_TIME_H)
+#include /* for magic initialization */
+#endif /* WIN32 */
+#ifndef LACKS_STDLIB_H
+#include /* for abort() */
+#endif /* LACKS_STDLIB_H */
+#ifndef LACKS_STRING_H
+#include /* for memset etc */
+#endif /* LACKS_STRING_H */
+#if USE_BUILTIN_FFS
+#ifndef LACKS_STRINGS_H
+#include /* for ffs */
+#endif /* LACKS_STRINGS_H */
+#endif /* USE_BUILTIN_FFS */
+#if HAVE_MMAP
+#ifndef LACKS_SYS_MMAN_H
+/* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */
+#if (defined(linux) && !defined(__USE_GNU))
+#define __USE_GNU 1
+#include /* for mmap */
+#undef __USE_GNU
+#else
+#include /* for mmap */
+#endif /* linux */
+#endif /* LACKS_SYS_MMAN_H */
+#ifndef LACKS_FCNTL_H
+#include
+#endif /* LACKS_FCNTL_H */
+#endif /* HAVE_MMAP */
+#ifndef LACKS_UNISTD_H
+#include /* for sbrk, sysconf */
+#else /* LACKS_UNISTD_H */
+#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__) && !defined(__COSMOPOLITAN__)
+extern void* sbrk(ptrdiff_t);
+#endif /* FreeBSD etc */
+#endif /* LACKS_UNISTD_H */
+
+/* Declarations for locking */
+#if USE_LOCKS
+#ifndef WIN32
+#if defined (__SVR4) && defined (__sun) /* solaris */
+#include
+#elif !defined(LACKS_SCHED_H)
+#include
+#endif /* solaris or LACKS_SCHED_H */
+#if (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0) || !USE_SPIN_LOCKS
+#include
+#endif /* USE_RECURSIVE_LOCKS ... */
+#elif defined(_MSC_VER)
+#ifndef _M_AMD64
+/* These are already defined on AMD64 builds */
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp);
+LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value);
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+#endif /* _M_AMD64 */
+#pragma intrinsic (_InterlockedCompareExchange)
+#pragma intrinsic (_InterlockedExchange)
+#define interlockedcompareexchange _InterlockedCompareExchange
+#define interlockedexchange _InterlockedExchange
+#elif defined(WIN32) && defined(__GNUC__)
+#define interlockedcompareexchange(a, b, c) __sync_val_compare_and_swap(a, c, b)
+#define interlockedexchange __sync_lock_test_and_set
+#endif /* Win32 */
+#else /* USE_LOCKS */
+#endif /* USE_LOCKS */
+
+#ifndef LOCK_AT_FORK
+#define LOCK_AT_FORK 0
+#endif
+
+/* Declarations for bit scanning on win32 */
+#if defined(_MSC_VER) && _MSC_VER>=1300
+#ifndef BitScanForward /* Try to avoid pulling in WinNT.h */
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
+unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#define BitScanForward _BitScanForward
+#define BitScanReverse _BitScanReverse
+#pragma intrinsic(_BitScanForward)
+#pragma intrinsic(_BitScanReverse)
+#endif /* BitScanForward */
+#endif /* defined(_MSC_VER) && _MSC_VER>=1300 */
+
+#ifndef WIN32
+#ifndef malloc_getpagesize
+# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
+# ifndef _SC_PAGE_SIZE
+# define _SC_PAGE_SIZE _SC_PAGESIZE
+# endif
+# endif
+# ifdef _SC_PAGE_SIZE
+# define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
+# else
+# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
+ extern size_t getpagesize();
+# define malloc_getpagesize getpagesize()
+# else
+# ifdef WIN32 /* use supplied emulation of getpagesize */
+# define malloc_getpagesize getpagesize()
+# else
+# ifndef LACKS_SYS_PARAM_H
+# include
+# endif
+# ifdef EXEC_PAGESIZE
+# define malloc_getpagesize EXEC_PAGESIZE
+# else
+# ifdef NBPG
+# ifndef CLSIZE
+# define malloc_getpagesize NBPG
+# else
+# define malloc_getpagesize (NBPG * CLSIZE)
+# endif
+# else
+# ifdef NBPC
+# define malloc_getpagesize NBPC
+# else
+# ifdef PAGESIZE
+# define malloc_getpagesize PAGESIZE
+# else /* just guess */
+# define malloc_getpagesize ((size_t)4096U)
+# endif
+# endif
+# endif
+# endif
+# endif
+# endif
+# endif
+#endif
+#endif
+
+/* ------------------- size_t and alignment properties -------------------- */
+
+/* The byte and bit size of a size_t */
+#define SIZE_T_SIZE (sizeof(size_t))
+#define SIZE_T_BITSIZE (sizeof(size_t) << 3)
+
+/* Some constants coerced to size_t */
+/* Annoying but necessary to avoid errors on some platforms */
+#define SIZE_T_ZERO ((size_t)0)
+#define SIZE_T_ONE ((size_t)1)
+#define SIZE_T_TWO ((size_t)2)
+#define SIZE_T_FOUR ((size_t)4)
+#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)
+#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)
+#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
+#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)
+
+/* The bit mask value corresponding to MALLOC_ALIGNMENT */
+#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)
+
+/* True if address a has acceptable alignment */
+#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
+
+/* the number of bytes to offset an address to align it */
+#define align_offset(A)\
+ ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
+ ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
+
+/* -------------------------- MMAP preliminaries ------------------------- */
+
+/*
+ If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and
+ checks to fail so compiler optimizer can delete code rather than
+ using so many "#if"s.
+*/
+
+
+/* MORECORE and MMAP must return MFAIL on failure */
+#define MFAIL ((void*)(MAX_SIZE_T))
+#define CMFAIL ((char*)(MFAIL)) /* defined for convenience */
+
+#if HAVE_MMAP
+
+#ifndef WIN32
+#define MUNMAP_DEFAULT(a, s) munmap((a), (s))
+#define MMAP_PROT (PROT_READ|PROT_WRITE)
+#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
+#define MAP_ANONYMOUS MAP_ANON
+#endif /* MAP_ANON */
+#ifdef MAP_ANONYMOUS
+#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
+#define MMAP_DEFAULT(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
+#else /* MAP_ANONYMOUS */
+/*
+ Nearly all versions of mmap support MAP_ANONYMOUS, so the following
+ is unlikely to be needed, but is supplied just in case.
+*/
+#define MMAP_FLAGS (MAP_PRIVATE)
+static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
+#define MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? \
+ (dev_zero_fd = open("/dev/zero", O_RDWR), \
+ mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
+ mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
+#endif /* MAP_ANONYMOUS */
+
+#define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s)
+
+#else /* WIN32 */
+
+/* Win32 MMAP via VirtualAlloc */
+FORCEINLINE void* win32mmap(size_t size) {
+ void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
+ return (ptr != 0)? ptr: MFAIL;
+}
+
+/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
+FORCEINLINE void* win32direct_mmap(size_t size) {
+ void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
+ PAGE_READWRITE);
+ return (ptr != 0)? ptr: MFAIL;
+}
+
+/* This function supports releasing coalesed segments */
+FORCEINLINE int win32munmap(void* ptr, size_t size) {
+ MEMORY_BASIC_INFORMATION minfo;
+ char* cptr = (char*)ptr;
+ while (size) {
+ if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
+ return -1;
+ if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
+ minfo.State != MEM_COMMIT || minfo.RegionSize > size)
+ return -1;
+ if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
+ return -1;
+ cptr += minfo.RegionSize;
+ size -= minfo.RegionSize;
+ }
+ return 0;
+}
+
+#define MMAP_DEFAULT(s) win32mmap(s)
+#define MUNMAP_DEFAULT(a, s) win32munmap((a), (s))
+#define DIRECT_MMAP_DEFAULT(s) win32direct_mmap(s)
+#endif /* WIN32 */
+#endif /* HAVE_MMAP */
+
+#if HAVE_MREMAP
+#ifndef WIN32
+#define MREMAP_DEFAULT(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
+#endif /* WIN32 */
+#endif /* HAVE_MREMAP */
/**
- * Acquires more system memory for dlmalloc.
- *
- * Each time dlmalloc needs 64kb, we ask for a 2mb page directory. The
- * empty space could help with buffer overflow detection; mremap() has
- * plenty of room to grow; and debuggability is greatly enhanced. This
- * should have less page table overhead than in security blanket mode.
- * Note that contiguous allocations are what Doug Lea recommends.
+ * Define CALL_MORECORE
*/
-static void *dlmalloc_requires_more_vespene_gas(size_t size) {
- char *p;
- if ((p = mapanon(size)) != MAP_FAILED) {
- if (weaken(__asan_poison)) {
- weaken(__asan_poison)((uintptr_t)p, size, kAsanHeapFree);
- }
+#if HAVE_MORECORE
+ #ifdef MORECORE
+ #define CALL_MORECORE(S) MORECORE(S)
+ #else /* MORECORE */
+ #define CALL_MORECORE(S) MORECORE_DEFAULT(S)
+ #endif /* MORECORE */
+#else /* HAVE_MORECORE */
+ #define CALL_MORECORE(S) MFAIL
+#endif /* HAVE_MORECORE */
+
+/**
+ * Define CALL_MMAP/CALL_MUNMAP/CALL_DIRECT_MMAP
+ */
+#if HAVE_MMAP
+ #define USE_MMAP_BIT (SIZE_T_ONE)
+
+ #ifdef MMAP
+ #define CALL_MMAP(s) MMAP(s)
+ #else /* MMAP */
+ #define CALL_MMAP(s) MMAP_DEFAULT(s)
+ #endif /* MMAP */
+ #ifdef MUNMAP
+ #define CALL_MUNMAP(a, s) MUNMAP((a), (s))
+ #else /* MUNMAP */
+ #define CALL_MUNMAP(a, s) MUNMAP_DEFAULT((a), (s))
+ #endif /* MUNMAP */
+ #ifdef DIRECT_MMAP
+ #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
+ #else /* DIRECT_MMAP */
+ #define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s)
+ #endif /* DIRECT_MMAP */
+#else /* HAVE_MMAP */
+ #define USE_MMAP_BIT (SIZE_T_ZERO)
+
+ #define MMAP(s) MFAIL
+ #define MUNMAP(a, s) (-1)
+ #define DIRECT_MMAP(s) MFAIL
+ #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
+ #define CALL_MMAP(s) MMAP(s)
+ #define CALL_MUNMAP(a, s) MUNMAP((a), (s))
+#endif /* HAVE_MMAP */
+
+/**
+ * Define CALL_MREMAP
+ */
+#if HAVE_MMAP && HAVE_MREMAP
+ #ifdef MREMAP
+ #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv))
+ #else /* MREMAP */
+ #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv))
+ #endif /* MREMAP */
+#else /* HAVE_MMAP && HAVE_MREMAP */
+ #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
+#endif /* HAVE_MMAP && HAVE_MREMAP */
+
+/* mstate bit set if continguous morecore disabled or failed */
+#define USE_NONCONTIGUOUS_BIT (4U)
+
+/* segment bit set in create_mspace_with_base */
+#define EXTERN_BIT (8U)
+
+
+/* --------------------------- Lock preliminaries ------------------------ */
+
+/*
+ When locks are defined, there is one global lock, plus
+ one per-mspace lock.
+
+ The global lock_ensures that mparams.magic and other unique
+ mparams values are initialized only once. It also protects
+ sequences of calls to MORECORE. In many cases sys_alloc requires
+ two calls, that should not be interleaved with calls by other
+ threads. This does not protect against direct calls to MORECORE
+ by other threads not using this lock, so there is still code to
+ cope the best we can on interference.
+
+ Per-mspace locks surround calls to malloc, free, etc.
+ By default, locks are simple non-reentrant mutexes.
+
+ Because lock-protected regions generally have bounded times, it is
+ OK to use the supplied simple spinlocks. Spinlocks are likely to
+ improve performance for lightly contended applications, but worsen
+ performance under heavy contention.
+
+ If USE_LOCKS is > 1, the definitions of lock routines here are
+ bypassed, in which case you will need to define the type MLOCK_T,
+ and at least INITIAL_LOCK, DESTROY_LOCK, ACQUIRE_LOCK, RELEASE_LOCK
+ and TRY_LOCK. You must also declare a
+ static MLOCK_T malloc_global_mutex = { initialization values };.
+
+*/
+
+#if !USE_LOCKS
+#define USE_LOCK_BIT (0U)
+#define INITIAL_LOCK(l) (0)
+#define DESTROY_LOCK(l) (0)
+#define ACQUIRE_MALLOC_GLOBAL_LOCK()
+#define RELEASE_MALLOC_GLOBAL_LOCK()
+
+#else
+#if USE_LOCKS > 1
+/* ----------------------- User-defined locks ------------------------ */
+/* Define your own lock implementation here */
+/* #define INITIAL_LOCK(lk) ... */
+/* #define DESTROY_LOCK(lk) ... */
+/* #define ACQUIRE_LOCK(lk) ... */
+/* #define RELEASE_LOCK(lk) ... */
+/* #define TRY_LOCK(lk) ... */
+/* static MLOCK_T malloc_global_mutex = ... */
+
+#elif USE_SPIN_LOCKS
+
+/* First, define CAS_LOCK and CLEAR_LOCK on ints */
+/* Note CAS_LOCK defined to return 0 on success */
+
+#if defined(__GNUC__)&& (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
+#define CAS_LOCK(sl) __sync_lock_test_and_set(sl, 1)
+#define CLEAR_LOCK(sl) __sync_lock_release(sl)
+
+#elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))
+/* Custom spin locks for older gcc on x86 */
+FORCEINLINE int x86_cas_lock(int *sl) {
+ int ret;
+ int val = 1;
+ int cmp = 0;
+ __asm__ __volatile__ ("lock; cmpxchgl %1, %2"
+ : "=a" (ret)
+ : "r" (val), "m" (*(sl)), "0"(cmp)
+ : "memory", "cc");
+ return ret;
+}
+
+FORCEINLINE void x86_clear_lock(int* sl) {
+ assert(*sl != 0);
+ int prev = 0;
+ int ret;
+ __asm__ __volatile__ ("lock; xchgl %0, %1"
+ : "=r" (ret)
+ : "m" (*(sl)), "0"(prev)
+ : "memory");
+}
+
+#define CAS_LOCK(sl) x86_cas_lock(sl)
+#define CLEAR_LOCK(sl) x86_clear_lock(sl)
+
+#else /* Win32 MSC */
+#define CAS_LOCK(sl) interlockedexchange(sl, (LONG)1)
+#define CLEAR_LOCK(sl) interlockedexchange (sl, (LONG)0)
+
+#endif /* ... gcc spins locks ... */
+
+#if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0
+/* Plain spin locks use single word (embedded in malloc_states) */
+static dontinline int spin_acquire_lock(int *sl) {
+ while (*(volatile int *)sl != 0 || CAS_LOCK(sl)) {
+ __builtin_ia32_pause();
}
- return p;
+ return 0;
}
-/* ─────────────────────────── mspace management ─────────────────────────── */
+#define MLOCK_T int
+#define TRY_LOCK(sl) !CAS_LOCK(sl)
+#define RELEASE_LOCK(sl) CLEAR_LOCK(sl)
+#define ACQUIRE_LOCK(sl) (CAS_LOCK(sl)? spin_acquire_lock(sl) : 0)
+#define INITIAL_LOCK(sl) (*sl = 0)
+#define DESTROY_LOCK(sl) (0)
+static MLOCK_T malloc_global_mutex = 0;
-/* Initialize top chunk and its size */
-static void dlmalloc_init_top(struct MallocState *m, mchunkptr p,
- size_t psize) {
- /* Ensure alignment */
- size_t offset = align_offset(chunk2mem(p));
- p = (mchunkptr)((char *)p + offset);
- psize -= offset;
- m->top = p;
- m->topsize = psize;
- p->head = psize | PINUSE_BIT;
- /* set size of fake trailing chunk holding overhead space only once */
- chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
- m->trim_check = g_mparams.trim_threshold; /* reset on each update */
-}
+#else /* USE_RECURSIVE_LOCKS */
+/* types for lock owners */
+#ifdef WIN32
+#define THREAD_ID_T DWORD
+#define CURRENT_THREAD GetCurrentThreadId()
+#define EQ_OWNER(X,Y) ((X) == (Y))
+#else
+/*
+ Note: the following assume that pthread_t is a type that can be
+ initialized to (casted) zero. If this is not the case, you will need to
+ somehow redefine these or not use spin locks.
+*/
+#define THREAD_ID_T pthread_t
+#define CURRENT_THREAD pthread_self()
+#define EQ_OWNER(X,Y) pthread_equal(X, Y)
+#endif
-/* Initialize bins for a new mstate that is otherwise zeroed out */
-static void init_bins(struct MallocState *m) {
- /* Establish circular links for smallbins */
- bindex_t i;
- for (i = 0; i < NSMALLBINS; ++i) {
- sbinptr bin = smallbin_at(m, i);
- bin->fd = bin->bk = bin;
+struct malloc_recursive_lock {
+ int sl;
+ unsigned int c;
+ THREAD_ID_T threadid;
+};
+
+#define MLOCK_T struct malloc_recursive_lock
+static MLOCK_T malloc_global_mutex = { 0, 0, (THREAD_ID_T)0};
+
+FORCEINLINE void recursive_release_lock(MLOCK_T *lk) {
+ assert(lk->sl != 0);
+ if (--lk->c == 0) {
+ CLEAR_LOCK(&lk->sl);
}
}
-/* Allocate chunk and prepend remainder with chunk in successor base. */
-static void *dlmalloc_prepend_alloc(struct MallocState *m, char *newbase,
- char *oldbase, size_t nb) {
- mchunkptr p = align_as_chunk(newbase);
- mchunkptr oldfirst = align_as_chunk(oldbase);
- size_t psize = (char *)oldfirst - (char *)p;
- mchunkptr q = chunk_plus_offset(p, nb);
- size_t qsize = psize - nb;
- set_size_and_pinuse_of_inuse_chunk(m, p, nb);
- assert((char *)oldfirst > (char *)q);
- assert(pinuse(oldfirst));
- assert(qsize >= MIN_CHUNK_SIZE);
- /* consolidate remainder with first chunk of old base */
- if (oldfirst == m->top) {
- size_t tsize = m->topsize += qsize;
- m->top = q;
- q->head = tsize | PINUSE_BIT;
- check_top_chunk(m, q);
- } else if (oldfirst == m->dv) {
- size_t dsize = m->dvsize += qsize;
- m->dv = q;
- set_size_and_pinuse_of_free_chunk(q, dsize);
- } else {
- if (!is_inuse(oldfirst)) {
- size_t nsize = chunksize(oldfirst);
- unlink_chunk(m, oldfirst, nsize);
- oldfirst = chunk_plus_offset(oldfirst, nsize);
- qsize += nsize;
- }
- set_free_with_pinuse(q, qsize, oldfirst);
- insert_chunk(m, q, qsize);
- check_free_chunk(m, q);
- }
- check_malloced_chunk(m, chunk2mem(p), nb);
- return chunk2mem(p);
-}
-
-/* Add a segment to hold a new noncontiguous region */
-static void dlmalloc_add_segment(struct MallocState *m, char *tbase,
- size_t tsize, flag_t mmapped) {
- /* Determine locations and sizes of segment, fenceposts, old top */
- char *old_top = (char *)m->top;
- msegmentptr oldsp = segment_holding(m, old_top);
- char *old_end = oldsp->base + oldsp->size;
- size_t ssize = pad_request(sizeof(struct MallocSegment));
- char *rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
- size_t offset = align_offset(chunk2mem(rawsp));
- char *asp = rawsp + offset;
- char *csp = (asp < (old_top + MIN_CHUNK_SIZE)) ? old_top : asp;
- mchunkptr sp = (mchunkptr)csp;
- msegmentptr ss = (msegmentptr)(chunk2mem(sp));
- mchunkptr tnext = chunk_plus_offset(sp, ssize);
- mchunkptr p = tnext;
- int nfences = 0;
- /* reset top to new space */
- dlmalloc_init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
- /* Set up segment record */
- assert(is_aligned(ss));
- set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
- *ss = m->seg; /* Push current record */
- m->seg.base = tbase;
- m->seg.size = tsize;
- m->seg.sflags = mmapped;
- m->seg.next = ss;
- /* Insert trailing fenceposts */
+FORCEINLINE int recursive_acquire_lock(MLOCK_T *lk) {
+ THREAD_ID_T mythreadid = CURRENT_THREAD;
for (;;) {
- mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
- p->head = FENCEPOST_HEAD;
- ++nfences;
- if ((char *)(&(nextp->head)) < old_end)
- p = nextp;
- else
- break;
+ if (*((volatile int *)(&lk->sl)) == 0) {
+ if (!CAS_LOCK(&lk->sl)) {
+ lk->threadid = mythreadid;
+ lk->c = 1;
+ return 0;
+ }
+ }
+ else if (EQ_OWNER(lk->threadid, mythreadid)) {
+ ++lk->c;
+ return 0;
+ }
+ __builtin_ia32_pause();
}
- assert(nfences >= 2);
- /* Insert the rest of old top into a bin as an ordinary free chunk */
- if (csp != old_top) {
- mchunkptr q = (mchunkptr)old_top;
- size_t psize = csp - old_top;
- mchunkptr tn = chunk_plus_offset(q, psize);
- set_free_with_pinuse(q, psize, tn);
- insert_chunk(m, q, psize);
- }
- check_top_chunk(m, m->top);
}
-/* ─────────────────────────── system integration ─────────────────────────── */
+FORCEINLINE int recursive_try_lock(MLOCK_T *lk) {
+ THREAD_ID_T mythreadid = CURRENT_THREAD;
+ if (*((volatile int *)(&lk->sl)) == 0) {
+ if (!CAS_LOCK(&lk->sl)) {
+ lk->threadid = mythreadid;
+ lk->c = 1;
+ return 1;
+ }
+ }
+ else if (EQ_OWNER(lk->threadid, mythreadid)) {
+ ++lk->c;
+ return 1;
+ }
+ return 0;
+}
+
+#define RELEASE_LOCK(lk) recursive_release_lock(lk)
+#define TRY_LOCK(lk) recursive_try_lock(lk)
+#define ACQUIRE_LOCK(lk) recursive_acquire_lock(lk)
+#define INITIAL_LOCK(lk) ((lk)->threadid = (THREAD_ID_T)0, (lk)->sl = 0, (lk)->c = 0)
+#define DESTROY_LOCK(lk) (0)
+#endif /* USE_RECURSIVE_LOCKS */
+
+#elif defined(WIN32) /* Win32 critical sections */
+#define MLOCK_T CRITICAL_SECTION
+#define ACQUIRE_LOCK(lk) (EnterCriticalSection(lk), 0)
+#define RELEASE_LOCK(lk) LeaveCriticalSection(lk)
+#define TRY_LOCK(lk) TryEnterCriticalSection(lk)
+#define INITIAL_LOCK(lk) (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000|4000))
+#define DESTROY_LOCK(lk) (DeleteCriticalSection(lk), 0)
+#define NEED_GLOBAL_LOCK_INIT
+
+static MLOCK_T malloc_global_mutex;
+static volatile LONG malloc_global_mutex_status;
+
+/* Use spin loop to initialize global lock */
+static void init_malloc_global_mutex() {
+ for (;;) {
+ long stat = malloc_global_mutex_status;
+ if (stat > 0)
+ return;
+ /* transition to < 0 while initializing, then to > 0) */
+ if (stat == 0 &&
+ interlockedcompareexchange(&malloc_global_mutex_status, (LONG)-1, (LONG)0) == 0) {
+ InitializeCriticalSection(&malloc_global_mutex);
+ interlockedexchange(&malloc_global_mutex_status, (LONG)1);
+ return;
+ }
+ SleepEx(0, FALSE);
+ }
+}
+
+#else /* pthreads-based locks */
+#define MLOCK_T pthread_mutex_t
+#define ACQUIRE_LOCK(lk) pthread_mutex_lock(lk)
+#define RELEASE_LOCK(lk) pthread_mutex_unlock(lk)
+#define TRY_LOCK(lk) (!pthread_mutex_trylock(lk))
+#define INITIAL_LOCK(lk) pthread_init_lock(lk)
+#define DESTROY_LOCK(lk) pthread_mutex_destroy(lk)
+
+#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE)
+/* Cope with old-style linux recursive lock initialization by adding */
+/* skipped internal declaration from pthread.h */
+extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr,
+ int __kind));
+#define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP
+#define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y)
+#endif /* USE_RECURSIVE_LOCKS ... */
+
+static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+static int pthread_init_lock (MLOCK_T *lk) {
+ pthread_mutexattr_t attr;
+ if (pthread_mutexattr_init(&attr)) return 1;
+#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0
+ if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1;
+#endif
+ if (pthread_mutex_init(lk, &attr)) return 1;
+ if (pthread_mutexattr_destroy(&attr)) return 1;
+ return 0;
+}
+
+#endif /* ... lock types ... */
+
+/* Common code for all lock types */
+#define USE_LOCK_BIT (2U)
+
+#ifndef ACQUIRE_MALLOC_GLOBAL_LOCK
+#define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex);
+#endif
+
+#ifndef RELEASE_MALLOC_GLOBAL_LOCK
+#define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex);
+#endif
+
+#endif /* USE_LOCKS */
+
+struct malloc_chunk {
+ size_t prev_foot; /* Size of previous chunk (if free). */
+ size_t head; /* Size and inuse bits. */
+ struct malloc_chunk* fd; /* double links -- used only if free. */
+ struct malloc_chunk* bk;
+};
+
+typedef struct malloc_chunk mchunk;
+typedef struct malloc_chunk* mchunkptr;
+typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */
+typedef unsigned int bindex_t; /* Described below */
+typedef unsigned int binmap_t; /* Described below */
+typedef unsigned int flag_t; /* The type of various bit flag sets */
+
+/* ------------------- Chunks sizes and alignments ----------------------- */
+
+#define MCHUNK_SIZE (sizeof(mchunk))
+
+#if FOOTERS
+#define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
+#else /* FOOTERS */
+#define CHUNK_OVERHEAD (SIZE_T_SIZE)
+#endif /* FOOTERS */
+
+/* MMapped chunks need a second word of overhead ... */
+#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
+/* ... and additional padding for fake next-chunk at foot */
+#define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES)
+
+/* The smallest size we can malloc is an aligned minimal chunk */
+#define MIN_CHUNK_SIZE\
+ ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
+
+/* conversion from malloc headers to user pointers, and back */
+#define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES))
+#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
+/* chunk associated with aligned address A */
+#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))
+
+/* Bounds on request (not chunk) sizes. */
+#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)
+#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
+
+/* pad request bytes into a usable size */
+#define pad_request(req) \
+ (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
+
+/* pad request, checking for minimum (but not maximum) */
+#define request2size(req) \
+ (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
+
+
+/* ------------------ Operations on head and foot fields ----------------- */
+
+/*
+ The head field of a chunk is or'ed with PINUSE_BIT when previous
+ adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in
+ use, unless mmapped, in which case both bits are cleared.
+
+ FLAG4_BIT is not used by this malloc, but might be useful in extensions.
+*/
+
+#define PINUSE_BIT (SIZE_T_ONE)
+#define CINUSE_BIT (SIZE_T_TWO)
+#define FLAG4_BIT (SIZE_T_FOUR)
+#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)
+#define FLAG_BITS (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT)
+
+/* Head value for fenceposts */
+#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)
+
+/* extraction of fields from head words */
+#define cinuse(p) ((p)->head & CINUSE_BIT)
+#define pinuse(p) ((p)->head & PINUSE_BIT)
+#define flag4inuse(p) ((p)->head & FLAG4_BIT)
+#define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT)
+#define is_mmapped(p) (((p)->head & INUSE_BITS) == 0)
+
+#define chunksize(p) ((p)->head & ~(FLAG_BITS))
+
+#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
+#define set_flag4(p) ((p)->head |= FLAG4_BIT)
+#define clear_flag4(p) ((p)->head &= ~FLAG4_BIT)
+
+/* Treat space at ptr +/- offset as a chunk */
+#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
+#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
+
+/* Ptr to next or previous physical malloc_chunk. */
+#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS)))
+#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
+
+/* extract next chunk's pinuse bit */
+#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)
+
+/* Get/set size at footer */
+#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot)
+#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
+
+/* Set size, pinuse bit, and foot */
+#define set_size_and_pinuse_of_free_chunk(p, s)\
+ ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
+
+/* Set size, pinuse bit, foot, and clear next pinuse */
+#define set_free_with_pinuse(p, s, n)\
+ (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
+
+/* Get the internal overhead associated with chunk p */
+#define overhead_for(p)\
+ (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
+
+/* Return true if malloced space is not necessarily cleared */
+#if MMAP_CLEARS
+#define calloc_must_clear(p) (!is_mmapped(p))
+#else /* MMAP_CLEARS */
+#define calloc_must_clear(p) (1)
+#endif /* MMAP_CLEARS */
+
+
+struct malloc_tree_chunk {
+ /* The first four fields must be compatible with malloc_chunk */
+ size_t prev_foot;
+ size_t head;
+ struct malloc_tree_chunk* fd;
+ struct malloc_tree_chunk* bk;
+
+ struct malloc_tree_chunk* child[2];
+ struct malloc_tree_chunk* parent;
+ bindex_t index;
+};
+
+typedef struct malloc_tree_chunk tchunk;
+typedef struct malloc_tree_chunk* tchunkptr;
+typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */
+
+/* A little helper macro for trees */
+#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
+
+struct malloc_segment {
+ char* base; /* base address */
+ size_t size; /* allocated size */
+ struct malloc_segment* next; /* ptr to next segment */
+ flag_t sflags; /* mmap and extern flag */
+};
+
+#define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT)
+#define is_extern_segment(S) ((S)->sflags & EXTERN_BIT)
+
+typedef struct malloc_segment msegment;
+typedef struct malloc_segment* msegmentptr;
+
+/* Bin types, widths and sizes */
+#define NSMALLBINS (32U)
+#define NTREEBINS (32U)
+#define SMALLBIN_SHIFT (3U)
+#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
+#define TREEBIN_SHIFT (8U)
+#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
+#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
+#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
+
+struct malloc_state {
+ binmap_t smallmap;
+ binmap_t treemap;
+ size_t dvsize;
+ size_t topsize;
+ char* least_addr;
+ mchunkptr dv;
+ mchunkptr top;
+ size_t trim_check;
+ size_t release_checks;
+ size_t magic;
+ mchunkptr smallbins[(NSMALLBINS+1)*2];
+ tbinptr treebins[NTREEBINS];
+ size_t footprint;
+ size_t max_footprint;
+ size_t footprint_limit; /* zero means no limit */
+ flag_t mflags;
+#if USE_LOCKS
+ MLOCK_T mutex; /* locate lock among fields that rarely change */
+#endif /* USE_LOCKS */
+ msegment seg;
+ void* extp; /* Unused but available for extensions */
+ size_t exts;
+};
+
+typedef struct malloc_state* mstate;
+
+/* ------------- Global malloc_state and malloc_params ------------------- */
+
+/*
+ malloc_params holds global properties, including those that can be
+ dynamically set using mallopt. There is a single instance, mparams,
+ initialized in init_mparams. Note that the non-zeroness of "magic"
+ also serves as an initialization flag.
+*/
+
+struct malloc_params {
+ size_t magic;
+ size_t page_size;
+ size_t granularity;
+ size_t mmap_threshold;
+ size_t trim_threshold;
+ flag_t default_mflags;
+};
+
+static struct malloc_params mparams;
+
+/* Ensure mparams initialized */
+#define ensure_initialization() (void)(mparams.magic != 0 || init_mparams())
+
+#if !ONLY_MSPACES
+
+/* The global malloc_state used for all non-"mspace" calls */
+static struct malloc_state _gm_;
+#define gm (&_gm_)
+#define is_global(M) ((M) == &_gm_)
+
+#endif /* !ONLY_MSPACES */
+
+#define is_initialized(M) ((M)->top != 0)
+
+/* -------------------------- system alloc setup ------------------------- */
+
+/* Operations on mflags */
+
+#define use_lock(M) ((M)->mflags & USE_LOCK_BIT)
+#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT)
+#if USE_LOCKS
+#define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT)
+#else
+#define disable_lock(M)
+#endif
+
+#define use_mmap(M) ((M)->mflags & USE_MMAP_BIT)
+#define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT)
+#if HAVE_MMAP
+#define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT)
+#else
+#define disable_mmap(M)
+#endif
+
+#define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT)
+#define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT)
+
+#define set_lock(M,L)\
+ ((M)->mflags = (L)?\
+ ((M)->mflags | USE_LOCK_BIT) :\
+ ((M)->mflags & ~USE_LOCK_BIT))
+
+/* page-align a size */
+#define page_align(S)\
+ (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))
+
+/* granularity-align a size */
+#define granularity_align(S)\
+ (((S) + (mparams.granularity - SIZE_T_ONE))\
+ & ~(mparams.granularity - SIZE_T_ONE))
+
+
+/* For mmap, use granularity alignment on windows, else page-align */
+#if defined(WIN32) || defined(__COSMOPOLITAN__)
+#define mmap_align(S) granularity_align(S)
+#else
+#define mmap_align(S) page_align(S)
+#endif
+
+/* For sys_alloc, enough padding to ensure can malloc request on success */
+#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)
+
+#define is_page_aligned(S)\
+ (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
+#define is_granularity_aligned(S)\
+ (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
+
+/* True if segment S holds address A */
+#define segment_holds(S, A)\
+ ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
+
+/* Return segment holding given address */
+static msegmentptr segment_holding(mstate m, char* addr) {
+ msegmentptr sp = &m->seg;
+ for (;;) {
+ if (addr >= sp->base && addr < sp->base + sp->size)
+ return sp;
+ if ((sp = sp->next) == 0)
+ return 0;
+ }
+}
/* Return true if segment contains a segment link */
-dontinline int has_segment_link(struct MallocState *m, msegmentptr ss) {
- msegmentptr sp;
- assert(m);
- sp = &m->seg;
+static int has_segment_link(mstate m, msegmentptr ss) {
+ msegmentptr sp = &m->seg;
for (;;) {
- if ((char *)sp >= ss->base && (char *)sp < ss->base + ss->size) return 1;
- if ((sp = sp->next) == 0) return 0;
+ if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
+ return 1;
+ if ((sp = sp->next) == 0)
+ return 0;
}
}
+#ifndef MORECORE_CANNOT_TRIM
+#define should_trim(M,s) ((s) > (M)->trim_check)
+#else /* MORECORE_CANNOT_TRIM */
+#define should_trim(M,s) (0)
+#endif /* MORECORE_CANNOT_TRIM */
+
+/*
+ TOP_FOOT_SIZE is padding at the end of a segment, including space
+ that may be needed to place segment records and fenceposts when new
+ noncontiguous segments are added.
+*/
+#define TOP_FOOT_SIZE\
+ (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
+
+
+/* ------------------------------- Hooks -------------------------------- */
+
+/*
+ PREACTION should be defined to return 0 on success, and nonzero on
+ failure. If you are not using locking, you can redefine these to do
+ anything you like.
+*/
+
+#if USE_LOCKS
+#define PREACTION(M) ((use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
+#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
+#else /* USE_LOCKS */
+
+#ifndef PREACTION
+#define PREACTION(M) (0)
+#endif /* PREACTION */
+
+#ifndef POSTACTION
+#define POSTACTION(M)
+#endif /* POSTACTION */
+
+#endif /* USE_LOCKS */
+
+/*
+ CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
+ USAGE_ERROR_ACTION is triggered on detected bad frees and
+ reallocs. The argument p is an address that might have triggered the
+ fault. It is ignored by the two predefined actions, but might be
+ useful in custom actions that try to help diagnose errors.
+*/
+
+#if PROCEED_ON_ERROR
+
+/* A count of the number of corruption errors causing resets */
+int malloc_corruption_error_count;
+
+/* default corruption action */
+static void reset_on_error(mstate m);
+
+#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)
+#define USAGE_ERROR_ACTION(m, p)
+
+#else /* PROCEED_ON_ERROR */
+
+#ifndef CORRUPTION_ERROR_ACTION
+#define CORRUPTION_ERROR_ACTION(m) ABORT
+#endif /* CORRUPTION_ERROR_ACTION */
+
+#ifndef USAGE_ERROR_ACTION
+#define USAGE_ERROR_ACTION(m,p) ABORT
+#endif /* USAGE_ERROR_ACTION */
+
+#endif /* PROCEED_ON_ERROR */
+
+
+/* -------------------------- Debugging setup ---------------------------- */
+
+#if ! DEBUG
+
+#define check_free_chunk(M,P)
+#define check_inuse_chunk(M,P)
+#define check_malloced_chunk(M,P,N)
+#define check_mmapped_chunk(M,P)
+#define check_malloc_state(M)
+#define check_top_chunk(M,P)
+
+#else /* DEBUG */
+#define check_free_chunk(M,P) do_check_free_chunk(M,P)
+#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P)
+#define check_top_chunk(M,P) do_check_top_chunk(M,P)
+#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
+#define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P)
+#define check_malloc_state(M) do_check_malloc_state(M)
+
+static void do_check_any_chunk(mstate m, mchunkptr p);
+static void do_check_top_chunk(mstate m, mchunkptr p);
+static void do_check_mmapped_chunk(mstate m, mchunkptr p);
+static void do_check_inuse_chunk(mstate m, mchunkptr p);
+static void do_check_free_chunk(mstate m, mchunkptr p);
+static void do_check_malloced_chunk(mstate m, void* mem, size_t s);
+static void do_check_tree(mstate m, tchunkptr t);
+static void do_check_treebin(mstate m, bindex_t i);
+static void do_check_smallbin(mstate m, bindex_t i);
+static void do_check_malloc_state(mstate m);
+static int bin_find(mstate m, mchunkptr x);
+static size_t traverse_and_check(mstate m);
+#endif /* DEBUG */
+
+/* ---------------------------- Indexing Bins ---------------------------- */
+
+#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
+#define small_index(s) (bindex_t)((s) >> SMALLBIN_SHIFT)
+#define small_index2size(i) ((i) << SMALLBIN_SHIFT)
+#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))
+
+/* addressing by index. See above about smallbin repositioning */
+#define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
+#define treebin_at(M,i) (&((M)->treebins[i]))
+
+/* assign tree index for size S to variable I. Use x86 asm if possible */
+#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
+#define compute_tree_index(S, I)\
+{\
+ unsigned int X = S >> TREEBIN_SHIFT;\
+ if (X == 0)\
+ I = 0;\
+ else if (X > 0xFFFF)\
+ I = NTREEBINS-1;\
+ else {\
+ unsigned int K = (unsigned) sizeof(X)*__CHAR_BIT__ - 1 - (unsigned) __builtin_clz(X); \
+ I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
+ }\
+}
+
+#elif defined (__INTEL_COMPILER)
+#define compute_tree_index(S, I)\
+{\
+ size_t X = S >> TREEBIN_SHIFT;\
+ if (X == 0)\
+ I = 0;\
+ else if (X > 0xFFFF)\
+ I = NTREEBINS-1;\
+ else {\
+ unsigned int K = _bit_scan_reverse (X); \
+ I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
+ }\
+}
+
+#elif defined(_MSC_VER) && _MSC_VER>=1300
+#define compute_tree_index(S, I)\
+{\
+ size_t X = S >> TREEBIN_SHIFT;\
+ if (X == 0)\
+ I = 0;\
+ else if (X > 0xFFFF)\
+ I = NTREEBINS-1;\
+ else {\
+ unsigned int K;\
+ _BitScanReverse((DWORD *) &K, (DWORD) X);\
+ I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
+ }\
+}
+
+#else /* GNUC */
+#define compute_tree_index(S, I)\
+{\
+ size_t X = S >> TREEBIN_SHIFT;\
+ if (X == 0)\
+ I = 0;\
+ else if (X > 0xFFFF)\
+ I = NTREEBINS-1;\
+ else {\
+ unsigned int Y = (unsigned int)X;\
+ unsigned int N = ((Y - 0x100) >> 16) & 8;\
+ unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
+ N += K;\
+ N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
+ K = 14 - N + ((Y <<= K) >> 15);\
+ I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
+ }\
+}
+#endif /* GNUC */
+
+/* Bit representing maximum resolved size in a treebin at i */
+#define bit_for_tree_index(i) \
+ (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
+
+/* Shift placing maximum resolved bit in a treebin at i as sign bit */
+#define leftshift_for_tree_index(i) \
+ ((i == NTREEBINS-1)? 0 : \
+ ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
+
+/* The size of the smallest chunk held in bin with index i */
+#define minsize_for_tree_index(i) \
+ ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \
+ (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
+
+
+/* ------------------------ Operations on bin maps ----------------------- */
+
+/* bit corresponding to given index */
+#define idx2bit(i) ((binmap_t)(1) << (i))
+
+/* Mark/Clear bits with given index */
+#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))
+#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))
+#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))
+
+#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))
+#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))
+#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))
+
+/* isolate the least set bit of a bitmap */
+#define least_bit(x) ((x) & -(x))
+
+/* mask with all bits to left of least bit of x on */
+#define left_bits(x) ((x<<1) | -(x<<1))
+
+/* mask with all bits to left of or equal to least bit of x on */
+#define same_or_left_bits(x) ((x) | -(x))
+
+/* index corresponding to given bit. Use x86 asm if possible */
+
+#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
+#define compute_bit2idx(X, I)\
+{\
+ unsigned int J;\
+ J = __builtin_ctz(X); \
+ I = (bindex_t)J;\
+}
+
+#elif defined (__INTEL_COMPILER)
+#define compute_bit2idx(X, I)\
+{\
+ unsigned int J;\
+ J = _bit_scan_forward (X); \
+ I = (bindex_t)J;\
+}
+
+#elif defined(_MSC_VER) && _MSC_VER>=1300
+#define compute_bit2idx(X, I)\
+{\
+ unsigned int J;\
+ _BitScanForward((DWORD *) &J, X);\
+ I = (bindex_t)J;\
+}
+
+#elif USE_BUILTIN_FFS
+#define compute_bit2idx(X, I) I = ffs(X)-1
+
+#else
+#define compute_bit2idx(X, I)\
+{\
+ unsigned int Y = X - 1;\
+ unsigned int K = Y >> (16-4) & 16;\
+ unsigned int N = K; Y >>= K;\
+ N += K = Y >> (8-3) & 8; Y >>= K;\
+ N += K = Y >> (4-2) & 4; Y >>= K;\
+ N += K = Y >> (2-1) & 2; Y >>= K;\
+ N += K = Y >> (1-0) & 1; Y >>= K;\
+ I = (bindex_t)(N + Y);\
+}
+#endif /* GNUC */
+
+
+/* ----------------------- Runtime Check Support ------------------------- */
+
+/*
+ For security, the main invariant is that malloc/free/etc never
+ writes to a static address other than malloc_state, unless static
+ malloc_state itself has been corrupted, which cannot occur via
+ malloc (because of these checks). In essence this means that we
+ believe all pointers, sizes, maps etc held in malloc_state, but
+ check all of those linked or offsetted from other embedded data
+ structures. These checks are interspersed with main code in a way
+ that tends to minimize their run-time cost.
+
+ When FOOTERS is defined, in addition to range checking, we also
+ verify footer fields of inuse chunks, which can be used guarantee
+ that the mstate controlling malloc/free is intact. This is a
+ streamlined version of the approach described by William Robertson
+ et al in "Run-time Detection of Heap-based Overflows" LISA'03
+ http://www.usenix.org/events/lisa03/tech/robertson.html The footer
+ of an inuse chunk holds the xor of its mstate and a random seed,
+ that is checked upon calls to free() and realloc(). This is
+ (probabalistically) unguessable from outside the program, but can be
+ computed by any code successfully malloc'ing any chunk, so does not
+ itself provide protection against code that has already broken
+ security through some other means. Unlike Robertson et al, we
+ always dynamically check addresses of all offset chunks (previous,
+ next, etc). This turns out to be cheaper than relying on hashes.
+*/
+
+#if !INSECURE
+/* Check if address a is at least as high as any from MORECORE or MMAP */
+#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
+/* Check if address of next chunk n is higher than base chunk p */
+#define ok_next(p, n) ((char*)(p) < (char*)(n))
+/* Check if p has inuse status */
+#define ok_inuse(p) is_inuse(p)
+/* Check if p has its pinuse bit on */
+#define ok_pinuse(p) pinuse(p)
+
+#else /* !INSECURE */
+#define ok_address(M, a) (1)
+#define ok_next(b, n) (1)
+#define ok_inuse(p) (1)
+#define ok_pinuse(p) (1)
+#endif /* !INSECURE */
+
+#if (FOOTERS && !INSECURE)
+/* Check if (alleged) mstate m has expected magic field */
+#define ok_magic(M) ((M)->magic == mparams.magic)
+#else /* (FOOTERS && !INSECURE) */
+#define ok_magic(M) (1)
+#endif /* (FOOTERS && !INSECURE) */
+
+/* In gcc, use __builtin_expect to minimize impact of checks */
+#if !INSECURE
+#if defined(__GNUC__) && __GNUC__ >= 3
+#define RTCHECK(e) __builtin_expect(e, 1)
+#else /* GNUC */
+#define RTCHECK(e) (e)
+#endif /* GNUC */
+#else /* !INSECURE */
+#define RTCHECK(e) (1)
+#endif /* !INSECURE */
+
+/* macros to set up inuse chunks with or without footers */
+
+#if !FOOTERS
+
+#define mark_inuse_foot(M,p,s)
+
+/* Macros for setting head/foot of non-mmapped chunks */
+
+/* Set cinuse bit and pinuse bit of next chunk */
+#define set_inuse(M,p,s)\
+ ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
+ ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
+
+/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
+#define set_inuse_and_pinuse(M,p,s)\
+ ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
+ ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
+
+/* Set size, cinuse and pinuse bit of this chunk */
+#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
+ ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
+
+#else /* FOOTERS */
+
+/* Set foot of inuse chunk to be xor of mstate and seed */
+#define mark_inuse_foot(M,p,s)\
+ (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
+
+#define get_mstate_for(p)\
+ ((mstate)(((mchunkptr)((char*)(p) +\
+ (chunksize(p))))->prev_foot ^ mparams.magic))
+
+#define set_inuse(M,p,s)\
+ ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
+ (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
+ mark_inuse_foot(M,p,s))
+
+#define set_inuse_and_pinuse(M,p,s)\
+ ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
+ (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
+ mark_inuse_foot(M,p,s))
+
+#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
+ ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
+ mark_inuse_foot(M, p, s))
+
+#endif /* !FOOTERS */
+
+/* ---------------------------- setting mparams -------------------------- */
+
+#if LOCK_AT_FORK
+static void pre_fork(void) { ACQUIRE_LOCK(&(gm)->mutex); }
+static void post_fork_parent(void) { RELEASE_LOCK(&(gm)->mutex); }
+static void post_fork_child(void) { INITIAL_LOCK(&(gm)->mutex); }
+#endif /* LOCK_AT_FORK */
+
+/* Initialize mparams */
+static int init_mparams(void) {
+#ifdef NEED_GLOBAL_LOCK_INIT
+ if (malloc_global_mutex_status <= 0)
+ init_malloc_global_mutex();
+#endif
+
+ ACQUIRE_MALLOC_GLOBAL_LOCK();
+ if (mparams.magic == 0) {
+ size_t magic;
+ size_t psize;
+ size_t gsize;
+
+#ifndef WIN32
+ psize = malloc_getpagesize;
+ gsize = ((DEFAULT_GRANULARITY != 0)? DEFAULT_GRANULARITY : psize);
+#else /* WIN32 */
+ {
+ SYSTEM_INFO system_info;
+ GetSystemInfo(&system_info);
+ psize = system_info.dwPageSize;
+ gsize = ((DEFAULT_GRANULARITY != 0)?
+ DEFAULT_GRANULARITY : system_info.dwAllocationGranularity);
+ }
+#endif /* WIN32 */
+
+ /* Sanity-check configuration:
+ size_t must be unsigned and as wide as pointer type.
+ ints must be at least 4 bytes.
+ alignment must be at least 8.
+ Alignment, min chunk size, and page size must all be powers of 2.
+ */
+ if ((sizeof(size_t) != sizeof(char*)) ||
+ (MAX_SIZE_T < MIN_CHUNK_SIZE) ||
+ (sizeof(int) < 4) ||
+ (MALLOC_ALIGNMENT < (size_t)8U) ||
+ ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) ||
+ ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) ||
+ ((gsize & (gsize-SIZE_T_ONE)) != 0) ||
+ ((psize & (psize-SIZE_T_ONE)) != 0))
+ ABORT;
+ mparams.granularity = gsize;
+ mparams.page_size = psize;
+ mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
+ mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
+#if MORECORE_CONTIGUOUS
+ mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;
+#else /* MORECORE_CONTIGUOUS */
+ mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
+#endif /* MORECORE_CONTIGUOUS */
+
+#if !ONLY_MSPACES
+ /* Set up lock for main malloc area */
+ gm->mflags = mparams.default_mflags;
+ (void)INITIAL_LOCK(&gm->mutex);
+#endif
+#if LOCK_AT_FORK
+ pthread_atfork(&pre_fork, &post_fork_parent, &post_fork_child);
+#endif
+
+ {
+#if USE_DEV_RANDOM
+ int fd;
+ unsigned char buf[sizeof(size_t)];
+ /* Try to use /dev/urandom, else fall back on using time */
+ if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
+ read(fd, buf, sizeof(buf)) == sizeof(buf)) {
+ magic = *((size_t *) buf);
+ close(fd);
+ }
+ else
+#endif /* USE_DEV_RANDOM */
+ magic = (size_t)(rand64() ^ (size_t)0x55555555U);
+ magic |= (size_t)8U; /* ensure nonzero */
+ magic &= ~(size_t)7U; /* improve chances of fault for bad values */
+ /* Until memory modes commonly available, use volatile-write */
+ (*(volatile size_t *)(&(mparams.magic))) = magic;
+ }
+ }
+
+ RELEASE_MALLOC_GLOBAL_LOCK();
+ return 1;
+}
+
+/* support for mallopt */
+static int change_mparam(int param_number, int value) {
+ size_t val;
+ ensure_initialization();
+ val = (value == -1)? MAX_SIZE_T : (size_t)value;
+ switch(param_number) {
+ case M_TRIM_THRESHOLD:
+ mparams.trim_threshold = val;
+ return 1;
+ case M_GRANULARITY:
+ if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
+ mparams.granularity = val;
+ return 1;
+ }
+ else
+ return 0;
+ case M_MMAP_THRESHOLD:
+ mparams.mmap_threshold = val;
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+#if DEBUG
+/* ------------------------- Debugging Support --------------------------- */
+
+/* Check properties of any chunk, whether free, inuse, mmapped etc */
+static void do_check_any_chunk(mstate m, mchunkptr p) {
+ assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
+ assert(ok_address(m, p));
+}
+
+/* Check properties of top chunk */
+static void do_check_top_chunk(mstate m, mchunkptr p) {
+ msegmentptr sp = segment_holding(m, (char*)p);
+ size_t sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! */
+ assert(sp != 0);
+ assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
+ assert(ok_address(m, p));
+ assert(sz == m->topsize);
+ assert(sz > 0);
+ assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);
+ assert(pinuse(p));
+ assert(!pinuse(chunk_plus_offset(p, sz)));
+}
+
+/* Check properties of (inuse) mmapped chunks */
+static void do_check_mmapped_chunk(mstate m, mchunkptr p) {
+ size_t sz = chunksize(p);
+ size_t len = (sz + (p->prev_foot) + MMAP_FOOT_PAD);
+ assert(is_mmapped(p));
+ assert(use_mmap(m));
+ assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
+ assert(ok_address(m, p));
+ assert(!is_small(sz));
+ assert((len & (mparams.page_size-SIZE_T_ONE)) == 0);
+ assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
+ assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0);
+}
+
+/* Check properties of inuse chunks */
+static void do_check_inuse_chunk(mstate m, mchunkptr p) {
+ do_check_any_chunk(m, p);
+ assert(is_inuse(p));
+ assert(next_pinuse(p));
+ /* If not pinuse and not mmapped, previous chunk has OK offset */
+ assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
+ if (is_mmapped(p))
+ do_check_mmapped_chunk(m, p);
+}
+
+/* Check properties of free chunks */
+static void do_check_free_chunk(mstate m, mchunkptr p) {
+ size_t sz = chunksize(p);
+ mchunkptr next = chunk_plus_offset(p, sz);
+ do_check_any_chunk(m, p);
+ assert(!is_inuse(p));
+ assert(!next_pinuse(p));
+ assert (!is_mmapped(p));
+ if (p != m->dv && p != m->top) {
+ if (sz >= MIN_CHUNK_SIZE) {
+ assert((sz & CHUNK_ALIGN_MASK) == 0);
+ assert(is_aligned(chunk2mem(p)));
+ assert(next->prev_foot == sz);
+ assert(pinuse(p));
+ assert (next == m->top || is_inuse(next));
+ assert(p->fd->bk == p);
+ assert(p->bk->fd == p);
+ }
+ else /* markers are always of size SIZE_T_SIZE */
+ assert(sz == SIZE_T_SIZE);
+ }
+}
+
+/* Check properties of malloced chunks at the point they are malloced */
+static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
+ if (mem != 0) {
+ mchunkptr p = mem2chunk(mem);
+ size_t sz = p->head & ~INUSE_BITS;
+ do_check_inuse_chunk(m, p);
+ assert((sz & CHUNK_ALIGN_MASK) == 0);
+ assert(sz >= MIN_CHUNK_SIZE);
+ assert(sz >= s);
+ /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
+ assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
+ }
+}
+
+/* Check a tree and its subtrees. */
+static void do_check_tree(mstate m, tchunkptr t) {
+ tchunkptr head = 0;
+ tchunkptr u = t;
+ bindex_t tindex = t->index;
+ size_t tsize = chunksize(t);
+ bindex_t idx;
+ compute_tree_index(tsize, idx);
+ assert(tindex == idx);
+ assert(tsize >= MIN_LARGE_SIZE);
+ assert(tsize >= minsize_for_tree_index(idx));
+ assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));
+
+ do { /* traverse through chain of same-sized nodes */
+ do_check_any_chunk(m, ((mchunkptr)u));
+ assert(u->index == tindex);
+ assert(chunksize(u) == tsize);
+ assert(!is_inuse(u));
+ assert(!next_pinuse(u));
+ assert(u->fd->bk == u);
+ assert(u->bk->fd == u);
+ if (u->parent == 0) {
+ assert(u->child[0] == 0);
+ assert(u->child[1] == 0);
+ }
+ else {
+ assert(head == 0); /* only one node on chain has parent */
+ head = u;
+ assert(u->parent != u);
+ assert (u->parent->child[0] == u ||
+ u->parent->child[1] == u ||
+ *((tbinptr*)(u->parent)) == u);
+ if (u->child[0] != 0) {
+ assert(u->child[0]->parent == u);
+ assert(u->child[0] != u);
+ do_check_tree(m, u->child[0]);
+ }
+ if (u->child[1] != 0) {
+ assert(u->child[1]->parent == u);
+ assert(u->child[1] != u);
+ do_check_tree(m, u->child[1]);
+ }
+ if (u->child[0] != 0 && u->child[1] != 0) {
+ assert(chunksize(u->child[0]) < chunksize(u->child[1]));
+ }
+ }
+ u = u->fd;
+ } while (u != t);
+ assert(head != 0);
+}
+
+/* Check all the chunks in a treebin. */
+static void do_check_treebin(mstate m, bindex_t i) {
+ tbinptr* tb = treebin_at(m, i);
+ tchunkptr t = *tb;
+ int empty = (m->treemap & (1U << i)) == 0;
+ if (t == 0)
+ assert(empty);
+ if (!empty)
+ do_check_tree(m, t);
+}
+
+/* Check all the chunks in a smallbin. */
+static void do_check_smallbin(mstate m, bindex_t i) {
+ sbinptr b = smallbin_at(m, i);
+ mchunkptr p = b->bk;
+ unsigned int empty = (m->smallmap & (1U << i)) == 0;
+ if (p == b)
+ assert(empty);
+ if (!empty) {
+ for (; p != b; p = p->bk) {
+ size_t size = chunksize(p);
+ mchunkptr q;
+ /* each chunk claims to be free */
+ do_check_free_chunk(m, p);
+ /* chunk belongs in bin */
+ assert(small_index(size) == i);
+ assert(p->bk == b || chunksize(p->bk) == chunksize(p));
+ /* chunk is followed by an inuse chunk */
+ q = next_chunk(p);
+ if (q->head != FENCEPOST_HEAD)
+ do_check_inuse_chunk(m, q);
+ }
+ }
+}
+
+/* Find x in a bin. Used in other check functions. */
+static int bin_find(mstate m, mchunkptr x) {
+ size_t size = chunksize(x);
+ if (is_small(size)) {
+ bindex_t sidx = small_index(size);
+ sbinptr b = smallbin_at(m, sidx);
+ if (smallmap_is_marked(m, sidx)) {
+ mchunkptr p = b;
+ do {
+ if (p == x)
+ return 1;
+ } while ((p = p->fd) != b);
+ }
+ }
+ else {
+ bindex_t tidx;
+ compute_tree_index(size, tidx);
+ if (treemap_is_marked(m, tidx)) {
+ tchunkptr t = *treebin_at(m, tidx);
+ size_t sizebits = size << leftshift_for_tree_index(tidx);
+ while (t != 0 && chunksize(t) != size) {
+ t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
+ sizebits <<= 1;
+ }
+ if (t != 0) {
+ tchunkptr u = t;
+ do {
+ if (u == (tchunkptr)x)
+ return 1;
+ } while ((u = u->fd) != t);
+ }
+ }
+ }
+ return 0;
+}
+
+/* Traverse each chunk and check it; return total */
+static size_t traverse_and_check(mstate m) {
+ size_t sum = 0;
+ if (is_initialized(m)) {
+ msegmentptr s = &m->seg;
+ sum += m->topsize + TOP_FOOT_SIZE;
+ while (s != 0) {
+ mchunkptr q = align_as_chunk(s->base);
+ mchunkptr lastq = 0;
+ assert(pinuse(q));
+ while (segment_holds(s, q) &&
+ q != m->top && q->head != FENCEPOST_HEAD) {
+ sum += chunksize(q);
+ if (is_inuse(q)) {
+ assert(!bin_find(m, q));
+ do_check_inuse_chunk(m, q);
+ }
+ else {
+ assert(q == m->dv || bin_find(m, q));
+ assert(lastq == 0 || is_inuse(lastq)); /* Not 2 consecutive free */
+ do_check_free_chunk(m, q);
+ }
+ lastq = q;
+ q = next_chunk(q);
+ }
+ s = s->next;
+ }
+ }
+ return sum;
+}
+
+
+/* Check all properties of malloc_state. */
+static void do_check_malloc_state(mstate m) {
+ bindex_t i;
+ size_t total;
+ /* check bins */
+ for (i = 0; i < NSMALLBINS; ++i)
+ do_check_smallbin(m, i);
+ for (i = 0; i < NTREEBINS; ++i)
+ do_check_treebin(m, i);
+
+ if (m->dvsize != 0) { /* check dv chunk */
+ do_check_any_chunk(m, m->dv);
+ assert(m->dvsize == chunksize(m->dv));
+ assert(m->dvsize >= MIN_CHUNK_SIZE);
+ assert(bin_find(m, m->dv) == 0);
+ }
+
+ if (m->top != 0) { /* check top chunk */
+ do_check_top_chunk(m, m->top);
+ /*assert(m->topsize == chunksize(m->top)); redundant */
+ assert(m->topsize > 0);
+ assert(bin_find(m, m->top) == 0);
+ }
+
+ total = traverse_and_check(m);
+ assert(total <= m->footprint);
+ assert(m->footprint <= m->max_footprint);
+}
+#endif /* DEBUG */
+
+/* ----------------------------- statistics ------------------------------ */
+
+#if !NO_MALLINFO
+static struct mallinfo internal_mallinfo(mstate m) {
+ struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+ ensure_initialization();
+ if (!PREACTION(m)) {
+ check_malloc_state(m);
+ if (is_initialized(m)) {
+ size_t nfree = SIZE_T_ONE; /* top always free */
+ size_t mfree = m->topsize + TOP_FOOT_SIZE;
+ size_t sum = mfree;
+ msegmentptr s = &m->seg;
+ while (s != 0) {
+ mchunkptr q = align_as_chunk(s->base);
+ while (segment_holds(s, q) &&
+ q != m->top && q->head != FENCEPOST_HEAD) {
+ size_t sz = chunksize(q);
+ sum += sz;
+ if (!is_inuse(q)) {
+ mfree += sz;
+ ++nfree;
+ }
+ q = next_chunk(q);
+ }
+ s = s->next;
+ }
+
+ nm.arena = sum;
+ nm.ordblks = nfree;
+ nm.hblkhd = m->footprint - sum;
+ nm.usmblks = m->max_footprint;
+ nm.uordblks = m->footprint - mfree;
+ nm.fordblks = mfree;
+ nm.keepcost = m->topsize;
+ }
+
+ POSTACTION(m);
+ }
+ return nm;
+}
+#endif /* !NO_MALLINFO */
+
+#if !NO_MALLOC_STATS
+static void internal_malloc_stats(mstate m) {
+ ensure_initialization();
+ if (!PREACTION(m)) {
+ size_t maxfp = 0;
+ size_t fp = 0;
+ size_t used = 0;
+ check_malloc_state(m);
+ if (is_initialized(m)) {
+ msegmentptr s = &m->seg;
+ maxfp = m->max_footprint;
+ fp = m->footprint;
+ used = fp - (m->topsize + TOP_FOOT_SIZE);
+
+ while (s != 0) {
+ mchunkptr q = align_as_chunk(s->base);
+ while (segment_holds(s, q) &&
+ q != m->top && q->head != FENCEPOST_HEAD) {
+ if (!is_inuse(q))
+ used -= chunksize(q);
+ q = next_chunk(q);
+ }
+ s = s->next;
+ }
+ }
+ POSTACTION(m); /* drop lock */
+ kprintf("max system bytes = %10lu\n", (unsigned long)(maxfp));
+ kprintf("system bytes = %10lu\n", (unsigned long)(fp));
+ kprintf("in use bytes = %10lu\n", (unsigned long)(used));
+ }
+}
+#endif /* NO_MALLOC_STATS */
+
+/* ----------------------- Operations on smallbins ----------------------- */
+
+/*
+ Various forms of linking and unlinking are defined as macros. Even
+ the ones for trees, which are very long but have very short typical
+ paths. This is ugly but reduces reliance on inlining support of
+ compilers.
+*/
+
+/* Link a free chunk into a smallbin */
+#define insert_small_chunk(M, P, S) {\
+ bindex_t I = small_index(S);\
+ mchunkptr B = smallbin_at(M, I);\
+ mchunkptr F = B;\
+ assert(S >= MIN_CHUNK_SIZE);\
+ if (!smallmap_is_marked(M, I))\
+ mark_smallmap(M, I);\
+ else if (RTCHECK(ok_address(M, B->fd)))\
+ F = B->fd;\
+ else {\
+ CORRUPTION_ERROR_ACTION(M);\
+ }\
+ B->fd = P;\
+ F->bk = P;\
+ P->fd = F;\
+ P->bk = B;\
+}
+
+/* Unlink a chunk from a smallbin */
+#define unlink_small_chunk(M, P, S) {\
+ mchunkptr F = P->fd;\
+ mchunkptr B = P->bk;\
+ bindex_t I = small_index(S);\
+ assert(P != B);\
+ assert(P != F);\
+ assert(chunksize(P) == small_index2size(I));\
+ if (RTCHECK(F == smallbin_at(M,I) || (ok_address(M, F) && F->bk == P))) { \
+ if (B == F) {\
+ clear_smallmap(M, I);\
+ }\
+ else if (RTCHECK(B == smallbin_at(M,I) ||\
+ (ok_address(M, B) && B->fd == P))) {\
+ F->bk = B;\
+ B->fd = F;\
+ }\
+ else {\
+ CORRUPTION_ERROR_ACTION(M);\
+ }\
+ }\
+ else {\
+ CORRUPTION_ERROR_ACTION(M);\
+ }\
+}
+
+/* Unlink the first chunk from a smallbin */
+#define unlink_first_small_chunk(M, B, P, I) {\
+ mchunkptr F = P->fd;\
+ assert(P != B);\
+ assert(P != F);\
+ assert(chunksize(P) == small_index2size(I));\
+ if (B == F) {\
+ clear_smallmap(M, I);\
+ }\
+ else if (RTCHECK(ok_address(M, F) && F->bk == P)) {\
+ F->bk = B;\
+ B->fd = F;\
+ }\
+ else {\
+ CORRUPTION_ERROR_ACTION(M);\
+ }\
+}
+
+/* Replace dv node, binning the old one */
+/* Used only when dvsize known to be small */
+#define replace_dv(M, P, S) {\
+ size_t DVS = M->dvsize;\
+ assert(is_small(DVS));\
+ if (DVS != 0) {\
+ mchunkptr DV = M->dv;\
+ insert_small_chunk(M, DV, DVS);\
+ }\
+ M->dvsize = S;\
+ M->dv = P;\
+}
+
+/* ------------------------- Operations on trees ------------------------- */
+
+/* Insert chunk into tree */
+#define insert_large_chunk(M, X, S) {\
+ tbinptr* H;\
+ bindex_t I;\
+ compute_tree_index(S, I);\
+ H = treebin_at(M, I);\
+ X->index = I;\
+ X->child[0] = X->child[1] = 0;\
+ if (!treemap_is_marked(M, I)) {\
+ mark_treemap(M, I);\
+ *H = X;\
+ X->parent = (tchunkptr)H;\
+ X->fd = X->bk = X;\
+ }\
+ else {\
+ tchunkptr T = *H;\
+ size_t K = S << leftshift_for_tree_index(I);\
+ for (;;) {\
+ if (chunksize(T) != S) {\
+ tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
+ K <<= 1;\
+ if (*C != 0)\
+ T = *C;\
+ else if (RTCHECK(ok_address(M, C))) {\
+ *C = X;\
+ X->parent = T;\
+ X->fd = X->bk = X;\
+ break;\
+ }\
+ else {\
+ CORRUPTION_ERROR_ACTION(M);\
+ break;\
+ }\
+ }\
+ else {\
+ tchunkptr F = T->fd;\
+ if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
+ T->fd = F->bk = X;\
+ X->fd = F;\
+ X->bk = T;\
+ X->parent = 0;\
+ break;\
+ }\
+ else {\
+ CORRUPTION_ERROR_ACTION(M);\
+ break;\
+ }\
+ }\
+ }\
+ }\
+}
+
+/*
+ Unlink steps:
+
+ 1. If x is a chained node, unlink it from its same-sized fd/bk links
+ and choose its bk node as its replacement.
+ 2. If x was the last node of its size, but not a leaf node, it must
+ be replaced with a leaf node (not merely one with an open left or
+ right), to make sure that lefts and rights of descendents
+ correspond properly to bit masks. We use the rightmost descendent
+ of x. We could use any other leaf, but this is easy to locate and
+ tends to counteract removal of leftmosts elsewhere, and so keeps
+ paths shorter than minimally guaranteed. This doesn't loop much
+ because on average a node in a tree is near the bottom.
+ 3. If x is the base of a chain (i.e., has parent links) relink
+ x's parent and children to x's replacement (or null if none).
+*/
+
+#define unlink_large_chunk(M, X) {\
+ tchunkptr XP = X->parent;\
+ tchunkptr R;\
+ if (X->bk != X) {\
+ tchunkptr F = X->fd;\
+ R = X->bk;\
+ if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) {\
+ F->bk = R;\
+ R->fd = F;\
+ }\
+ else {\
+ CORRUPTION_ERROR_ACTION(M);\
+ }\
+ }\
+ else {\
+ tchunkptr* RP;\
+ if (((R = *(RP = &(X->child[1]))) != 0) ||\
+ ((R = *(RP = &(X->child[0]))) != 0)) {\
+ tchunkptr* CP;\
+ while ((*(CP = &(R->child[1])) != 0) ||\
+ (*(CP = &(R->child[0])) != 0)) {\
+ R = *(RP = CP);\
+ }\
+ if (RTCHECK(ok_address(M, RP)))\
+ *RP = 0;\
+ else {\
+ CORRUPTION_ERROR_ACTION(M);\
+ }\
+ }\
+ }\
+ if (XP != 0) {\
+ tbinptr* H = treebin_at(M, X->index);\
+ if (X == *H) {\
+ if ((*H = R) == 0) \
+ clear_treemap(M, X->index);\
+ }\
+ else if (RTCHECK(ok_address(M, XP))) {\
+ if (XP->child[0] == X) \
+ XP->child[0] = R;\
+ else \
+ XP->child[1] = R;\
+ }\
+ else\
+ CORRUPTION_ERROR_ACTION(M);\
+ if (R != 0) {\
+ if (RTCHECK(ok_address(M, R))) {\
+ tchunkptr C0, C1;\
+ R->parent = XP;\
+ if ((C0 = X->child[0]) != 0) {\
+ if (RTCHECK(ok_address(M, C0))) {\
+ R->child[0] = C0;\
+ C0->parent = R;\
+ }\
+ else\
+ CORRUPTION_ERROR_ACTION(M);\
+ }\
+ if ((C1 = X->child[1]) != 0) {\
+ if (RTCHECK(ok_address(M, C1))) {\
+ R->child[1] = C1;\
+ C1->parent = R;\
+ }\
+ else\
+ CORRUPTION_ERROR_ACTION(M);\
+ }\
+ }\
+ else\
+ CORRUPTION_ERROR_ACTION(M);\
+ }\
+ }\
+}
+
+/* Relays to large vs small bin operations */
+
+#define insert_chunk(M, P, S)\
+ if (is_small(S)) insert_small_chunk(M, P, S)\
+ else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
+
+#define unlink_chunk(M, P, S)\
+ if (is_small(S)) unlink_small_chunk(M, P, S)\
+ else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
+
+
+/* Relays to internal calls to malloc/free from realloc, memalign etc */
+
+#if ONLY_MSPACES
+#define internal_malloc(m, b) mspace_malloc(m, b)
+#define internal_free(m, mem) mspace_free(m,mem);
+#else /* ONLY_MSPACES */
+#if MSPACES
+#define internal_malloc(m, b)\
+ ((m == gm)? dlmalloc(b) : mspace_malloc(m, b))
+#define internal_free(m, mem)\
+ if (m == gm) dlfree(mem); else mspace_free(m,mem);
+#else /* MSPACES */
+#define internal_malloc(m, b) dlmalloc(b)
+#define internal_free(m, mem) dlfree(mem)
+#endif /* MSPACES */
+#endif /* ONLY_MSPACES */
+
+/* ----------------------- Direct-mmapping chunks ----------------------- */
+
/*
Directly mmapped chunks are set up with an offset to the start of
the mmapped region stored in the prev_foot field of the chunk. This
@@ -184,18 +2800,16 @@ dontinline int has_segment_link(struct MallocState *m, msegmentptr ss) {
requirements (especially in memalign).
*/
-/* For sys_alloc, enough padding to ensure can malloc request on success */
-#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)
-
/* Malloc using mmap */
-static void *mmap_alloc(struct MallocState *m, size_t nb) {
+static void* mmap_alloc(mstate m, size_t nb) {
size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
if (m->footprint_limit != 0) {
size_t fp = m->footprint + mmsize;
- if (fp <= m->footprint || fp > m->footprint_limit) return 0;
+ if (fp <= m->footprint || fp > m->footprint_limit)
+ return 0;
}
- if (mmsize > nb) { /* Check for wrap around 0 */
- char *mm = (char *)(dlmalloc_requires_more_vespene_gas(mmsize));
+ if (mmsize > nb) { /* Check for wrap around 0 */
+ char* mm = (char*)(dlmalloc_requires_more_vespene_gas(mmsize));
if (mm != CMFAIL) {
size_t offset = align_offset(chunk2mem(mm));
size_t psize = mmsize - offset - MMAP_FOOT_PAD;
@@ -204,8 +2818,10 @@ static void *mmap_alloc(struct MallocState *m, size_t nb) {
p->head = psize;
mark_inuse_foot(m, p, psize);
chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
- chunk_plus_offset(p, psize + SIZE_T_SIZE)->head = 0;
- if (m->least_addr == 0 || mm < m->least_addr) m->least_addr = mm;
+ chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
+
+ if (m->least_addr == 0 || mm < m->least_addr)
+ m->least_addr = mm;
if ((m->footprint += mmsize) > m->max_footprint)
m->max_footprint = m->footprint;
assert(is_aligned(chunk2mem(p)));
@@ -216,11 +2832,188 @@ static void *mmap_alloc(struct MallocState *m, size_t nb) {
return 0;
}
-/**
- * Gets memory from system.
- */
-static void *dlmalloc_sys_alloc(struct MallocState *m, size_t nb) {
- char *tbase = CMFAIL;
+/* Realloc using mmap */
+static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb, int flags) {
+ size_t oldsize = chunksize(oldp);
+ (void)flags; /* placate people compiling -Wunused */
+ if (is_small(nb)) /* Can't shrink mmap regions below small size */
+ return 0;
+ /* Keep old chunk if big enough but not too big */
+ if (oldsize >= nb + SIZE_T_SIZE &&
+ (oldsize - nb) <= (mparams.granularity << 1))
+ return oldp;
+ else {
+ size_t offset = oldp->prev_foot;
+ size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
+ size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
+ char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
+ oldmmsize, newmmsize, flags);
+ if (cp != CMFAIL) {
+ mchunkptr newp = (mchunkptr)(cp + offset);
+ size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
+ newp->head = psize;
+ mark_inuse_foot(m, newp, psize);
+ chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
+ chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
+
+ if (cp < m->least_addr)
+ m->least_addr = cp;
+ if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
+ m->max_footprint = m->footprint;
+ check_mmapped_chunk(m, newp);
+ return newp;
+ }
+ }
+ return 0;
+}
+
+
+/* -------------------------- mspace management -------------------------- */
+
+/* Initialize top chunk and its size */
+static void init_top(mstate m, mchunkptr p, size_t psize) {
+ /* Ensure alignment */
+ size_t offset = align_offset(chunk2mem(p));
+ p = (mchunkptr)((char*)p + offset);
+ psize -= offset;
+
+ m->top = p;
+ m->topsize = psize;
+ p->head = psize | PINUSE_BIT;
+ /* set size of fake trailing chunk holding overhead space only once */
+ chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
+ m->trim_check = mparams.trim_threshold; /* reset on each update */
+}
+
+/* Initialize bins for a new mstate that is otherwise zeroed out */
+static void init_bins(mstate m) {
+ /* Establish circular links for smallbins */
+ bindex_t i;
+ for (i = 0; i < NSMALLBINS; ++i) {
+ sbinptr bin = smallbin_at(m,i);
+ bin->fd = bin->bk = bin;
+ }
+}
+
+#if PROCEED_ON_ERROR
+
+/* default corruption action */
+static void reset_on_error(mstate m) {
+ int i;
+ ++malloc_corruption_error_count;
+ /* Reinitialize fields to forget about all memory */
+ m->smallmap = m->treemap = 0;
+ m->dvsize = m->topsize = 0;
+ m->seg.base = 0;
+ m->seg.size = 0;
+ m->seg.next = 0;
+ m->top = m->dv = 0;
+ for (i = 0; i < NTREEBINS; ++i)
+ *treebin_at(m, i) = 0;
+ init_bins(m);
+}
+#endif /* PROCEED_ON_ERROR */
+
+/* Allocate chunk and prepend remainder with chunk in successor base. */
+static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
+ size_t nb) {
+ mchunkptr p = align_as_chunk(newbase);
+ mchunkptr oldfirst = align_as_chunk(oldbase);
+ size_t psize = (char*)oldfirst - (char*)p;
+ mchunkptr q = chunk_plus_offset(p, nb);
+ size_t qsize = psize - nb;
+ set_size_and_pinuse_of_inuse_chunk(m, p, nb);
+
+ assert((char*)oldfirst > (char*)q);
+ assert(pinuse(oldfirst));
+ assert(qsize >= MIN_CHUNK_SIZE);
+
+ /* consolidate remainder with first chunk of old base */
+ if (oldfirst == m->top) {
+ size_t tsize = m->topsize += qsize;
+ m->top = q;
+ q->head = tsize | PINUSE_BIT;
+ check_top_chunk(m, q);
+ }
+ else if (oldfirst == m->dv) {
+ size_t dsize = m->dvsize += qsize;
+ m->dv = q;
+ set_size_and_pinuse_of_free_chunk(q, dsize);
+ }
+ else {
+ if (!is_inuse(oldfirst)) {
+ size_t nsize = chunksize(oldfirst);
+ unlink_chunk(m, oldfirst, nsize);
+ oldfirst = chunk_plus_offset(oldfirst, nsize);
+ qsize += nsize;
+ }
+ set_free_with_pinuse(q, qsize, oldfirst);
+ insert_chunk(m, q, qsize);
+ check_free_chunk(m, q);
+ }
+
+ check_malloced_chunk(m, chunk2mem(p), nb);
+ return chunk2mem(p);
+}
+
+/* Add a segment to hold a new noncontiguous region */
+static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) {
+ /* Determine locations and sizes of segment, fenceposts, old top */
+ char* old_top = (char*)m->top;
+ msegmentptr oldsp = segment_holding(m, old_top);
+ char* old_end = oldsp->base + oldsp->size;
+ size_t ssize = pad_request(sizeof(struct malloc_segment));
+ char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
+ size_t offset = align_offset(chunk2mem(rawsp));
+ char* asp = rawsp + offset;
+ char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
+ mchunkptr sp = (mchunkptr)csp;
+ msegmentptr ss = (msegmentptr)(chunk2mem(sp));
+ mchunkptr tnext = chunk_plus_offset(sp, ssize);
+ mchunkptr p = tnext;
+ int nfences = 0;
+
+ /* reset top to new space */
+ init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
+
+ /* Set up segment record */
+ assert(is_aligned(ss));
+ set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
+ *ss = m->seg; /* Push current record */
+ m->seg.base = tbase;
+ m->seg.size = tsize;
+ m->seg.sflags = mmapped;
+ m->seg.next = ss;
+
+ /* Insert trailing fenceposts */
+ for (;;) {
+ mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
+ p->head = FENCEPOST_HEAD;
+ ++nfences;
+ if ((char*)(&(nextp->head)) < old_end)
+ p = nextp;
+ else
+ break;
+ }
+ assert(nfences >= 2);
+
+ /* Insert the rest of old top into a bin as an ordinary free chunk */
+ if (csp != old_top) {
+ mchunkptr q = (mchunkptr)old_top;
+ size_t psize = csp - old_top;
+ mchunkptr tn = chunk_plus_offset(q, psize);
+ set_free_with_pinuse(q, psize, tn);
+ insert_chunk(m, q, psize);
+ }
+
+ check_top_chunk(m, m->top);
+}
+
+/* -------------------------- System allocation -------------------------- */
+
+/* Get memory from system using MORECORE or MMAP */
+static void* sys_alloc(mstate m, size_t nb) {
+ char* tbase = CMFAIL;
size_t tsize = 0;
flag_t mmap_flag = 0;
size_t asize; /* allocation size */
@@ -228,20 +3021,106 @@ static void *dlmalloc_sys_alloc(struct MallocState *m, size_t nb) {
ensure_initialization();
/* Directly map large chunks, but only if already initialized */
- if (use_mmap(m) && nb >= g_mparams.mmap_threshold && m->topsize != 0) {
- void *mem = mmap_alloc(m, nb);
- if (mem != 0) return mem;
+ if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0) {
+ void* mem = mmap_alloc(m, nb);
+ if (mem != 0)
+ return mem;
}
asize = granularity_align(nb + SYS_ALLOC_PADDING);
- if (asize <= nb) return 0; /* wraparound */
+ if (asize <= nb)
+ return 0; /* wraparound */
if (m->footprint_limit != 0) {
size_t fp = m->footprint + asize;
- if (fp <= m->footprint || fp > m->footprint_limit) return 0;
+ if (fp <= m->footprint || fp > m->footprint_limit)
+ return 0;
}
- if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */
- char *mp = (char *)(dlmalloc_requires_more_vespene_gas(asize));
+ /*
+ Try getting memory in any of three ways (in most-preferred to
+ least-preferred order):
+ 1. A call to MORECORE that can normally contiguously extend memory.
+ (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or
+ or main space is mmapped or a previous contiguous call failed)
+ 2. A call to MMAP new space (disabled if not HAVE_MMAP).
+ Note that under the default settings, if MORECORE is unable to
+ fulfill a request, and HAVE_MMAP is true, then mmap is
+ used as a noncontiguous system allocator. This is a useful backup
+ strategy for systems with holes in address spaces -- in this case
+ sbrk cannot contiguously expand the heap, but mmap may be able to
+ find space.
+ 3. A call to MORECORE that cannot usually contiguously extend memory.
+ (disabled if not HAVE_MORECORE)
+
+ In all cases, we need to request enough bytes from system to ensure
+ we can malloc nb bytes upon success, so pad with enough space for
+ top_foot, plus alignment-pad to make sure we don't lose bytes if
+ not on boundary, and round this up to a granularity unit.
+ */
+
+ if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
+ char* br = CMFAIL;
+ size_t ssize = asize; /* sbrk call size */
+ msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top);
+ ACQUIRE_MALLOC_GLOBAL_LOCK();
+
+ if (ss == 0) { /* First time through or recovery */
+ char* base = (char*)CALL_MORECORE(0);
+ if (base != CMFAIL) {
+ size_t fp;
+ /* Adjust to end on a page boundary */
+ if (!is_page_aligned(base))
+ ssize += (page_align((size_t)base) - (size_t)base);
+ fp = m->footprint + ssize; /* recheck limits */
+ if (ssize > nb && ssize < HALF_MAX_SIZE_T &&
+ (m->footprint_limit == 0 ||
+ (fp > m->footprint && fp <= m->footprint_limit)) &&
+ (br = (char*)(CALL_MORECORE(ssize))) == base) {
+ tbase = base;
+ tsize = ssize;
+ }
+ }
+ }
+ else {
+ /* Subtract out existing available top space from MORECORE request. */
+ ssize = granularity_align(nb - m->topsize + SYS_ALLOC_PADDING);
+ /* Use mem here only if it did continuously extend old space */
+ if (ssize < HALF_MAX_SIZE_T &&
+ (br = (char*)(CALL_MORECORE(ssize))) == ss->base+ss->size) {
+ tbase = br;
+ tsize = ssize;
+ }
+ }
+
+ if (tbase == CMFAIL) { /* Cope with partial failure */
+ if (br != CMFAIL) { /* Try to use/extend the space we did get */
+ if (ssize < HALF_MAX_SIZE_T &&
+ ssize < nb + SYS_ALLOC_PADDING) {
+ size_t esize = granularity_align(nb + SYS_ALLOC_PADDING - ssize);
+ if (esize < HALF_MAX_SIZE_T) {
+ char* end = (char*)CALL_MORECORE(esize);
+ if (end != CMFAIL)
+ ssize += esize;
+ else { /* Can't use; try to release */
+ (void) CALL_MORECORE(-ssize);
+ br = CMFAIL;
+ }
+ }
+ }
+ }
+ if (br != CMFAIL) { /* Use the space we did get */
+ tbase = br;
+ tsize = ssize;
+ }
+ else
+ disable_contiguous(m); /* Don't try contiguous path in the future */
+ }
+
+ RELEASE_MALLOC_GLOBAL_LOCK();
+ }
+
+ if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */
+ char* mp = (char*)(dlmalloc_requires_more_vespene_gas(asize));
if (mp != CMFAIL) {
tbase = mp;
tsize = asize;
@@ -249,25 +3128,47 @@ static void *dlmalloc_sys_alloc(struct MallocState *m, size_t nb) {
}
}
+ if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */
+ if (asize < HALF_MAX_SIZE_T) {
+ char* br = CMFAIL;
+ char* end = CMFAIL;
+ ACQUIRE_MALLOC_GLOBAL_LOCK();
+ br = (char*)(CALL_MORECORE(asize));
+ end = (char*)(CALL_MORECORE(0));
+ RELEASE_MALLOC_GLOBAL_LOCK();
+ if (br != CMFAIL && end != CMFAIL && br < end) {
+ size_t ssize = end - br;
+ if (ssize > nb + TOP_FOOT_SIZE) {
+ tbase = br;
+ tsize = ssize;
+ }
+ }
+ }
+ }
+
if (tbase != CMFAIL) {
+
if ((m->footprint += tsize) > m->max_footprint)
m->max_footprint = m->footprint;
if (!is_initialized(m)) { /* first-time initialization */
- if (m->least_addr == 0 || tbase < m->least_addr) m->least_addr = tbase;
+ if (m->least_addr == 0 || tbase < m->least_addr)
+ m->least_addr = tbase;
m->seg.base = tbase;
m->seg.size = tsize;
m->seg.sflags = mmap_flag;
- m->magic = g_mparams.magic;
+ m->magic = mparams.magic;
m->release_checks = MAX_RELEASE_CHECK_RATE;
init_bins(m);
- if (is_global(m)) {
- dlmalloc_init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
- } else {
- /* Offset top by embedded MallocState */
+#if !ONLY_MSPACES
+ if (is_global(m))
+ init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
+ else
+#endif
+ {
+ /* Offset top by embedded malloc_state */
mchunkptr mn = next_chunk(mem2chunk(m));
- dlmalloc_init_top(
- m, mn, (size_t)((tbase + tsize) - (char *)mn) - TOP_FOOT_SIZE);
+ init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);
}
}
@@ -277,24 +3178,29 @@ static void *dlmalloc_sys_alloc(struct MallocState *m, size_t nb) {
/* Only consider most recent segment if traversal suppressed */
while (sp != 0 && tbase != sp->base + sp->size)
sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
- if (sp != 0 && !is_extern_segment(sp) &&
+ if (sp != 0 &&
+ !is_extern_segment(sp) &&
(sp->sflags & USE_MMAP_BIT) == mmap_flag &&
segment_holds(sp, m->top)) { /* append */
sp->size += tsize;
- dlmalloc_init_top(m, m->top, m->topsize + tsize);
- } else {
- if (tbase < m->least_addr) m->least_addr = tbase;
+ init_top(m, m->top, m->topsize + tsize);
+ }
+ else {
+ if (tbase < m->least_addr)
+ m->least_addr = tbase;
sp = &m->seg;
while (sp != 0 && sp->base != tbase + tsize)
sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
- if (sp != 0 && !is_extern_segment(sp) &&
+ if (sp != 0 &&
+ !is_extern_segment(sp) &&
(sp->sflags & USE_MMAP_BIT) == mmap_flag) {
- char *oldbase = sp->base;
+ char* oldbase = sp->base;
sp->base = tbase;
sp->size += tsize;
- return dlmalloc_prepend_alloc(m, tbase, oldbase, nb);
- } else
- dlmalloc_add_segment(m, tbase, tsize, mmap_flag);
+ return prepend_alloc(m, tbase, oldbase, nb);
+ }
+ else
+ add_segment(m, tbase, tsize, mmap_flag);
}
}
@@ -310,18 +3216,20 @@ static void *dlmalloc_sys_alloc(struct MallocState *m, size_t nb) {
}
}
- enomem();
+ MALLOC_FAILURE_ACTION;
return 0;
}
+/* ----------------------- system deallocation -------------------------- */
+
/* Unmap and unlink any mmapped segments that don't contain used chunks */
-static size_t dlmalloc_release_unused_segments(struct MallocState *m) {
+static size_t release_unused_segments(mstate m) {
size_t released = 0;
int nsegs = 0;
msegmentptr pred = &m->seg;
msegmentptr sp = pred->next;
while (sp != 0) {
- char *base = sp->base;
+ char* base = sp->base;
size_t size = sp->size;
msegmentptr next = sp->next;
++nsegs;
@@ -329,111 +3237,116 @@ static size_t dlmalloc_release_unused_segments(struct MallocState *m) {
mchunkptr p = align_as_chunk(base);
size_t psize = chunksize(p);
/* Can unmap if first chunk holds entire segment and not pinned */
- if (!is_inuse(p) && (char *)p + psize >= base + size - TOP_FOOT_SIZE) {
+ if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {
tchunkptr tp = (tchunkptr)p;
- assert(segment_holds(sp, (char *)sp));
+ assert(segment_holds(sp, (char*)sp));
if (p == m->dv) {
m->dv = 0;
m->dvsize = 0;
- } else {
+ }
+ else {
unlink_large_chunk(m, tp);
}
- if (munmap(base, size) == 0) {
+ if (CALL_MUNMAP(base, size) == 0) {
released += size;
m->footprint -= size;
/* unlink obsoleted record */
sp = pred;
sp->next = next;
- } else { /* back out if cannot unmap */
+ }
+ else { /* back out if cannot unmap */
insert_large_chunk(m, tp, psize);
}
}
}
- if (NO_SEGMENT_TRAVERSAL) { /* scan only first segment */
+ if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */
break;
- }
pred = sp;
sp = next;
}
/* Reset check counter */
- m->release_checks = (((size_t)nsegs > (size_t)MAX_RELEASE_CHECK_RATE)
- ? (size_t)nsegs
- : (size_t)MAX_RELEASE_CHECK_RATE);
+ m->release_checks = (((size_t) nsegs > (size_t) MAX_RELEASE_CHECK_RATE)?
+ (size_t) nsegs : (size_t) MAX_RELEASE_CHECK_RATE);
return released;
}
-int dlmalloc_sys_trim(struct MallocState *m, size_t pad) {
+static int sys_trim(mstate m, size_t pad) {
size_t released = 0;
ensure_initialization();
if (pad < MAX_REQUEST && is_initialized(m)) {
pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
+
if (m->topsize > pad) {
/* Shrink top space in granularity-size units, keeping at least one */
- size_t unit = g_mparams.granularity;
- size_t extra =
- ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit - SIZE_T_ONE) * unit;
- msegmentptr sp = segment_holding(m, (char *)m->top);
+ size_t unit = mparams.granularity;
+ size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
+ SIZE_T_ONE) * unit;
+ msegmentptr sp = segment_holding(m, (char*)m->top);
+
if (!is_extern_segment(sp)) {
if (is_mmapped_segment(sp)) {
- if (HAVE_MMAP && sp->size >= extra &&
+ if (HAVE_MMAP &&
+ sp->size >= extra &&
!has_segment_link(m, sp)) { /* can't shrink if pinned */
size_t newsize = sp->size - extra;
(void)newsize; /* placate people compiling -Wunused-variable */
/* Prefer mremap, fall back to munmap */
- int err = errno;
- if (mremap(sp->base, sp->size, newsize, 0, 0) != MAP_FAILED) {
+ if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
+ (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
released = extra;
- } else {
- errno = err;
- if (!munmap(sp->base + newsize, extra)) {
- released = extra;
- }
}
}
}
+ else if (HAVE_MORECORE) {
+ if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
+ extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
+ ACQUIRE_MALLOC_GLOBAL_LOCK();
+ {
+ /* Make sure end of memory is where we last set it. */
+ char* old_br = (char*)(CALL_MORECORE(0));
+ if (old_br == sp->base + sp->size) {
+ char* rel_br = (char*)(CALL_MORECORE(-extra));
+ char* new_br = (char*)(CALL_MORECORE(0));
+ if (rel_br != CMFAIL && new_br < old_br)
+ released = old_br - new_br;
+ }
+ }
+ RELEASE_MALLOC_GLOBAL_LOCK();
+ }
}
+
if (released != 0) {
sp->size -= released;
m->footprint -= released;
- dlmalloc_init_top(m, m->top, m->topsize - released);
+ init_top(m, m->top, m->topsize - released);
check_top_chunk(m, m->top);
}
}
+
/* Unmap any unused mmapped segments */
- if (HAVE_MMAP) released += dlmalloc_release_unused_segments(m);
+ if (HAVE_MMAP)
+ released += release_unused_segments(m);
+
/* On failure, disable autotrim to avoid repeated failed future calls */
- if (released == 0 && m->topsize > m->trim_check) m->trim_check = SIZE_MAX;
+ if (released == 0 && m->topsize > m->trim_check)
+ m->trim_check = MAX_SIZE_T;
}
- return (released != 0) ? 1 : 0;
-}
-/* ──────────────────────────── setting mparams ────────────────────────── */
-
-#if LOCK_AT_FORK
-static void pre_fork(void) {
- ACQUIRE_LOCK(&(g_dlmalloc)->mutex);
+ return (released != 0)? 1 : 0;
}
-static void post_fork_parent(void) {
- RELEASE_LOCK(&(g_dlmalloc)->mutex);
-}
-static void post_fork_child(void) {
- INITIAL_LOCK(&(g_dlmalloc)->mutex);
-}
-#endif /* LOCK_AT_FORK */
-
-/* ───────────────────────────── statistics ────────────────────────────── */
/* Consolidate and bin a chunk. Differs from exported versions
of free mainly in that the chunk need not be marked as inuse.
*/
-void dlmalloc_dispose_chunk(struct MallocState *m, mchunkptr p, size_t psize) {
+static void dispose_chunk(mstate m, mchunkptr p, size_t psize) {
mchunkptr next = chunk_plus_offset(p, psize);
if (!pinuse(p)) {
mchunkptr prev;
size_t prevsize = p->prev_foot;
if (is_mmapped(p)) {
psize += prevsize + MMAP_FOOT_PAD;
- if (munmap((char *)p - prevsize, psize) == 0) m->footprint -= psize;
+ if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
+ m->footprint -= psize;
return;
}
prev = chunk_minus_offset(p, prevsize);
@@ -442,18 +3355,20 @@ void dlmalloc_dispose_chunk(struct MallocState *m, mchunkptr p, size_t psize) {
if (RTCHECK(ok_address(m, prev))) { /* consolidate backward */
if (p != m->dv) {
unlink_chunk(m, p, prevsize);
- } else if ((next->head & INUSE_BITS) == INUSE_BITS) {
+ }
+ else if ((next->head & INUSE_BITS) == INUSE_BITS) {
m->dvsize = psize;
set_free_with_pinuse(p, psize, next);
return;
}
- } else {
+ }
+ else {
CORRUPTION_ERROR_ACTION(m);
return;
}
}
if (RTCHECK(ok_address(m, next))) {
- if (!cinuse(next)) { /* consolidate forward */
+ if (!cinuse(next)) { /* consolidate forward */
if (next == m->top) {
size_t tsize = m->topsize += psize;
m->top = p;
@@ -463,12 +3378,14 @@ void dlmalloc_dispose_chunk(struct MallocState *m, mchunkptr p, size_t psize) {
m->dvsize = 0;
}
return;
- } else if (next == m->dv) {
+ }
+ else if (next == m->dv) {
size_t dsize = m->dvsize += psize;
m->dv = p;
set_size_and_pinuse_of_free_chunk(p, dsize);
return;
- } else {
+ }
+ else {
size_t nsize = chunksize(next);
psize += nsize;
unlink_chunk(m, next, nsize);
@@ -478,54 +3395,21 @@ void dlmalloc_dispose_chunk(struct MallocState *m, mchunkptr p, size_t psize) {
return;
}
}
- } else {
+ }
+ else {
set_free_with_pinuse(p, psize, next);
}
insert_chunk(m, p, psize);
- } else {
+ }
+ else {
CORRUPTION_ERROR_ACTION(m);
}
}
-/* ──────────────────────────── malloc ─────────────────────────── */
-
-/* allocate a small request from the best fitting chunk in a treebin */
-static void *tmalloc_small(struct MallocState *m, size_t nb) {
- tchunkptr t, v;
- size_t rsize;
- bindex_t i;
- binmap_t leastbit = least_bit(m->treemap);
- compute_bit2idx(leastbit, i);
- v = t = *treebin_at(m, i);
- rsize = chunksize(t) - nb;
- while ((t = leftmost_child(t)) != 0) {
- size_t trem = chunksize(t) - nb;
- if (trem < rsize) {
- rsize = trem;
- v = t;
- }
- }
- if (RTCHECK(ok_address(m, v))) {
- mchunkptr r = chunk_plus_offset(v, nb);
- assert(chunksize(v) == rsize + nb);
- if (RTCHECK(ok_next(v, r))) {
- unlink_large_chunk(m, v);
- if (rsize < MIN_CHUNK_SIZE)
- set_inuse_and_pinuse(m, v, (rsize + nb));
- else {
- set_size_and_pinuse_of_inuse_chunk(m, v, nb);
- set_size_and_pinuse_of_free_chunk(r, rsize);
- replace_dv(m, r, rsize);
- }
- return chunk2mem(v);
- }
- }
- CORRUPTION_ERROR_ACTION(m);
- return 0;
-}
+/* ---------------------------- malloc --------------------------- */
/* allocate a large request from the best fitting chunk in a treebin */
-static void *tmalloc_large(struct MallocState *m, size_t nb) {
+static void* tmalloc_large(mstate m, size_t nb) {
tchunkptr v = 0;
size_t rsize = -nb; /* Unsigned negation */
tchunkptr t;
@@ -534,17 +3418,19 @@ static void *tmalloc_large(struct MallocState *m, size_t nb) {
if ((t = *treebin_at(m, idx)) != 0) {
/* Traverse tree for this bin looking for node with size == nb */
size_t sizebits = nb << leftshift_for_tree_index(idx);
- tchunkptr rst = 0; /* The deepest untaken right subtree */
+ tchunkptr rst = 0; /* The deepest untaken right subtree */
for (;;) {
tchunkptr rt;
size_t trem = chunksize(t) - nb;
if (trem < rsize) {
v = t;
- if ((rsize = trem) == 0) break;
+ if ((rsize = trem) == 0)
+ break;
}
rt = t->child[1];
- t = t->child[(sizebits >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1];
- if (rt != 0 && rt != t) rst = rt;
+ t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
+ if (rt != 0 && rt != t)
+ rst = rt;
if (t == 0) {
t = rst; /* set t to least subtree holding sizes > nb */
break;
@@ -561,6 +3447,7 @@ static void *tmalloc_large(struct MallocState *m, size_t nb) {
t = *treebin_at(m, i);
}
}
+
while (t != 0) { /* find smallest of tree or subtree */
size_t trem = chunksize(t) - nb;
if (trem < rsize) {
@@ -569,6 +3456,7 @@ static void *tmalloc_large(struct MallocState *m, size_t nb) {
}
t = leftmost_child(t);
}
+
/* If dv is a better fit, return 0 so malloc will use it */
if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
if (RTCHECK(ok_address(m, v))) { /* split */
@@ -591,7 +3479,47 @@ static void *tmalloc_large(struct MallocState *m, size_t nb) {
return 0;
}
-void *dlmalloc_impl(size_t bytes, bool takeaction) {
+/* allocate a small request from the best fitting chunk in a treebin */
+static void* tmalloc_small(mstate m, size_t nb) {
+ tchunkptr t, v;
+ size_t rsize;
+ bindex_t i;
+ binmap_t leastbit = least_bit(m->treemap);
+ compute_bit2idx(leastbit, i);
+ v = t = *treebin_at(m, i);
+ rsize = chunksize(t) - nb;
+
+ while ((t = leftmost_child(t)) != 0) {
+ size_t trem = chunksize(t) - nb;
+ if (trem < rsize) {
+ rsize = trem;
+ v = t;
+ }
+ }
+
+ if (RTCHECK(ok_address(m, v))) {
+ mchunkptr r = chunk_plus_offset(v, nb);
+ assert(chunksize(v) == rsize + nb);
+ if (RTCHECK(ok_next(v, r))) {
+ unlink_large_chunk(m, v);
+ if (rsize < MIN_CHUNK_SIZE)
+ set_inuse_and_pinuse(m, v, (rsize + nb));
+ else {
+ set_size_and_pinuse_of_inuse_chunk(m, v, nb);
+ set_size_and_pinuse_of_free_chunk(r, rsize);
+ replace_dv(m, r, rsize);
+ }
+ return chunk2mem(v);
+ }
+ }
+
+ CORRUPTION_ERROR_ACTION(m);
+ return 0;
+}
+
+#if !ONLY_MSPACES
+
+void* dlmalloc(size_t bytes) {
/*
Basic algorithm:
If a small request (< 256 bytes minus per-chunk overhead):
@@ -619,30 +3547,30 @@ void *dlmalloc_impl(size_t bytes, bool takeaction) {
ensure_initialization(); /* initialize in sys_alloc if not using locks */
#endif
- if (!PREACTION(g_dlmalloc)) {
- void *mem;
+ if (!PREACTION(gm)) {
+ void* mem;
size_t nb;
if (bytes <= MAX_SMALL_REQUEST) {
bindex_t idx;
binmap_t smallbits;
- nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(bytes);
+ nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
idx = small_index(nb);
- smallbits = g_dlmalloc->smallmap >> idx;
+ smallbits = gm->smallmap >> idx;
if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
mchunkptr b, p;
- idx += ~smallbits & 1; /* Uses next bin if idx empty */
- b = smallbin_at(g_dlmalloc, idx);
+ idx += ~smallbits & 1; /* Uses next bin if idx empty */
+ b = smallbin_at(gm, idx);
p = b->fd;
assert(chunksize(p) == small_index2size(idx));
- unlink_first_small_chunk(g_dlmalloc, b, p, idx);
- set_inuse_and_pinuse(g_dlmalloc, p, small_index2size(idx));
+ unlink_first_small_chunk(gm, b, p, idx);
+ set_inuse_and_pinuse(gm, p, small_index2size(idx));
mem = chunk2mem(p);
- check_malloced_chunk(g_dlmalloc, mem, nb);
+ check_malloced_chunk(gm, mem, nb);
goto postaction;
}
- else if (nb > g_dlmalloc->dvsize) {
+ else if (nb > gm->dvsize) {
if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
mchunkptr b, p, r;
size_t rsize;
@@ -650,108 +3578,108 @@ void *dlmalloc_impl(size_t bytes, bool takeaction) {
binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
binmap_t leastbit = least_bit(leftbits);
compute_bit2idx(leastbit, i);
- b = smallbin_at(g_dlmalloc, i);
+ b = smallbin_at(gm, i);
p = b->fd;
assert(chunksize(p) == small_index2size(i));
- unlink_first_small_chunk(g_dlmalloc, b, p, i);
+ unlink_first_small_chunk(gm, b, p, i);
rsize = small_index2size(i) - nb;
/* Fit here cannot be remainderless if 4byte sizes */
if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
- set_inuse_and_pinuse(g_dlmalloc, p, small_index2size(i));
+ set_inuse_and_pinuse(gm, p, small_index2size(i));
else {
- set_size_and_pinuse_of_inuse_chunk(g_dlmalloc, p, nb);
+ set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
r = chunk_plus_offset(p, nb);
set_size_and_pinuse_of_free_chunk(r, rsize);
- replace_dv(g_dlmalloc, r, rsize);
+ replace_dv(gm, r, rsize);
}
mem = chunk2mem(p);
- check_malloced_chunk(g_dlmalloc, mem, nb);
+ check_malloced_chunk(gm, mem, nb);
goto postaction;
}
- else if (g_dlmalloc->treemap != 0 &&
- (mem = tmalloc_small(g_dlmalloc, nb)) != 0) {
- check_malloced_chunk(g_dlmalloc, mem, nb);
+ else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
+ check_malloced_chunk(gm, mem, nb);
goto postaction;
}
}
- } else if (bytes >= MAX_REQUEST) {
- nb = SIZE_MAX; /* Too big to allocate. Force failure (in sys alloc) */
- } else {
+ }
+ else if (bytes >= MAX_REQUEST)
+ nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
+ else {
nb = pad_request(bytes);
- if (g_dlmalloc->treemap != 0 &&
- (mem = tmalloc_large(g_dlmalloc, nb)) != 0) {
- check_malloced_chunk(g_dlmalloc, mem, nb);
+ if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
+ check_malloced_chunk(gm, mem, nb);
goto postaction;
}
}
- if (nb <= g_dlmalloc->dvsize) {
- size_t rsize = g_dlmalloc->dvsize - nb;
- mchunkptr p = g_dlmalloc->dv;
+ if (nb <= gm->dvsize) {
+ size_t rsize = gm->dvsize - nb;
+ mchunkptr p = gm->dv;
if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
- mchunkptr r = g_dlmalloc->dv = chunk_plus_offset(p, nb);
- g_dlmalloc->dvsize = rsize;
+ mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
+ gm->dvsize = rsize;
set_size_and_pinuse_of_free_chunk(r, rsize);
- set_size_and_pinuse_of_inuse_chunk(g_dlmalloc, p, nb);
- } else { /* exhaust dv */
- size_t dvs = g_dlmalloc->dvsize;
- g_dlmalloc->dvsize = 0;
- g_dlmalloc->dv = 0;
- set_inuse_and_pinuse(g_dlmalloc, p, dvs);
+ set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
+ }
+ else { /* exhaust dv */
+ size_t dvs = gm->dvsize;
+ gm->dvsize = 0;
+ gm->dv = 0;
+ set_inuse_and_pinuse(gm, p, dvs);
}
mem = chunk2mem(p);
- check_malloced_chunk(g_dlmalloc, mem, nb);
+ check_malloced_chunk(gm, mem, nb);
goto postaction;
}
- else if (nb < g_dlmalloc->topsize) { /* Split top */
- size_t rsize = g_dlmalloc->topsize -= nb;
- mchunkptr p = g_dlmalloc->top;
- mchunkptr r = g_dlmalloc->top = chunk_plus_offset(p, nb);
+ else if (nb < gm->topsize) { /* Split top */
+ size_t rsize = gm->topsize -= nb;
+ mchunkptr p = gm->top;
+ mchunkptr r = gm->top = chunk_plus_offset(p, nb);
r->head = rsize | PINUSE_BIT;
- set_size_and_pinuse_of_inuse_chunk(g_dlmalloc, p, nb);
+ set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
mem = chunk2mem(p);
- check_top_chunk(g_dlmalloc, g_dlmalloc->top);
- check_malloced_chunk(g_dlmalloc, mem, nb);
+ check_top_chunk(gm, gm->top);
+ check_malloced_chunk(gm, mem, nb);
goto postaction;
}
- mem = dlmalloc_sys_alloc(g_dlmalloc, nb);
+ mem = sys_alloc(gm, nb);
+ POSTACTION(gm);
+ if (mem == MAP_FAILED && weaken(__oom_hook)) {
+ weaken(__oom_hook)(bytes);
+ }
+ return mem;
postaction:
- POSTACTION(g_dlmalloc);
- return takeaction ? AddressBirthAction(mem) : mem;
+ POSTACTION(gm);
+ return mem;
}
return 0;
}
-void dlfree(void *mem) {
- /* asan runtime depends on this function */
+/* ---------------------------- free --------------------------- */
+
+void dlfree(void* mem) {
/*
Consolidate freed chunks with preceeding or succeeding bordering
free chunks, if they exist, and then place in a bin. Intermixed
with special cases for top, dv, mmapped chunks, and usage errors.
*/
- if (mem != 0) {
- mem = AddressDeathAction(mem);
- mchunkptr p = mem2chunk(mem);
+ if (mem != 0) {
+ mchunkptr p = mem2chunk(mem);
#if FOOTERS
- struct MallocState *fm = get_mstate_for(p);
- if (!ok_magic(fm)) { /* HELLO
- * TRY #1: rm -rf o && make -j8 -O MODE=dbg
- * TRY #2: gdb: p/x (long*)(p+(*(long*)(p-8)&~(1|2|3)))
- * gdb: watch *0xDEADBEEF
- */
+ mstate fm = get_mstate_for(p);
+ if (!ok_magic(fm)) {
USAGE_ERROR_ACTION(fm, p);
return;
}
#else /* FOOTERS */
-#define fm g_dlmalloc
+#define fm gm
#endif /* FOOTERS */
-
if (!PREACTION(fm)) {
check_inuse_chunk(fm, p);
if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
@@ -761,28 +3689,31 @@ void dlfree(void *mem) {
size_t prevsize = p->prev_foot;
if (is_mmapped(p)) {
psize += prevsize + MMAP_FOOT_PAD;
- if (munmap((char *)p - prevsize, psize) == 0)
+ if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
fm->footprint -= psize;
goto postaction;
- } else {
+ }
+ else {
mchunkptr prev = chunk_minus_offset(p, prevsize);
psize += prevsize;
p = prev;
if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
if (p != fm->dv) {
unlink_chunk(fm, p, prevsize);
- } else if ((next->head & INUSE_BITS) == INUSE_BITS) {
+ }
+ else if ((next->head & INUSE_BITS) == INUSE_BITS) {
fm->dvsize = psize;
set_free_with_pinuse(p, psize, next);
goto postaction;
}
- } else
+ }
+ else
goto erroraction;
}
}
if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
- if (!cinuse(next)) { /* consolidate forward */
+ if (!cinuse(next)) { /* consolidate forward */
if (next == fm->top) {
size_t tsize = fm->topsize += psize;
fm->top = p;
@@ -791,16 +3722,17 @@ void dlfree(void *mem) {
fm->dv = 0;
fm->dvsize = 0;
}
- if (should_trim(fm, tsize)) {
- dlmalloc_sys_trim(fm, 0);
- }
+ if (should_trim(fm, tsize))
+ sys_trim(fm, 0);
goto postaction;
- } else if (next == fm->dv) {
+ }
+ else if (next == fm->dv) {
size_t dsize = fm->dvsize += psize;
fm->dv = p;
set_size_and_pinuse_of_free_chunk(p, dsize);
goto postaction;
- } else {
+ }
+ else {
size_t nsize = chunksize(next);
psize += nsize;
unlink_chunk(fm, next, nsize);
@@ -810,24 +3742,25 @@ void dlfree(void *mem) {
goto postaction;
}
}
- } else {
- set_free_with_pinuse(p, psize, next);
}
+ else
+ set_free_with_pinuse(p, psize, next);
if (is_small(psize)) {
insert_small_chunk(fm, p, psize);
check_free_chunk(fm, p);
- } else {
+ }
+ else {
tchunkptr tp = (tchunkptr)p;
insert_large_chunk(fm, tp, psize);
check_free_chunk(fm, p);
- if (--fm->release_checks == 0) dlmalloc_release_unused_segments(fm);
+ if (--fm->release_checks == 0)
+ release_unused_segments(fm);
}
goto postaction;
}
}
erroraction:
- if (IsArenaFrame((intptr_t)p >> 16)) return;
USAGE_ERROR_ACTION(fm, p);
postaction:
POSTACTION(fm);
@@ -838,71 +3771,128 @@ void dlfree(void *mem) {
#endif /* FOOTERS */
}
-size_t dlmalloc_usable_size(const void *mem) {
- /* asan runtime depends on this function */
- if (mem != 0) {
- mchunkptr p = mem2chunk(mem);
- if (is_inuse(p)) return chunksize(p) - overhead_for(p);
+void* dlcalloc(size_t n_elements, size_t elem_size) {
+ void* mem;
+ size_t req = 0;
+ if (n_elements != 0) {
+ req = n_elements * elem_size;
+ if (((n_elements | elem_size) & ~(size_t)0xffff) &&
+ (req / n_elements != elem_size))
+ req = MAX_SIZE_T; /* force downstream failure on overflow */
}
- return 0;
+ mem = dlmalloc(req);
+ if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
+ bzero(mem, req);
+ return mem;
}
-textstartup void dlmalloc_init(void) {
-#ifdef NEED_GLOBAL_LOCK_INIT
- if (malloc_global_mutex_status <= 0) init_malloc_global_mutex();
-#endif
- ACQUIRE_MALLOC_GLOBAL_LOCK();
- if (g_mparams.magic == 0) {
- size_t magic;
- size_t psize = PAGESIZE;
- size_t gsize = DEFAULT_GRANULARITY;
- /* Sanity-check configuration:
- size_t must be unsigned and as wide as pointer type.
- ints must be at least 4 bytes.
- alignment must be at least 8.
- Alignment, min chunk size, and page size must all be powers of 2.
- */
- if ((sizeof(size_t) != sizeof(char *)) || (SIZE_MAX < MIN_CHUNK_SIZE) ||
- (sizeof(int) < 4) || (MALLOC_ALIGNMENT < (size_t)8U) ||
- ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - SIZE_T_ONE)) != 0) ||
- ((MCHUNK_SIZE & (MCHUNK_SIZE - SIZE_T_ONE)) != 0) ||
- ((gsize & (gsize - SIZE_T_ONE)) != 0) ||
- ((psize & (psize - SIZE_T_ONE)) != 0))
- MALLOC_ABORT;
- g_mparams.granularity = gsize;
- g_mparams.page_size = psize;
- g_mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
- g_mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
- g_mparams.default_mflags =
- USE_LOCK_BIT | USE_MMAP_BIT | USE_NONCONTIGUOUS_BIT;
- /* Set up lock for main malloc area */
- g_dlmalloc->mflags = g_mparams.default_mflags;
- (void)INITIAL_LOCK(&g_dlmalloc->mutex);
-#if LOCK_AT_FORK
- pthread_atfork(&pre_fork, &post_fork_parent, &post_fork_child);
-#endif
- magic = kStartTsc;
- magic |= (size_t)8U; /* ensure nonzero */
- magic &= ~(size_t)7U; /* improve chances of fault for bad values */
- /* Until memory modes commonly available, use volatile-write */
- (*(volatile size_t *)(&(g_mparams.magic))) = magic;
+#endif /* !ONLY_MSPACES */
+
+/* ------------ Internal support for realloc, memalign, etc -------------- */
+
+/* Try to realloc; only in-place unless can_move true */
+static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb,
+ int can_move) {
+ mchunkptr newp = 0;
+ size_t oldsize = chunksize(p);
+ mchunkptr next = chunk_plus_offset(p, oldsize);
+ if (RTCHECK(ok_address(m, p) && ok_inuse(p) &&
+ ok_next(p, next) && ok_pinuse(next))) {
+ if (is_mmapped(p)) {
+ newp = mmap_resize(m, p, nb, can_move);
+ }
+ else if (oldsize >= nb) { /* already big enough */
+ size_t rsize = oldsize - nb;
+ if (rsize >= MIN_CHUNK_SIZE) { /* split off remainder */
+ mchunkptr r = chunk_plus_offset(p, nb);
+ set_inuse(m, p, nb);
+ set_inuse(m, r, rsize);
+ dispose_chunk(m, r, rsize);
+ }
+ newp = p;
+ }
+ else if (next == m->top) { /* extend into top */
+ if (oldsize + m->topsize > nb) {
+ size_t newsize = oldsize + m->topsize;
+ size_t newtopsize = newsize - nb;
+ mchunkptr newtop = chunk_plus_offset(p, nb);
+ set_inuse(m, p, nb);
+ newtop->head = newtopsize |PINUSE_BIT;
+ m->top = newtop;
+ m->topsize = newtopsize;
+ newp = p;
+ }
+ }
+ else if (next == m->dv) { /* extend into dv */
+ size_t dvs = m->dvsize;
+ if (oldsize + dvs >= nb) {
+ size_t dsize = oldsize + dvs - nb;
+ if (dsize >= MIN_CHUNK_SIZE) {
+ mchunkptr r = chunk_plus_offset(p, nb);
+ mchunkptr n = chunk_plus_offset(r, dsize);
+ set_inuse(m, p, nb);
+ set_size_and_pinuse_of_free_chunk(r, dsize);
+ clear_pinuse(n);
+ m->dvsize = dsize;
+ m->dv = r;
+ }
+ else { /* exhaust dv */
+ size_t newsize = oldsize + dvs;
+ set_inuse(m, p, newsize);
+ m->dvsize = 0;
+ m->dv = 0;
+ }
+ newp = p;
+ }
+ }
+ else if (!cinuse(next)) { /* extend into next free chunk */
+ size_t nextsize = chunksize(next);
+ if (oldsize + nextsize >= nb) {
+ size_t rsize = oldsize + nextsize - nb;
+ unlink_chunk(m, next, nextsize);
+ if (rsize < MIN_CHUNK_SIZE) {
+ size_t newsize = oldsize + nextsize;
+ set_inuse(m, p, newsize);
+ }
+ else {
+ mchunkptr r = chunk_plus_offset(p, nb);
+ set_inuse(m, p, nb);
+ set_inuse(m, r, rsize);
+ dispose_chunk(m, r, rsize);
+ }
+ newp = p;
+ }
+ }
}
- RELEASE_MALLOC_GLOBAL_LOCK();
+ else {
+ USAGE_ERROR_ACTION(m, chunk2mem(p));
+ }
+ return newp;
}
-void *dlmemalign_impl(struct MallocState *m, size_t al, size_t bytes) {
- char *br, *pos, *mem = 0;
- mchunkptr p, newp, remainder;
- size_t nb, req, size, leadsize, newsize, remainder_size;
- if (bytes < MAX_REQUEST - al) {
- /* alignment is 32+ bytes rounded up to nearest two power */
- al = 2ul << bsrl(MAX(MIN_CHUNK_SIZE, al) - 1);
- nb = request2size(bytes);
- req = nb + al + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
- if ((mem = dlmalloc_impl(req, false))) {
- p = mem2chunk(mem);
- if (PREACTION(m)) return 0;
- if ((((size_t)(mem)) & (al - 1))) { /* misaligned */
+static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
+ void* mem = 0;
+ if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
+ alignment = MIN_CHUNK_SIZE;
+ if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */
+ size_t a = MALLOC_ALIGNMENT << 1;
+ while (a < alignment) a <<= 1;
+ alignment = a;
+ }
+ if (bytes >= MAX_REQUEST - alignment) {
+ if (m != 0) { /* Test isn't needed but avoids compiler warning */
+ MALLOC_FAILURE_ACTION;
+ }
+ }
+ else {
+ size_t nb = request2size(bytes);
+ size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
+ mem = internal_malloc(m, req);
+ if (mem != 0) {
+ mchunkptr p = mem2chunk(mem);
+ if (PREACTION(m))
+ return 0;
+ if ((((size_t)(mem)) & (alignment - 1)) != 0) { /* misaligned */
/*
Find an aligned spot inside chunk. Since we need to give
back leading space in a chunk of at least MIN_CHUNK_SIZE, if
@@ -911,54 +3901,1068 @@ void *dlmemalign_impl(struct MallocState *m, size_t al, size_t bytes) {
We've allocated enough total room so that this is always
possible.
*/
- br = (char *)mem2chunk(ROUNDUP((uintptr_t)mem, al));
- pos = (size_t)(br - (char *)(p)) >= MIN_CHUNK_SIZE ? br : br + al;
- newp = (mchunkptr)pos;
- leadsize = pos - (char *)(p);
- newsize = chunksize(p) - leadsize;
+ char* br = (char*)mem2chunk((size_t)(((size_t)((char*)mem + alignment -
+ SIZE_T_ONE)) &
+ -alignment));
+ char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
+ br : br+alignment;
+ mchunkptr newp = (mchunkptr)pos;
+ size_t leadsize = pos - (char*)(p);
+ size_t newsize = chunksize(p) - leadsize;
+
if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */
newp->prev_foot = p->prev_foot + leadsize;
newp->head = newsize;
- } else { /* Otherwise, give back leader, use the rest */
+ }
+ else { /* Otherwise, give back leader, use the rest */
set_inuse(m, newp, newsize);
set_inuse(m, p, leadsize);
- dlmalloc_dispose_chunk(m, p, leadsize);
+ dispose_chunk(m, p, leadsize);
}
p = newp;
}
+
/* Give back spare room at the end */
if (!is_mmapped(p)) {
- size = chunksize(p);
+ size_t size = chunksize(p);
if (size > nb + MIN_CHUNK_SIZE) {
- remainder_size = size - nb;
- remainder = chunk_plus_offset(p, nb);
+ size_t remainder_size = size - nb;
+ mchunkptr remainder = chunk_plus_offset(p, nb);
set_inuse(m, p, nb);
set_inuse(m, remainder, remainder_size);
- dlmalloc_dispose_chunk(m, remainder, remainder_size);
+ dispose_chunk(m, remainder, remainder_size);
}
}
+
mem = chunk2mem(p);
- assert(chunksize(p) >= nb);
- assert(!((size_t)mem & (al - 1)));
+ assert (chunksize(p) >= nb);
+ assert(((size_t)mem & (alignment - 1)) == 0);
check_inuse_chunk(m, p);
POSTACTION(m);
}
- return AddressBirthAction(mem);
+ }
+ return mem;
+}
+
+/*
+ Common support for independent_X routines, handling
+ all of the combinations that can result.
+ The opts arg has:
+ bit 0 set if all elements are same size (using sizes[0])
+ bit 1 set if elements should be zeroed
+*/
+static void** ialloc(mstate m,
+ size_t n_elements,
+ size_t* sizes,
+ int opts,
+ void* chunks[]) {
+
+ size_t element_size; /* chunksize of each element, if all same */
+ size_t contents_size; /* total size of elements */
+ size_t array_size; /* request size of pointer array */
+ void* mem; /* malloced aggregate space */
+ mchunkptr p; /* corresponding chunk */
+ size_t remainder_size; /* remaining bytes while splitting */
+ void** marray; /* either "chunks" or malloced ptr array */
+ mchunkptr array_chunk; /* chunk for malloced ptr array */
+ flag_t was_enabled; /* to disable mmap */
+ size_t size;
+ size_t i;
+
+ ensure_initialization();
+ /* compute array length, if needed */
+ if (chunks != 0) {
+ if (n_elements == 0)
+ return chunks; /* nothing to do */
+ marray = chunks;
+ array_size = 0;
+ }
+ else {
+ /* if empty req, must still return chunk representing empty array */
+ if (n_elements == 0)
+ return (void**)internal_malloc(m, 0);
+ marray = 0;
+ array_size = request2size(n_elements * (sizeof(void*)));
+ }
+
+ /* compute total element size */
+ if (opts & 0x1) { /* all-same-size */
+ element_size = request2size(*sizes);
+ contents_size = n_elements * element_size;
+ }
+ else { /* add up all the sizes */
+ element_size = 0;
+ contents_size = 0;
+ for (i = 0; i != n_elements; ++i)
+ contents_size += request2size(sizes[i]);
+ }
+
+ size = contents_size + array_size;
+
+ /*
+ Allocate the aggregate chunk. First disable direct-mmapping so
+ malloc won't use it, since we would not be able to later
+ free/realloc space internal to a segregated mmap region.
+ */
+ was_enabled = use_mmap(m);
+ disable_mmap(m);
+ mem = internal_malloc(m, size - CHUNK_OVERHEAD);
+ if (was_enabled)
+ enable_mmap(m);
+ if (mem == 0)
+ return 0;
+
+ if (PREACTION(m)) return 0;
+ p = mem2chunk(mem);
+ remainder_size = chunksize(p);
+
+ assert(!is_mmapped(p));
+
+ if (opts & 0x2) { /* optionally clear the elements */
+ bzero((size_t*)mem, remainder_size - SIZE_T_SIZE - array_size);
+ }
+
+ /* If not provided, allocate the pointer array as final part of chunk */
+ if (marray == 0) {
+ size_t array_chunk_size;
+ array_chunk = chunk_plus_offset(p, contents_size);
+ array_chunk_size = remainder_size - contents_size;
+ marray = (void**) (chunk2mem(array_chunk));
+ set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
+ remainder_size = contents_size;
+ }
+
+ /* split out elements */
+ for (i = 0; ; ++i) {
+ marray[i] = chunk2mem(p);
+ if (i != n_elements-1) {
+ if (element_size != 0)
+ size = element_size;
+ else
+ size = request2size(sizes[i]);
+ remainder_size -= size;
+ set_size_and_pinuse_of_inuse_chunk(m, p, size);
+ p = chunk_plus_offset(p, size);
+ }
+ else { /* the final element absorbs any overallocation slop */
+ set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
+ break;
+ }
+ }
+
+#if DEBUG
+ if (marray != chunks) {
+ /* final element must have exactly exhausted chunk */
+ if (element_size != 0) {
+ assert(remainder_size == element_size);
+ }
+ else {
+ assert(remainder_size == request2size(sizes[i]));
+ }
+ check_inuse_chunk(m, mem2chunk(marray));
+ }
+ for (i = 0; i != n_elements; ++i)
+ check_inuse_chunk(m, mem2chunk(marray[i]));
+
+#endif /* DEBUG */
+
+ POSTACTION(m);
+ return marray;
+}
+
+/* Try to free all pointers in the given array.
+ Note: this could be made faster, by delaying consolidation,
+ at the price of disabling some user integrity checks, We
+ still optimize some consolidations by combining adjacent
+ chunks before freeing, which will occur often if allocated
+ with ialloc or the array is sorted.
+*/
+static size_t internal_bulk_free(mstate m, void* array[], size_t nelem) {
+ size_t unfreed = 0;
+ if (!PREACTION(m)) {
+ void** a;
+ void** fence = &(array[nelem]);
+ for (a = array; a != fence; ++a) {
+ void* mem = *a;
+ if (mem != 0) {
+ mchunkptr p = mem2chunk(mem);
+ size_t psize = chunksize(p);
+#if FOOTERS
+ if (get_mstate_for(p) != m) {
+ ++unfreed;
+ continue;
+ }
+#endif
+ check_inuse_chunk(m, p);
+ *a = 0;
+ if (RTCHECK(ok_address(m, p) && ok_inuse(p))) {
+ void ** b = a + 1; /* try to merge with next chunk */
+ mchunkptr next = next_chunk(p);
+ if (b != fence && *b == chunk2mem(next)) {
+ size_t newsize = chunksize(next) + psize;
+ set_inuse(m, p, newsize);
+ *b = chunk2mem(p);
+ }
+ else
+ dispose_chunk(m, p, psize);
+ }
+ else {
+ CORRUPTION_ERROR_ACTION(m);
+ break;
+ }
+ }
+ }
+ if (should_trim(m, m->topsize))
+ sys_trim(m, 0);
+ POSTACTION(m);
+ }
+ return unfreed;
+}
+
+/* Traversal */
+#if MALLOC_INSPECT_ALL
+static void internal_inspect_all(mstate m,
+ void(*handler)(void *start,
+ void *end,
+ size_t used_bytes,
+ void* callback_arg),
+ void* arg) {
+ if (is_initialized(m)) {
+ mchunkptr top = m->top;
+ msegmentptr s;
+ for (s = &m->seg; s != 0; s = s->next) {
+ mchunkptr q = align_as_chunk(s->base);
+ while (segment_holds(s, q) && q->head != FENCEPOST_HEAD) {
+ mchunkptr next = next_chunk(q);
+ size_t sz = chunksize(q);
+ size_t used;
+ void* start;
+ if (is_inuse(q)) {
+ used = sz - CHUNK_OVERHEAD; /* must not be mmapped */
+ start = chunk2mem(q);
+ }
+ else {
+ used = 0;
+ if (is_small(sz)) { /* offset by possible bookkeeping */
+ start = (void*)((char*)q + sizeof(struct malloc_chunk));
+ }
+ else {
+ start = (void*)((char*)q + sizeof(struct malloc_tree_chunk));
+ }
+ }
+ if (start < (void*)next) /* skip if all space is bookkeeping */
+ handler(start, next, used, arg);
+ if (q == top)
+ break;
+ q = next;
+ }
+ }
+ }
+}
+#endif /* MALLOC_INSPECT_ALL */
+
+/* ------------------ Exported realloc, memalign, etc -------------------- */
+
+#if !ONLY_MSPACES
+
+void* dlrealloc(void* oldmem, size_t bytes) {
+ void* mem = 0;
+ if (oldmem == 0) {
+ mem = dlmalloc(bytes);
+ }
+ else if (bytes >= MAX_REQUEST) {
+ MALLOC_FAILURE_ACTION;
+ }
+#ifdef REALLOC_ZERO_BYTES_FREES
+ else if (bytes == 0) {
+ dlfree(oldmem);
+ }
+#endif /* REALLOC_ZERO_BYTES_FREES */
+ else {
+ size_t nb = request2size(bytes);
+ mchunkptr oldp = mem2chunk(oldmem);
+#if ! FOOTERS
+ mstate m = gm;
+#else /* FOOTERS */
+ mstate m = get_mstate_for(oldp);
+ if (!ok_magic(m)) {
+ USAGE_ERROR_ACTION(m, oldmem);
+ return 0;
+ }
+#endif /* FOOTERS */
+ if (!PREACTION(m)) {
+ mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1);
+ POSTACTION(m);
+ if (newp != 0) {
+ check_inuse_chunk(m, newp);
+ mem = chunk2mem(newp);
+ }
+ else {
+ mem = internal_malloc(m, bytes);
+ if (mem != 0) {
+ size_t oc = chunksize(oldp) - overhead_for(oldp);
+ memcpy(mem, oldmem, (oc < bytes)? oc : bytes);
+ internal_free(m, oldmem);
+ }
+ }
+ }
+ }
+ return mem;
+}
+
+void* dlrealloc_in_place(void* oldmem, size_t bytes) {
+ void* mem = 0;
+ if (oldmem != 0) {
+ if (bytes >= MAX_REQUEST) {
+ MALLOC_FAILURE_ACTION;
+ }
+ else {
+ size_t nb = request2size(bytes);
+ mchunkptr oldp = mem2chunk(oldmem);
+#if ! FOOTERS
+ mstate m = gm;
+#else /* FOOTERS */
+ mstate m = get_mstate_for(oldp);
+ if (!ok_magic(m)) {
+ USAGE_ERROR_ACTION(m, oldmem);
+ return 0;
+ }
+#endif /* FOOTERS */
+ if (!PREACTION(m)) {
+ mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0);
+ POSTACTION(m);
+ if (newp == oldp) {
+ check_inuse_chunk(m, newp);
+ mem = oldmem;
+ }
+ }
+ }
+ }
+ return mem;
+}
+
+void* dlmemalign(size_t alignment, size_t bytes) {
+ if (alignment <= MALLOC_ALIGNMENT) {
+ return dlmalloc(bytes);
+ }
+ return internal_memalign(gm, alignment, bytes);
+}
+
+int dlposix_memalign(void** pp, size_t alignment, size_t bytes) {
+ void* mem = 0;
+ if (alignment == MALLOC_ALIGNMENT)
+ mem = dlmalloc(bytes);
+ else {
+ size_t d = alignment / sizeof(void*);
+ size_t r = alignment % sizeof(void*);
+ if (r != 0 || d == 0 || (d & (d-SIZE_T_ONE)) != 0)
+ return EINVAL;
+ else if (bytes <= MAX_REQUEST - alignment) {
+ if (alignment < MIN_CHUNK_SIZE)
+ alignment = MIN_CHUNK_SIZE;
+ mem = internal_memalign(gm, alignment, bytes);
+ }
+ }
+ if (!mem) {
+ return ENOMEM;
} else {
- enomem();
+ *pp = mem;
return 0;
}
}
-void *dlmalloc(size_t bytes) {
- return dlmalloc_impl(bytes, true);
+void* dlvalloc(size_t bytes) {
+ size_t pagesz;
+ ensure_initialization();
+ pagesz = mparams.page_size;
+ return dlmemalign(pagesz, bytes);
}
-void *dlmemalign(size_t alignment, size_t bytes) {
- /* asan runtime depends on this function */
- if (alignment <= MALLOC_ALIGNMENT) {
- return dlmalloc_impl(bytes, true);
- } else {
- return dlmemalign_impl(g_dlmalloc, alignment, bytes);
+void* dlpvalloc(size_t bytes) {
+ size_t pagesz;
+ ensure_initialization();
+ pagesz = mparams.page_size;
+ return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
+}
+
+void** dlindependent_calloc(size_t n_elements, size_t elem_size,
+ void* chunks[]) {
+ size_t sz = elem_size; /* serves as 1-element array */
+ return ialloc(gm, n_elements, &sz, 3, chunks);
+}
+
+void** dlindependent_comalloc(size_t n_elements, size_t sizes[],
+ void* chunks[]) {
+ return ialloc(gm, n_elements, sizes, 0, chunks);
+}
+
+size_t dlbulk_free(void* array[], size_t nelem) {
+ return internal_bulk_free(gm, array, nelem);
+}
+
+#if MALLOC_INSPECT_ALL
+void dlmalloc_inspect_all(void(*handler)(void *start,
+ void *end,
+ size_t used_bytes,
+ void* callback_arg),
+ void* arg) {
+ ensure_initialization();
+ if (!PREACTION(gm)) {
+ internal_inspect_all(gm, handler, arg);
+ POSTACTION(gm);
}
}
+#endif /* MALLOC_INSPECT_ALL */
+
+int dlmalloc_trim(size_t pad) {
+ int result = 0;
+ ensure_initialization();
+ if (!PREACTION(gm)) {
+ result = sys_trim(gm, pad);
+ POSTACTION(gm);
+ }
+ return result;
+}
+
+size_t dlmalloc_footprint(void) {
+ return gm->footprint;
+}
+
+size_t dlmalloc_max_footprint(void) {
+ return gm->max_footprint;
+}
+
+size_t dlmalloc_footprint_limit(void) {
+ size_t maf = gm->footprint_limit;
+ return maf == 0 ? MAX_SIZE_T : maf;
+}
+
+size_t dlmalloc_set_footprint_limit(size_t bytes) {
+ size_t result; /* invert sense of 0 */
+ if (bytes == 0)
+ result = granularity_align(1); /* Use minimal size */
+ if (bytes == MAX_SIZE_T)
+ result = 0; /* disable */
+ else
+ result = granularity_align(bytes);
+ return gm->footprint_limit = result;
+}
+
+#if !NO_MALLINFO
+struct mallinfo dlmallinfo(void) {
+ return internal_mallinfo(gm);
+}
+#endif /* NO_MALLINFO */
+
+#if !NO_MALLOC_STATS
+void dlmalloc_stats() {
+ internal_malloc_stats(gm);
+}
+#endif /* NO_MALLOC_STATS */
+
+int dlmallopt(int param_number, int value) {
+ return change_mparam(param_number, value);
+}
+
+size_t dlmalloc_usable_size(void* mem) {
+ mchunkptr p;
+ size_t bytes;
+ if (mem) {
+ p = mem2chunk(mem);
+ if (is_inuse(p)) {
+ bytes = chunksize(p) - overhead_for(p);
+ } else {
+ bytes = 0;
+ }
+ } else {
+ bytes = 0;
+ }
+ return bytes;
+}
+
+#endif /* !ONLY_MSPACES */
+
+/* ----------------------------- user mspaces ---------------------------- */
+
+#if MSPACES
+
+static mstate init_user_mstate(char* tbase, size_t tsize) {
+ size_t msize = pad_request(sizeof(struct malloc_state));
+ mchunkptr mn;
+ mchunkptr msp = align_as_chunk(tbase);
+ mstate m = (mstate)(chunk2mem(msp));
+ bzero(m, msize);
+ (void)INITIAL_LOCK(&m->mutex);
+ msp->head = (msize|INUSE_BITS);
+ m->seg.base = m->least_addr = tbase;
+ m->seg.size = m->footprint = m->max_footprint = tsize;
+ m->magic = mparams.magic;
+ m->release_checks = MAX_RELEASE_CHECK_RATE;
+ m->mflags = mparams.default_mflags;
+ m->extp = 0;
+ m->exts = 0;
+ disable_contiguous(m);
+ init_bins(m);
+ mn = next_chunk(mem2chunk(m));
+ init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
+ check_top_chunk(m, m->top);
+ return m;
+}
+
+mspace create_mspace(size_t capacity, int locked) {
+ mstate m = 0;
+ size_t msize;
+ ensure_initialization();
+ msize = pad_request(sizeof(struct malloc_state));
+ if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
+ size_t rs = ((capacity == 0)? mparams.granularity :
+ (capacity + TOP_FOOT_SIZE + msize));
+ size_t tsize = granularity_align(rs);
+ char* tbase = (char*)(dlmalloc_requires_more_vespene_gas(tsize));
+ if (tbase != CMFAIL) {
+ m = init_user_mstate(tbase, tsize);
+ m->seg.sflags = USE_MMAP_BIT;
+ set_lock(m, locked);
+ }
+ }
+ return (mspace)m;
+}
+
+mspace create_mspace_with_base(void* base, size_t capacity, int locked) {
+ mstate m = 0;
+ size_t msize;
+ ensure_initialization();
+ msize = pad_request(sizeof(struct malloc_state));
+ if (capacity > msize + TOP_FOOT_SIZE &&
+ capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
+ m = init_user_mstate((char*)base, capacity);
+ m->seg.sflags = EXTERN_BIT;
+ set_lock(m, locked);
+ }
+ return (mspace)m;
+}
+
+int mspace_track_large_chunks(mspace msp, int enable) {
+ int ret = 0;
+ mstate ms = (mstate)msp;
+ if (!PREACTION(ms)) {
+ if (!use_mmap(ms)) {
+ ret = 1;
+ }
+ if (!enable) {
+ enable_mmap(ms);
+ } else {
+ disable_mmap(ms);
+ }
+ POSTACTION(ms);
+ }
+ return ret;
+}
+
+size_t destroy_mspace(mspace msp) {
+ size_t freed = 0;
+ mstate ms = (mstate)msp;
+ if (ok_magic(ms)) {
+ msegmentptr sp = &ms->seg;
+ (void)DESTROY_LOCK(&ms->mutex); /* destroy before unmapped */
+ while (sp != 0) {
+ char* base = sp->base;
+ size_t size = sp->size;
+ flag_t flag = sp->sflags;
+ (void)base; /* placate people compiling -Wunused-variable */
+ sp = sp->next;
+ if ((flag & USE_MMAP_BIT) && !(flag & EXTERN_BIT) &&
+ CALL_MUNMAP(base, size) == 0)
+ freed += size;
+ }
+ }
+ else {
+ USAGE_ERROR_ACTION(ms,ms);
+ }
+ return freed;
+}
+
+/*
+ mspace versions of routines are near-clones of the global
+ versions. This is not so nice but better than the alternatives.
+*/
+
+void* mspace_malloc(mspace msp, size_t bytes) {
+ mstate ms = (mstate)msp;
+ if (!ok_magic(ms)) {
+ USAGE_ERROR_ACTION(ms,ms);
+ return 0;
+ }
+ if (!PREACTION(ms)) {
+ void* mem;
+ size_t nb;
+ if (bytes <= MAX_SMALL_REQUEST) {
+ bindex_t idx;
+ binmap_t smallbits;
+ nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
+ idx = small_index(nb);
+ smallbits = ms->smallmap >> idx;
+
+ if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
+ mchunkptr b, p;
+ idx += ~smallbits & 1; /* Uses next bin if idx empty */
+ b = smallbin_at(ms, idx);
+ p = b->fd;
+ assert(chunksize(p) == small_index2size(idx));
+ unlink_first_small_chunk(ms, b, p, idx);
+ set_inuse_and_pinuse(ms, p, small_index2size(idx));
+ mem = chunk2mem(p);
+ check_malloced_chunk(ms, mem, nb);
+ goto postaction;
+ }
+
+ else if (nb > ms->dvsize) {
+ if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
+ mchunkptr b, p, r;
+ size_t rsize;
+ bindex_t i;
+ binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
+ binmap_t leastbit = least_bit(leftbits);
+ compute_bit2idx(leastbit, i);
+ b = smallbin_at(ms, i);
+ p = b->fd;
+ assert(chunksize(p) == small_index2size(i));
+ unlink_first_small_chunk(ms, b, p, i);
+ rsize = small_index2size(i) - nb;
+ /* Fit here cannot be remainderless if 4byte sizes */
+ if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
+ set_inuse_and_pinuse(ms, p, small_index2size(i));
+ else {
+ set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
+ r = chunk_plus_offset(p, nb);
+ set_size_and_pinuse_of_free_chunk(r, rsize);
+ replace_dv(ms, r, rsize);
+ }
+ mem = chunk2mem(p);
+ check_malloced_chunk(ms, mem, nb);
+ goto postaction;
+ }
+
+ else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
+ check_malloced_chunk(ms, mem, nb);
+ goto postaction;
+ }
+ }
+ }
+ else if (bytes >= MAX_REQUEST)
+ nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
+ else {
+ nb = pad_request(bytes);
+ if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
+ check_malloced_chunk(ms, mem, nb);
+ goto postaction;
+ }
+ }
+
+ if (nb <= ms->dvsize) {
+ size_t rsize = ms->dvsize - nb;
+ mchunkptr p = ms->dv;
+ if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
+ mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
+ ms->dvsize = rsize;
+ set_size_and_pinuse_of_free_chunk(r, rsize);
+ set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
+ }
+ else { /* exhaust dv */
+ size_t dvs = ms->dvsize;
+ ms->dvsize = 0;
+ ms->dv = 0;
+ set_inuse_and_pinuse(ms, p, dvs);
+ }
+ mem = chunk2mem(p);
+ check_malloced_chunk(ms, mem, nb);
+ goto postaction;
+ }
+
+ else if (nb < ms->topsize) { /* Split top */
+ size_t rsize = ms->topsize -= nb;
+ mchunkptr p = ms->top;
+ mchunkptr r = ms->top = chunk_plus_offset(p, nb);
+ r->head = rsize | PINUSE_BIT;
+ set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
+ mem = chunk2mem(p);
+ check_top_chunk(ms, ms->top);
+ check_malloced_chunk(ms, mem, nb);
+ goto postaction;
+ }
+
+ mem = sys_alloc(ms, nb);
+ POSTACTION(ms);
+ if (mem == MAP_FAILED && weaken(__oom_hook)) {
+ weaken(__oom_hook)(bytes);
+ }
+ return mem;
+
+ postaction:
+ POSTACTION(ms);
+ return mem;
+ }
+
+ return 0;
+}
+
+void mspace_free(mspace msp, void* mem) {
+ if (mem != 0) {
+ mchunkptr p = mem2chunk(mem);
+#if FOOTERS
+ mstate fm = get_mstate_for(p);
+ (void)msp; /* placate people compiling -Wunused */
+#else /* FOOTERS */
+ mstate fm = (mstate)msp;
+#endif /* FOOTERS */
+ if (!ok_magic(fm)) {
+ USAGE_ERROR_ACTION(fm, p);
+ return;
+ }
+ if (!PREACTION(fm)) {
+ check_inuse_chunk(fm, p);
+ if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
+ size_t psize = chunksize(p);
+ mchunkptr next = chunk_plus_offset(p, psize);
+ if (!pinuse(p)) {
+ size_t prevsize = p->prev_foot;
+ if (is_mmapped(p)) {
+ psize += prevsize + MMAP_FOOT_PAD;
+ if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
+ fm->footprint -= psize;
+ goto postaction;
+ }
+ else {
+ mchunkptr prev = chunk_minus_offset(p, prevsize);
+ psize += prevsize;
+ p = prev;
+ if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
+ if (p != fm->dv) {
+ unlink_chunk(fm, p, prevsize);
+ }
+ else if ((next->head & INUSE_BITS) == INUSE_BITS) {
+ fm->dvsize = psize;
+ set_free_with_pinuse(p, psize, next);
+ goto postaction;
+ }
+ }
+ else
+ goto erroraction;
+ }
+ }
+
+ if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
+ if (!cinuse(next)) { /* consolidate forward */
+ if (next == fm->top) {
+ size_t tsize = fm->topsize += psize;
+ fm->top = p;
+ p->head = tsize | PINUSE_BIT;
+ if (p == fm->dv) {
+ fm->dv = 0;
+ fm->dvsize = 0;
+ }
+ if (should_trim(fm, tsize))
+ sys_trim(fm, 0);
+ goto postaction;
+ }
+ else if (next == fm->dv) {
+ size_t dsize = fm->dvsize += psize;
+ fm->dv = p;
+ set_size_and_pinuse_of_free_chunk(p, dsize);
+ goto postaction;
+ }
+ else {
+ size_t nsize = chunksize(next);
+ psize += nsize;
+ unlink_chunk(fm, next, nsize);
+ set_size_and_pinuse_of_free_chunk(p, psize);
+ if (p == fm->dv) {
+ fm->dvsize = psize;
+ goto postaction;
+ }
+ }
+ }
+ else
+ set_free_with_pinuse(p, psize, next);
+
+ if (is_small(psize)) {
+ insert_small_chunk(fm, p, psize);
+ check_free_chunk(fm, p);
+ }
+ else {
+ tchunkptr tp = (tchunkptr)p;
+ insert_large_chunk(fm, tp, psize);
+ check_free_chunk(fm, p);
+ if (--fm->release_checks == 0)
+ release_unused_segments(fm);
+ }
+ goto postaction;
+ }
+ }
+ erroraction:
+ USAGE_ERROR_ACTION(fm, p);
+ postaction:
+ POSTACTION(fm);
+ }
+ }
+}
+
+void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
+ void* mem;
+ size_t req = 0;
+ mstate ms = (mstate)msp;
+ if (!ok_magic(ms)) {
+ USAGE_ERROR_ACTION(ms,ms);
+ return 0;
+ }
+ if (n_elements != 0) {
+ req = n_elements * elem_size;
+ if (((n_elements | elem_size) & ~(size_t)0xffff) &&
+ (req / n_elements != elem_size))
+ req = MAX_SIZE_T; /* force downstream failure on overflow */
+ }
+ mem = internal_malloc(ms, req);
+ if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
+ bzero(mem, req);
+ return mem;
+}
+
+void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
+ void* mem = 0;
+ if (oldmem == 0) {
+ mem = mspace_malloc(msp, bytes);
+ }
+ else if (bytes >= MAX_REQUEST) {
+ MALLOC_FAILURE_ACTION;
+ }
+#ifdef REALLOC_ZERO_BYTES_FREES
+ else if (bytes == 0) {
+ mspace_free(msp, oldmem);
+ }
+#endif /* REALLOC_ZERO_BYTES_FREES */
+ else {
+ size_t nb = request2size(bytes);
+ mchunkptr oldp = mem2chunk(oldmem);
+#if ! FOOTERS
+ mstate m = (mstate)msp;
+#else /* FOOTERS */
+ mstate m = get_mstate_for(oldp);
+ if (!ok_magic(m)) {
+ USAGE_ERROR_ACTION(m, oldmem);
+ return 0;
+ }
+#endif /* FOOTERS */
+ if (!PREACTION(m)) {
+ mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1);
+ POSTACTION(m);
+ if (newp != 0) {
+ check_inuse_chunk(m, newp);
+ mem = chunk2mem(newp);
+ }
+ else {
+ mem = mspace_malloc(m, bytes);
+ if (mem != 0) {
+ size_t oc = chunksize(oldp) - overhead_for(oldp);
+ memcpy(mem, oldmem, (oc < bytes)? oc : bytes);
+ mspace_free(m, oldmem);
+ }
+ }
+ }
+ }
+ return mem;
+}
+
+void* mspace_realloc_in_place(mspace msp, void* oldmem, size_t bytes) {
+ void* mem = 0;
+ if (oldmem != 0) {
+ if (bytes >= MAX_REQUEST) {
+ MALLOC_FAILURE_ACTION;
+ }
+ else {
+ size_t nb = request2size(bytes);
+ mchunkptr oldp = mem2chunk(oldmem);
+#if ! FOOTERS
+ mstate m = (mstate)msp;
+#else /* FOOTERS */
+ mstate m = get_mstate_for(oldp);
+ (void)msp; /* placate people compiling -Wunused */
+ if (!ok_magic(m)) {
+ USAGE_ERROR_ACTION(m, oldmem);
+ return 0;
+ }
+#endif /* FOOTERS */
+ if (!PREACTION(m)) {
+ mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0);
+ POSTACTION(m);
+ if (newp == oldp) {
+ check_inuse_chunk(m, newp);
+ mem = oldmem;
+ }
+ }
+ }
+ }
+ return mem;
+}
+
+void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
+ mstate ms = (mstate)msp;
+ if (!ok_magic(ms)) {
+ USAGE_ERROR_ACTION(ms,ms);
+ return 0;
+ }
+ if (alignment <= MALLOC_ALIGNMENT)
+ return mspace_malloc(msp, bytes);
+ return internal_memalign(ms, alignment, bytes);
+}
+
+void** mspace_independent_calloc(mspace msp, size_t n_elements,
+ size_t elem_size, void* chunks[]) {
+ size_t sz = elem_size; /* serves as 1-element array */
+ mstate ms = (mstate)msp;
+ if (!ok_magic(ms)) {
+ USAGE_ERROR_ACTION(ms,ms);
+ return 0;
+ }
+ return ialloc(ms, n_elements, &sz, 3, chunks);
+}
+
+void** mspace_independent_comalloc(mspace msp, size_t n_elements,
+ size_t sizes[], void* chunks[]) {
+ mstate ms = (mstate)msp;
+ if (!ok_magic(ms)) {
+ USAGE_ERROR_ACTION(ms,ms);
+ return 0;
+ }
+ return ialloc(ms, n_elements, sizes, 0, chunks);
+}
+
+size_t mspace_bulk_free(mspace msp, void* array[], size_t nelem) {
+ return internal_bulk_free((mstate)msp, array, nelem);
+}
+
+#if MALLOC_INSPECT_ALL
+void mspace_inspect_all(mspace msp,
+ void(*handler)(void *start,
+ void *end,
+ size_t used_bytes,
+ void* callback_arg),
+ void* arg) {
+ mstate ms = (mstate)msp;
+ if (ok_magic(ms)) {
+ if (!PREACTION(ms)) {
+ internal_inspect_all(ms, handler, arg);
+ POSTACTION(ms);
+ }
+ }
+ else {
+ USAGE_ERROR_ACTION(ms,ms);
+ }
+}
+#endif /* MALLOC_INSPECT_ALL */
+
+int mspace_trim(mspace msp, size_t pad) {
+ int result = 0;
+ mstate ms = (mstate)msp;
+ if (ok_magic(ms)) {
+ if (!PREACTION(ms)) {
+ result = sys_trim(ms, pad);
+ POSTACTION(ms);
+ }
+ }
+ else {
+ USAGE_ERROR_ACTION(ms,ms);
+ }
+ return result;
+}
+
+#if !NO_MALLOC_STATS
+void mspace_malloc_stats(mspace msp) {
+ mstate ms = (mstate)msp;
+ if (ok_magic(ms)) {
+ internal_malloc_stats(ms);
+ }
+ else {
+ USAGE_ERROR_ACTION(ms,ms);
+ }
+}
+#endif /* NO_MALLOC_STATS */
+
+size_t mspace_footprint(mspace msp) {
+ size_t result = 0;
+ mstate ms = (mstate)msp;
+ if (ok_magic(ms)) {
+ result = ms->footprint;
+ }
+ else {
+ USAGE_ERROR_ACTION(ms,ms);
+ }
+ return result;
+}
+
+size_t mspace_max_footprint(mspace msp) {
+ size_t result = 0;
+ mstate ms = (mstate)msp;
+ if (ok_magic(ms)) {
+ result = ms->max_footprint;
+ }
+ else {
+ USAGE_ERROR_ACTION(ms,ms);
+ }
+ return result;
+}
+
+size_t mspace_footprint_limit(mspace msp) {
+ size_t result = 0;
+ mstate ms = (mstate)msp;
+ if (ok_magic(ms)) {
+ size_t maf = ms->footprint_limit;
+ result = (maf == 0) ? MAX_SIZE_T : maf;
+ }
+ else {
+ USAGE_ERROR_ACTION(ms,ms);
+ }
+ return result;
+}
+
+size_t mspace_set_footprint_limit(mspace msp, size_t bytes) {
+ size_t result = 0;
+ mstate ms = (mstate)msp;
+ if (ok_magic(ms)) {
+ if (bytes == 0)
+ result = granularity_align(1); /* Use minimal size */
+ if (bytes == MAX_SIZE_T)
+ result = 0; /* disable */
+ else
+ result = granularity_align(bytes);
+ ms->footprint_limit = result;
+ }
+ else {
+ USAGE_ERROR_ACTION(ms,ms);
+ }
+ return result;
+}
+
+#if !NO_MALLINFO
+struct mallinfo mspace_mallinfo(mspace msp) {
+ mstate ms = (mstate)msp;
+ if (!ok_magic(ms)) {
+ USAGE_ERROR_ACTION(ms,ms);
+ }
+ return internal_mallinfo(ms);
+}
+#endif /* NO_MALLINFO */
+
+size_t mspace_usable_size(const void* mem) {
+ if (mem != 0) {
+ mchunkptr p = mem2chunk(mem);
+ if (is_inuse(p))
+ return chunksize(p) - overhead_for(p);
+ }
+ return 0;
+}
+
+int mspace_mallopt(int param_number, int value) {
+ return change_mparam(param_number, value);
+}
+
+#endif /* MSPACES */
diff --git a/third_party/dlmalloc/dlmalloc.h b/third_party/dlmalloc/dlmalloc.h
new file mode 100644
index 000000000..ad98b7595
--- /dev/null
+++ b/third_party/dlmalloc/dlmalloc.h
@@ -0,0 +1,510 @@
+#ifndef COSMOPOLITAN_THIRD_PARTY_DLMALLOC_DLMALLOC_H_
+#define COSMOPOLITAN_THIRD_PARTY_DLMALLOC_DLMALLOC_H_
+#if !(__ASSEMBLER__ + __LINKER__ + 0)
+COSMOPOLITAN_C_START_
+
+/*
+ malloc(size_t n)
+ Returns a pointer to a newly allocated chunk of at least n bytes, or
+ null if no space is available, in which case errno is set to ENOMEM
+ on ANSI C systems.
+
+ If n is zero, malloc returns a minimum-sized chunk. (The minimum
+ size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
+ systems.) Note that size_t is an unsigned type, so calls with
+ arguments that would be negative if signed are interpreted as
+ requests for huge amounts of space, which will often fail. The
+ maximum supported value of n differs across systems, but is in all
+ cases less than the maximum representable value of a size_t.
+*/
+void* dlmalloc(size_t);
+
+/*
+ free(void* p)
+ Releases the chunk of memory pointed to by p, that had been previously
+ allocated using malloc or a related routine such as realloc.
+ It has no effect if p is null. If p was not malloced or already
+ freed, free(p) will by default cuase the current program to abort.
+*/
+void dlfree(void*);
+
+/*
+ calloc(size_t n_elements, size_t element_size);
+ Returns a pointer to n_elements * element_size bytes, with all locations
+ set to zero.
+*/
+void* dlcalloc(size_t, size_t);
+
+/*
+ realloc(void* p, size_t n)
+ Returns a pointer to a chunk of size n that contains the same data
+ as does chunk p up to the minimum of (n, p's size) bytes, or null
+ if no space is available.
+
+ The returned pointer may or may not be the same as p. The algorithm
+ prefers extending p in most cases when possible, otherwise it
+ employs the equivalent of a malloc-copy-free sequence.
+
+ If p is null, realloc is equivalent to malloc.
+
+ If space is not available, realloc returns null, errno is set (if on
+ ANSI) and p is NOT freed.
+
+ if n is for fewer bytes than already held by p, the newly unused
+ space is lopped off and freed if possible. realloc with a size
+ argument of zero (re)allocates a minimum-sized chunk.
+
+ The old unix realloc convention of allowing the last-free'd chunk
+ to be used as an argument to realloc is not supported.
+*/
+void* dlrealloc(void*, size_t);
+
+/*
+ realloc_in_place(void* p, size_t n)
+ Resizes the space allocated for p to size n, only if this can be
+ done without moving p (i.e., only if there is adjacent space
+ available if n is greater than p's current allocated size, or n is
+ less than or equal to p's size). This may be used instead of plain
+ realloc if an alternative allocation strategy is needed upon failure
+ to expand space; for example, reallocation of a buffer that must be
+ memory-aligned or cleared. You can use realloc_in_place to trigger
+ these alternatives only when needed.
+
+ Returns p if successful; otherwise null.
+*/
+void* dlrealloc_in_place(void*, size_t);
+
+/*
+ memalign(size_t alignment, size_t n);
+ Returns a pointer to a newly allocated chunk of n bytes, aligned
+ in accord with the alignment argument.
+
+ The alignment argument should be a power of two. If the argument is
+ not a power of two, the nearest greater power is used.
+ 8-byte alignment is guaranteed by normal malloc calls, so don't
+ bother calling memalign with an argument of 8 or less.
+
+ Overreliance on memalign is a sure way to fragment space.
+*/
+void* dlmemalign(size_t, size_t);
+
+/*
+ int posix_memalign(void** pp, size_t alignment, size_t n);
+ Allocates a chunk of n bytes, aligned in accord with the alignment
+ argument. Differs from memalign only in that it (1) assigns the
+ allocated memory to *pp rather than returning it, (2) fails and
+ returns EINVAL if the alignment is not a power of two (3) fails and
+ returns ENOMEM if memory cannot be allocated.
+*/
+int dlposix_memalign(void**, size_t, size_t);
+
+/*
+ valloc(size_t n);
+ Equivalent to memalign(pagesize, n), where pagesize is the page
+ size of the system. If the pagesize is unknown, 4096 is used.
+*/
+void* dlvalloc(size_t);
+
+/*
+ mallopt(int parameter_number, int parameter_value)
+ Sets tunable parameters The format is to provide a
+ (parameter-number, parameter-value) pair. mallopt then sets the
+ corresponding parameter to the argument value if it can (i.e., so
+ long as the value is meaningful), and returns 1 if successful else
+ 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
+ normally defined in malloc.h. None of these are use in this malloc,
+ so setting them has no effect. But this malloc also supports other
+ options in mallopt:
+
+ Symbol param # default allowed param values
+ M_TRIM_THRESHOLD -1 2*1024*1024 any (-1U disables trimming)
+ M_GRANULARITY -2 page size any power of 2 >= page size
+ M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support)
+*/
+int dlmallopt(int, int);
+
+/*
+ malloc_footprint();
+ Returns the number of bytes obtained from the system. The total
+ number of bytes allocated by malloc, realloc etc., is less than this
+ value. Unlike mallinfo, this function returns only a precomputed
+ result, so can be called frequently to monitor memory consumption.
+ Even if locks are otherwise defined, this function does not use them,
+ so results might not be up to date.
+*/
+size_t dlmalloc_footprint(void);
+
+/*
+ malloc_max_footprint();
+ Returns the maximum number of bytes obtained from the system. This
+ value will be greater than current footprint if deallocated space
+ has been reclaimed by the system. The peak number of bytes allocated
+ by malloc, realloc etc., is less than this value. Unlike mallinfo,
+ this function returns only a precomputed result, so can be called
+ frequently to monitor memory consumption. Even if locks are
+ otherwise defined, this function does not use them, so results might
+ not be up to date.
+*/
+size_t dlmalloc_max_footprint(void);
+
+/*
+ malloc_footprint_limit();
+ Returns the number of bytes that the heap is allowed to obtain from
+ the system, returning the last value returned by
+ malloc_set_footprint_limit, or the maximum size_t value if
+ never set. The returned value reflects a permission. There is no
+ guarantee that this number of bytes can actually be obtained from
+ the system.
+*/
+size_t dlmalloc_footprint_limit(void);
+
+/*
+ malloc_set_footprint_limit();
+ Sets the maximum number of bytes to obtain from the system, causing
+ failure returns from malloc and related functions upon attempts to
+ exceed this value. The argument value may be subject to page
+ rounding to an enforceable limit; this actual value is returned.
+ Using an argument of the maximum possible size_t effectively
+ disables checks. If the argument is less than or equal to the
+ current malloc_footprint, then all future allocations that require
+ additional system memory will fail. However, invocation cannot
+ retroactively deallocate existing used memory.
+*/
+size_t dlmalloc_set_footprint_limit(size_t bytes);
+
+/*
+ malloc_inspect_all(void(*handler)(void *start,
+ void *end,
+ size_t used_bytes,
+ void* callback_arg),
+ void* arg);
+ Traverses the heap and calls the given handler for each managed
+ region, skipping all bytes that are (or may be) used for bookkeeping
+ purposes. Traversal does not include include chunks that have been
+ directly memory mapped. Each reported region begins at the start
+ address, and continues up to but not including the end address. The
+ first used_bytes of the region contain allocated data. If
+ used_bytes is zero, the region is unallocated. The handler is
+ invoked with the given callback argument. If locks are defined, they
+ are held during the entire traversal. It is a bad idea to invoke
+ other malloc functions from within the handler.
+
+ For example, to count the number of in-use chunks with size greater
+ than 1000, you could write:
+ static int count = 0;
+ void count_chunks(void* start, void* end, size_t used, void* arg) {
+ if (used >= 1000) ++count;
+ }
+ then:
+ malloc_inspect_all(count_chunks, NULL);
+
+ malloc_inspect_all is compiled only if MALLOC_INSPECT_ALL is defined.
+*/
+void dlmalloc_inspect_all(void (*handler)(void*, void*, size_t, void*),
+ void* arg);
+
+/*
+ mallinfo()
+ Returns (by copy) a struct containing various summary statistics:
+
+ arena: current total non-mmapped bytes allocated from system
+ ordblks: the number of free chunks
+ smblks: always zero.
+ hblks: current number of mmapped regions
+ hblkhd: total bytes held in mmapped regions
+ usmblks: the maximum total allocated space. This will be greater
+ than current total if trimming has occurred.
+ fsmblks: always zero
+ uordblks: current total allocated space (normal or mmapped)
+ fordblks: total free space
+ keepcost: the maximum number of bytes that could ideally be released
+ back to system via malloc_trim. ("ideally" means that
+ it ignores page restrictions etc.)
+
+ Because these fields are ints, but internal bookkeeping may
+ be kept as longs, the reported values may wrap around zero and
+ thus be inaccurate.
+*/
+
+struct mallinfo dlmallinfo(void);
+
+/*
+ independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
+
+ independent_calloc is similar to calloc, but instead of returning a
+ single cleared space, it returns an array of pointers to n_elements
+ independent elements that can hold contents of size elem_size, each
+ of which starts out cleared, and can be independently freed,
+ realloc'ed etc. The elements are guaranteed to be adjacently
+ allocated (this is not guaranteed to occur with multiple callocs or
+ mallocs), which may also improve cache locality in some
+ applications.
+
+ The "chunks" argument is optional (i.e., may be null, which is
+ probably the most typical usage). If it is null, the returned array
+ is itself dynamically allocated and should also be freed when it is
+ no longer needed. Otherwise, the chunks array must be of at least
+ n_elements in length. It is filled in with the pointers to the
+ chunks.
+
+ In either case, independent_calloc returns this pointer array, or
+ null if the allocation failed. If n_elements is zero and "chunks"
+ is null, it returns a chunk representing an array with zero elements
+ (which should be freed if not wanted).
+
+ Each element must be freed when it is no longer needed. This can be
+ done all at once using bulk_free.
+
+ independent_calloc simplifies and speeds up implementations of many
+ kinds of pools. It may also be useful when constructing large data
+ structures that initially have a fixed number of fixed-sized nodes,
+ but the number is not known at compile time, and some of the nodes
+ may later need to be freed. For example:
+
+ struct Node { int item; struct Node* next; };
+
+ struct Node* build_list() {
+ struct Node** pool;
+ int n = read_number_of_nodes_needed();
+ if (n <= 0) return 0;
+ pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0);
+ if (pool == 0) die();
+ // organize into a linked list...
+ struct Node* first = pool[0];
+ for (i = 0; i < n-1; ++i)
+ pool[i]->next = pool[i+1];
+ free(pool); // Can now free the array (or not, if it is needed later)
+ return first;
+ }
+*/
+void** dlindependent_calloc(size_t, size_t, void**);
+
+/*
+ independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
+
+ independent_comalloc allocates, all at once, a set of n_elements
+ chunks with sizes indicated in the "sizes" array. It returns
+ an array of pointers to these elements, each of which can be
+ independently freed, realloc'ed etc. The elements are guaranteed to
+ be adjacently allocated (this is not guaranteed to occur with
+ multiple callocs or mallocs), which may also improve cache locality
+ in some applications.
+
+ The "chunks" argument is optional (i.e., may be null). If it is null
+ the returned array is itself dynamically allocated and should also
+ be freed when it is no longer needed. Otherwise, the chunks array
+ must be of at least n_elements in length. It is filled in with the
+ pointers to the chunks.
+
+ In either case, independent_comalloc returns this pointer array, or
+ null if the allocation failed. If n_elements is zero and chunks is
+ null, it returns a chunk representing an array with zero elements
+ (which should be freed if not wanted).
+
+ Each element must be freed when it is no longer needed. This can be
+ done all at once using bulk_free.
+
+ independent_comallac differs from independent_calloc in that each
+ element may have a different size, and also that it does not
+ automatically clear elements.
+
+ independent_comalloc can be used to speed up allocation in cases
+ where several structs or objects must always be allocated at the
+ same time. For example:
+
+ struct Head { ... }
+ struct Foot { ... }
+
+ void send_message(char* msg) {
+ int msglen = strlen(msg);
+ size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
+ void* chunks[3];
+ if (independent_comalloc(3, sizes, chunks) == 0)
+ die();
+ struct Head* head = (struct Head*)(chunks[0]);
+ char* body = (char*)(chunks[1]);
+ struct Foot* foot = (struct Foot*)(chunks[2]);
+ // ...
+ }
+
+ In general though, independent_comalloc is worth using only for
+ larger values of n_elements. For small values, you probably won't
+ detect enough difference from series of malloc calls to bother.
+
+ Overuse of independent_comalloc can increase overall memory usage,
+ since it cannot reuse existing noncontiguous small chunks that
+ might be available for some of the elements.
+*/
+void** dlindependent_comalloc(size_t, size_t*, void**);
+
+/*
+ bulk_free(void* array[], size_t n_elements)
+ Frees and clears (sets to null) each non-null pointer in the given
+ array. This is likely to be faster than freeing them one-by-one.
+ If footers are used, pointers that have been allocated in different
+ mspaces are not freed or cleared, and the count of all such pointers
+ is returned. For large arrays of pointers with poor locality, it
+ may be worthwhile to sort this array before calling bulk_free.
+*/
+size_t dlbulk_free(void**, size_t n_elements);
+
+/*
+ pvalloc(size_t n);
+ Equivalent to valloc(minimum-page-that-holds(n)), that is,
+ round up n to nearest pagesize.
+ */
+void* dlpvalloc(size_t);
+
+/*
+ malloc_trim(size_t pad);
+
+ If possible, gives memory back to the system (via negative arguments
+ to sbrk) if there is unused memory at the `high' end of the malloc
+ pool or in unused MMAP segments. You can call this after freeing
+ large blocks of memory to potentially reduce the system-level memory
+ requirements of a program. However, it cannot guarantee to reduce
+ memory. Under some allocation patterns, some large free blocks of
+ memory will be locked between two used chunks, so they cannot be
+ given back to the system.
+
+ The `pad' argument to malloc_trim represents the amount of free
+ trailing space to leave untrimmed. If this argument is zero, only
+ the minimum amount of memory to maintain internal data structures
+ will be left. Non-zero arguments can be supplied to maintain enough
+ trailing space to service future expected allocations without having
+ to re-obtain memory from the system.
+
+ Malloc_trim returns 1 if it actually released any memory, else 0.
+*/
+int dlmalloc_trim(size_t);
+
+/*
+ malloc_stats();
+ Prints on stderr the amount of space obtained from the system (both
+ via sbrk and mmap), the maximum amount (which may be more than
+ current if malloc_trim and/or munmap got called), and the current
+ number of bytes allocated via malloc (or realloc, etc) but not yet
+ freed. Note that this is the number of bytes allocated, not the
+ number requested. It will be larger than the number requested
+ because of alignment and bookkeeping overhead. Because it includes
+ alignment wastage as being in use, this figure may be greater than
+ zero even when no user-level chunks are allocated.
+
+ The reported current and maximum system memory can be inaccurate if
+ a program makes other calls to system memory allocation functions
+ (normally sbrk) outside of malloc.
+
+ malloc_stats prints only the most commonly interesting statistics.
+ More information can be obtained by calling mallinfo.
+
+ malloc_stats is not compiled if NO_MALLOC_STATS is defined.
+*/
+void dlmalloc_stats(void);
+
+/*
+ malloc_usable_size(void* p);
+
+ Returns the number of bytes you can actually use in
+ an allocated chunk, which may be more than you requested (although
+ often not) due to alignment and minimum size constraints.
+ You can use this many bytes without worrying about
+ overwriting other allocated objects. This is not a particularly great
+ programming practice. malloc_usable_size can be more useful in
+ debugging and assertions, for example:
+
+ p = malloc(n);
+ assert(malloc_usable_size(p) >= 256);
+*/
+size_t dlmalloc_usable_size(const void*);
+
+/*
+ mspace is an opaque type representing an independent
+ region of space that supports mspace_malloc, etc.
+*/
+typedef void* mspace;
+
+/*
+ create_mspace creates and returns a new independent space with the
+ given initial capacity, or, if 0, the default granularity size. It
+ returns null if there is no system memory available to create the
+ space. If argument locked is non-zero, the space uses a separate
+ lock to control access. The capacity of the space will grow
+ dynamically as needed to service mspace_malloc requests. You can
+ control the sizes of incremental increases of this space by
+ compiling with a different DEFAULT_GRANULARITY or dynamically
+ setting with mallopt(M_GRANULARITY, value).
+*/
+mspace create_mspace(size_t capacity, int locked);
+
+/*
+ destroy_mspace destroys the given space, and attempts to return all
+ of its memory back to the system, returning the total number of
+ bytes freed. After destruction, the results of access to all memory
+ used by the space become undefined.
+*/
+size_t destroy_mspace(mspace msp);
+
+/*
+ create_mspace_with_base uses the memory supplied as the initial base
+ of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
+ space is used for bookkeeping, so the capacity must be at least this
+ large. (Otherwise 0 is returned.) When this initial space is
+ exhausted, additional memory will be obtained from the system.
+ Destroying this space will deallocate all additionally allocated
+ space (if possible) but not the initial base.
+*/
+mspace create_mspace_with_base(void* base, size_t capacity, int locked);
+
+/*
+ mspace_track_large_chunks controls whether requests for large chunks
+ are allocated in their own untracked mmapped regions, separate from
+ others in this mspace. By default large chunks are not tracked,
+ which reduces fragmentation. However, such chunks are not
+ necessarily released to the system upon destroy_mspace. Enabling
+ tracking by setting to true may increase fragmentation, but avoids
+ leakage when relying on destroy_mspace to release all memory
+ allocated using this space. The function returns the previous
+ setting.
+*/
+int mspace_track_large_chunks(mspace msp, int enable);
+
+/*
+ mspace_mallinfo behaves as mallinfo, but reports properties of
+ the given space.
+*/
+struct mallinfo mspace_mallinfo(mspace msp);
+
+/*
+ An alias for mallopt.
+*/
+int mspace_mallopt(int, int);
+
+/*
+ The following operate identically to their malloc counterparts
+ but operate only for the given mspace argument
+*/
+void* mspace_malloc(mspace msp, size_t bytes);
+void mspace_free(mspace msp, void* mem);
+void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
+void* mspace_realloc(mspace msp, void* mem, size_t newsize);
+void* mspace_realloc_in_place(mspace msp, void* mem, size_t newsize);
+void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
+void** mspace_independent_calloc(mspace msp, size_t n_elements,
+ size_t elem_size, void* chunks[]);
+void** mspace_independent_comalloc(mspace msp, size_t n_elements,
+ size_t sizes[], void* chunks[]);
+size_t mspace_bulk_free(mspace msp, void**, size_t n_elements);
+size_t mspace_usable_size(const void* mem);
+void mspace_malloc_stats(mspace msp);
+int mspace_trim(mspace msp, size_t pad);
+size_t mspace_footprint(mspace msp);
+size_t mspace_max_footprint(mspace msp);
+size_t mspace_footprint_limit(mspace msp);
+size_t mspace_set_footprint_limit(mspace msp, size_t bytes);
+void mspace_inspect_all(mspace msp,
+ void (*handler)(void*, void*, size_t, void*),
+ void* arg);
+
+COSMOPOLITAN_C_END_
+#endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */
+#endif /* COSMOPOLITAN_THIRD_PARTY_DLMALLOC_DLMALLOC_H_ */
diff --git a/third_party/dlmalloc/dlmalloc.internal.h b/third_party/dlmalloc/dlmalloc.internal.h
deleted file mode 100644
index 596a15ac3..000000000
--- a/third_party/dlmalloc/dlmalloc.internal.h
+++ /dev/null
@@ -1,1312 +0,0 @@
-#ifndef COSMOPOLITAN_LIBC_MEM_DLMALLOC_H_
-#define COSMOPOLITAN_LIBC_MEM_DLMALLOC_H_
-#ifndef __STRICT_ANSI__
-#include "libc/assert.h"
-#include "libc/bits/bits.h"
-#include "libc/bits/weaken.h"
-#include "libc/calls/calls.h"
-#include "libc/dce.h"
-#include "libc/log/backtrace.internal.h"
-#include "libc/nexgen32e/bsf.h"
-#include "libc/runtime/runtime.h"
-#include "libc/runtime/symbols.internal.h"
-#if !(__ASSEMBLER__ + __LINKER__ + 0)
-COSMOPOLITAN_C_START_
-#if 0
-/**
- * @fileoverview Internal header for Doug Lea's malloc.
-*/
-#endif
-
-#define DLMALLOC_VERSION 20806
-#define HAVE_MMAP 1
-#define MMAP_CLEARS 1
-#define MALLOC_ALIGNMENT 16
-#define NO_SEGMENT_TRAVERSAL 1
-#define MAX_RELEASE_CHECK_RATE 128
-#define MALLOC_ABORT abort()
-#define FOOTERS !NoDebug()
-#define MAX_REQUEST 0xfffffffffff
-#define DEFAULT_GRANULARITY (64UL * 1024UL)
-#define DEFAULT_TRIM_THRESHOLD (10UL * 1024UL * 1024UL)
-#define DEFAULT_MMAP_THRESHOLD (256UL * 1024UL)
-#define USE_LOCKS 0
-#define USE_SPIN_LOCKS 0
-#define LOCK_AT_FORK 0
-#define NSMALLBINS (32u)
-#define NTREEBINS (32u)
-#define SMALLBIN_SHIFT (3u)
-#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
-#define TREEBIN_SHIFT (8u)
-#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
-#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
-#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
-#define M_TRIM_THRESHOLD (-1)
-#define M_GRANULARITY (-2)
-#define M_MMAP_THRESHOLD (-3)
-
-/* ─────────────────── size_t and alignment properties ──────────────────── */
-
-#define MALLINFO_FIELD_TYPE size_t
-
-/* The byte and bit size of a size_t */
-#define SIZE_T_SIZE (sizeof(size_t))
-#define SIZE_T_BITSIZE (sizeof(size_t) << 3)
-
-/* Some constants coerced to size_t */
-/* Annoying but necessary to avoid errors on some platforms */
-#define SIZE_T_ZERO 0UL
-#define SIZE_T_ONE 1UL
-#define SIZE_T_TWO 2UL
-#define SIZE_T_FOUR 4UL
-#define TWO_SIZE_T_SIZES (SIZE_T_SIZE << 1)
-#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE << 2)
-#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES + TWO_SIZE_T_SIZES)
-#define HALF_SIZE_MAX (__SIZE_MAX__ / 2U)
-
-/* The bit mask value corresponding to MALLOC_ALIGNMENT */
-#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)
-
-/* True if address a has acceptable alignment */
-#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
-
-/* the number of bytes to offset an address to align it */
-#define align_offset(A) \
- ((((size_t)(A)&CHUNK_ALIGN_MASK) == 0) \
- ? 0 \
- : ((MALLOC_ALIGNMENT - ((size_t)(A)&CHUNK_ALIGN_MASK)) & \
- CHUNK_ALIGN_MASK))
-
-/* ────────────────────────── MMAP preliminaries ───────────────────────── */
-
-#define MFAIL MAP_FAILED
-#define CMFAIL ((char *)MAP_FAILED)
-#define MMAP_DEFAULT(s) dlmalloc_requires_more_vespene_gas(s)
-#define MUNMAP_DEFAULT(a, s) munmap(a, s)
-#define MMAP_PROT (PROT_READ | PROT_WRITE)
-#define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s)
-#define USE_MMAP_BIT (SIZE_T_ONE)
-#define CALL_MMAP(s) MMAP_DEFAULT(s)
-#define CALL_MUNMAP(a, s) MUNMAP_DEFAULT((a), (s))
-#define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s)
-#define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv))
-#define USE_NONCONTIGUOUS_BIT (4U)
-#define EXTERN_BIT (8U)
-
-/* ─────────────────────────── Lock preliminaries ──────────────────────── */
-
-/*
- When locks are defined, there is one global lock, plus
- one per-mspace lock.
-
- The global lock_ensures that mparams.magic and other unique
- mparams values are initialized only once. It also protects
- sequences of calls to MORECORE. In many cases sys_alloc requires
- two calls, that should not be interleaved with calls by other
- threads. This does not protect against direct calls to MORECORE
- by other threads not using this lock, so there is still code to
- cope the best we can on interference.
-
- Per-mspace locks surround calls to malloc, free, etc.
- By default, locks are simple non-reentrant mutexes.
-
- Because lock-protected regions generally have bounded times, it is
- OK to use the supplied simple spinlocks. Spinlocks are likely to
- improve performance for lightly contended applications, but worsen
- performance under heavy contention.
-
- If USE_LOCKS is > 1, the definitions of lock routines here are
- bypassed, in which case you will need to define the type MLOCK_T,
- and at least INITIAL_LOCK, DESTROY_LOCK, ACQUIRE_LOCK, RELEASE_LOCK
- and TRY_LOCK. You must also declare a
- static MLOCK_T malloc_global_mutex = { initialization values };.
-*/
-
-#define USE_LOCK_BIT (0U)
-#define INITIAL_LOCK(l) (0)
-#define DESTROY_LOCK(l) (0)
-#define ACQUIRE_MALLOC_GLOBAL_LOCK()
-#define RELEASE_MALLOC_GLOBAL_LOCK()
-
-/* ─────────────────────── Chunk representations ──────────────────────── */
-
-/*
- (The following includes lightly edited explanations by Colin Plumb.)
-
- The MallocChunk declaration below is misleading (but accurate and
- necessary). It declares a "view" into memory allowing access to
- necessary fields at known offsets from a given base.
-
- Chunks of memory are maintained using a `boundary tag' method as
- originally described by Knuth. (See the paper by Paul Wilson
- ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such
- techniques.) Sizes of free chunks are stored both in the front of
- each chunk and at the end. This makes consolidating fragmented
- chunks into bigger chunks fast. The head fields also hold bits
- representing whether chunks are free or in use.
-
- Here are some pictures to make it clearer. They are "exploded" to
- show that the state of a chunk can be thought of as extending from
- the high 31 bits of the head field of its header through the
- prev_foot and PINUSE_BIT bit of the following chunk header.
-
- A chunk that's in use looks like:
-
- chunk→ ┌───────────────────────────────────────────────────────────────┐
- │ Size of previous chunk (if P = 0) │
- └─────────────────────────────────────────────────────────────┬─┤
- ┌─────────────────────────────────────────────────────────────┐ │P│
- │ Size of this chunk 1│ └─┘
- mem→ ├───────────────────────────────────────────────────────────────┐
- │ │
- ├─ ─┤
- │ │
- ├─ ─┤
- │ :
- ├─ size - sizeof(size_t) available payload bytes ─┤
- : │
- chunk→ ├─ ─┤
- │ │
- └───────────────────────────────────────────────────────────────┤
- ┌─────────────────────────────────────────────────────────────┐ |1│
- │ Size of next chunk (may or may not be in use) │ ├─┘
- mem→ └───────────────────────────────────────────────────────────────┘
-
- And if it's free, it looks like this:
-
- chunk→ ┌─ ─┐
- │ User payload (must be in use, or we would have merged!) │
- └───────────────────────────────────────────────────────────────┤
- ┌─────────────────────────────────────────────────────────────┐ │P│
- │ Size of this chunk 0│ │─┘
- mem→ ├───────────────────────────────────────────────────────────────┤
- │ Next pointer │
- ├───────────────────────────────────────────────────────────────┤
- │ Prev pointer │
- ├───────────────────────────────────────────────────────────────┤
- │ :
- ├─ size - sizeof(struct chunk) unused bytes ─┤
- : │
- chunk→ ├───────────────────────────────────────────────────────────────┤
- │ Size of this chunk │
- └───────────────────────────────────────────────────────────────┤
- ┌───────────────────────────────────────────────────────────────│0│
- │ Size of next chunk (must be in use, or we would have merged)| │─┘
- mem→ ├───────────────────────────────────────────────────────────────┤
- │ :
- ├─ User payload ─┤
- : │
- └───────────────────────────────────────────────────────────────┤
- │0│
- └─┘
- Note that since we always merge adjacent free chunks, the chunks
- adjacent to a free chunk must be in use.
-
- Given a pointer to a chunk (which can be derived trivially from the
- payload pointer) we can, in O(1) time, find out whether the adjacent
- chunks are free, and if so, unlink them from the lists that they
- are on and merge them with the current chunk.
-
- Chunks always begin on even word boundaries, so the mem portion
- (which is returned to the user) is also on an even word boundary, and
- thus at least double-word aligned.
-
- The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
- chunk size (which is always a multiple of two words), is an in-use
- bit for the *previous* chunk. If that bit is *clear*, then the
- word before the current chunk size contains the previous chunk
- size, and can be used to find the front of the previous chunk.
- The very first chunk allocated always has this bit set, preventing
- access to non-existent (or non-owned) memory. If pinuse is set for
- any given chunk, then you CANNOT determine the size of the
- previous chunk, and might even get a memory addressing fault when
- trying to do so.
-
- The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
- the chunk size redundantly records whether the current chunk is
- inuse (unless the chunk is mmapped). This redundancy enables usage
- checks within free and realloc, and reduces indirection when freeing
- and consolidating chunks.
-
- Each freshly allocated chunk must have both cinuse and pinuse set.
- That is, each allocated chunk borders either a previously allocated
- and still in-use chunk, or the base of its memory arena. This is
- ensured by making all allocations from the `lowest' part of any
- found chunk. Further, no free chunk physically borders another one,
- so each free chunk is known to be preceded and followed by either
- inuse chunks or the ends of memory.
-
- Note that the `foot' of the current chunk is actually represented
- as the prev_foot of the NEXT chunk. This makes it easier to
- deal with alignments etc but can be very confusing when trying
- to extend or adapt this code.
-
- The exceptions to all this are
-
- 1. The special chunk `top' is the top-most available chunk (i.e.,
- the one bordering the end of available memory). It is treated
- specially. Top is never included in any bin, is used only if
- no other chunk is available, and is released back to the
- system if it is very large (see M_TRIM_THRESHOLD). In effect,
- the top chunk is treated as larger (and thus less well
- fitting) than any other available chunk. The top chunk
- doesn't update its trailing size field since there is no next
- contiguous chunk that would have to index off it. However,
- space is still allocated for it (TOP_FOOT_SIZE) to enable
- separation or merging when space is extended.
-
- 3. Chunks allocated via mmap, have both cinuse and pinuse bits
- cleared in their head fields. Because they are allocated
- one-by-one, each must carry its own prev_foot field, which is
- also used to hold the offset this chunk has within its mmapped
- region, which is needed to preserve alignment. Each mmapped
- chunk is trailed by the first two fields of a fake next-chunk
- for sake of usage checks.
-
-*/
-
-struct MallocChunk {
- size_t prev_foot; /* Size of previous chunk (if free). */
- size_t head; /* Size and inuse bits. */
- struct MallocChunk *fd; /* double links -- used only if free. */
- struct MallocChunk *bk;
-};
-
-typedef struct MallocChunk mchunk;
-typedef struct MallocChunk *mchunkptr;
-typedef struct MallocChunk *sbinptr; /* The type of bins of chunks */
-typedef unsigned int bindex_t; /* Described below */
-typedef unsigned int binmap_t; /* Described below */
-typedef unsigned int flag_t; /* The type of various bit flag sets */
-
-/* ─────────────────── Chunks sizes and alignments ─────────────────────── */
-
-#define MCHUNK_SIZE (sizeof(mchunk))
-
-#define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
-
-/* MMapped chunks need a second word of overhead ... */
-#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
-/* ... and additional padding for fake next-chunk at foot */
-#define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES)
-
-/* The smallest size we can malloc is an aligned minimal chunk */
-#define MIN_CHUNK_SIZE ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
-
-/* conversion from malloc headers to user pointers, and back */
-#define chunk2mem(p) ((void *)((char *)(p) + TWO_SIZE_T_SIZES))
-#define mem2chunk(mem) ((mchunkptr)((char *)(mem)-TWO_SIZE_T_SIZES))
-/* chunk associated with aligned address A */
-#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))
-
-/* Bounds on request (not chunk) sizes. */
-#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
-
-/* pad request bytes into a usable size */
-#define pad_request(req) \
- (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
-
-/* pad request, checking for minimum (but not maximum) */
-#define request2size(req) \
- (((req) < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(req))
-
-/* ────────────────── Operations on head and foot fields ───────────────── */
-
-/*
- The head field of a chunk is or'ed with PINUSE_BIT when previous
- adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in
- use, unless mmapped, in which case both bits are cleared.
-
- FLAG4_BIT is not used by this malloc, but might be useful in extensions.
-*/
-
-#define PINUSE_BIT (SIZE_T_ONE)
-#define CINUSE_BIT (SIZE_T_TWO)
-#define FLAG4_BIT (SIZE_T_FOUR)
-#define INUSE_BITS (PINUSE_BIT | CINUSE_BIT)
-#define FLAG_BITS (PINUSE_BIT | CINUSE_BIT | FLAG4_BIT)
-
-/* Head value for fenceposts */
-#define FENCEPOST_HEAD (INUSE_BITS | SIZE_T_SIZE)
-
-/* extraction of fields from head words */
-#define cinuse(p) ((p)->head & CINUSE_BIT)
-#define pinuse(p) ((p)->head & PINUSE_BIT)
-#define flag4inuse(p) ((p)->head & FLAG4_BIT)
-#define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT)
-#define is_mmapped(p) (((p)->head & INUSE_BITS) == 0)
-
-#define chunksize(p) ((p)->head & ~(FLAG_BITS))
-
-#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
-#define set_flag4(p) ((p)->head |= FLAG4_BIT)
-#define clear_flag4(p) ((p)->head &= ~FLAG4_BIT)
-
-/* Treat space at ptr +/- offset as a chunk */
-#define chunk_plus_offset(p, s) ((mchunkptr)(((char *)(p)) + (s)))
-#define chunk_minus_offset(p, s) ((mchunkptr)(((char *)(p)) - (s)))
-
-/* Ptr to next or previous physical MallocChunk. */
-#define next_chunk(p) ((mchunkptr)(((char *)(p)) + ((p)->head & ~FLAG_BITS)))
-#define prev_chunk(p) ((mchunkptr)(((char *)(p)) - ((p)->prev_foot)))
-
-/* extract next chunk's pinuse bit */
-#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)
-
-/* Get/set size at footer */
-#define get_foot(p, s) (((mchunkptr)((char *)(p) + (s)))->prev_foot)
-#define set_foot(p, s) (((mchunkptr)((char *)(p) + (s)))->prev_foot = (s))
-
-/* Set size, pinuse bit, and foot */
-#define set_size_and_pinuse_of_free_chunk(p, s) \
- ((p)->head = (s | PINUSE_BIT), set_foot(p, s))
-
-/* Set size, pinuse bit, foot, and clear next pinuse */
-#define set_free_with_pinuse(p, s, n) \
- (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
-
-/* Get the internal overhead associated with chunk p */
-#define overhead_for(p) (is_mmapped(p) ? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
-
-/* Return true if malloced space is not necessarily cleared */
-
-#define calloc_must_clear(p) (!is_mmapped(p))
-
-/* ────────────────────── Overlaid data structures ─────────────────────── */
-
-/*
- When chunks are not in use, they are treated as nodes of either
- lists or trees.
-
- "Small" chunks are stored in circular doubly-linked lists, and look
- like this:
-
- chunk→ ┌───────────────────────────────────────────────────────────────┐
- │ Size of previous chunk │
- ├───────────────────────────────────────────────────────────────┤
- `head:' │ Size of chunk, in bytes |P│
- mem→ ├───────────────────────────────────────────────────────────────┤
- │ Forward pointer to next chunk in list │
- ├───────────────────────────────────────────────────────────────┤
- │ Back pointer to previous chunk in list │
- ├───────────────────────────────────────────────────────────────┤
- . Unused space (may be 0 bytes long) .
- . .
- │ │
- nextchunk→ ├───────────────────────────────────────────────────────────────┤
- `foot:' │ Size of chunk, in bytes │
- └───────────────────────────────────────────────────────────────┘
-
- Larger chunks are kept in a form of bitwise digital trees (aka
- tries) keyed on chunksizes. Because MallocTreeChunks are only for
- free chunks greater than 256 bytes, their size doesn't impose any
- constraints on user chunk sizes. Each node looks like:
-
- chunk→ ┌───────────────────────────────────────────────────────────────┐
- │ Size of previous chunk │
- ├─────────────────────────────────────────────────────────────┬─┤
- `head:' │ Size of chunk, in bytes │P│
- mem→ ├─────────────────────────────────────────────────────────────┴─┤
- │ Forward pointer to next chunk of same size │
- ├───────────────────────────────────────────────────────────────┤
- │ Back pointer to previous chunk of same size │
- ├───────────────────────────────────────────────────────────────┤
- │ Pointer to left child (child[0]) │
- ├───────────────────────────────────────────────────────────────┤
- │ Pointer to right child (child[1]) │
- ├───────────────────────────────────────────────────────────────┤
- │ Pointer to parent │
- ├───────────────────────────────────────────────────────────────┤
- │ bin index of this chunk │
- ├───────────────────────────────────────────────────────────────┤
- │ Unused space .
- . │
- nextchunk→ ├───────────────────────────────────────────────────────────────┤
- `foot:' │ Size of chunk, in bytes │
- └───────────────────────────────────────────────────────────────┘
-
- Each tree holding treenodes is a tree of unique chunk sizes. Chunks
- of the same size are arranged in a circularly-linked list, with only
- the oldest chunk (the next to be used, in our FIFO ordering)
- actually in the tree. (Tree members are distinguished by a non-null
- parent pointer.) If a chunk with the same size an an existing node
- is inserted, it is linked off the existing node using pointers that
- work in the same way as fd/bk pointers of small chunks.
-
- Each tree contains a power of 2 sized range of chunk sizes (the
- smallest is 0x100 <= x < 0x180), which is is divided in half at each
- tree level, with the chunks in the smaller half of the range (0x100
- <= x < 0x140 for the top nose) in the left subtree and the larger
- half (0x140 <= x < 0x180) in the right subtree. This is, of course,
- done by inspecting individual bits.
-
- Using these rules, each node's left subtree contains all smaller
- sizes than its right subtree. However, the node at the root of each
- subtree has no particular ordering relationship to either. (The
- dividing line between the subtree sizes is based on trie relation.)
- If we remove the last chunk of a given size from the interior of the
- tree, we need to replace it with a leaf node. The tree ordering
- rules permit a node to be replaced by any leaf below it.
-
- The smallest chunk in a tree (a common operation in a best-fit
- allocator) can be found by walking a path to the leftmost leaf in
- the tree. Unlike a usual binary tree, where we follow left child
- pointers until we reach a null, here we follow the right child
- pointer any time the left one is null, until we reach a leaf with
- both child pointers null. The smallest chunk in the tree will be
- somewhere along that path.
-
- The worst case number of steps to add, find, or remove a node is
- bounded by the number of bits differentiating chunks within
- bins. Under current bin calculations, this ranges from 6 up to 21
- (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
- is of course much better.
-*/
-
-struct MallocTreeChunk {
- /* The first four fields must be compatible with MallocChunk */
- size_t prev_foot;
- size_t head;
- struct MallocTreeChunk *fd;
- struct MallocTreeChunk *bk;
- struct MallocTreeChunk *child[2];
- struct MallocTreeChunk *parent;
- bindex_t index;
-};
-
-typedef struct MallocTreeChunk tchunk;
-typedef struct MallocTreeChunk *tchunkptr;
-typedef struct MallocTreeChunk *tbinptr; /* The type of bins of trees */
-
-/* A little helper macro for trees */
-#define leftmost_child(t) ((t)->child[0] != 0 ? (t)->child[0] : (t)->child[1])
-
-/* ───────────────────────────── Segments ──────────────────────────────── */
-
-/*
- Each malloc space may include non-contiguous segments, held in a list
- headed by an embedded MallocSegment record representing the top-most
- space. Segments also include flags holding properties of the space.
- Large chunks that are directly allocated by mmap are not included in
- this list. They are instead independently created and destroyed
- without otherwise keeping track of them.
-
- Segment management mainly comes into play for spaces allocated by
- MMAP. Any call to MMAP might or might not return memory that is
- adjacent to an existing segment. MORECORE normally contiguously
- extends the current space, so this space is almost always adjacent,
- which is simpler and faster to deal with. (This is why MORECORE is
- used preferentially to MMAP when both are available -- see sys_alloc.)
- When allocating using MMAP, we don't use any of the hinting mechanisms
- (inconsistently) supported in various implementations of unix mmap, or
- distinguish reserving from committing memory. Instead, we just ask for
- space, and exploit contiguity when we get it. It is probably possible
- to do better than this on some systems, but no general scheme seems to
- be significantly better.
-
- Management entails a simpler variant of the consolidation scheme used
- for chunks to reduce fragmentation -- new adjacent memory is normally
- prepended or appended to an existing segment. However, there are
- limitations compared to chunk consolidation that mostly reflect the
- fact that segment processing is relatively infrequent (occurring only
- when getting memory from system) and that we don't expect to have huge
- numbers of segments:
-
- * Segments are not indexed, so traversal requires linear scans. (It
- would be possible to index these, but is not worth the extra
- overhead and complexity for most programs on most platforms.)
- * New segments are only appended to old ones when holding top-most
- memory; if they cannot be prepended to others, they are held in
- different segments.
-
- Except for the top-most segment of an mstate, each segment record is
- kept at the tail of its segment. Segments are added by pushing segment
- records onto the list headed by &mstate.seg for the containing mstate.
-
- Segment flags control allocation/merge/deallocation policies:
- * If EXTERN_BIT set, then we did not allocate this segment,
- and so should not try to deallocate or merge with others.
- (This currently holds only for the initial segment passed
- into create_mspace_with_base.)
- * If USE_MMAP_BIT set, the segment may be merged with
- other surrounding mmapped segments and trimmed/de-allocated
- using munmap.
- * If neither bit is set, then the segment was obtained using
- MORECORE so can be merged with surrounding MORECORE'd segments
- and deallocated/trimmed using MORECORE with negative arguments.
-*/
-
-struct MallocSegment {
- char *base; /* base address */
- size_t size; /* allocated size */
- struct MallocSegment *next; /* ptr to next segment */
- flag_t sflags; /* mmap and extern flag */
-};
-
-#define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT)
-#define is_extern_segment(S) ((S)->sflags & EXTERN_BIT)
-
-typedef struct MallocSegment msegment;
-typedef struct MallocSegment *msegmentptr;
-
-/* ──────────────────────────── MallocState ───────────────────────────── */
-
-/*
- A MallocState holds all of the bookkeeping for a space.
- The main fields are:
-
- Top
- The topmost chunk of the currently active segment. Its size is
- cached in topsize. The actual size of topmost space is
- topsize+TOP_FOOT_SIZE, which includes space reserved for adding
- fenceposts and segment records if necessary when getting more
- space from the system. The size at which to autotrim top is
- cached from mparams in trim_check, except that it is disabled if
- an autotrim fails.
-
- Designated victim (dv)
- This is the preferred chunk for servicing small requests that
- don't have exact fits. It is normally the chunk split off most
- recently to service another small request. Its size is cached in
- dvsize. The link fields of this chunk are not maintained since it
- is not kept in a bin.
-
- SmallBins
- An array of bin headers for free chunks. These bins hold chunks
- with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
- chunks of all the same size, spaced 8 bytes apart. To simplify
- use in double-linked lists, each bin header acts as a MallocChunk
- pointing to the real first node, if it exists (else pointing to
- itself). This avoids special-casing for headers. But to avoid
- waste, we allocate only the fd/bk pointers of bins, and then use
- repositioning tricks to treat these as the fields of a chunk.
-
- TreeBins
- Treebins are pointers to the roots of trees holding a range of
- sizes. There are 2 equally spaced treebins for each power of two
- from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything
- larger.
-
- Bin maps
- There is one bit map for small bins ("smallmap") and one for
- treebins ("treemap). Each bin sets its bit when non-empty, and
- clears the bit when empty. Bit operations are then used to avoid
- bin-by-bin searching -- nearly all "search" is done without ever
- looking at bins that won't be selected. The bit maps
- conservatively use 32 bits per map word, even if on 64bit system.
- For a good description of some of the bit-based techniques used
- here, see Henry S. Warren Jr's book "Hacker's Delight" (and
- supplement at http://hackersdelight.org/). Many of these are
- intended to reduce the branchiness of paths through malloc etc, as
- well as to reduce the number of memory locations read or written.
-
- Segments
- A list of segments headed by an embedded MallocSegment record
- representing the initial space.
-
- Address check support
- The least_addr field is the least address ever obtained from
- MORECORE or MMAP. Attempted frees and reallocs of any address less
- than this are trapped (unless INSECURE is defined).
-
- Magic tag
- A cross-check field that should always hold same value as mparams.magic.
-
- Max allowed footprint
- The maximum allowed bytes to allocate from system (zero means no limit)
-
- Flags
- Bits recording whether to use MMAP, locks, or contiguous MORECORE
-
- Statistics
- Each space keeps track of current and maximum system memory
- obtained via MORECORE or MMAP.
-
- Trim support
- Fields holding the amount of unused topmost memory that should trigger
- trimming, and a counter to force periodic scanning to release unused
- non-topmost segments.
-
- Locking
- If USE_LOCKS is defined, the "mutex" lock is acquired and released
- around every public call using this mspace.
-
- Extension support
- A void* pointer and a size_t field that can be used to help implement
- extensions to this malloc.
-*/
-
-struct MallocState {
- binmap_t smallmap;
- binmap_t treemap;
- size_t dvsize;
- size_t topsize;
- char *least_addr;
- mchunkptr dv;
- mchunkptr top;
- size_t trim_check;
- size_t release_checks;
- size_t magic;
- mchunkptr smallbins[(NSMALLBINS + 1) * 2];
- tbinptr treebins[NTREEBINS];
- size_t footprint;
- size_t max_footprint;
- size_t footprint_limit; /* zero means no limit */
- flag_t mflags;
- msegment seg;
- void *extp; /* Unused but available for extensions */
- size_t exts;
-};
-
-struct MallocStats {
- size_t maxfp;
- size_t fp;
- size_t used;
-};
-
-typedef struct MallocState *mstate;
-
-extern struct MallocState g_dlmalloc[1];
-
-/* ─────────────────────────────── Hooks ──────────────────────────────── */
-
-/*
-d = {}
-lines = open("log").read().split('\n')
-def bad(i):
- while i < len(lines):
- if lines[i].startswith(('BIRTH', 'DEATH')):
- break
- print(lines[i])
- i += 1
-for i, line in enumerate(lines):
- i += 1
- x = line.split()
- if len(x) != 2: continue
- if x[0] == 'DEATH':
- b = int(x[1], 16)
- if b in d:
- if d[b] < 0:
- print("OH NO", i, d[b])
- else:
- d[b] = -d[b]
- else:
- print("wut", i)
- elif x[0] == 'BIRTH':
- b = int(x[1], 16)
- if b in d:
- if d[b] > 0:
- print("bad malloc", i, d[b])
- d[b] = i
- else:
- d[b] = i
-for k,v in d.items():
- if v > 0:
- print("unfreed", v)
- bad(v)
-*/
-#define MALLOC_TRACE 0
-
-forceinline void *AddressBirthAction(void *p) {
-#if MALLOC_TRACE
- (dprintf)(2, "BIRTH %p\n", p);
- if (weaken(ShowBacktrace)) {
- weaken(ShowBacktrace)(2, 0);
- } else if (weaken(PrintBacktraceUsingSymbols) && weaken(GetSymbolTable)) {
- weaken(PrintBacktraceUsingSymbols)(2, __builtin_frame_address(0),
- weaken(GetSymbolTable)());
- }
-#endif
- return p;
-}
-
-forceinline void *AddressDeathAction(void *p) {
-#if MALLOC_TRACE
- (dprintf)(2, "DEATH %p\n", p);
- if (weaken(ShowBacktrace)) {
- weaken(ShowBacktrace)(2, 0);
- } else if (weaken(PrintBacktraceUsingSymbols) && weaken(GetSymbolTable)) {
- weaken(PrintBacktraceUsingSymbols)(2, __builtin_frame_address(0),
- weaken(GetSymbolTable)());
- }
-#endif
- return p;
-}
-
-/*
- PREACTION should be defined to return 0 on success, and nonzero on
- failure. If you are not using locking, you can redefine these to do
- anything you like.
-*/
-
-#define PREACTION(M) (0)
-#define POSTACTION(M)
-
-/*
- CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
- USAGE_ERROR_ACTION is triggered on detected bad frees and
- reallocs. The argument p is an address that might have triggered the
- fault. It is ignored by the two predefined actions, but might be
- useful in custom actions that try to help diagnose errors.
-*/
-
-#define CORRUPTION_ERROR_ACTION(m) MALLOC_ABORT
-#define USAGE_ERROR_ACTION(m, p) MALLOC_ABORT
-
-/* True if segment S holds address A */
-#define segment_holds(S, A) \
- ((char *)(A) >= S->base && (char *)(A) < S->base + S->size)
-
-/*
- TOP_FOOT_SIZE is padding at the end of a segment, including space
- that may be needed to place segment records and fenceposts when new
- noncontiguous segments are added.
-*/
-#define TOP_FOOT_SIZE \
- (align_offset(chunk2mem(0)) + pad_request(sizeof(struct MallocSegment)) + \
- MIN_CHUNK_SIZE)
-
-/* ───────────── Global MallocState and MallocParams ─────────────────── */
-
-/*
- MallocParams holds global properties, including those that can be
- dynamically set using mallopt. There is a single instance, mparams,
- initialized in init_mparams. Note that the non-zeroness of "magic"
- also serves as an initialization flag.
-*/
-
-struct MallocParams {
- size_t magic;
- size_t page_size;
- size_t granularity;
- size_t mmap_threshold;
- size_t trim_threshold;
- flag_t default_mflags;
-};
-
-extern struct MallocParams g_mparams;
-
-#define ensure_initialization() \
- /* we use a constructor [jart] */ \
- assert(g_mparams.magic != 0)
-/* (void)(g_mparams.magic != 0 || init_mparams()) */
-
-#define is_initialized(M) ((M)->top != 0)
-#define is_page_aligned(S) \
- (((size_t)(S) & (g_mparams.page_size - SIZE_T_ONE)) == 0)
-#define is_granularity_aligned(S) \
- (((size_t)(S) & (g_mparams.granularity - SIZE_T_ONE)) == 0)
-
-/* ────────────────────────── system alloc setup ───────────────────────── */
-
-/* Operations on mflags */
-
-#define use_lock(M) ((M)->mflags & USE_LOCK_BIT)
-#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT)
-#if USE_LOCKS
-#define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT)
-#else
-#define disable_lock(M)
-#endif
-
-#define use_mmap(M) ((M)->mflags & USE_MMAP_BIT)
-#define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT)
-#if HAVE_MMAP
-#define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT)
-#else
-#define disable_mmap(M)
-#endif
-
-#define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT)
-#define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT)
-
-#define set_lock(M, L) \
- ((M)->mflags = \
- (L) ? ((M)->mflags | USE_LOCK_BIT) : ((M)->mflags & ~USE_LOCK_BIT))
-
-/* page-align a size */
-#define page_align(S) \
- (((S) + (g_mparams.page_size - SIZE_T_ONE)) & \
- ~(g_mparams.page_size - SIZE_T_ONE))
-
-/* granularity-align a size */
-#define granularity_align(S) \
- (((S) + (g_mparams.granularity - SIZE_T_ONE)) & \
- ~(g_mparams.granularity - SIZE_T_ONE))
-
-#define mmap_align(s) granularity_align((size_t)(s))
-
-/* ──────────────────────── Operations on bin maps ─────────────────────── */
-
-#define idx2bit(i) ((binmap_t)(1) << (i))
-#define mark_smallmap(M, i) ((M)->smallmap |= idx2bit(i))
-#define clear_smallmap(M, i) ((M)->smallmap &= ~idx2bit(i))
-#define smallmap_is_marked(M, i) ((M)->smallmap & idx2bit(i))
-#define mark_treemap(M, i) ((M)->treemap |= idx2bit(i))
-#define clear_treemap(M, i) ((M)->treemap &= ~idx2bit(i))
-#define treemap_is_marked(M, i) ((M)->treemap & idx2bit(i))
-#define least_bit(x) ((x) & -(x))
-#define left_bits(x) ((x << 1) | -(x << 1))
-#define same_or_left_bits(x) ((x) | -(x))
-#define compute_bit2idx(X, I) \
- { \
- unsigned int J; \
- J = bsf(X); \
- I = (bindex_t)J; \
- }
-
-/* ──────────────────────────── Indexing Bins ──────────────────────────── */
-
-#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
-#define small_index(s) (bindex_t)((s) >> SMALLBIN_SHIFT)
-#define small_index2size(i) ((i) << SMALLBIN_SHIFT)
-#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))
-
-/* addressing by index. See above about smallbin repositioning */
-#define smallbin_at(M, i) ((sbinptr)((char *)&((M)->smallbins[(i) << 1])))
-#define treebin_at(M, i) (&((M)->treebins[i]))
-
-/* assign tree index for size S to variable I. */
-#define compute_tree_index(S, I) \
- { \
- unsigned int X = S >> TREEBIN_SHIFT; \
- if (X == 0) \
- I = 0; \
- else if (X > 0xFFFF) \
- I = NTREEBINS - 1; \
- else { \
- unsigned int K = \
- (unsigned)sizeof(X) * __CHAR_BIT__ - 1 - (unsigned)__builtin_clz(X); \
- I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1))); \
- } \
- }
-
-/* Bit representing maximum resolved size in a treebin at i */
-#define bit_for_tree_index(i) \
- (i == NTREEBINS - 1) ? (SIZE_T_BITSIZE - 1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
-
-/* Shift placing maximum resolved bit in a treebin at i as sign bit */
-#define leftshift_for_tree_index(i) \
- ((i == NTREEBINS - 1) \
- ? 0 \
- : ((SIZE_T_BITSIZE - SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
-
-/* The size of the smallest chunk held in bin with index i */
-#define minsize_for_tree_index(i) \
- ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \
- (((size_t)((i)&SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
-
-/* ─────────────────────── Runtime Check Support ───────────────────────── */
-
-/*
- For security, the main invariant is that malloc/free/etc never
- writes to a static address other than MallocState, unless static
- MallocState itself has been corrupted, which cannot occur via
- malloc (because of these checks). In essence this means that we
- believe all pointers, sizes, maps etc held in MallocState, but
- check all of those linked or offsetted from other embedded data
- structures. These checks are interspersed with main code in a way
- that tends to minimize their run-time cost.
-
- When FOOTERS is defined, in addition to range checking, we also
- verify footer fields of inuse chunks, which can be used guarantee
- that the mstate controlling malloc/free is intact. This is a
- streamlined version of the approach described by William Robertson
- et al in "Run-time Detection of Heap-based Overflows" LISA'03
- http://www.usenix.org/events/lisa03/tech/robertson.html The footer
- of an inuse chunk holds the xor of its mstate and a random seed,
- that is checked upon calls to free() and realloc(). This is
- (probabalistically) unguessable from outside the program, but can be
- computed by any code successfully malloc'ing any chunk, so does not
- itself provide protection against code that has already broken
- security through some other means. Unlike Robertson et al, we
- always dynamically check addresses of all offset chunks (previous,
- next, etc). This turns out to be cheaper than relying on hashes.
-*/
-
-#if !IsTrustworthy()
-/* Check if address a is at least as high as any from MORECORE or MMAP */
-#define ok_address(M, a) ((char *)(a) >= (M)->least_addr)
-/* Check if address of next chunk n is higher than base chunk p */
-#define ok_next(p, n) ((char *)(p) < (char *)(n))
-/* Check if p has inuse status */
-#define ok_inuse(p) is_inuse(p)
-/* Check if p has its pinuse bit on */
-#define ok_pinuse(p) pinuse(p)
-
-#else /* !IsTrustworthy() */
-#define ok_address(M, a) (1)
-#define ok_next(b, n) (1)
-#define ok_inuse(p) (1)
-#define ok_pinuse(p) (1)
-#endif /* !IsTrustworthy() */
-
-#if (FOOTERS && !IsTrustworthy())
-/* Check if (alleged) mstate m has expected magic field */
-#define ok_magic(M) \
- ((uintptr_t)(M) <= 0x00007ffffffffffful && (M)->magic == g_mparams.magic)
-#else /* (FOOTERS && !IsTrustworthy()) */
-#define ok_magic(M) (1)
-#endif /* (FOOTERS && !IsTrustworthy()) */
-
-/* In gcc, use __builtin_expect to minimize impact of checks */
-#if !IsTrustworthy()
-#if defined(__GNUC__) && __GNUC__ >= 3
-#define RTCHECK(e) __builtin_expect(e, 1)
-#else /* GNUC */
-#define RTCHECK(e) (e)
-#endif /* GNUC */
-#else /* !IsTrustworthy() */
-#define RTCHECK(e) (1)
-#endif /* !IsTrustworthy() */
-
-/* macros to set up inuse chunks with or without footers */
-
-#if !FOOTERS
-
-#define mark_inuse_foot(M, p, s)
-
-/* Macros for setting head/foot of non-mmapped chunks */
-
-/* Set cinuse bit and pinuse bit of next chunk */
-#define set_inuse(M, p, s) \
- ((p)->head = (((p)->head & PINUSE_BIT) | s | CINUSE_BIT), \
- ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT)
-
-/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
-#define set_inuse_and_pinuse(M, p, s) \
- ((p)->head = (s | PINUSE_BIT | CINUSE_BIT), \
- ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT)
-
-/* Set size, cinuse and pinuse bit of this chunk */
-#define set_size_and_pinuse_of_inuse_chunk(M, p, s) \
- ((p)->head = (s | PINUSE_BIT | CINUSE_BIT))
-
-#else /* FOOTERS */
-
-/* Set foot of inuse chunk to be xor of mstate and seed */
-#define mark_inuse_foot(M, p, s) \
- (((mchunkptr)((char *)(p) + (s)))->prev_foot = \
- ((size_t)(M) ^ g_mparams.magic))
-
-#define get_mstate_for(p) \
- ((mstate)(((mchunkptr)((char *)(p) + (chunksize(p))))->prev_foot ^ \
- g_mparams.magic))
-
-#define set_inuse(M, p, s) \
- ((p)->head = (((p)->head & PINUSE_BIT) | s | CINUSE_BIT), \
- (((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT), \
- mark_inuse_foot(M, p, s))
-
-#define set_inuse_and_pinuse(M, p, s) \
- ((p)->head = (s | PINUSE_BIT | CINUSE_BIT), \
- (((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT), \
- mark_inuse_foot(M, p, s))
-
-#define set_size_and_pinuse_of_inuse_chunk(M, p, s) \
- ((p)->head = (s | PINUSE_BIT | CINUSE_BIT), mark_inuse_foot(M, p, s))
-
-#endif /* !FOOTERS */
-
-/* Return segment holding given address */
-forceinline msegmentptr segment_holding(mstate m, char *addr) {
- msegmentptr sp = &m->seg;
- for (;;) {
- if (addr >= sp->base && addr < sp->base + sp->size) return sp;
- if ((sp = sp->next) == 0) return 0;
- }
-}
-
-/* ─────────────────────── Operations on smallbins ─────────────────────── */
-
-/*
- Various forms of linking and unlinking are defined as macros. Even
- the ones for trees, which are very long but have very short typical
- paths. This is ugly but reduces reliance on inlining support of
- compilers.
-*/
-
-/* Link a free chunk into a smallbin */
-#define insert_small_chunk(M, P, S) \
- { \
- bindex_t I = small_index(S); \
- mchunkptr B = smallbin_at(M, I); \
- mchunkptr F = B; \
- assert(S >= MIN_CHUNK_SIZE); \
- if (!smallmap_is_marked(M, I)) \
- mark_smallmap(M, I); \
- else if (RTCHECK(ok_address(M, B->fd))) \
- F = B->fd; \
- else { \
- CORRUPTION_ERROR_ACTION(M); \
- } \
- B->fd = P; \
- F->bk = P; \
- P->fd = F; \
- P->bk = B; \
- }
-
-/* Unlink a chunk from a smallbin */
-#define unlink_small_chunk(M, P, S) \
- { \
- mchunkptr F = P->fd; \
- mchunkptr B = P->bk; \
- bindex_t I = small_index(S); \
- assert(P != B); \
- assert(P != F); \
- assert(chunksize(P) == small_index2size(I)); \
- if (RTCHECK(F == smallbin_at(M, I) || (ok_address(M, F) && F->bk == P))) { \
- if (B == F) { \
- clear_smallmap(M, I); \
- } else if (RTCHECK(B == smallbin_at(M, I) || \
- (ok_address(M, B) && B->fd == P))) { \
- F->bk = B; \
- B->fd = F; \
- } else { \
- CORRUPTION_ERROR_ACTION(M); \
- } \
- } else { \
- CORRUPTION_ERROR_ACTION(M); \
- } \
- }
-
-/* Unlink the first chunk from a smallbin */
-#define unlink_first_small_chunk(M, B, P, I) \
- { \
- mchunkptr F = P->fd; \
- assert(P != B); \
- assert(P != F); \
- assert(chunksize(P) == small_index2size(I)); \
- if (B == F) { \
- clear_smallmap(M, I); \
- } else if (RTCHECK(ok_address(M, F) && F->bk == P)) { \
- F->bk = B; \
- B->fd = F; \
- } else { \
- CORRUPTION_ERROR_ACTION(M); \
- } \
- }
-
-/* Replace dv node, binning the old one */
-/* Used only when dvsize known to be small */
-#define replace_dv(M, P, S) \
- { \
- size_t DVS = M->dvsize; \
- assert(is_small(DVS)); \
- if (DVS != 0) { \
- mchunkptr DV = M->dv; \
- insert_small_chunk(M, DV, DVS); \
- } \
- M->dvsize = S; \
- M->dv = P; \
- }
-
-/* ───────────────────────── Operations on trees ───────────────────────── */
-
-/* Insert chunk into tree */
-#define insert_large_chunk(M, X, S) \
- { \
- tbinptr *H; \
- bindex_t I; \
- compute_tree_index(S, I); \
- H = treebin_at(M, I); \
- X->index = I; \
- X->child[0] = X->child[1] = 0; \
- if (!treemap_is_marked(M, I)) { \
- mark_treemap(M, I); \
- *H = X; \
- X->parent = (tchunkptr)H; \
- X->fd = X->bk = X; \
- } else { \
- tchunkptr T = *H; \
- size_t K = S << leftshift_for_tree_index(I); \
- for (;;) { \
- if (chunksize(T) != S) { \
- tchunkptr *C = \
- &(T->child[(K >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1]); \
- K <<= 1; \
- if (*C != 0) \
- T = *C; \
- else if (RTCHECK(ok_address(M, C))) { \
- *C = X; \
- X->parent = T; \
- X->fd = X->bk = X; \
- break; \
- } else { \
- CORRUPTION_ERROR_ACTION(M); \
- break; \
- } \
- } else { \
- tchunkptr F = T->fd; \
- if (RTCHECK(ok_address(M, T) && ok_address(M, F))) { \
- T->fd = F->bk = X; \
- X->fd = F; \
- X->bk = T; \
- X->parent = 0; \
- break; \
- } else { \
- CORRUPTION_ERROR_ACTION(M); \
- break; \
- } \
- } \
- } \
- } \
- }
-
-/*
- Unlink steps:
-
- 1. If x is a chained node, unlink it from its same-sized fd/bk links
- and choose its bk node as its replacement.
- 2. If x was the last node of its size, but not a leaf node, it must
- be replaced with a leaf node (not merely one with an open left or
- right), to make sure that lefts and rights of descendents
- correspond properly to bit masks. We use the rightmost descendent
- of x. We could use any other leaf, but this is easy to locate and
- tends to counteract removal of leftmosts elsewhere, and so keeps
- paths shorter than minimally guaranteed. This doesn't loop much
- because on average a node in a tree is near the bottom.
- 3. If x is the base of a chain (i.e., has parent links) relink
- x's parent and children to x's replacement (or null if none).
-*/
-
-#define unlink_large_chunk(M, X) \
- { \
- tchunkptr XP = X->parent; \
- tchunkptr R; \
- if (X->bk != X) { \
- tchunkptr F = X->fd; \
- R = X->bk; \
- if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) { \
- F->bk = R; \
- R->fd = F; \
- } else { \
- CORRUPTION_ERROR_ACTION(M); \
- } \
- } else { \
- tchunkptr *RP; \
- if (((R = *(RP = &(X->child[1]))) != 0) || \
- ((R = *(RP = &(X->child[0]))) != 0)) { \
- tchunkptr *CP; \
- while ((*(CP = &(R->child[1])) != 0) || \
- (*(CP = &(R->child[0])) != 0)) { \
- R = *(RP = CP); \
- } \
- if (RTCHECK(ok_address(M, RP))) \
- *RP = 0; \
- else { \
- CORRUPTION_ERROR_ACTION(M); \
- } \
- } \
- } \
- if (XP != 0) { \
- tbinptr *H = treebin_at(M, X->index); \
- if (X == *H) { \
- if ((*H = R) == 0) clear_treemap(M, X->index); \
- } else if (RTCHECK(ok_address(M, XP))) { \
- if (XP->child[0] == X) \
- XP->child[0] = R; \
- else \
- XP->child[1] = R; \
- } else \
- CORRUPTION_ERROR_ACTION(M); \
- if (R != 0) { \
- if (RTCHECK(ok_address(M, R))) { \
- tchunkptr C0, C1; \
- R->parent = XP; \
- if ((C0 = X->child[0]) != 0) { \
- if (RTCHECK(ok_address(M, C0))) { \
- R->child[0] = C0; \
- C0->parent = R; \
- } else \
- CORRUPTION_ERROR_ACTION(M); \
- } \
- if ((C1 = X->child[1]) != 0) { \
- if (RTCHECK(ok_address(M, C1))) { \
- R->child[1] = C1; \
- C1->parent = R; \
- } else \
- CORRUPTION_ERROR_ACTION(M); \
- } \
- } else \
- CORRUPTION_ERROR_ACTION(M); \
- } \
- } \
- }
-
-/* Relays to large vs small bin operations */
-#define insert_chunk(M, P, S) \
- if (is_small(S)) insert_small_chunk(M, P, S) else { \
- tchunkptr TP = (tchunkptr)(P); \
- insert_large_chunk(M, TP, S); \
- }
-#define unlink_chunk(M, P, S) \
- if (is_small(S)) unlink_small_chunk(M, P, S) else { \
- tchunkptr TP = (tchunkptr)(P); \
- unlink_large_chunk(M, TP); \
- }
-
-#ifndef MORECORE_CANNOT_TRIM
-#define should_trim(M, s) ((s) > (M)->trim_check)
-#else /* MORECORE_CANNOT_TRIM */
-#define should_trim(M, s) (0)
-#endif /* MORECORE_CANNOT_TRIM */
-
-/*
- TOP_FOOT_SIZE is padding at the end of a segment, including space
- that may be needed to place segment records and fenceposts when new
- noncontiguous segments are added.
-*/
-#define TOP_FOOT_SIZE \
- (align_offset(chunk2mem(0)) + pad_request(sizeof(struct MallocSegment)) + \
- MIN_CHUNK_SIZE)
-
-/* ────────────────────────── Debugging setup ──────────────────────────── */
-
-#ifdef DEBUG
-#define check_free_chunk(M, P) do_check_free_chunk(M, P)
-#define check_inuse_chunk(M, P) do_check_inuse_chunk(M, P)
-#define check_top_chunk(M, P) do_check_top_chunk(M, P)
-#define check_malloced_chunk(M, P, N) do_check_malloced_chunk(M, P, N)
-#define check_mmapped_chunk(M, P) do_check_mmapped_chunk(M, P)
-#define check_malloc_state(M) do_check_malloc_state(M)
-#else
-#define check_free_chunk(M, P)
-#define check_inuse_chunk(M, P)
-#define check_malloced_chunk(M, P, N)
-#define check_mmapped_chunk(M, P)
-#define check_malloc_state(M)
-#define check_top_chunk(M, P)
-#endif /* DEBUG */
-
-void do_check_free_chunk(mstate, mchunkptr) hidden;
-void do_check_inuse_chunk(mstate, mchunkptr) hidden;
-void do_check_top_chunk(mstate, mchunkptr) hidden;
-void do_check_malloced_chunk(mstate, void *, size_t) hidden;
-void do_check_mmapped_chunk(mstate, mchunkptr) hidden;
-void do_check_malloc_state(mstate) hidden;
-
-/* ─────────────────────────── prototypes ──────────────────────────────── */
-
-void *dlmalloc(size_t) hidden attributeallocsize((1)) mallocesque;
-void *dlcalloc(size_t, size_t) hidden attributeallocsize((1, 2)) mallocesque;
-void dlfree(void *) dontthrow nocallback hidden;
-void *dlmemalign_impl(mstate, size_t, size_t) hidden;
-void *dlrealloc(void *, size_t) hidden reallocesque;
-void *dlrealloc_in_place(void *, size_t) hidden reallocesque;
-void *dlmemalign(size_t, size_t) hidden attributeallocalign((1))
- attributeallocsize((2)) returnspointerwithnoaliases libcesque dontdiscard;
-int dlmalloc_trim(size_t) hidden;
-size_t dlmalloc_usable_size(const void *) hidden;
-void **dlindependent_calloc(size_t, size_t, void *[]) hidden;
-void **dlindependent_comalloc(size_t, size_t[], void *[]) hidden;
-struct MallocStats dlmalloc_stats(mstate) hidden;
-int dlmalloc_sys_trim(mstate, size_t) hidden;
-void dlmalloc_dispose_chunk(mstate, mchunkptr, size_t) hidden;
-mchunkptr dlmalloc_try_realloc_chunk(mstate, mchunkptr, size_t, int) hidden;
-size_t dlbulk_free(void *[], size_t) hidden;
-
-COSMOPOLITAN_C_END_
-#endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */
-#endif /* !ANSI */
-#endif /* COSMOPOLITAN_LIBC_MEM_DLMALLOC_H_ */
diff --git a/third_party/dlmalloc/dlmalloc.mk b/third_party/dlmalloc/dlmalloc.mk
index 2e0d6662c..e12187fb6 100644
--- a/third_party/dlmalloc/dlmalloc.mk
+++ b/third_party/dlmalloc/dlmalloc.mk
@@ -31,9 +31,11 @@ THIRD_PARTY_DLMALLOC_A_DIRECTDEPS = \
LIBC_NEXGEN32E \
LIBC_RUNTIME \
LIBC_STR \
+ LIBC_RAND \
LIBC_STUBS \
LIBC_SYSV \
- LIBC_SYSV_CALLS
+ LIBC_SYSV_CALLS \
+ THIRD_PARTY_COMPILER_RT
THIRD_PARTY_DLMALLOC_A_DEPS := \
$(call uniq,$(foreach x,$(THIRD_PARTY_DLMALLOC_A_DIRECTDEPS),$($(x))))
@@ -50,13 +52,8 @@ $(THIRD_PARTY_DLMALLOC_A).pkg: \
$(THIRD_PARTY_DLMALLOC_A_OBJS): \
OVERRIDE_CFLAGS += \
$(NO_MAGIC) \
- -fno-sanitize=address
-
-ifneq ($(MODE),dbg)
-$(THIRD_PARTY_DLMALLOC_A_OBJS): \
- OVERRIDE_CFLAGS += \
- -DNDEBUG
-endif
+ -ffunction-sections \
+ -fdata-sections
THIRD_PARTY_DLMALLOC_LIBS = $(foreach x,$(THIRD_PARTY_DLMALLOC_ARTIFACTS),$($(x)))
THIRD_PARTY_DLMALLOC_SRCS = $(foreach x,$(THIRD_PARTY_DLMALLOC_ARTIFACTS),$($(x)_SRCS))
diff --git a/third_party/dlmalloc/dlmalloc_stats.c b/third_party/dlmalloc/dlmalloc_stats.c
deleted file mode 100644
index 2923dac86..000000000
--- a/third_party/dlmalloc/dlmalloc_stats.c
+++ /dev/null
@@ -1,48 +0,0 @@
-#include "libc/mem/mem.h"
-#include "libc/str/str.h"
-#include "third_party/dlmalloc/dlmalloc.internal.h"
-
-/**
- * Prints on stderr the amount of space obtained from the system (both
- * via sbrk and mmap), the maximum amount (which may be more than
- * current if malloc_trim and/or munmap got called), and the current
- * number of bytes allocated via malloc (or realloc, etc) but not yet
- * freed. Note that this is the number of bytes allocated, not the
- * number requested. It will be larger than the number requested because
- * of alignment and bookkeeping overhead. Because it includes alignment
- * wastage as being in use, this figure may be greater than zero even
- * when no user-level chunks are allocated.
- *
- * The reported current and maximum system memory can be inaccurate if a
- * program makes other calls to system memory allocation functions
- * (normally sbrk) outside of malloc.
- *
- * malloc_stats prints only the most commonly interesting statistics.
- * More information can be obtained by calling mallinfo.
- */
-struct MallocStats dlmalloc_stats(mstate m) {
- struct MallocChunk *q;
- struct MallocStats res;
- bzero(&res, sizeof(res));
- ensure_initialization();
- if (!PREACTION(m)) {
- check_malloc_state(m);
- if (is_initialized(m)) {
- msegmentptr s = &m->seg;
- res.maxfp = m->max_footprint;
- res.fp = m->footprint;
- res.used = res.fp - (m->topsize + TOP_FOOT_SIZE);
- while (s != 0) {
- q = align_as_chunk(s->base);
- while (segment_holds(s, q) && q != m->top &&
- q->head != FENCEPOST_HEAD) {
- if (!is_inuse(q)) res.used -= chunksize(q);
- q = next_chunk(q);
- }
- s = s->next;
- }
- }
- POSTACTION(m); /* drop lock */
- }
- return res;
-}
diff --git a/third_party/dlmalloc/dlmalloc_try_realloc_chunk.c b/third_party/dlmalloc/dlmalloc_try_realloc_chunk.c
deleted file mode 100644
index ac9c853ea..000000000
--- a/third_party/dlmalloc/dlmalloc_try_realloc_chunk.c
+++ /dev/null
@@ -1,110 +0,0 @@
-#include "libc/errno.h"
-#include "third_party/dlmalloc/dlmalloc.internal.h"
-
-/* Realloc using mmap */
-mchunkptr dlmalloc_mmap_resize(mstate m, mchunkptr oldp, size_t nb, int flags) {
- size_t oldsize = chunksize(oldp);
- if (is_small(nb)) return 0; /* Can't shrink mmap regions below small size */
- /* Keep old chunk if big enough but not too big */
- if (oldsize >= nb + SIZE_T_SIZE &&
- (oldsize - nb) <= (g_mparams.granularity << 1)) {
- return oldp;
- } else {
- size_t offset = oldp->prev_foot;
- size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
- size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
- int err = errno;
- char *cp = mremap((char *)oldp - offset, oldmmsize, newmmsize, flags, 0);
- errno = err;
- if (cp != CMFAIL) {
- mchunkptr newp = (mchunkptr)(cp + offset);
- size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
- newp->head = psize;
- mark_inuse_foot(m, newp, psize);
- chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
- chunk_plus_offset(newp, psize + SIZE_T_SIZE)->head = 0;
- if (cp < m->least_addr) m->least_addr = cp;
- if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint) {
- m->max_footprint = m->footprint;
- }
- check_mmapped_chunk(m, newp);
- return newp;
- }
- }
- return 0;
-}
-
-/* Try to realloc; only in-place unless can_move true */
-mchunkptr dlmalloc_try_realloc_chunk(mstate m, mchunkptr p, size_t nb,
- int can_move) {
- mchunkptr newp = 0;
- size_t oldsize = chunksize(p);
- mchunkptr next = chunk_plus_offset(p, oldsize);
- if (RTCHECK(ok_address(m, p) && ok_inuse(p) && ok_next(p, next) &&
- ok_pinuse(next))) {
- if (!is_mmapped(p)) {
- if (oldsize >= nb) { /* already big enough */
- size_t rsize = oldsize - nb;
- if (rsize >= MIN_CHUNK_SIZE) { /* split off remainder */
- mchunkptr r = chunk_plus_offset(p, nb);
- set_inuse(m, p, nb);
- set_inuse(m, r, rsize);
- dlmalloc_dispose_chunk(m, r, rsize);
- }
- newp = p;
- } else if (next == m->top) { /* extend into top */
- if (oldsize + m->topsize > nb) {
- size_t newsize = oldsize + m->topsize;
- size_t newtopsize = newsize - nb;
- mchunkptr newtop = chunk_plus_offset(p, nb);
- set_inuse(m, p, nb);
- newtop->head = newtopsize | PINUSE_BIT;
- m->top = newtop;
- m->topsize = newtopsize;
- newp = p;
- }
- } else if (next == m->dv) { /* extend into dv */
- size_t dvs = m->dvsize;
- if (oldsize + dvs >= nb) {
- size_t dsize = oldsize + dvs - nb;
- if (dsize >= MIN_CHUNK_SIZE) {
- mchunkptr r = chunk_plus_offset(p, nb);
- mchunkptr n = chunk_plus_offset(r, dsize);
- set_inuse(m, p, nb);
- set_size_and_pinuse_of_free_chunk(r, dsize);
- clear_pinuse(n);
- m->dvsize = dsize;
- m->dv = r;
- } else { /* exhaust dv */
- size_t newsize = oldsize + dvs;
- set_inuse(m, p, newsize);
- m->dvsize = 0;
- m->dv = 0;
- }
- newp = p;
- }
- } else if (!cinuse(next)) { /* extend into next free chunk */
- size_t nextsize = chunksize(next);
- if (oldsize + nextsize >= nb) {
- size_t rsize = oldsize + nextsize - nb;
- unlink_chunk(m, next, nextsize);
- if (rsize < MIN_CHUNK_SIZE) {
- size_t newsize = oldsize + nextsize;
- set_inuse(m, p, newsize);
- } else {
- mchunkptr r = chunk_plus_offset(p, nb);
- set_inuse(m, p, nb);
- set_inuse(m, r, rsize);
- dlmalloc_dispose_chunk(m, r, rsize);
- }
- newp = p;
- }
- }
- } else {
- newp = dlmalloc_mmap_resize(m, p, nb, can_move);
- }
- } else {
- USAGE_ERROR_ACTION(m, chunk2mem(p));
- }
- return newp;
-}
diff --git a/third_party/dlmalloc/dlrealloc.c b/third_party/dlmalloc/dlrealloc.c
deleted file mode 100644
index 22f649fcf..000000000
--- a/third_party/dlmalloc/dlrealloc.c
+++ /dev/null
@@ -1,47 +0,0 @@
-#include "libc/bits/likely.h"
-#include "libc/str/str.h"
-#include "libc/sysv/errfuns.h"
-#include "third_party/dlmalloc/dlmalloc.internal.h"
-
-void *dlrealloc(void *oldmem, size_t bytes) {
- void *mem = 0;
- size_t oc, nb;
- struct MallocState *m;
- struct MallocChunk *oldp, *newp;
- if (oldmem) {
- if (LIKELY(bytes < MAX_REQUEST)) {
- if (bytes) {
- nb = request2size(bytes);
- oldp = mem2chunk(oldmem);
-#if !FOOTERS
- m = g_dlmalloc;
-#else /* FOOTERS */
- m = get_mstate_for(oldp);
- if (UNLIKELY(!ok_magic(m))) {
- USAGE_ERROR_ACTION(m, oldmem);
- return 0;
- }
-#endif /* FOOTERS */
- if (!PREACTION(m)) {
- newp = dlmalloc_try_realloc_chunk(m, oldp, nb, 1);
- POSTACTION(m);
- if (newp) {
- check_inuse_chunk(m, newp);
- mem = chunk2mem(newp);
- } else if ((mem = dlmalloc(bytes))) {
- oc = chunksize(oldp) - overhead_for(oldp);
- memcpy(mem, oldmem, (oc < bytes) ? oc : bytes);
- dlfree(oldmem);
- }
- }
- } else {
- dlfree(oldmem);
- }
- } else {
- enomem();
- }
- } else {
- mem = dlmalloc(bytes);
- }
- return mem;
-}
diff --git a/third_party/dlmalloc/dlrealloc_in_place.c b/third_party/dlmalloc/dlrealloc_in_place.c
deleted file mode 100644
index ab9bbc9ba..000000000
--- a/third_party/dlmalloc/dlrealloc_in_place.c
+++ /dev/null
@@ -1,33 +0,0 @@
-#include "libc/mem/mem.h"
-#include "libc/sysv/errfuns.h"
-#include "third_party/dlmalloc/dlmalloc.internal.h"
-
-void *dlrealloc_in_place(void *oldmem, size_t bytes) {
- void *mem = 0;
- if (oldmem != 0) {
- if (bytes >= MAX_REQUEST) {
- enomem();
- } else {
- size_t nb = request2size(bytes);
- mchunkptr oldp = mem2chunk(oldmem);
-#if !FOOTERS
- mstate m = g_dlmalloc;
-#else /* FOOTERS */
- mstate m = get_mstate_for(oldp);
- if (!ok_magic(m)) {
- USAGE_ERROR_ACTION(m, oldmem);
- return 0;
- }
-#endif /* FOOTERS */
- if (!PREACTION(m)) {
- mchunkptr newp = dlmalloc_try_realloc_chunk(m, oldp, nb, 0);
- POSTACTION(m);
- if (newp == oldp) {
- check_inuse_chunk(m, newp);
- mem = oldmem;
- }
- }
- }
- }
- return mem;
-}
diff --git a/third_party/dlmalloc/mallinfo.c b/third_party/dlmalloc/mallinfo.c
deleted file mode 100644
index b696a5055..000000000
--- a/third_party/dlmalloc/mallinfo.c
+++ /dev/null
@@ -1,69 +0,0 @@
-#include "libc/mem/mem.h"
-#include "third_party/dlmalloc/dlmalloc.internal.h"
-
-/**
- * Returns (by copy) a struct containing various summary statistics:
- *
- * - arena: current total non-mmapped bytes allocated from system
- *
- * - ordblks: the number of free chunks
- *
- * - smblks: always zero.
- *
- * - hblks: current number of mmapped regions
- *
- * - hblkhd: total bytes held in mmapped regions
- *
- * - usmblks: the maximum total allocated space. This will be greater
- * than current total if trimming has occurred.
- *
- * - fsmblks: always zero
- *
- * - uordblks: current total allocated space (normal or mmapped)
- *
- * - fordblks: total free space
- *
- * - keepcost: the maximum number of bytes that could ideally be
- * released back to system via malloc_trim. ("ideally" means that it
- * ignores page restrictions etc.)
- *
- * Because these fields are ints, but internal bookkeeping may
- * be kept as longs, the reported values may wrap around zero and
- * thus be inaccurate.
- */
-struct mallinfo mallinfo(void) {
- struct mallinfo nm = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
- ensure_initialization();
- if (!PREACTION(g_dlmalloc)) {
- check_malloc_state(g_dlmalloc);
- if (is_initialized(g_dlmalloc)) {
- size_t nfree = SIZE_T_ONE; /* top always free */
- size_t mfree = g_dlmalloc->topsize + TOP_FOOT_SIZE;
- size_t sum = mfree;
- msegmentptr s = &g_dlmalloc->seg;
- while (s != 0) {
- mchunkptr q = align_as_chunk(s->base);
- while (segment_holds(s, q) && q != g_dlmalloc->top &&
- q->head != FENCEPOST_HEAD) {
- size_t sz = chunksize(q);
- sum += sz;
- if (!is_inuse(q)) {
- mfree += sz;
- ++nfree;
- }
- q = next_chunk(q);
- }
- s = s->next;
- }
- nm.arena = sum;
- nm.ordblks = nfree;
- nm.hblkhd = g_dlmalloc->footprint - sum;
- nm.usmblks = g_dlmalloc->max_footprint;
- nm.uordblks = g_dlmalloc->footprint - mfree;
- nm.fordblks = mfree;
- nm.keepcost = g_dlmalloc->topsize;
- }
- POSTACTION(g_dlmalloc);
- }
- return nm;
-}
diff --git a/third_party/dlmalloc/malloc_footprint.c b/third_party/dlmalloc/malloc_footprint.c
deleted file mode 100644
index 818f8cc7e..000000000
--- a/third_party/dlmalloc/malloc_footprint.c
+++ /dev/null
@@ -1,14 +0,0 @@
-#include "libc/mem/mem.h"
-#include "third_party/dlmalloc/dlmalloc.internal.h"
-
-/**
- * Returns the number of bytes obtained from the system. The total
- * number of bytes allocated by malloc, realloc etc., is less than this
- * value. Unlike mallinfo, this function returns only a precomputed
- * result, so can be called frequently to monitor memory consumption.
- * Even if locks are otherwise defined, this function does not use them,
- * so results might not be up to date.
- */
-size_t malloc_footprint(void) {
- return g_dlmalloc->footprint;
-}
diff --git a/third_party/dlmalloc/malloc_footprint_limit.c b/third_party/dlmalloc/malloc_footprint_limit.c
deleted file mode 100644
index 84cd9bc4e..000000000
--- a/third_party/dlmalloc/malloc_footprint_limit.c
+++ /dev/null
@@ -1,15 +0,0 @@
-#include "libc/limits.h"
-#include "libc/mem/mem.h"
-#include "third_party/dlmalloc/dlmalloc.internal.h"
-
-/**
- * Returns the number of bytes that the heap is allowed to obtain from
- * the system, returning the last value returned by
- * malloc_set_footprint_limit, or the maximum size_t value if never set.
- * The returned value reflects a permission. There is no guarantee that
- * this number of bytes can actually be obtained from the system.
- */
-size_t malloc_footprint_limit(void) {
- size_t maf = g_dlmalloc->footprint_limit;
- return maf == 0 ? SIZE_MAX : maf;
-}
diff --git a/third_party/dlmalloc/malloc_inspect_all.c b/third_party/dlmalloc/malloc_inspect_all.c
deleted file mode 100644
index f1fa6e938..000000000
--- a/third_party/dlmalloc/malloc_inspect_all.c
+++ /dev/null
@@ -1,72 +0,0 @@
-#include "libc/mem/mem.h"
-#include "third_party/dlmalloc/dlmalloc.internal.h"
-
-static void internal_inspect_all(mstate m,
- void (*handler)(void *start, void *end,
- size_t used_bytes,
- void *callback_arg),
- void *arg) {
- if (is_initialized(m)) {
- mchunkptr top = m->top;
- msegmentptr s;
- for (s = &m->seg; s != 0; s = s->next) {
- mchunkptr q = align_as_chunk(s->base);
- while (segment_holds(s, q) && q->head != FENCEPOST_HEAD) {
- mchunkptr next = next_chunk(q);
- size_t sz = chunksize(q);
- size_t used;
- void *start;
- if (is_inuse(q)) {
- used = sz - CHUNK_OVERHEAD; /* must not be mmapped */
- start = chunk2mem(q);
- } else {
- used = 0;
- if (is_small(sz)) { /* offset by possible bookkeeping */
- start = (void *)((char *)q + sizeof(struct MallocChunk));
- } else {
- start = (void *)((char *)q + sizeof(struct MallocTreeChunk));
- }
- }
- if (start < (void *)next) { /* skip if all space is bookkeeping */
- handler(start, next, used, arg);
- }
- if (q == top) break;
- q = next;
- }
- }
- }
-}
-
-/**
- * Traverses the heap and calls the given handler for each managed
- * region, skipping all bytes that are (or may be) used for bookkeeping
- * purposes. Traversal does not include include chunks that have been
- * directly memory mapped. Each reported region begins at the start
- * address, and continues up to but not including the end address. The
- * first used_bytes of the region contain allocated data. If
- * used_bytes is zero, the region is unallocated. The handler is
- * invoked with the given callback argument. If locks are defined, they
- * are held during the entire traversal. It is a bad idea to invoke
- * other malloc functions from within the handler.
- *
- * For example, to count the number of in-use chunks with size greater
- * than 1000, you could write:
- *
- * static int count = 0;
- * void count_chunks(void* start, void* end, size_t used, void* arg) {
- * if (used >= 1000) ++count;
- * }
- *
- * then,
- *
- * malloc_inspect_all(count_chunks, NULL);
- */
-void malloc_inspect_all(void (*handler)(void *start, void *end,
- size_t used_bytes, void *callback_arg),
- void *arg) {
- ensure_initialization();
- if (!PREACTION(g_dlmalloc)) {
- internal_inspect_all(g_dlmalloc, handler, arg);
- POSTACTION(g_dlmalloc);
- }
-}
diff --git a/third_party/dlmalloc/malloc_max_footprint.c b/third_party/dlmalloc/malloc_max_footprint.c
deleted file mode 100644
index 301b2b989..000000000
--- a/third_party/dlmalloc/malloc_max_footprint.c
+++ /dev/null
@@ -1,16 +0,0 @@
-#include "libc/mem/mem.h"
-#include "third_party/dlmalloc/dlmalloc.internal.h"
-
-/**
- * Returns the maximum number of bytes obtained from the system. This
- * value will be greater than current footprint if deallocated space has
- * been reclaimed by the system. The peak number of bytes allocated by
- * malloc, realloc etc., is less than this value. Unlike mallinfo, this
- * function returns only a precomputed result, so can be called
- * frequently to monitor memory consumption. Even if locks are otherwise
- * defined, this function does not use them, so results might not be up
- * to date.
- */
-size_t malloc_max_footprint(void) {
- return g_dlmalloc->max_footprint;
-}
diff --git a/third_party/dlmalloc/malloc_set_footprint_limit.c b/third_party/dlmalloc/malloc_set_footprint_limit.c
deleted file mode 100644
index 68d840421..000000000
--- a/third_party/dlmalloc/malloc_set_footprint_limit.c
+++ /dev/null
@@ -1,25 +0,0 @@
-#include "libc/limits.h"
-#include "libc/mem/mem.h"
-#include "third_party/dlmalloc/dlmalloc.internal.h"
-
-/**
- * Sets the maximum number of bytes to obtain from the system, causing
- * failure returns from malloc and related functions upon attempts to
- * exceed this value. The argument value may be subject to page rounding
- * to an enforceable limit; this actual value is returned. Using an
- * argument of the maximum possible size_t effectively disables checks.
- * If the argument is less than or equal to the current
- * malloc_footprint, then all future allocations that require additional
- * system memory will fail. However, invocation cannot retroactively
- * deallocate existing used memory.
- */
-size_t malloc_set_footprint_limit(size_t bytes) {
- size_t result; /* invert sense of 0 */
- if (bytes == 0) result = granularity_align(1); /* Use minimal size */
- if (bytes == SIZE_MAX) {
- result = 0; /* disable */
- } else {
- result = granularity_align(bytes);
- }
- return g_dlmalloc->footprint_limit = result;
-}
diff --git a/third_party/dlmalloc/malloc_trim.c b/third_party/dlmalloc/malloc_trim.c
deleted file mode 100644
index e6982a8a8..000000000
--- a/third_party/dlmalloc/malloc_trim.c
+++ /dev/null
@@ -1,32 +0,0 @@
-#include "libc/mem/mem.h"
-#include "third_party/dlmalloc/dlmalloc.internal.h"
-
-/**
- * If possible, gives memory back to the system (via negative arguments
- * to sbrk) if there is unused memory at the `high` end of the malloc
- * pool or in unused MMAP segments. You can call this after freeing
- * large blocks of memory to potentially reduce the system-level memory
- * requirements of a program. However, it cannot guarantee to reduce
- * memory. Under some allocation patterns, some large free blocks of
- * memory will be locked between two used chunks, so they cannot be
- * given back to the system.
- *
- * The `pad` argument to malloc_trim represents the amount of free
- * trailing space to leave untrimmed. If this argument is zero, only the
- * minimum amount of memory to maintain internal data structures will be
- * left. Non-zero arguments can be supplied to maintain enough trailing
- * space to service future expected allocations without having to
- * re-obtain memory from the system.
- *
- * @return 1 if it actually released any memory, else 0
- */
-int dlmalloc_trim(size_t pad) {
- /* asan runtime depends on this function */
- int result = 0;
- ensure_initialization();
- if (!PREACTION(g_dlmalloc)) {
- result = dlmalloc_sys_trim(g_dlmalloc, pad);
- POSTACTION(g_dlmalloc);
- }
- return result;
-}
diff --git a/third_party/dlmalloc/mallopt.c b/third_party/dlmalloc/mallopt.c
deleted file mode 100644
index a7ab0d604..000000000
--- a/third_party/dlmalloc/mallopt.c
+++ /dev/null
@@ -1,42 +0,0 @@
-#include "libc/limits.h"
-#include "libc/mem/mem.h"
-#include "third_party/dlmalloc/dlmalloc.internal.h"
-
-/**
- * Sets memory allocation parameter.
- *
- * The format is to provide a (parameter-number, parameter-value) pair.
- * mallopt then sets the corresponding parameter to the argument value
- * if it can (i.e., so long as the value is meaningful), and returns 1
- * if successful else 0. SVID/XPG/ANSI defines four standard param
- * numbers for mallopt, normally defined in malloc.h. None of these are
- * use in this malloc, so setting them has no effect. But this malloc
- * also supports other options in mallopt:
- *
- * Symbol param # default allowed param values
- * M_TRIM_THRESHOLD -1 2*1024*1024 any (-1U disables trimming)
- * M_GRANULARITY -2 page size any power of 2 >= page size
- * M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support)
- */
-bool32 mallopt(int param_number, int value) {
- size_t val;
- ensure_initialization();
- val = (value == -1) ? SIZE_MAX : (size_t)value;
- switch (param_number) {
- case M_TRIM_THRESHOLD:
- g_mparams.trim_threshold = val;
- return true;
- case M_GRANULARITY:
- if (val >= g_mparams.page_size && ((val & (val - 1)) == 0)) {
- g_mparams.granularity = val;
- return true;
- } else {
- return false;
- }
- case M_MMAP_THRESHOLD:
- g_mparams.mmap_threshold = val;
- return true;
- default:
- return false;
- }
-}
diff --git a/third_party/dlmalloc/vespene.c b/third_party/dlmalloc/vespene.c
new file mode 100644
index 000000000..bbcbd0989
--- /dev/null
+++ b/third_party/dlmalloc/vespene.c
@@ -0,0 +1,40 @@
+/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
+│vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│
+╞══════════════════════════════════════════════════════════════════════════════╡
+│ Copyright 2022 Justine Alexandra Roberts Tunney │
+│ │
+│ Permission to use, copy, modify, and/or distribute this software for │
+│ any purpose with or without fee is hereby granted, provided that the │
+│ above copyright notice and this permission notice appear in all copies. │
+│ │
+│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │
+│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │
+│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │
+│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │
+│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │
+│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │
+│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
+│ PERFORMANCE OF THIS SOFTWARE. │
+╚─────────────────────────────────────────────────────────────────────────────*/
+#include "libc/bits/weaken.h"
+#include "libc/calls/calls.h"
+#include "libc/intrin/asan.internal.h"
+#include "libc/intrin/asancodes.h"
+#include "libc/intrin/kprintf.h"
+#include "libc/runtime/runtime.h"
+#include "libc/sysv/consts/map.h"
+#include "libc/sysv/consts/prot.h"
+
+/**
+ * Acquires more system memory for dlmalloc.
+ */
+void *dlmalloc_requires_more_vespene_gas(size_t size) {
+ char *p;
+ if ((p = mmap(0, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS,
+ -1, 0)) != MAP_FAILED) {
+ if (weaken(__asan_poison)) {
+ weaken(__asan_poison)((uintptr_t)p, size, kAsanHeapFree);
+ }
+ }
+ return p;
+}
diff --git a/third_party/dlmalloc/vespene.internal.h b/third_party/dlmalloc/vespene.internal.h
new file mode 100644
index 000000000..6c4445999
--- /dev/null
+++ b/third_party/dlmalloc/vespene.internal.h
@@ -0,0 +1,10 @@
+#ifndef COSMOPOLITAN_THIRD_PARTY_DLMALLOC_VESPENE_INTERNAL_H_
+#define COSMOPOLITAN_THIRD_PARTY_DLMALLOC_VESPENE_INTERNAL_H_
+#if !(__ASSEMBLER__ + __LINKER__ + 0)
+COSMOPOLITAN_C_START_
+
+void *dlmalloc_requires_more_vespene_gas(size_t);
+
+COSMOPOLITAN_C_END_
+#endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */
+#endif /* COSMOPOLITAN_THIRD_PARTY_DLMALLOC_VESPENE_INTERNAL_H_ */
diff --git a/third_party/python/Objects/obmalloc.c b/third_party/python/Objects/obmalloc.c
index 06960776e..fb5515732 100644
--- a/third_party/python/Objects/obmalloc.c
+++ b/third_party/python/Objects/obmalloc.c
@@ -13,7 +13,7 @@
#include "libc/runtime/runtime.h"
#include "libc/sysv/consts/map.h"
#include "libc/sysv/consts/prot.h"
-#include "third_party/dlmalloc/dlmalloc.internal.h"
+#include "third_party/dlmalloc/dlmalloc.h"
#include "third_party/python/Include/objimpl.h"
#include "third_party/python/Include/pydebug.h"
#include "third_party/python/Include/pyerrors.h"
diff --git a/third_party/sqlite3/os_unix.c b/third_party/sqlite3/os_unix.c
index 3e5aec156..62907d5ae 100644
--- a/third_party/sqlite3/os_unix.c
+++ b/third_party/sqlite3/os_unix.c
@@ -3531,16 +3531,6 @@ int sqlite3_sync_count = 0;
int sqlite3_fullsync_count = 0;
#endif
-/*
-** We do not trust systems to provide a working fdatasync(). Some do.
-** Others do no. To be safe, we will stick with the (slightly slower)
-** fsync(). If you know that your system does support fdatasync() correctly,
-** then simply compile with -Dfdatasync=fdatasync or -DHAVE_FDATASYNC
-*/
-#if !defined(fdatasync) && !HAVE_FDATASYNC
-# define fdatasync fsync
-#endif
-
/*
** Define HAVE_FULLFSYNC to 0 or 1 depending on whether or not
** the F_FULLFSYNC macro is defined. F_FULLFSYNC is currently
diff --git a/tool/build/blinkenlights.c b/tool/build/blinkenlights.c
index ba9324705..95b14319b 100644
--- a/tool/build/blinkenlights.c
+++ b/tool/build/blinkenlights.c
@@ -700,14 +700,12 @@ static void LoadSyms(void) {
static int DrainInput(int fd) {
char buf[32];
struct pollfd fds[1];
- if (!IsWindows()) {
- for (;;) {
- fds[0].fd = fd;
- fds[0].events = POLLIN;
- if (poll(fds, ARRAYLEN(fds), 0) == -1) return -1;
- if (!(fds[0].revents & POLLIN)) break;
- if (read(fd, buf, sizeof(buf)) == -1) return -1;
- }
+ for (;;) {
+ fds[0].fd = fd;
+ fds[0].events = POLLIN;
+ if (poll(fds, ARRAYLEN(fds), 0) == -1) return -1;
+ if (!(fds[0].revents & POLLIN)) break;
+ if (read(fd, buf, sizeof(buf)) == -1) return -1;
}
return 0;
}
@@ -1928,25 +1926,21 @@ static int OnPtyFdPoll(struct pollfd *fds, size_t nfds, int ms) {
ReactiveDraw();
once = true;
}
- if (!IsWindows()) {
- p2.fd = fds[i].fd;
- p2.events = fds[i].events;
- switch (poll(&p2, 1, ms)) {
- case -1:
- re = POLLERR;
- ++t;
- break;
- case 0:
- break;
- case 1:
- re = p2.revents;
- ++t;
- break;
- default:
- unreachable;
- }
- } else {
- re = POLLIN | POLLOUT; /* xxx */
+ p2.fd = fds[i].fd;
+ p2.events = fds[i].events;
+ switch (poll(&p2, 1, ms)) {
+ case -1:
+ re = POLLERR;
+ ++t;
+ break;
+ case 0:
+ break;
+ case 1:
+ re = p2.revents;
+ ++t;
+ break;
+ default:
+ unreachable;
}
}
}
@@ -2608,11 +2602,7 @@ static bool HasPendingKeyboard(void) {
}
static void Sleep(int ms) {
- if (IsWindows()) {
- usleep(ms * 1000L);
- } else {
- poll((struct pollfd[]){{ttyin, POLLIN}}, 1, ms);
- }
+ poll((struct pollfd[]){{ttyin, POLLIN}}, 1, ms);
}
static void OnMouseWheelUp(struct Panel *p, int y, int x) {
diff --git a/tool/build/emubin/mdatest.real.c b/tool/build/emubin/mdatest.real.c
index e0f997e40..40845447b 100644
--- a/tool/build/emubin/mdatest.real.c
+++ b/tool/build/emubin/mdatest.real.c
@@ -40,5 +40,5 @@ int main() {
SetVideoMode(7);
SetEs(0xb0000 >> 4);
MdaTest((void *)0);
- for (;;) asm("pause");
+ for (;;) __builtin_ia32_pause();
}
diff --git a/tool/build/lib/interner.c b/tool/build/lib/interner.c
index b65c90a68..b468c5a68 100644
--- a/tool/build/lib/interner.c
+++ b/tool/build/lib/interner.c
@@ -21,6 +21,7 @@
#include "libc/bits/safemacros.internal.h"
#include "libc/mem/mem.h"
#include "libc/nexgen32e/crc32.h"
+#include "libc/runtime/runtime.h"
#include "libc/str/str.h"
#include "libc/x/x.h"
#include "tool/build/lib/interner.h"
@@ -92,10 +93,12 @@ size_t interncount(const struct Interner *t) {
* @note use consistent size w/ non-string items
*/
size_t internobj(struct Interner *t, const void *data, size_t size) {
+ char *p2;
+ size_t n2;
char *item;
unsigned hash;
- size_t i, step;
struct InternerObject *it;
+ size_t i, off, step, need, bytes;
step = 0;
item = data;
it = (struct InternerObject *)t;
@@ -117,9 +120,25 @@ size_t internobj(struct Interner *t, const void *data, size_t size) {
step++;
} while (it->p[i].hash);
}
+ off = it->pool.i;
+ if (__builtin_add_overflow(off, size, &need)) abort();
+ if (__builtin_add_overflow(need, 1, &need)) abort();
+ if (need > it->pool.n) {
+ if (__builtin_add_overflow(it->pool.n, 1, &n2)) abort();
+ do {
+ if (__builtin_add_overflow(n2, n2 >> 1, &n2)) abort();
+ } while (need > n2);
+ if (__builtin_mul_overflow(n2, sizeof(*it->pool.p), &bytes)) abort();
+ if (!(p2 = realloc(it->pool.p, bytes))) abort();
+ it->pool.p = p2;
+ it->pool.n = n2;
+ }
+ memcpy(it->pool.p + off, data, size);
+ it->pool.p[off + size] = 0;
it->p[i].hash = hash;
- return (it->p[i].index =
- CONCAT(&it->pool.p, &it->pool.i, &it->pool.n, item, size));
+ it->p[i].index = off;
+ it->pool.i += size;
+ return off;
}
/**
diff --git a/tool/build/lib/ioports.c b/tool/build/lib/ioports.c
index adc231b9a..37d70dc5c 100644
--- a/tool/build/lib/ioports.c
+++ b/tool/build/lib/ioports.c
@@ -69,11 +69,7 @@ static int OpSerialIn(struct Machine *m, int r) {
return 0x01;
}
case UART_LSR:
- if (IsWindows()) {
- p = POLLIN | POLLOUT; /* XXX */
- } else {
- if ((p = OpE9Poll(m)) == -1) return -1;
- }
+ if ((p = OpE9Poll(m)) == -1) return -1;
s = UART_TTYIDL;
if (p & POLLIN) s |= UART_TTYDA;
if (p & POLLOUT) s |= UART_TTYTXR;
diff --git a/tool/build/lib/time.c b/tool/build/lib/time.c
index f134ce478..d856638ab 100644
--- a/tool/build/lib/time.c
+++ b/tool/build/lib/time.c
@@ -37,13 +37,9 @@ void OpPause(struct Machine *m, uint32_t rde) {
interactive = isatty(0);
once = true;
}
- if (!IsWindows() && interactive) {
- pf.fd = 0;
- pf.events = POLLIN;
- poll(&pf, 1, 20); /* make spin loops less brutal */
- } else {
- sched_yield();
- }
+ pf.fd = 0;
+ pf.events = POLLIN;
+ poll(&pf, 1, 20); /* make spin loops less brutal */
}
void OpRdtsc(struct Machine *m, uint32_t rde) {
diff --git a/tool/build/pstrace.c b/tool/build/pstrace.c
index 039c15027..a9fde0694 100644
--- a/tool/build/pstrace.c
+++ b/tool/build/pstrace.c
@@ -37,7 +37,7 @@
#include "libc/sysv/consts/sa.h"
#include "libc/sysv/consts/sig.h"
#include "libc/x/x.h"
-#include "third_party/dlmalloc/dlmalloc.internal.h"
+#include "third_party/dlmalloc/dlmalloc.h"
#include "third_party/getopt/getopt.h"
/**
diff --git a/tool/build/rollup.c b/tool/build/rollup.c
index c19fbfe9a..ab3d79bb3 100644
--- a/tool/build/rollup.c
+++ b/tool/build/rollup.c
@@ -20,62 +20,45 @@
#include "libc/alg/arraylist2.internal.h"
#include "libc/calls/calls.h"
#include "libc/calls/struct/stat.h"
+#include "libc/errno.h"
#include "libc/fmt/itoa.h"
+#include "libc/intrin/kprintf.h"
#include "libc/log/check.h"
#include "libc/log/log.h"
#include "libc/mem/mem.h"
#include "libc/runtime/runtime.h"
+#include "libc/stdio/append.internal.h"
#include "libc/stdio/stdio.h"
#include "libc/str/str.h"
#include "libc/sysv/consts/map.h"
#include "libc/sysv/consts/o.h"
#include "libc/sysv/consts/prot.h"
#include "tool/build/lib/getargs.h"
+#include "tool/build/lib/interner.h"
#define LOOKINGAT(p, pe, s) LookingAt(p, pe, s, strlen(s))
-#define APPENDSTR(s) AppendData(s, strlen(s))
-
-struct Output {
- size_t i, n;
- const char *p;
-};
struct Visited {
size_t i, n;
const char **p;
};
-static struct stat st;
-static struct Output output;
-static struct Visited visited;
+char *output;
+struct Interner *visited;
-static void Visit(const char *);
+void Visit(const char *);
-static bool HasVisited(const char *path) {
- int i;
- for (i = 0; i < visited.i; ++i) {
- if (strcmp(path, visited.p[i]) == 0) {
- return true;
- }
- }
- return false;
+size_t GetFdSize(int fd) {
+ struct stat st;
+ CHECK_NE(-1, fstat(fd, &st));
+ return st.st_size;
}
-static void AppendData(const char *s, size_t n) {
- CONCAT(&output.p, &output.i, &output.n, s, n);
-}
-
-static void AppendInt(long x) {
- char ibuf[21];
- AppendData(ibuf, int64toarray_radix10(x, ibuf));
-}
-
-static bool LookingAt(const char *p, const char *pe, const char *s, size_t n) {
+bool LookingAt(const char *p, const char *pe, const char *s, size_t n) {
return pe - p >= n && memcmp(p, s, n) == 0;
}
-static void Process(const char *p, const char *pe, const char *path,
- bool isheader) {
+void Process(const char *p, const char *pe, const char *path, bool isheader) {
int level;
bool noformat;
const char *p2, *dq, *name;
@@ -101,16 +84,18 @@ static void Process(const char *p, const char *pe, const char *path,
continue;
}
}
- AppendData(p, p2 - p);
+ appendd(&output, p, p2 - p);
}
if (noformat) {
- APPENDSTR("/* clang-format on */\n");
+ appends(&output, "/* clang-format on */\n");
}
+ kprintf("finished%n");
}
-static void Visit(const char *path) {
+void Visit(const char *path) {
int fd;
char *map;
+ size_t size;
bool isheader;
if (!endswith(path, ".h") && !endswith(path, ".inc")) return;
if (endswith(path, ".internal.h")) return;
@@ -118,43 +103,62 @@ static void Visit(const char *path) {
if (endswith(path, ".internal.inc")) return;
if (endswith(path, "/internal.inc")) return;
isheader = endswith(path, ".h");
- if (isheader && HasVisited(path)) return;
- APPENDSTR("\n\f\n/*!BEGIN ");
- APPENDSTR(path);
- APPENDSTR(" */\n\n");
- APPEND(&visited.p, &visited.i, &visited.n, &path);
+ if (isheader && isinterned(visited, path)) return;
+ appends(&output, "\n\f\n/*!BEGIN ");
+ appends(&output, path);
+ appends(&output, " */\n\n");
+ intern(visited, path);
if ((fd = open(path, O_RDONLY)) == -1) {
fprintf(stderr, "error: %s: failed to open\n", path);
exit(1);
}
- CHECK_NE(-1, fstat(fd, &st));
- if (st.st_size) {
+ if ((size = GetFdSize(fd))) {
+ kprintf("size 1 = %'zu%n", size);
CHECK_NE(MAP_FAILED,
- (map = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0)));
- Process(map, map + st.st_size, path, isheader);
- LOGIFNEG1(munmap(map, st.st_size));
+ (map = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0)));
+ Process(map, map + size, path, isheader);
+ kprintf("size = %'zu%n", size);
+ CHECK_EQ(0, munmap(map, size), "p=%p z=%'zu path=%s", map, size, path);
}
- LOGIFNEG1(close(fd));
+ CHECK_EQ(0, close(fd));
+}
+
+ssize_t WriteAll(int fd, const char *p, size_t n) {
+ ssize_t rc;
+ size_t i, got;
+ for (i = 0; i < n;) {
+ rc = write(fd, p + i, n - i);
+ if (rc != -1) {
+ got = rc;
+ i += got;
+ } else if (errno != EINTR) {
+ return -1;
+ }
+ }
+ return i;
}
int main(int argc, char *argv[]) {
const char *src;
struct GetArgs ga;
- APPENDSTR("#ifndef COSMOPOLITAN_H_\n");
- APPENDSTR("#define COSMOPOLITAN_H_\n");
- /* APPENDSTR("#define IMAGE_BASE_VIRTUAL "); */
- /* AppendInt(IMAGE_BASE_VIRTUAL); */
- /* APPENDSTR("\n"); */
- /* APPENDSTR("#define IMAGE_BASE_PHYSICAL "); */
- /* AppendInt(IMAGE_BASE_PHYSICAL); */
- /* APPENDSTR("\n"); */
+ ShowCrashReports();
+ visited = newinterner();
+ appends(&output, "#ifndef COSMOPOLITAN_H_\n");
+ appends(&output, "#define COSMOPOLITAN_H_\n");
+ /* appends(&output, "#define IMAGE_BASE_VIRTUAL "); */
+ /* appendf(&output, "%p", IMAGE_BASE_VIRTUAL); */
+ /* appends(&output, "\n"); */
+ /* appends(&output, "#define IMAGE_BASE_PHYSICAL "); */
+ /* appendf(&output, "%p", IMAGE_BASE_PHYSICAL); */
+ /* appends(&output, "\n"); */
getargs_init(&ga, argv + 1);
while ((src = getargs_next(&ga))) {
Visit(src);
}
getargs_destroy(&ga);
- APPENDSTR("\n");
- APPENDSTR("#endif /* COSMOPOLITAN_H_ */\n");
- CHECK_EQ(output.i, write(1, output.p, output.i));
+ appends(&output, "\n");
+ appends(&output, "#endif /* COSMOPOLITAN_H_ */\n");
+ CHECK_NE(-1, WriteAll(1, output, appendz(output).i));
+ freeinterner(visited);
return 0;
}
diff --git a/tool/build/runitd.c b/tool/build/runitd.c
index 63a38cb19..741a5078f 100644
--- a/tool/build/runitd.c
+++ b/tool/build/runitd.c
@@ -92,6 +92,8 @@
#define kLogFile "o/runitd.log"
#define kLogMaxBytes (2 * 1000 * 1000)
+bool use_ftrace;
+bool use_strace;
char *g_exepath;
volatile bool g_interrupted;
struct sockaddr_in g_servaddr;
@@ -136,9 +138,15 @@ void GetOpts(int argc, char *argv[]) {
g_servaddr.sin_family = AF_INET;
g_servaddr.sin_port = htons(RUNITD_PORT);
g_servaddr.sin_addr.s_addr = INADDR_ANY;
- while ((opt = getopt(argc, argv, "hvsdrl:p:t:w:")) != -1) {
+ while ((opt = getopt(argc, argv, "fqhvsdrl:p:t:w:")) != -1) {
switch (opt) {
+ case 'f':
+ use_ftrace = true;
+ break;
case 's':
+ use_strace = true;
+ break;
+ case 'q':
--__log_level;
break;
case 'v':
@@ -340,7 +348,12 @@ void HandleClient(void) {
dup2(pipefds[1], 2);
if (pipefds[0] > 2) close(pipefds[1]);
if (g_devnullfd > 2) close(g_devnullfd);
- execv(g_exepath, (char *const[]){g_exepath, NULL});
+ int i = 0;
+ char *args[4] = {0};
+ args[i++] = g_exepath;
+ if (use_strace) args[i++] = "--strace";
+ if (use_ftrace) args[i++] = "--ftrace";
+ execv(g_exepath, args);
_exit(127);
}
LOGIFNEG1(close(pipefds[1]));
diff --git a/tool/emacs/cosmo-c-builtins.el b/tool/emacs/cosmo-c-builtins.el
index fa3f767d9..42795053e 100644
--- a/tool/emacs/cosmo-c-builtins.el
+++ b/tool/emacs/cosmo-c-builtins.el
@@ -269,7 +269,13 @@
"__builtin_bcmp"
"__builtin_bzero"
"__builtin_memset"
- "__builtin_strlen"))
+ "__builtin_strlen"
+ "__ATOMIC_RELAXED"
+ "__ATOMIC_CONSUME"
+ "__ATOMIC_ACQUIRE"
+ "__ATOMIC_RELEASE"
+ "__ATOMIC_ACQ_REL"
+ "__ATOMIC_SEQ_CST"))
(gcc-builtin-functions-atomic
'("__atomic_load_n"
diff --git a/tool/net/lunix.c b/tool/net/lunix.c
index c4bfc17a0..ef9374412 100644
--- a/tool/net/lunix.c
+++ b/tool/net/lunix.c
@@ -902,6 +902,47 @@ static int LuaUnixRecvfrom(lua_State *L) {
return 4;
}
+// unix.recv(fd[, bufsiz[, flags]]) → data, errno
+static int LuaUnixRecv(lua_State *L) {
+ char *buf;
+ size_t got;
+ ssize_t rc;
+ int fd, flags, bufsiz, olderr;
+ olderr = errno;
+ fd = luaL_checkinteger(L, 1);
+ bufsiz = luaL_optinteger(L, 2, 1500);
+ flags = luaL_optinteger(L, 3, 0);
+ bufsiz = MIN(bufsiz, 0x7ffff000);
+ buf = xmalloc(bufsiz);
+ rc = recv(fd, buf, bufsiz, flags);
+ if (rc != -1) {
+ got = rc;
+ lua_pushlstring(L, buf, got);
+ lua_pushnil(L);
+ } else {
+ lua_pushnil(L);
+ lua_pushinteger(L, errno);
+ errno = olderr;
+ }
+ free(buf);
+ return 4;
+}
+
+// unix.send(fd, data[, flags]) → sent, errno
+static int LuaUnixSend(lua_State *L) {
+ char *data;
+ ssize_t rc;
+ size_t sent, size;
+ int fd, flags, bufsiz, olderr;
+ olderr = errno;
+ fd = luaL_checkinteger(L, 1);
+ data = luaL_checklstring(L, 2, &size);
+ size = MIN(size, 0x7ffff000);
+ flags = luaL_optinteger(L, 5, 0);
+ rc = send(fd, data, size, flags);
+ return ReturnRc(L, rc, olderr);
+}
+
// unix.sendto(fd, data, ip, port[, flags]) → sent, errno
// flags MSG_OOB, MSG_DONTROUTE, MSG_NOSIGNAL, etc.
static int LuaUnixSendto(lua_State *L) {
@@ -1401,7 +1442,9 @@ static const luaL_Reg kLuaUnix[] = {
{"listen", LuaUnixListen}, // begin listening for clients
{"accept", LuaUnixAccept}, // create client fd for client
{"connect", LuaUnixConnect}, // connect to remote address
+ {"recv", LuaUnixRecv}, // receive tcp from some address
{"recvfrom", LuaUnixRecvfrom}, // receive udp from some address
+ {"send", LuaUnixSend}, // send tcp to some address
{"sendto", LuaUnixSendto}, // send udp to some address
{"shutdown", LuaUnixShutdown}, // make socket half empty or full
{"getpeername", LuaUnixGetpeername}, // get address of remote end
diff --git a/tool/net/redbean.c b/tool/net/redbean.c
index f6b35e3ef..04ade2143 100644
--- a/tool/net/redbean.c
+++ b/tool/net/redbean.c
@@ -24,6 +24,7 @@
#include "libc/calls/calls.h"
#include "libc/calls/math.h"
#include "libc/calls/sigbits.h"
+#include "libc/calls/strace.internal.h"
#include "libc/calls/struct/dirent.h"
#include "libc/calls/struct/flock.h"
#include "libc/calls/struct/rusage.h"
@@ -1187,11 +1188,7 @@ static void KillGroupImpl(int sig) {
}
static void KillGroup(void) {
- if (IsWindows()) {
- KillGroupImpl(SIGINT);
- } else {
- KillGroupImpl(SIGTERM);
- }
+ KillGroupImpl(SIGTERM);
}
static void WaitAll(void) {
diff --git a/tool/net/wb.c b/tool/net/wb.c
index 08310a11c..7ad4f960c 100644
--- a/tool/net/wb.c
+++ b/tool/net/wb.c
@@ -20,6 +20,7 @@
#include "libc/calls/calls.h"
#include "libc/dns/dns.h"
#include "libc/errno.h"
+#include "libc/fmt/conv.h"
#include "libc/log/check.h"
#include "libc/log/log.h"
#include "libc/macros.internal.h"
@@ -53,6 +54,8 @@
#include "third_party/mbedtls/error.h"
#include "third_party/mbedtls/ssl.h"
+#define OPTS "BIqksvzX:H:C:m:"
+
#define Micros(t) ((int64_t)((t)*1e6))
#define HasHeader(H) (!!msg.headers[H].a)
#define HeaderData(H) (inbuf.p + msg.headers[H].a)
@@ -79,16 +82,17 @@ bool authmode = MBEDTLS_SSL_VERIFY_NONE;
char *host;
char *port;
+char *flags;
bool usessl;
uint32_t ip;
struct Url url;
struct addrinfo *addr;
struct Buffer inbuf;
-long fetch_count;
long error_count;
long failure_count;
-long response_count;
+long message_count;
+long connect_count;
double *latencies;
size_t latencies_n;
size_t latencies_c;
@@ -96,6 +100,8 @@ long double start_run;
long double end_run;
long double start_fetch;
long double end_fetch;
+long connectionstobemade = 100;
+long messagesperconnection = 100;
mbedtls_x509_crt *cachain;
mbedtls_ssl_config conf;
@@ -146,7 +152,18 @@ static int TlsRecv(void *c, unsigned char *p, size_t n, uint32_t o) {
}
static wontreturn void PrintUsage(FILE *f, int rc) {
- fprintf(f, "usage: %s [-ksvV] URL\n", program_invocation_name);
+ fprintf(f, "usage: %s [-%s] URL\n", OPTS, program_invocation_name);
+ fprintf(f, "wb - cosmopolitan http/https benchmark tool\n");
+ fprintf(f, " -C INT connections to be made\n");
+ fprintf(f, " -m INT messages per connection\n");
+ fprintf(f, " -B use suite b ciphersuites\n");
+ fprintf(f, " -v increase verbosity\n");
+ fprintf(f, " -H K:V append http header\n");
+ fprintf(f, " -X NAME specify http method\n");
+ fprintf(f, " -k verify ssl certs\n");
+ fprintf(f, " -I same as -X HEAD\n");
+ fprintf(f, " -z same as -H Accept-Encoding:gzip\n");
+ fprintf(f, " -h show this help\n");
exit(rc);
}
@@ -157,11 +174,14 @@ int fetch(void) {
const char *body;
int t, ret, sock;
struct TlsBio *bio;
+ long messagesremaining;
struct HttpMessage msg;
struct HttpUnchunker u;
size_t urlarglen, requestlen;
size_t g, i, n, hdrsize, paylen;
+ messagesremaining = messagesperconnection;
+
/*
* Setup crypto.
*/
@@ -294,13 +314,12 @@ SendAnother:
Finished:
status = msg.status;
DestroyHttpMessage(&msg);
- if (!isdone && status == 200) {
+ if (!isdone && status == 200 && --messagesremaining > 0) {
long double now = nowl();
end_fetch = now;
- ++response_count;
+ ++message_count;
latencies = realloc(latencies, ++latencies_n * sizeof(*latencies));
latencies[latencies_n - 1] = end_fetch - start_fetch;
- ++fetch_count;
start_fetch = now;
goto SendAnother;
}
@@ -321,29 +340,45 @@ int main(int argc, char *argv[]) {
*/
int opt;
__log_level = kLogWarn;
- while ((opt = getopt(argc, argv, "BqksvIX:H:")) != -1) {
+ while ((opt = getopt(argc, argv, OPTS)) != -1) {
switch (opt) {
case 's':
case 'q':
break;
case 'B':
suiteb = true;
+ appendf(&flags, " -B");
break;
case 'v':
++__log_level;
break;
case 'I':
method = kHttpHead;
+ appendf(&flags, " -I");
break;
case 'H':
headers.p = realloc(headers.p, ++headers.n * sizeof(*headers.p));
headers.p[headers.n - 1] = optarg;
+ appendf(&flags, " -H '%s'", optarg);
+ break;
+ case 'z':
+ headers.p = realloc(headers.p, ++headers.n * sizeof(*headers.p));
+ headers.p[headers.n - 1] = "Accept-Encoding: gzip";
+ appendf(&flags, " -z");
break;
case 'X':
CHECK((method = GetHttpMethod(optarg, strlen(optarg))));
+ appendf(&flags, " -X %s", optarg);
break;
case 'k':
authmode = MBEDTLS_SSL_VERIFY_REQUIRED;
+ appendf(&flags, " -k");
+ break;
+ case 'm':
+ messagesperconnection = strtol(optarg, 0, 0);
+ break;
+ case 'C':
+ connectionstobemade = strtol(optarg, 0, 0);
break;
case 'h':
PrintUsage(stdout, EXIT_SUCCESS);
@@ -352,10 +387,15 @@ int main(int argc, char *argv[]) {
}
}
+ appendf(&flags, " -m %ld", messagesperconnection);
+ appendf(&flags, " -C %ld", connectionstobemade);
+
if (optind == argc) PrintUsage(stdout, EXIT_SUCCESS);
urlarg = argv[optind];
cachain = GetSslRoots();
+ long connectsremaining = connectionstobemade;
+
/*
* Parse URL.
*/
@@ -434,13 +474,13 @@ int main(int argc, char *argv[]) {
latencies_c = 1024;
latencies = malloc(latencies_c * sizeof(*latencies));
start_run = nowl();
- while (!isdone) {
- ++fetch_count;
+ while (!isdone && --connectsremaining >= 0) {
start_fetch = nowl();
status = fetch();
end_fetch = nowl();
if (status == 200) {
- ++response_count;
+ ++connect_count;
+ ++message_count;
latencies = realloc(latencies, ++latencies_n * sizeof(*latencies));
latencies[latencies_n - 1] = end_fetch - start_fetch;
} else if (status == 900) {
@@ -452,17 +492,17 @@ int main(int argc, char *argv[]) {
end_run = nowl();
double latencies_sum = fsum(latencies, latencies_n);
- double avg_latency = latencies_sum / response_count;
+ double avg_latency = latencies_sum / message_count;
- printf("\n");
- printf("run time: %,ldµs\n", Micros(end_run - start_run));
- printf("per second: %,ld\n",
- (int64_t)(response_count / (end_run - start_run)));
- printf("avg latency: %,ldµs\n", Micros(avg_latency));
- printf("response count: %,ld\n", response_count);
- printf("fetch count: %,ld\n", fetch_count - failure_count);
- printf("error count: %,ld (non-200 responses)\n", error_count);
- printf("failure count: %,ld (transport error)\n", failure_count);
+ printf("wb%s\n", flags);
+ printf("msgs / second: %,ld qps\n",
+ (int64_t)(message_count / (end_run - start_run)));
+ printf("run time: %,ldµs\n", Micros(end_run - start_run));
+ printf("latency / msgs: %,ldµs\n", Micros(avg_latency));
+ printf("message count: %,ld\n", message_count);
+ printf("connect count: %,ld\n", connect_count);
+ printf("error count: %,ld (non-200 responses)\n", error_count);
+ printf("failure count: %,ld (transport error)\n", failure_count);
return 0;
}
diff --git a/tool/plinko/lib/plinko.c b/tool/plinko/lib/plinko.c
index b7f5b37d4..5f6e6379c 100644
--- a/tool/plinko/lib/plinko.c
+++ b/tool/plinko/lib/plinko.c
@@ -20,6 +20,7 @@
#include "libc/calls/calls.h"
#include "libc/calls/strace.internal.h"
#include "libc/calls/struct/sigaction.h"
+#include "libc/errno.h"
#include "libc/intrin/kprintf.h"
#include "libc/log/countbranch.h"
#include "libc/log/countexpr.h"
@@ -29,6 +30,7 @@
#include "libc/runtime/runtime.h"
#include "libc/runtime/stack.h"
#include "libc/runtime/symbols.internal.h"
+#include "libc/stdio/stdio.h"
#include "libc/str/str.h"
#include "libc/sysv/consts/map.h"
#include "libc/sysv/consts/o.h"
@@ -936,9 +938,12 @@ int Plinko(int argc, char *argv[]) {
if (arch_prctl(ARCH_SET_FS, 0x200000000000) == -1 ||
arch_prctl(ARCH_SET_GS, (intptr_t)DispatchPlan) == -1) {
- kprintf("error: %m%nyour operating system doesn't allow you change both "
- "the %%fs and %%gs registers in your processor which is a shame "
- "since they're crucial for performance and thread-local storage%n");
+ fputs("error: ", stderr);
+ fputs(strerror(errno), stderr);
+ fputs("\nyour operating system doesn't allow you change both "
+ "the %fs and %gs registers\nin your processor. that's a shame, "
+ "since they're crucial for performance.\n",
+ stderr);
exit(1);
}
@@ -960,13 +965,16 @@ int Plinko(int argc, char *argv[]) {
(BANE & (BANE | MASK(BANE))) * sizeof(g_mem[0]),
PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1,
0) == MAP_FAILED) {
- kprintf("error: %m%nyour operating system doesn't allow you to allocate "
- "outrageous amounts of overcommit memory, which is a shame, since "
- "the pml4t feature in your processor was intended to give you that "
- "power since it's crucial for sparse data applications and lisp. "
- "for instance, the way racket works around this problem is by "
- "triggering thousands of segmentation faults as part of normal "
- "operation%n");
+ fputs("error: ", stderr);
+ fputs(strerror(errno), stderr);
+ fputs("\nyour operating system doesn't allow you to allocate\n"
+ "outrageous amounts of overcommit memory, which is a shame, since\n"
+ "the pml4t feature in your processor was intended to give you that\n"
+ "power since it's crucial for sparse data applications and lisp.\n"
+ "for instance, the way racket works around this problem is by\n"
+ "triggering thousands of segmentation faults as part of normal\n"
+ "operation\n",
+ stderr);
exit(1);
}
diff --git a/tool/viz/fixconsole.c b/tool/viz/fixconsole.c
new file mode 100644
index 000000000..7c885f639
--- /dev/null
+++ b/tool/viz/fixconsole.c
@@ -0,0 +1,42 @@
+/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
+│vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│
+╞══════════════════════════════════════════════════════════════════════════════╡
+│ Copyright 2022 Justine Alexandra Roberts Tunney │
+│ │
+│ Permission to use, copy, modify, and/or distribute this software for │
+│ any purpose with or without fee is hereby granted, provided that the │
+│ above copyright notice and this permission notice appear in all copies. │
+│ │
+│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │
+│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │
+│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │
+│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │
+│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │
+│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │
+│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
+│ PERFORMANCE OF THIS SOFTWARE. │
+╚─────────────────────────────────────────────────────────────────────────────*/
+#include "libc/dce.h"
+#include "libc/intrin/kprintf.h"
+#include "libc/nt/console.h"
+#include "libc/nt/enum/consolemodeflags.h"
+#include "libc/nt/runtime.h"
+#include "libc/runtime/runtime.h"
+
+int main(int argc, char *argv[]) {
+ if (!IsWindows()) {
+ kprintf("%s is intended for windows%n", argv[0]);
+ return 1;
+ }
+ SetConsoleMode(GetStdHandle(kNtStdInputHandle),
+ kNtEnableProcessedInput | kNtEnableLineInput |
+ kNtEnableEchoInput | kNtEnableMouseInput |
+ kNtEnableInsertMode | kNtEnableQuickEditMode |
+ kNtEnableExtendedFlags | kNtEnableAutoPosition);
+ SetConsoleMode(GetStdHandle(kNtStdOutputHandle),
+ kNtEnableProcessedOutput | kNtEnableWrapAtEolOutput);
+ SetConsoleMode(GetStdHandle(kNtStdErrorHandle),
+ kNtEnableProcessedOutput | kNtEnableWrapAtEolOutput |
+ kNtEnableVirtualTerminalProcessing);
+ _Exit(0);
+}
diff --git a/tool/viz/printdos2errno.c b/tool/viz/printdos2errno.c
new file mode 100644
index 000000000..498c89862
--- /dev/null
+++ b/tool/viz/printdos2errno.c
@@ -0,0 +1,36 @@
+/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
+│vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│
+╞══════════════════════════════════════════════════════════════════════════════╡
+│ Copyright 2022 Justine Alexandra Roberts Tunney │
+│ │
+│ Permission to use, copy, modify, and/or distribute this software for │
+│ any purpose with or without fee is hereby granted, provided that the │
+│ above copyright notice and this permission notice appear in all copies. │
+│ │
+│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │
+│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │
+│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │
+│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │
+│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │
+│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │
+│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
+│ PERFORMANCE OF THIS SOFTWARE. │
+╚─────────────────────────────────────────────────────────────────────────────*/
+#include "libc/fmt/fmt.h"
+#include "libc/intrin/dos2errno.internal.h"
+#include "libc/intrin/kprintf.h"
+
+// note: these are supplementary errno magnum mappings
+// don't include the ones in libc/sysv/consts.sh
+
+int main(int argc, char *argv[]) {
+ int i;
+ for (i = 0; kDos2Errno[i].doscode; ++i) {
+ kprintf("dos error %10hu maps to rva %10d errno %10d which is %s%n",
+ kDos2Errno[i].doscode, kDos2Errno[i].systemv,
+ *(const int *)((intptr_t)kDos2Errno + kDos2Errno[i].systemv),
+ strerror_short(
+ *(const int *)((intptr_t)kDos2Errno + kDos2Errno[i].systemv)));
+ }
+ return 0;
+}