mirror of
https://github.com/jart/cosmopolitan.git
synced 2025-01-31 11:37:35 +00:00
60cb435cb4
If threads are being used, then fork() will now acquire and release and runtime locks so that fork() may be safely used from threads. This also makes vfork() thread safe, because pthread mutexes will do nothing when the process is a child of vfork(). More torture tests have been written to confirm this all works like a charm. Additionally: - Invent hexpcpy() api - Rename nsync_malloc_() to kmalloc() - Complete posix named semaphore implementation - Make pthread_create() asynchronous signal safe - Add rm, rmdir, and touch to command interpreter builtins - Invent sigisprecious() and modify sigset functions to use it - Add unit tests for posix_spawn() attributes and fix its bugs One unresolved problem is the reclaiming of *NSYNC waiter memory in the forked child processes, within apps which have threads waiting on locks
77 lines
2.1 KiB
C++
77 lines
2.1 KiB
C++
// clang-format off
|
|
|
|
/* ----------------------------- statistics ------------------------------ */
|
|
|
|
#if !NO_MALLINFO
|
|
/* Gather allocation statistics for malloc state `m` into a mallinfo
   record.  Walks every segment, summing chunk sizes to compute the
   arena total, the free-byte total, and the free-chunk count.  Runs
   under the state's lock (PREACTION/POSTACTION) so the heap cannot
   mutate mid-walk.  Returns an all-zero record if the lock cannot be
   taken or the state is uninitialized. */
static struct mallinfo internal_mallinfo(mstate m) {
  struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
  ensure_initialization();
  if (!PREACTION(m)) {
    check_malloc_state(m);
    if (is_initialized(m)) {
      size_t freecount = SIZE_T_ONE; /* the top chunk is always free */
      size_t freebytes = m->topsize + TOP_FOOT_SIZE;
      size_t arena = freebytes;
      msegmentptr seg;
      /* Traverse each segment's chunk list up to the fencepost
         sentinel (or the top chunk, which we already counted). */
      for (seg = &m->seg; seg != 0; seg = seg->next) {
        mchunkptr c;
        for (c = align_as_chunk(seg->base);
             segment_holds(seg, c) && c != m->top &&
             c->head != FENCEPOST_HEAD;
             c = next_chunk(c)) {
          size_t csize = chunksize(c);
          arena += csize;
          if (!is_inuse(c)) {
            freebytes += csize;
            ++freecount;
          }
        }
      }
      nm.arena    = arena;                    /* total chunk bytes */
      nm.ordblks  = freecount;                /* number of free chunks */
      nm.hblkhd   = m->footprint - arena;     /* non-chunk overhead */
      nm.usmblks  = m->max_footprint;         /* historical peak */
      nm.uordblks = m->footprint - freebytes; /* bytes in use */
      nm.fordblks = freebytes;                /* bytes free */
      nm.keepcost = m->topsize;               /* releasable via trim */
    }
    POSTACTION(m); /* drop lock */
  }
  return nm;
}
|
|
#endif /* !NO_MALLINFO */
|
|
|
|
#if !NO_MALLOC_STATS
|
|
/* Print a three-line usage summary for malloc state `m` via kprintf:
   peak footprint, current footprint, and bytes currently in use.
   In-use bytes are derived by subtracting every free chunk (and the
   top chunk) from the footprint while holding the state's lock; the
   lock is released before printing so kprintf runs unlocked. */
static void internal_malloc_stats(mstate m) {
  ensure_initialization();
  if (!PREACTION(m)) {
    size_t peak = 0;
    size_t footprint = 0;
    size_t inuse = 0;
    check_malloc_state(m);
    if (is_initialized(m)) {
      msegmentptr seg;
      peak = m->max_footprint;
      footprint = m->footprint;
      /* Start from footprint minus the top chunk, then subtract each
         free chunk found while walking every segment. */
      inuse = footprint - (m->topsize + TOP_FOOT_SIZE);
      for (seg = &m->seg; seg != 0; seg = seg->next) {
        mchunkptr c;
        for (c = align_as_chunk(seg->base);
             segment_holds(seg, c) && c != m->top &&
             c->head != FENCEPOST_HEAD;
             c = next_chunk(c)) {
          if (!is_inuse(c))
            inuse -= chunksize(c);
        }
      }
    }
    POSTACTION(m); /* drop lock */
    kprintf("max system bytes = %10lu\n", (unsigned long)(peak));
    kprintf("system bytes     = %10lu\n", (unsigned long)(footprint));
    kprintf("in use bytes     = %10lu\n", (unsigned long)(inuse));
  }
}
|
|
#endif /* NO_MALLOC_STATS */
|