Mirror of https://github.com/jart/cosmopolitan.git, synced 2025-02-28 22:18:29 +00:00
- Emulator can now test the αcτµαlly pδrταblε εxεcµταblε bootloader.
- Whipped up a webserver named redbean. It services 150k requests per second on a single core. Bundling assets inside the zip enables extremely fast serving for two reasons. The first is that zip central directory lookups go faster than stat() system calls. The second is that both zip and gzip content-encoding use DEFLATE; therefore, compressed responses can be served via the sendfile() system call, which does an in-kernel copy directly from the zip executable structure (see the sketch below). Also note that redbean zip executables can be deployed easily to all platforms, since these native executables work on Linux, Mac, BSD, and Windows.
- Address sanitizer now works very well.
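To make the sendfile() point concrete, here is a minimal sketch, assuming a Linux host and that the zip central directory has already been parsed to yield the executable's own file descriptor plus the offset and length of the stored DEFLATE stream. The function name and parameters are illustrative, not redbean's actual API.

#include <sys/sendfile.h> /* Linux-specific; other platforms differ */
#include <sys/types.h>

/*
 * Copies a precompressed asset straight from the zip executable to the
 * client socket without a user-space copy. The caller is assumed to
 * have already written HTTP headers advertising the matching
 * Content-Encoding, since the body is the DEFLATE stream as stored in
 * the zip.
 */
ssize_t ServeCompressedAsset(int client, int zipfd, off_t offset,
                             size_t length) {
  return sendfile(client, zipfd, &offset, length);
}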
58 lines · 2 KiB · C
#include "libc/mem/mem.h"
|
|
#include "third_party/dlmalloc/dlmalloc.h"
|
|
|
|
/**
|
|
* Frees and clears (sets to NULL) each non-null pointer in the given
|
|
* array. This is likely to be faster than freeing them one-by-one. If
|
|
* footers are used, pointers that have been allocated in different
|
|
* mspaces are not freed or cleared, and the count of all such pointers
|
|
* is returned. For large arrays of pointers with poor locality, it may
|
|
* be worthwhile to sort this array before calling bulk_free.
|
|
*/
|
|
size_t bulk_free(void *array[], size_t nelem) {
|
|
/*
|
|
* Try to free all pointers in the given array. Note: this could be
|
|
* made faster, by delaying consolidation, at the price of disabling
|
|
* some user integrity checks, We still optimize some consolidations
|
|
* by combining adjacent chunks before freeing, which will occur often
|
|
* if allocated with ialloc or the array is sorted.
|
|
*/
|
|
size_t unfreed = 0;
|
|
if (!PREACTION(g_dlmalloc)) {
|
|
void **a;
|
|
void **fence = &(array[nelem]);
|
|
for (a = array; a != fence; ++a) {
|
|
void *mem = *a;
|
|
if (mem != 0) {
|
|
mchunkptr p = mem2chunk(ADDRESS_DEATH_ACTION(mem));
|
|
size_t psize = chunksize(p);
|
|
#if FOOTERS
|
|
if (get_mstate_for(p) != g_dlmalloc) {
|
|
++unfreed;
|
|
continue;
|
|
}
|
|
#endif
|
|
check_inuse_chunk(g_dlmalloc, p);
|
|
*a = 0;
|
|
if (RTCHECK(ok_address(g_dlmalloc, p) && ok_inuse(p))) {
|
|
void **b = a + 1; /* try to merge with next chunk */
|
|
mchunkptr next = next_chunk(p);
|
|
if (b != fence && *b == chunk2mem(next)) {
|
|
size_t newsize = chunksize(next) + psize;
|
|
set_inuse(g_dlmalloc, p, newsize);
|
|
*b = chunk2mem(p);
|
|
} else
|
|
dlmalloc_dispose_chunk(g_dlmalloc, p, psize);
|
|
} else {
|
|
CORRUPTION_ERROR_ACTION(g_dlmalloc);
|
|
break;
|
|
}
|
|
}
|
|
}
|
|
if (should_trim(g_dlmalloc, g_dlmalloc->topsize)) {
|
|
dlmalloc_sys_trim(g_dlmalloc, 0);
|
|
}
|
|
POSTACTION(g_dlmalloc);
|
|
}
|
|
return unfreed;
|
|
}
|
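For reference, a minimal usage sketch of the documented behavior, assuming bulk_free() is the prototype exposed via libc/mem/mem.h as included above; the qsort step is optional and simply mirrors the hint in the comments about adjacent-chunk merging.

#include <stdlib.h>

size_t bulk_free(void *[], size_t); /* as documented above */

/* Orders pointers by address so neighboring chunks end up adjacent. */
static int CompareByAddress(const void *a, const void *b) {
  const char *x = *(const char *const *)a;
  const char *y = *(const char *const *)b;
  return (x > y) - (x < y);
}

int main(void) {
  enum { kCount = 1000 };
  void *ptrs[kCount];
  for (int i = 0; i < kCount; ++i) {
    ptrs[i] = malloc(64); /* all from the default mspace */
  }
  /* Optional: sorting encourages the adjacent-chunk merge above. */
  qsort(ptrs, kCount, sizeof(ptrs[0]), CompareByAddress);
  size_t unfreed = bulk_free(ptrs, kCount);
  /* Every freed slot is now NULL; unfreed counts pointers from other
     mspaces, which is zero here since everything came from malloc. */
  return unfreed != 0;
}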