/*
 * QuickJS Javascript Engine
 *
 * Copyright (c) 2017-2021 Fabrice Bellard
 * Copyright (c) 2017-2021 Charlie Gordon
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "libc/inttypes.h"
#include "libc/str/str.h"
#include "third_party/quickjs/internal.h"

asm(".ident\t\"\\n\\n\
QuickJS (MIT License)\\n\
Copyright (c) 2017-2021 Fabrice Bellard\\n\
Copyright (c) 2017-2021 Charlie Gordon\"");
asm(".include \"libc/disclaimer.inc\"");
/* clang-format off */

/* Compute memory used by various object types */
/* XXX: poor man's approach to handling multiply referenced objects */
typedef struct JSMemoryUsage_helper {
    double memory_used_count;
    double str_count;
    double str_size;
    int64_t js_func_count;
    double js_func_size;
    int64_t js_func_code_size;
    int64_t js_func_pc2line_count;
    int64_t js_func_pc2line_size;
} JSMemoryUsage_helper;
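
/* Counters that can receive fractional contributions are kept as doubles:
   an object referenced from several places is charged 1/ref_count per
   reference, so the shares add up to one whole object once every reference
   has been visited, and the result is rounded at the end. Counters that
   never receive fractional shares (function count, bytecode and pc2line
   sizes) stay int64_t. */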

static void compute_value_size(JSValueConst val, JSMemoryUsage_helper *hp);

static void compute_jsstring_size(JSString *str, JSMemoryUsage_helper *hp)
{
    if (!str->atom_type) { /* atoms are handled separately */
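        /* charge 1/ref_count of the string to each reference; the byte size
           is sizeof(JSString) plus (len << is_wide_char) + 1 - is_wide_char
           payload bytes, i.e. len + 1 bytes for 8-bit strings (which carry a
           trailing NUL) and 2 * len bytes for 16-bit ones */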
        double s_ref_count = str->header.ref_count;
        hp->str_count += 1 / s_ref_count;
        hp->str_size += ((sizeof(*str) + (str->len << str->is_wide_char) +
                          1 - str->is_wide_char) / s_ref_count);
    }
}

static void compute_bytecode_size(JSFunctionBytecode *b, JSMemoryUsage_helper *hp)
{
    int memory_used_count, js_func_size, i;
    memory_used_count = 0;
    js_func_size = offsetof(JSFunctionBytecode, debug);
    if (b->vardefs) {
        js_func_size += (b->arg_count + b->var_count) * sizeof(*b->vardefs);
    }
    if (b->cpool) {
        js_func_size += b->cpool_count * sizeof(*b->cpool);
        for (i = 0; i < b->cpool_count; i++) {
            JSValueConst val = b->cpool[i];
            compute_value_size(val, hp);
        }
    }
    if (b->closure_var) {
        js_func_size += b->closure_var_count * sizeof(*b->closure_var);
    }
    if (!b->read_only_bytecode && b->byte_code_buf) {
        hp->js_func_code_size += b->byte_code_len;
    }
    if (b->has_debug) {
        js_func_size += sizeof(*b) - offsetof(JSFunctionBytecode, debug);
        if (b->debug.source) {
            memory_used_count++;
            js_func_size += b->debug.source_len + 1;
        }
        if (b->debug.pc2line_len) {
            memory_used_count++;
            hp->js_func_pc2line_count += 1;
            hp->js_func_pc2line_size += b->debug.pc2line_len;
        }
    }
    hp->js_func_size += js_func_size;
    hp->js_func_count += 1;
    hp->memory_used_count += memory_used_count;
}

static void compute_value_size(JSValueConst val, JSMemoryUsage_helper *hp)
{
    switch(JS_VALUE_GET_TAG(val)) {
    case JS_TAG_STRING:
        compute_jsstring_size(JS_VALUE_GET_STRING(val), hp);
        break;
#ifdef CONFIG_BIGNUM
    case JS_TAG_BIG_INT:
    case JS_TAG_BIG_FLOAT:
    case JS_TAG_BIG_DECIMAL:
        /* should track JSBigFloat usage */
        break;
#endif
    }
}

void JS_ComputeMemoryUsage(JSRuntime *rt, JSMemoryUsage *s)
{
    struct list_head *el, *el1;
    int i;
    JSMemoryUsage_helper mem = { 0 }, *hp = &mem;
    bzero(s, sizeof(*s));
    s->malloc_count = rt->malloc_state.malloc_count;
    s->malloc_size = rt->malloc_state.malloc_size;
    s->malloc_limit = rt->malloc_state.malloc_limit;
    s->memory_used_count = 2; /* rt + rt->class_array */
    s->memory_used_size = sizeof(JSRuntime) + sizeof(JSValue) * rt->class_count;
    list_for_each(el, &rt->context_list) {
        JSContext *ctx = list_entry(el, JSContext, link);
        JSShape *sh = ctx->array_shape;
        s->memory_used_count += 2; /* ctx + ctx->class_proto */
        s->memory_used_size += sizeof(JSContext) +
                               sizeof(JSValue) * rt->class_count;
        s->binary_object_count += ctx->binary_object_count;
        s->binary_object_size += ctx->binary_object_size;
        /* the hashed shapes are counted separately */
        if (sh && !sh->is_hashed) {
            int hash_size = sh->prop_hash_mask + 1;
            s->shape_count++;
            s->shape_size += get_shape_size(hash_size, sh->prop_size);
        }
        list_for_each(el1, &ctx->loaded_modules) {
            JSModuleDef *m = list_entry(el1, JSModuleDef, link);
            s->memory_used_count += 1;
            s->memory_used_size += sizeof(*m);
            if (m->req_module_entries) {
                s->memory_used_count += 1;
                s->memory_used_size += m->req_module_entries_count * sizeof(*m->req_module_entries);
            }
            if (m->export_entries) {
                s->memory_used_count += 1;
                s->memory_used_size += m->export_entries_count * sizeof(*m->export_entries);
                for (i = 0; i < m->export_entries_count; i++) {
                    JSExportEntry *me = &m->export_entries[i];
                    if (me->export_type == JS_EXPORT_TYPE_LOCAL && me->u.local.var_ref) {
                        /* potential multiple count */
                        s->memory_used_count += 1;
                        compute_value_size(me->u.local.var_ref->value, hp);
                    }
                }
            }
            if (m->star_export_entries) {
                s->memory_used_count += 1;
                s->memory_used_size += m->star_export_entries_count * sizeof(*m->star_export_entries);
            }
            if (m->import_entries) {
                s->memory_used_count += 1;
                s->memory_used_size += m->import_entries_count * sizeof(*m->import_entries);
            }
            compute_value_size(m->module_ns, hp);
            compute_value_size(m->func_obj, hp);
        }
    }
    list_for_each(el, &rt->gc_obj_list) {
        JSGCObjectHeader *gp = list_entry(el, JSGCObjectHeader, link);
        JSObject *p;
        JSShape *sh;
        JSShapeProperty *prs;
        /* XXX: could count the other GC object types too */
        if (gp->gc_obj_type == JS_GC_OBJ_TYPE_FUNCTION_BYTECODE) {
            compute_bytecode_size((JSFunctionBytecode *)gp, hp);
            continue;
        } else if (gp->gc_obj_type != JS_GC_OBJ_TYPE_JS_OBJECT) {
            continue;
        }
        p = (JSObject *)gp;
        sh = p->shape;
        s->obj_count++;
        if (p->prop) {
            s->memory_used_count++;
            s->prop_size += sh->prop_size * sizeof(*p->prop);
            s->prop_count += sh->prop_count;
            prs = get_shape_prop(sh);
            for(i = 0; i < sh->prop_count; i++) {
                JSProperty *pr = &p->prop[i];
                if (prs->atom != JS_ATOM_NULL && !(prs->flags & JS_PROP_TMASK)) {
                    compute_value_size(pr->u.value, hp);
                }
                prs++;
            }
        }
        /* the hashed shapes are counted separately */
        if (!sh->is_hashed) {
            int hash_size = sh->prop_hash_mask + 1;
            s->shape_count++;
            s->shape_size += get_shape_size(hash_size, sh->prop_size);
        }
        switch(p->class_id) {
        case JS_CLASS_ARRAY: /* u.array | length */
        case JS_CLASS_ARGUMENTS: /* u.array | length */
            s->array_count++;
            if (p->fast_array) {
                s->fast_array_count++;
                if (p->u.array.u.values) {
                    s->memory_used_count++;
                    s->memory_used_size += p->u.array.count *
                                           sizeof(*p->u.array.u.values);
                    s->fast_array_elements += p->u.array.count;
                    for (i = 0; i < p->u.array.count; i++) {
                        compute_value_size(p->u.array.u.values[i], hp);
                    }
                }
            }
            break;
        case JS_CLASS_NUMBER: /* u.object_data */
        case JS_CLASS_STRING: /* u.object_data */
        case JS_CLASS_BOOLEAN: /* u.object_data */
        case JS_CLASS_SYMBOL: /* u.object_data */
        case JS_CLASS_DATE: /* u.object_data */
#ifdef CONFIG_BIGNUM
        case JS_CLASS_BIG_INT: /* u.object_data */
        case JS_CLASS_BIG_FLOAT: /* u.object_data */
        case JS_CLASS_BIG_DECIMAL: /* u.object_data */
#endif
            compute_value_size(p->u.object_data, hp);
            break;
        case JS_CLASS_C_FUNCTION: /* u.cfunc */
            s->c_func_count++;
            break;
        case JS_CLASS_BYTECODE_FUNCTION: /* u.func */
            {
                JSFunctionBytecode *b = p->u.func.function_bytecode;
                JSVarRef **var_refs = p->u.func.var_refs;
                /* home_object: object will be accounted for in list scan */
                if (var_refs) {
                    s->memory_used_count++;
                    s->js_func_size += b->closure_var_count * sizeof(*var_refs);
                    for (i = 0; i < b->closure_var_count; i++) {
                        if (var_refs[i]) {
                            double ref_count = var_refs[i]->header.ref_count;
                            s->memory_used_count += 1 / ref_count;
                            s->js_func_size += sizeof(*var_refs[i]) / ref_count;
                            /* handle non object closed values */
                            if (var_refs[i]->pvalue == &var_refs[i]->value) {
                                /* potential multiple count */
                                compute_value_size(var_refs[i]->value, hp);
                            }
                        }
                    }
                }
            }
            break;
        case JS_CLASS_BOUND_FUNCTION: /* u.bound_function */
            {
                JSBoundFunction *bf = p->u.bound_function;
                /* func_obj and this_val are objects */
                for (i = 0; i < bf->argc; i++) {
                    compute_value_size(bf->argv[i], hp);
                }
                s->memory_used_count += 1;
                s->memory_used_size += sizeof(*bf) + bf->argc * sizeof(*bf->argv);
            }
            break;
        case JS_CLASS_C_FUNCTION_DATA: /* u.c_function_data_record */
            {
                JSCFunctionDataRecord *fd = p->u.c_function_data_record;
                if (fd) {
                    for (i = 0; i < fd->data_len; i++) {
                        compute_value_size(fd->data[i], hp);
                    }
                    s->memory_used_count += 1;
                    s->memory_used_size += sizeof(*fd) + fd->data_len * sizeof(*fd->data);
                }
            }
            break;
        case JS_CLASS_REGEXP: /* u.regexp */
            compute_jsstring_size(p->u.regexp.pattern, hp);
            compute_jsstring_size(p->u.regexp.bytecode, hp);
            break;
        case JS_CLASS_FOR_IN_ITERATOR: /* u.for_in_iterator */
            {
                JSForInIterator *it = p->u.for_in_iterator;
                if (it) {
                    compute_value_size(it->obj, hp);
                    s->memory_used_count += 1;
                    s->memory_used_size += sizeof(*it);
                }
            }
            break;
        case JS_CLASS_ARRAY_BUFFER: /* u.array_buffer */
        case JS_CLASS_SHARED_ARRAY_BUFFER: /* u.array_buffer */
            {
                JSArrayBuffer *abuf = p->u.array_buffer;
                if (abuf) {
                    s->memory_used_count += 1;
                    s->memory_used_size += sizeof(*abuf);
                    if (abuf->data) {
                        s->memory_used_count += 1;
                        s->memory_used_size += abuf->byte_length;
                    }
                }
            }
            break;
        case JS_CLASS_GENERATOR: /* u.generator_data */
        case JS_CLASS_UINT8C_ARRAY: /* u.typed_array / u.array */
        case JS_CLASS_INT8_ARRAY: /* u.typed_array / u.array */
        case JS_CLASS_UINT8_ARRAY: /* u.typed_array / u.array */
        case JS_CLASS_INT16_ARRAY: /* u.typed_array / u.array */
        case JS_CLASS_UINT16_ARRAY: /* u.typed_array / u.array */
        case JS_CLASS_INT32_ARRAY: /* u.typed_array / u.array */
        case JS_CLASS_UINT32_ARRAY: /* u.typed_array / u.array */
#ifdef CONFIG_BIGNUM
        case JS_CLASS_BIG_INT64_ARRAY: /* u.typed_array / u.array */
        case JS_CLASS_BIG_UINT64_ARRAY: /* u.typed_array / u.array */
#endif
        case JS_CLASS_FLOAT32_ARRAY: /* u.typed_array / u.array */
        case JS_CLASS_FLOAT64_ARRAY: /* u.typed_array / u.array */
        case JS_CLASS_DATAVIEW: /* u.typed_array */
#ifdef CONFIG_BIGNUM
        case JS_CLASS_FLOAT_ENV: /* u.float_env */
#endif
        case JS_CLASS_MAP: /* u.map_state */
        case JS_CLASS_SET: /* u.map_state */
        case JS_CLASS_WEAKMAP: /* u.map_state */
        case JS_CLASS_WEAKSET: /* u.map_state */
        case JS_CLASS_MAP_ITERATOR: /* u.map_iterator_data */
        case JS_CLASS_SET_ITERATOR: /* u.map_iterator_data */
        case JS_CLASS_ARRAY_ITERATOR: /* u.array_iterator_data */
        case JS_CLASS_STRING_ITERATOR: /* u.array_iterator_data */
        case JS_CLASS_PROXY: /* u.proxy_data */
        case JS_CLASS_PROMISE: /* u.promise_data */
        case JS_CLASS_PROMISE_RESOLVE_FUNCTION: /* u.promise_function_data */
        case JS_CLASS_PROMISE_REJECT_FUNCTION: /* u.promise_function_data */
        case JS_CLASS_ASYNC_FUNCTION_RESOLVE: /* u.async_function_data */
        case JS_CLASS_ASYNC_FUNCTION_REJECT: /* u.async_function_data */
        case JS_CLASS_ASYNC_FROM_SYNC_ITERATOR: /* u.async_from_sync_iterator_data */
        case JS_CLASS_ASYNC_GENERATOR: /* u.async_generator_data */
            /* TODO */
        default:
            /* XXX: class definition should have an opaque block size */
            if (p->u.opaque) {
                s->memory_used_count += 1;
            }
            break;
        }
    }
    s->obj_size += s->obj_count * sizeof(JSObject);
    /* hashed shapes */
    s->memory_used_count++; /* rt->shape_hash */
    s->memory_used_size += sizeof(rt->shape_hash[0]) * rt->shape_hash_size;
    for(i = 0; i < rt->shape_hash_size; i++) {
        JSShape *sh;
        for(sh = rt->shape_hash[i]; sh != NULL; sh = sh->shape_hash_next) {
            int hash_size = sh->prop_hash_mask + 1;
            s->shape_count++;
            s->shape_size += get_shape_size(hash_size, sh->prop_size);
        }
    }
    /* atoms */
    s->memory_used_count += 2; /* rt->atom_array, rt->atom_hash */
    s->atom_count = rt->atom_count;
    s->atom_size = sizeof(rt->atom_array[0]) * rt->atom_size +
                   sizeof(rt->atom_hash[0]) * rt->atom_hash_size;
    for(i = 0; i < rt->atom_size; i++) {
        JSAtomStruct *p = rt->atom_array[i];
        if (!atom_is_free(p)) {
            s->atom_size += (sizeof(*p) + (p->len << p->is_wide_char) +
                             1 - p->is_wide_char);
        }
    }
    s->str_count = round(mem.str_count);
    s->str_size = round(mem.str_size);
    s->js_func_count = mem.js_func_count;
    s->js_func_size = round(mem.js_func_size);
    s->js_func_code_size = mem.js_func_code_size;
    s->js_func_pc2line_count = mem.js_func_pc2line_count;
    s->js_func_pc2line_size = mem.js_func_pc2line_size;
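    /* each atom, string, object, shape and function body counted above is
       also one allocated block, so fold those tallies into the grand totals */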
    s->memory_used_count += round(mem.memory_used_count) +
                            s->atom_count + s->str_count +
                            s->obj_count + s->shape_count +
                            s->js_func_count + s->js_func_pc2line_count;
    s->memory_used_size += s->atom_size + s->str_size +
                           s->obj_size + s->prop_size + s->shape_size +
                           s->js_func_size + s->js_func_code_size + s->js_func_pc2line_size;
}

void JS_DumpMemoryUsage(FILE *fp, const JSMemoryUsage *s, JSRuntime *rt)
{
    fprintf(fp, "QuickJS memory usage -- %d-bit, malloc limit: %"PRId64"\n\n",
            (int)sizeof(void *) * 8, (int64_t)(ssize_t)s->malloc_limit);
#if 1
    if (rt) {
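        /* probe the allocator: allocate one instance of each core struct and
           report its requested size plus the slack that
           js_malloc_usable_size_rt() says the block actually has */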
        static const struct {
            const char *name;
            size_t size;
        } object_types[] = {
            { "JSRuntime", sizeof(JSRuntime) },
            { "JSContext", sizeof(JSContext) },
            { "JSObject", sizeof(JSObject) },
            { "JSString", sizeof(JSString) },
            { "JSFunctionBytecode", sizeof(JSFunctionBytecode) },
        };
        int i, usage_size_ok = 0;
        for(i = 0; i < countof(object_types); i++) {
            unsigned int size = object_types[i].size;
            void *p = js_malloc_rt(rt, size);
            if (p) {
                unsigned int size1 = js_malloc_usable_size_rt(rt, p);
                if (size1 >= size) {
                    usage_size_ok = 1;
                    fprintf(fp, " %3u + %-2u %s\n",
                            size, size1 - size, object_types[i].name);
                }
                js_free_rt(rt, p);
            }
        }
        if (!usage_size_ok) {
            fprintf(fp, " malloc_usable_size unavailable\n");
        }
        {
            int obj_classes[JS_CLASS_INIT_COUNT + 1] = { 0 };
            int class_id;
            struct list_head *el;
            list_for_each(el, &rt->gc_obj_list) {
                JSGCObjectHeader *gp = list_entry(el, JSGCObjectHeader, link);
                JSObject *p;
                if (gp->gc_obj_type == JS_GC_OBJ_TYPE_JS_OBJECT) {
                    p = (JSObject *)gp;
                    obj_classes[min_uint32(p->class_id, JS_CLASS_INIT_COUNT)]++;
                }
            }
            fprintf(fp, "\n" "JSObject classes\n");
            if (obj_classes[0])
                fprintf(fp, " %5d %2.0d %s\n", obj_classes[0], 0, "none");
            for (class_id = 1; class_id < JS_CLASS_INIT_COUNT; class_id++) {
                if (obj_classes[class_id]) {
                    char buf[ATOM_GET_STR_BUF_SIZE];
                    fprintf(fp, " %5d %2.0d %s\n", obj_classes[class_id], class_id,
                            JS_AtomGetStrRT(rt, buf, sizeof(buf), js_std_class_def[class_id - 1].class_name));
                }
            }
            if (obj_classes[JS_CLASS_INIT_COUNT])
                fprintf(fp, " %5d %2.0d %s\n", obj_classes[JS_CLASS_INIT_COUNT], 0, "other");
        }
        fprintf(fp, "\n");
    }
#endif
    fprintf(fp, "%-20s %8s %8s\n", "NAME", "COUNT", "SIZE");
    if (s->malloc_count) {
        fprintf(fp, "%-20s %8"PRId64" %8"PRId64" (%0.1f per block)\n",
                "memory allocated", s->malloc_count, s->malloc_size,
                (double)s->malloc_size / s->malloc_count);
        fprintf(fp, "%-20s %8"PRId64" %8"PRId64" (%d overhead, %0.1f average slack)\n",
                "memory used", s->memory_used_count, s->memory_used_size,
                MALLOC_OVERHEAD, ((double)(s->malloc_size - s->memory_used_size) /
                                  s->memory_used_count));
    }
    if (s->atom_count) {
        fprintf(fp, "%-20s %8"PRId64" %8"PRId64" (%0.1f per atom)\n",
                "atoms", s->atom_count, s->atom_size,
                (double)s->atom_size / s->atom_count);
    }
    if (s->str_count) {
        fprintf(fp, "%-20s %8"PRId64" %8"PRId64" (%0.1f per string)\n",
                "strings", s->str_count, s->str_size,
                (double)s->str_size / s->str_count);
    }
    if (s->obj_count) {
        fprintf(fp, "%-20s %8"PRId64" %8"PRId64" (%0.1f per object)\n",
                "objects", s->obj_count, s->obj_size,
                (double)s->obj_size / s->obj_count);
        fprintf(fp, "%-20s %8"PRId64" %8"PRId64" (%0.1f per object)\n",
                " properties", s->prop_count, s->prop_size,
                (double)s->prop_count / s->obj_count);
        fprintf(fp, "%-20s %8"PRId64" %8"PRId64" (%0.1f per shape)\n",
                " shapes", s->shape_count, s->shape_size,
                (double)s->shape_size / s->shape_count);
    }
    if (s->js_func_count) {
        fprintf(fp, "%-20s %8"PRId64" %8"PRId64"\n",
                "bytecode functions", s->js_func_count, s->js_func_size);
        fprintf(fp, "%-20s %8"PRId64" %8"PRId64" (%0.1f per function)\n",
                " bytecode", s->js_func_count, s->js_func_code_size,
                (double)s->js_func_code_size / s->js_func_count);
        if (s->js_func_pc2line_count) {
            fprintf(fp, "%-20s %8"PRId64" %8"PRId64" (%0.1f per function)\n",
                    " pc2line", s->js_func_pc2line_count,
                    s->js_func_pc2line_size,
                    (double)s->js_func_pc2line_size / s->js_func_pc2line_count);
        }
    }
    if (s->c_func_count) {
        fprintf(fp, "%-20s %8"PRId64"\n", "C functions", s->c_func_count);
    }
    if (s->array_count) {
        fprintf(fp, "%-20s %8"PRId64"\n", "arrays", s->array_count);
        if (s->fast_array_count) {
            fprintf(fp, "%-20s %8"PRId64"\n", " fast arrays", s->fast_array_count);
            fprintf(fp, "%-20s %8"PRId64" %8"PRId64" (%0.1f per fast array)\n",
                    " elements", s->fast_array_elements,
                    s->fast_array_elements * (int)sizeof(JSValue),
                    (double)s->fast_array_elements / s->fast_array_count);
        }
    }
    if (s->binary_object_count) {
        fprintf(fp, "%-20s %8"PRId64" %8"PRId64"\n",
                "binary objects", s->binary_object_count, s->binary_object_size);
    }
}
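
/*
 * Usage sketch: an embedder can obtain and print the statistics gathered
 * above through the public quickjs.h API. The script text and the "<demo>"
 * file name below are made up for illustration.
 *
 *   JSRuntime *rt = JS_NewRuntime();
 *   JSContext *ctx = JS_NewContext(rt);
 *   const char *src = "[1, 2, 3].map(x => x * x);";
 *   JSValue v = JS_Eval(ctx, src, strlen(src), "<demo>", JS_EVAL_TYPE_GLOBAL);
 *   JS_FreeValue(ctx, v);
 *   JSMemoryUsage mu;
 *   JS_ComputeMemoryUsage(rt, &mu);
 *   JS_DumpMemoryUsage(stdout, &mu, rt);
 *   JS_FreeContext(ctx);
 *   JS_FreeRuntime(rt);
 */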