Better refcounting

Cribbed from [Rust Arc][1] and the [Boost docs][2]:

"""
Increasing the reference counter can always be done with
memory_order_relaxed: New references to an object can only be formed
from an existing reference, and passing an existing reference from one
thread to another must already provide any required synchronization.

It is important to enforce any possible access to the object in one
thread (through an existing reference) to happen before deleting the
object in a different thread. This is achieved by a "release" operation
after dropping a reference (any access to the object through this
reference must obviously have happened before), and an "acquire"
before deleting the object.

It would be possible to use memory_order_acq_rel for the fetch_sub
operation, but this results in unneeded "acquire" operations when the
reference counter does not yet reach zero and may impose a performance
penalty.
"""

[1] https://moshg.github.io/rust-std-ja/src/alloc/arc.rs.html
[2] https://www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html
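
To make the quoted recipe concrete, here is a minimal self-contained
sketch in C11 <stdatomic.h>. It is illustrative only: struct object,
object_keep, and object_drop are hypothetical names, not the zipos code
in the diff below. It follows the same convention as that diff, where
refs counts *extra* references, so a fresh object has refs == 0 and the
final drop is the one that sees fetch_sub return 0:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct object {
      atomic_int refs; /* extra references: 0 means exactly one owner */
    };

    /* New references can only be formed from an existing one, and
       handing that reference to another thread must already provide
       synchronization, so a relaxed increment suffices. */
    struct object *object_keep(struct object *o) {
      atomic_fetch_add_explicit(&o->refs, 1, memory_order_relaxed);
      return o;
    }

    /* The release decrement publishes this thread's accesses; only the
       thread whose decrement takes the counter below zero pays for an
       acquire fence before it is allowed to delete the object. */
    void object_drop(struct object *o) {
      if (!atomic_fetch_sub_explicit(&o->refs, 1, memory_order_release)) {
        atomic_thread_fence(memory_order_acquire);
        free(o);
      }
    }

Using memory_order_acq_rel on the fetch_sub would fold the acquire into
every decrement; the standalone fence defers that cost to the single
thread that actually frees the object.
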
Jōshin 2023-12-01 00:28:16 -05:00
commit 1e01283bb9 (parent 7845495a93)

@@ -31,8 +31,10 @@
 #include "libc/intrin/cmpxchg.h"
 #include "libc/intrin/directmap.internal.h"
 #include "libc/intrin/extend.internal.h"
+#include "libc/intrin/likely.h"
 #include "libc/intrin/strace.internal.h"
 #include "libc/intrin/weaken.h"
+#include "libc/limits.h"
 #include "libc/runtime/internal.h"
 #include "libc/runtime/memtrack.internal.h"
 #include "libc/runtime/zipos.internal.h"
@@ -49,6 +51,8 @@
 #include "libc/thread/tls.h"
 #include "libc/zip.internal.h"
 
+#define MAX_REFS (INT_MAX >> 1)
+
 static char *__zipos_mapend;
 static size_t __zipos_maptotal;
 static pthread_mutex_t __zipos_lock_obj;
@@ -79,13 +83,17 @@ static void *__zipos_mmap_space(size_t mapsize) {
 }
 
 struct ZiposHandle *__zipos_keep(struct ZiposHandle *h) {
-  atomic_fetch_add_explicit(&h->refs, 1, memory_order_relaxed);
+  int refs = atomic_fetch_add_explicit(&h->refs, 1, memory_order_relaxed);
+  unassert(!VERY_UNLIKELY(refs > MAX_REFS));
   return h;
 }
 
 static bool __zipos_drop(struct ZiposHandle *h) {
-  int refs = atomic_load_explicit(&h->refs, memory_order_acquire);
-  return -1 == refs || -1 == atomic_fetch_sub(&h->refs, 1);
+  if (!atomic_fetch_sub_explicit(&h->refs, 1, memory_order_release)) {
+    atomic_thread_fence(memory_order_acquire);
+    return true;
+  }
+  return false;
 }
 
 void __zipos_free(struct ZiposHandle *h) {
@@ -113,7 +121,7 @@ StartOver:
   while ((h = *ph)) {
     if (h->mapsize >= mapsize) {
       if (!_cmpxchg(ph, h, h->next)) goto StartOver;
-      atomic_init(&h->refs, 0);
+      atomic_store_explicit(&h->refs, 0, memory_order_relaxed);
       break;
     }
     ph = &h->next;