mirror of https://github.com/jart/cosmopolitan.git
Better zipos refcounts and atomic reads/seeks (#973)
* Better refcounting

Cribbed from [Rust Arc][1] and the [Boost docs][2]:

"""
Increasing the reference counter can always be done with memory_order_relaxed: New references to an object can only be formed from an existing reference, and passing an existing reference from one thread to another must already provide any required synchronization.

It is important to enforce any possible access to the object in one thread (through an existing reference) to happen before deleting the object in a different thread. This is achieved by a "release" operation after dropping a reference (any access to the object through this reference must obviously happened before), and an "acquire" operation before deleting the object.

It would be possible to use memory_order_acq_rel for the fetch_sub operation, but this results in unneeded "acquire" operations when the reference counter does not yet reach zero and may impose a performance penalty.
"""

[1] https://moshg.github.io/rust-std-ja/src/alloc/arc.rs.html
[2] https://www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html

* Make ZiposHandle's pos atomic

Implements a somewhat stronger guarantee than POSIX specifies: reads and seeks are atomic. They may be arbitrarily reordered between threads, but each one happens all the way and leaves the fd in a consistent state.

This is achieved by "locking" pos in __zipos_read by storing SIZE_MAX to pos during the operation, so only one read can be in flight at a time. Seeks, on the other hand, just update pos in one go, and rerun if it changed in the meantime. I used `LIKELY` / `UNLIKELY` to pessimize the concurrent case; hopefully that buys back some performance.
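The ordering rule quoted above is small enough to show in full. Below is a minimal, self-contained C11 sketch (illustrative names like obj_keep/obj_drop, not code from this commit) of the pattern: a relaxed increment when taking a reference, a release decrement when dropping one, and an acquire fence before the final free. As in the diff below, a count of 0 means exactly one owner, so fetch_sub returning 0 identifies the last drop.

#include <stdatomic.h>
#include <stdlib.h>

struct Obj {
  atomic_int refs;  // 0 == exactly one owner; extra references counted above 0
};

struct Obj *obj_new(void) {
  struct Obj *o = calloc(1, sizeof(*o));
  if (o) atomic_init(&o->refs, 0);
  return o;
}

struct Obj *obj_keep(struct Obj *o) {
  // New references can only be formed from an existing reference,
  // so a relaxed increment is sufficient here.
  atomic_fetch_add_explicit(&o->refs, 1, memory_order_relaxed);
  return o;
}

void obj_drop(struct Obj *o) {
  // The release decrement publishes every access made through this
  // reference; fetch_sub returns the previous value, so 0 means this
  // was the last reference.
  if (!atomic_fetch_sub_explicit(&o->refs, 1, memory_order_release)) {
    // The acquire fence pairs with the releases above before the object
    // is torn down, instead of paying for acq_rel on every decrement.
    atomic_thread_fence(memory_order_acquire);
    free(o);
  }
}

Using memory_order_acq_rel on the fetch_sub would also be correct but, as the quoted passage notes, adds an unneeded acquire on every non-final drop.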
This commit is contained in:
parent f0bfabba07
commit d95d61b1af
4 changed files with 65 additions and 31 deletions
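Before the diff, here is a simplified, standalone sketch of the pos protocol described above (hypothetical names; the actual commit threads this through __zipos_read and __zipos_seek below): a read claims pos by swapping in SIZE_MAX, so at most one read is in flight, and a seek publishes its new offset with a compare-and-swap that reruns if pos changed or is currently claimed.

#include <stdatomic.h>
#include <stdint.h>

#define POS_CLAIMED SIZE_MAX  // sentinel: a read currently owns pos

struct Handle {
  _Atomic(size_t) pos;
  size_t size;
};

// A read claims pos for the duration of the copy.
static size_t claim_pos(struct Handle *h) {
  size_t pos;
  for (;;) {
    pos = atomic_load_explicit(&h->pos, memory_order_relaxed);
    if (pos == POS_CLAIMED) continue;  // another read is in flight; retry
    if (atomic_compare_exchange_weak_explicit(&h->pos, &pos, POS_CLAIMED,
                                              memory_order_acquire,
                                              memory_order_relaxed)) {
      return pos;  // we own pos until release_pos() stores a real value
    }
  }
}

static void release_pos(struct Handle *h, size_t new_pos) {
  atomic_store_explicit(&h->pos, new_pos, memory_order_release);
}

// A seek updates pos in one CAS and reruns if pos moved or is claimed.
static size_t seek_to(struct Handle *h, size_t new_pos) {
  size_t pos;
  for (;;) {
    pos = atomic_load_explicit(&h->pos, memory_order_relaxed);
    if (pos == POS_CLAIMED) continue;  // wait out the in-flight read
    if (atomic_compare_exchange_weak_explicit(&h->pos, &pos, new_pos,
                                              memory_order_acquire,
                                              memory_order_relaxed)) {
      return new_pos;
    }
  }
}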
@@ -31,8 +31,10 @@
 #include "libc/intrin/cmpxchg.h"
 #include "libc/intrin/directmap.internal.h"
 #include "libc/intrin/extend.internal.h"
+#include "libc/intrin/likely.h"
 #include "libc/intrin/strace.internal.h"
 #include "libc/intrin/weaken.h"
+#include "libc/limits.h"
 #include "libc/runtime/internal.h"
 #include "libc/runtime/memtrack.internal.h"
 #include "libc/runtime/zipos.internal.h"
@@ -49,6 +51,8 @@
 #include "libc/thread/tls.h"
 #include "libc/zip.internal.h"
 
+#define MAX_REFS (INT_MAX >> 1)
+
 static char *__zipos_mapend;
 static size_t __zipos_maptotal;
 static pthread_mutex_t __zipos_lock_obj;
@@ -79,13 +83,17 @@ static void *__zipos_mmap_space(size_t mapsize) {
 }
 
 struct ZiposHandle *__zipos_keep(struct ZiposHandle *h) {
-  atomic_fetch_add_explicit(&h->refs, 1, memory_order_relaxed);
+  int refs = atomic_fetch_add_explicit(&h->refs, 1, memory_order_relaxed);
+  unassert(!VERY_UNLIKELY(refs > MAX_REFS));
   return h;
 }
 
 static bool __zipos_drop(struct ZiposHandle *h) {
-  int refs = atomic_load_explicit(&h->refs, memory_order_acquire);
-  return -1 == refs || -1 == atomic_fetch_sub(&h->refs, 1);
+  if (!atomic_fetch_sub_explicit(&h->refs, 1, memory_order_release)) {
+    atomic_thread_fence(memory_order_acquire);
+    return true;
+  }
+  return false;
 }
 
 void __zipos_free(struct ZiposHandle *h) {
@@ -113,7 +121,7 @@ StartOver:
   while ((h = *ph)) {
     if (h->mapsize >= mapsize) {
       if (!_cmpxchg(ph, h, h->next)) goto StartOver;
-      atomic_init(&h->refs, 0);
+      atomic_store_explicit(&h->refs, 0, memory_order_relaxed);
       break;
     }
     ph = &h->next;
@@ -194,8 +202,9 @@ static int __zipos_load(struct Zipos *zipos, size_t cf, int flags,
      return eio();
    }
  }
-  h->pos = 0;
+  atomic_store_explicit(&h->pos, 0, memory_order_relaxed);
  h->cfile = cf;
+  unassert(size < SIZE_MAX);
  h->size = size;
  if (h->mem) {
    minfd = 3;

@@ -18,6 +18,9 @@
 ╚─────────────────────────────────────────────────────────────────────────────*/
 #include "libc/assert.h"
 #include "libc/calls/struct/iovec.h"
+#include "libc/intrin/atomic.h"
+#include "libc/intrin/likely.h"
+#include "libc/limits.h"
 #include "libc/runtime/zipos.internal.h"
 #include "libc/stdio/sysparam.h"
 #include "libc/str/str.h"
@@ -29,13 +32,24 @@
 static ssize_t __zipos_read_impl(struct ZiposHandle *h, const struct iovec *iov,
                                  size_t iovlen, ssize_t opt_offset) {
   int i;
-  int64_t b, x, y;
+  int64_t b, x, y, start_pos;
   if (h->cfile == ZIPOS_SYNTHETIC_DIRECTORY ||
       S_ISDIR(GetZipCfileMode(h->zipos->map + h->cfile))) {
     return eisdir();
   }
   if (opt_offset == -1) {
-    x = y = h->pos;
+    while (true) {
+      start_pos = atomic_load_explicit(&h->pos, memory_order_relaxed);
+      if (UNLIKELY(start_pos == SIZE_MAX)) {
+        continue;
+      }
+      if (LIKELY(atomic_compare_exchange_weak_explicit(
+              &h->pos, &start_pos, SIZE_MAX, memory_order_acquire,
+              memory_order_relaxed))) {
+        break;
+      }
+    }
+    x = y = start_pos;
   } else {
     x = y = opt_offset;
   }
@@ -44,7 +58,7 @@ static ssize_t __zipos_read_impl(struct ZiposHandle *h, const struct iovec *iov,
     if (b) memcpy(iov[i].iov_base, h->mem + y, b);
   }
   if (opt_offset == -1) {
-    h->pos = y;
+    atomic_store_explicit(&h->pos, y, memory_order_release);
   }
   return y - x;
 }

@@ -17,28 +17,18 @@
 │ PERFORMANCE OF THIS SOFTWARE. │
 ╚─────────────────────────────────────────────────────────────────────────────*/
 #include "libc/calls/calls.h"
+#include "libc/intrin/atomic.h"
+#include "libc/intrin/likely.h"
+#include "libc/limits.h"
 #include "libc/runtime/zipos.internal.h"
 #include "libc/stdckdint.h"
 #include "libc/sysv/errfuns.h"
 
-static int64_t GetPosition(struct ZiposHandle *h, int whence) {
-  switch (whence) {
-    case SEEK_SET:
-      return 0;
-    case SEEK_CUR:
-      return h->pos;
-    case SEEK_END:
-      return h->size;
-    default:
-      return einval();
-  }
-}
-
-static int64_t Seek(struct ZiposHandle *h, int64_t offset, int whence) {
-  int64_t pos;
-  if (!ckd_add(&pos, GetPosition(h, whence), offset)) {
-    if (pos >= 0) {
-      return pos;
+static int64_t Seek(int64_t pos, int64_t offset) {
+  int64_t rc;
+  if (!ckd_add(&rc, pos, offset)) {
+    if (rc >= 0) {
+      return rc;
     } else {
       return einval();
     }
@@ -56,9 +46,30 @@ static int64_t Seek(struct ZiposHandle *h, int64_t offset, int whence) {
  * @asyncsignalsafe
  */
 int64_t __zipos_seek(struct ZiposHandle *h, int64_t offset, unsigned whence) {
-  int64_t pos;
-  if ((pos = Seek(h, offset, whence)) != -1) {
-    h->pos = pos;
+  int64_t pos, new_pos;
+  while (true) {
+    pos = atomic_load_explicit(&h->pos, memory_order_relaxed);
+    if (UNLIKELY(pos == SIZE_MAX)) {
+      continue;
+    }
+    switch (whence) {
+      case SEEK_SET:
+        new_pos = Seek(0, offset);
+        break;
+      case SEEK_CUR:
+        new_pos = Seek(pos, offset);
+        break;
+      case SEEK_END:
+        new_pos = Seek(h->size, offset);
+        break;
+      default:
+        new_pos = einval();
+    }
+    if (LIKELY(atomic_compare_exchange_weak_explicit(
+            &h->pos, &pos, new_pos < 0 ? pos : new_pos, memory_order_acquire,
+            memory_order_relaxed))) {
+      break;
+    }
   }
-  return pos;
+  return new_pos;
 }

@@ -22,7 +22,7 @@ struct ZiposHandle {
   size_t mapsize;
   size_t cfile;
   _Atomic(int) refs;
-  size_t pos;  // TODO atomic
+  _Atomic(size_t) pos;
   uint8_t *mem;
   uint8_t data[];
 };