Make ZiposHandle's pos atomic

Implements a somewhat stronger guarantee than POSIX specifies: reads and
seeks are atomic. They may be arbitrarily reordered between threads, but
each one happens all the way and leaves the fd in a consistent state.

This is achieved by "locking" pos in __zipos_read: SIZE_MAX is stored
to pos for the duration of the operation, so only one read can be in
flight at a time. Seeks, on the other hand, update pos with a single
compare-and-swap, retrying if it changed in the meantime.
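
As an aside, here is a minimal standalone sketch of that pattern in
plain C11 <stdatomic.h>, with made-up names (struct File, read_some,
seek_set) rather than the actual zipos code:

// Hypothetical sketch of the scheme described above, not the zipos code:
// SIZE_MAX marks pos as "owned" while a read is in flight; seeks spin past
// the sentinel and CAS the new offset in.
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct File {
  _Atomic(size_t) pos;  // current offset; SIZE_MAX means a read owns it
  size_t size;          // file length, always kept < SIZE_MAX
};

static size_t read_some(struct File *f, size_t want) {
  size_t start, end;
  for (;;) {
    start = atomic_load_explicit(&f->pos, memory_order_relaxed);
    if (start == SIZE_MAX) continue;  // another read holds the "lock"
    if (atomic_compare_exchange_weak_explicit(&f->pos, &start, SIZE_MAX,
                                              memory_order_acquire,
                                              memory_order_relaxed)) {
      break;  // pos now reads SIZE_MAX, so seeks and other reads wait
    }
  }
  end = start + want;
  if (end > f->size) end = f->size;
  // ... copy bytes [start, end) out of the backing store here ...
  atomic_store_explicit(&f->pos, end, memory_order_release);  // unlock
  return end - start;
}

static size_t seek_set(struct File *f, size_t newpos) {
  size_t old;
  for (;;) {
    old = atomic_load_explicit(&f->pos, memory_order_relaxed);
    if (old == SIZE_MAX) continue;  // wait for the in-flight read
    if (atomic_compare_exchange_weak_explicit(&f->pos, &old, newpos,
                                              memory_order_acquire,
                                              memory_order_relaxed)) {
      return newpos;  // pos went from old to newpos in one shot
    }
  }
}

int main(void) {
  struct File f = {.pos = 0, .size = 100};
  printf("read %zu bytes\n", read_some(&f, 30));    // pos: 0 -> 30
  printf("pos is now %zu\n", seek_set(&f, 10));     // pos: 30 -> 10
  printf("read %zu bytes\n", read_some(&f, 1000));  // clamped at size
}

The release store that publishes the final offset doubles as the
unlock; a seek's CAS can only succeed while pos holds a real offset, so
it can never tear an in-flight read.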

I used `LIKELY` / `UNLIKELY` to pessimize the concurrent case in favor
of the common uncontended one; hopefully that buys back some
performance.
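
For reference, LIKELY / UNLIKELY are conventionally thin wrappers
around __builtin_expect; the definitions below show the usual shape and
are assumed, not quoted from libc/intrin/likely.h:

// Assumed, conventional definitions: they tell the compiler which way a
// branch usually goes, so the expected path is laid out as fall-through
// straight-line code.
#define LIKELY(x)   __builtin_expect(!!(x), 1)
#define UNLIKELY(x) __builtin_expect(!!(x), 0)

Here the SIZE_MAX sentinel check is marked UNLIKELY and the CAS success
LIKELY, keeping the uncontended path hot.
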
Jōshin 2023-12-01 02:56:58 -05:00
parent 1e01283bb9
commit 03d931c367
4 changed files with 53 additions and 27 deletions

View file

@@ -202,8 +202,9 @@ static int __zipos_load(struct Zipos *zipos, size_t cf, int flags,
       return eio();
     }
   }
-  h->pos = 0;
+  atomic_store_explicit(&h->pos, 0, memory_order_relaxed);
   h->cfile = cf;
+  unassert(size < SIZE_MAX);
   h->size = size;
   if (h->mem) {
     minfd = 3;

View file

@@ -18,6 +18,9 @@
 */
 #include "libc/assert.h"
 #include "libc/calls/struct/iovec.h"
+#include "libc/intrin/atomic.h"
+#include "libc/intrin/likely.h"
+#include "libc/limits.h"
 #include "libc/runtime/zipos.internal.h"
 #include "libc/stdio/sysparam.h"
 #include "libc/str/str.h"
@@ -29,13 +32,24 @@
 static ssize_t __zipos_read_impl(struct ZiposHandle *h, const struct iovec *iov,
                                  size_t iovlen, ssize_t opt_offset) {
   int i;
-  int64_t b, x, y;
+  int64_t b, x, y, start_pos;
   if (h->cfile == ZIPOS_SYNTHETIC_DIRECTORY ||
       S_ISDIR(GetZipCfileMode(h->zipos->map + h->cfile))) {
     return eisdir();
   }
   if (opt_offset == -1) {
-    x = y = h->pos;
+    while (true) {
+      start_pos = atomic_load_explicit(&h->pos, memory_order_relaxed);
+      if (UNLIKELY(start_pos == SIZE_MAX)) {
+        continue;
+      }
+      if (LIKELY(atomic_compare_exchange_weak_explicit(
+              &h->pos, &start_pos, SIZE_MAX, memory_order_acquire,
+              memory_order_relaxed))) {
+        break;
+      }
+    }
+    x = y = start_pos;
   } else {
     x = y = opt_offset;
   }
@@ -44,7 +58,7 @@ static ssize_t __zipos_read_impl(struct ZiposHandle *h, const struct iovec *iov,
     if (b) memcpy(iov[i].iov_base, h->mem + y, b);
   }
   if (opt_offset == -1) {
-    h->pos = y;
+    atomic_store_explicit(&h->pos, y, memory_order_release);
   }
   return y - x;
 }

View file

@@ -17,28 +17,18 @@
   PERFORMANCE OF THIS SOFTWARE.
 */
 #include "libc/calls/calls.h"
+#include "libc/intrin/atomic.h"
+#include "libc/intrin/likely.h"
+#include "libc/limits.h"
 #include "libc/runtime/zipos.internal.h"
 #include "libc/stdckdint.h"
 #include "libc/sysv/errfuns.h"
 
-static int64_t GetPosition(struct ZiposHandle *h, int whence) {
-  switch (whence) {
-    case SEEK_SET:
-      return 0;
-    case SEEK_CUR:
-      return h->pos;
-    case SEEK_END:
-      return h->size;
-    default:
-      return einval();
-  }
-}
-
-static int64_t Seek(struct ZiposHandle *h, int64_t offset, int whence) {
-  int64_t pos;
-  if (!ckd_add(&pos, GetPosition(h, whence), offset)) {
-    if (pos >= 0) {
-      return pos;
+static int64_t Seek(int64_t pos, int64_t offset) {
+  int64_t rc;
+  if (!ckd_add(&rc, pos, offset)) {
+    if (rc >= 0) {
+      return rc;
     } else {
       return einval();
     }
@@ -56,9 +46,30 @@ static int64_t Seek(struct ZiposHandle *h, int64_t offset, int whence)
  * @asyncsignalsafe
  */
 int64_t __zipos_seek(struct ZiposHandle *h, int64_t offset, unsigned whence) {
-  int64_t pos;
-  if ((pos = Seek(h, offset, whence)) != -1) {
-    h->pos = pos;
+  int64_t pos, new_pos;
+  while (true) {
+    pos = atomic_load_explicit(&h->pos, memory_order_relaxed);
+    if (UNLIKELY(pos == SIZE_MAX)) {
+      continue;
+    }
+    switch (whence) {
+      case SEEK_SET:
+        new_pos = Seek(0, offset);
+        break;
+      case SEEK_CUR:
+        new_pos = Seek(pos, offset);
+        break;
+      case SEEK_END:
+        new_pos = Seek(h->size, offset);
+        break;
+      default:
+        new_pos = einval();
+    }
+    if (LIKELY(atomic_compare_exchange_weak_explicit(
+            &h->pos, &pos, new_pos < 0 ? pos : new_pos, memory_order_acquire,
+            memory_order_relaxed))) {
+      break;
+    }
   }
-  return pos;
+  return new_pos;
 }

View file

@@ -22,7 +22,7 @@ struct ZiposHandle {
   size_t mapsize;
   size_t cfile;
   _Atomic(int) refs;
-  size_t pos;  // TODO atomic
+  _Atomic(size_t) pos;
   uint8_t *mem;
   uint8_t data[];
 };