Make more improvements to threads and mappings

- NetBSD should now have faster synchronization
- POSIX barriers may now be shared across processes (see the usage sketch below)
- An edge case with memory map tracking has been fixed
- Grand Central Dispatch is no longer used on MacOS ARM64
- POSIX mutexes in normal mode now use futexes across processes (see the final sketch below)
Justine Tunney 2024-07-24 01:05:00 -07:00
parent 2187d6d2dd
commit e398f3887c
20 changed files with 566 additions and 171 deletions
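
The second bullet above means a barrier can now live in memory visible to several processes. As a minimal, illustrative sketch (not part of this commit; plain POSIX calls only), a PTHREAD_PROCESS_SHARED barrier placed in a MAP_SHARED anonymous mapping and waited on by a parent and a forked child might look like this:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void) {
  // place the barrier where both processes can see it
  pthread_barrier_t *b = mmap(NULL, sizeof(*b), PROT_READ | PROT_WRITE,
                              MAP_SHARED | MAP_ANONYMOUS, -1, 0);
  if (b == MAP_FAILED)
    exit(1);
  pthread_barrierattr_t attr;
  pthread_barrierattr_init(&attr);
  pthread_barrierattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
  pthread_barrier_init(b, &attr, 2);  // two participants: parent and child
  pthread_barrierattr_destroy(&attr);
  if (!fork()) {
    printf("child arrived\n");
    pthread_barrier_wait(b);  // blocks until the parent arrives too
    printf("child released\n");
    _exit(0);
  }
  printf("parent arrived\n");
  int rc = pthread_barrier_wait(b);
  if (rc == PTHREAD_BARRIER_SERIAL_THREAD)
    printf("parent was last to arrive\n");  // exactly one caller sees this
  wait(0);
  pthread_barrier_destroy(b);
  munmap(b, sizeof(*b));
}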

@@ -16,9 +16,10 @@
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
*/
#include "libc/errno.h"
#include "libc/intrin/atomic.h"
#include "libc/str/str.h"
#include "libc/thread/thread.h"
#include "third_party/nsync/counter.h"
/**
* Destroys barrier.
@@ -27,9 +28,8 @@
* @raise EINVAL if threads are still inside the barrier
*/
errno_t pthread_barrier_destroy(pthread_barrier_t *barrier) {
if (barrier->_nsync) {
nsync_counter_free(barrier->_nsync);
barrier->_nsync = 0;
}
if (atomic_load_explicit(&barrier->_waiters, memory_order_relaxed))
return EINVAL;
memset(barrier, -1, sizeof(*barrier));
return 0;
}

@@ -17,8 +17,9 @@
PERFORMANCE OF THIS SOFTWARE.
*/
#include "libc/errno.h"
#include "libc/intrin/atomic.h"
#include "libc/limits.h"
#include "libc/thread/thread.h"
#include "third_party/nsync/counter.h"
/**
* Initializes barrier.
@@ -28,16 +29,17 @@
* before the barrier is released, which must be greater than zero
* @return 0 on success, or error number on failure
* @raise EINVAL if `count` isn't greater than zero
* @raise ENOMEM if insufficient memory exists
*/
errno_t pthread_barrier_init(pthread_barrier_t *barrier,
const pthread_barrierattr_t *attr,
unsigned count) {
nsync_counter c;
if (!count)
return EINVAL;
if (!(c = nsync_counter_new(count)))
return ENOMEM;
*barrier = (pthread_barrier_t){._nsync = c};
if (count > INT_MAX)
return EINVAL;
barrier->_count = count;
barrier->_pshared = attr ? *attr : PTHREAD_PROCESS_PRIVATE;
atomic_store_explicit(&barrier->_counter, count, memory_order_relaxed);
atomic_store_explicit(&barrier->_waiters, 0, memory_order_relaxed);
return 0;
}

@@ -16,25 +16,53 @@
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
*/
#include "libc/calls/blockcancel.internal.h"
#include "libc/errno.h"
#include "libc/intrin/atomic.h"
#include "libc/limits.h"
#include "libc/thread/thread.h"
#include "third_party/nsync/counter.h"
#include "third_party/nsync/futex.internal.h"
/**
* Waits for all threads to arrive at barrier.
*
* When the barrier is broken, the state is reset to what it was
* when pthread_barrier_init() was called, so that the barrier may be
* used again in the same way. The last thread to arrive shall be the
* last to leave and it returns a magic value.
* used again in the same way.
*
* Unlike pthread_cond_timedwait(), this function is not a cancelation
* point, so cleanup handlers aren't needed to guard against cancelation
* while blocked.
*
* @return 0 on success, `PTHREAD_BARRIER_SERIAL_THREAD` to one lucky
* thread which was the last arrival, or an errno on error
* @raise EINVAL if barrier is used incorrectly
*/
errno_t pthread_barrier_wait(pthread_barrier_t *barrier) {
if (nsync_counter_add(barrier->_nsync, -1)) {
nsync_counter_wait(barrier->_nsync, nsync_time_no_deadline);
return 0;
} else {
int n;
// enter barrier
atomic_fetch_add_explicit(&barrier->_waiters, 1, memory_order_acq_rel);
n = atomic_fetch_sub_explicit(&barrier->_counter, 1, memory_order_acq_rel);
n = n - 1;
// this can only happen on invalid usage
if (n < 0)
return EINVAL;
// reset count and wake waiters if we're last at barrier
if (!n) {
atomic_store_explicit(&barrier->_counter, barrier->_count,
memory_order_release);
atomic_store_explicit(&barrier->_waiters, 0, memory_order_release);
nsync_futex_wake_(&barrier->_waiters, INT_MAX, barrier->_pshared);
return PTHREAD_BARRIER_SERIAL_THREAD;
}
// wait for everyone else to arrive at barrier
BLOCK_CANCELATION;
while ((n = atomic_load_explicit(&barrier->_waiters, memory_order_acquire)))
nsync_futex_wait_(&barrier->_waiters, n, barrier->_pshared, 0);
ALLOW_CANCELATION;
return 0;
}
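
To make the reuse semantics above concrete, here is a small illustrative sketch (not taken from this commit): four threads meet at the same barrier for several rounds, and each round exactly one of them receives PTHREAD_BARRIER_SERIAL_THREAD. Because the barrier resets itself to its initial state, the same object is reused every round without reinitialization.

#include <pthread.h>
#include <stdio.h>

#define THREADS 4
#define ROUNDS  3

static pthread_barrier_t barrier;

static void *worker(void *arg) {
  for (int round = 0; round < ROUNDS; ++round) {
    // ... per-thread work for this round would go here ...
    int rc = pthread_barrier_wait(&barrier);
    // one waiter per round is told it was the last to arrive
    if (rc == PTHREAD_BARRIER_SERIAL_THREAD)
      printf("round %d complete\n", round);
  }
  return 0;
}

int main(void) {
  pthread_t th[THREADS];
  pthread_barrier_init(&barrier, 0, THREADS);
  for (int i = 0; i < THREADS; ++i)
    pthread_create(&th[i], 0, worker, 0);
  for (int i = 0; i < THREADS; ++i)
    pthread_join(th[i], 0);
  pthread_barrier_destroy(&barrier);
}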

@@ -23,7 +23,7 @@
*
* @param pshared is set to one of the following
* - `PTHREAD_PROCESS_PRIVATE` (default)
* - `PTHREAD_PROCESS_SHARED` (unsupported)
* - `PTHREAD_PROCESS_SHARED`
* @return 0 on success, or error on failure
*/
errno_t pthread_barrierattr_getpshared(const pthread_barrierattr_t *attr,

@@ -24,6 +24,6 @@
* @return 0 on success, or error on failure
*/
errno_t pthread_barrierattr_init(pthread_barrierattr_t *attr) {
*attr = 0;
*attr = PTHREAD_PROCESS_PRIVATE;
return 0;
}

@@ -24,13 +24,14 @@
*
* @param pshared can be one of
* - `PTHREAD_PROCESS_PRIVATE` (default)
* - `PTHREAD_PROCESS_SHARED` (unsupported)
* - `PTHREAD_PROCESS_SHARED`
* @return 0 on success, or error on failure
* @raise EINVAL if `pshared` is invalid
*/
errno_t pthread_barrierattr_setpshared(pthread_barrierattr_t *attr,
int pshared) {
switch (pshared) {
case PTHREAD_PROCESS_SHARED:
case PTHREAD_PROCESS_PRIVATE:
*attr = pshared;
return 0;

@@ -46,7 +46,7 @@ COSMOPOLITAN_C_START_
#define PTHREAD_RWLOCK_INITIALIZER {0}
#define PTHREAD_MUTEX_INITIALIZER {0}
#define PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP {0, 0, PTHREAD_MUTEX_RECURSIVE}
#define PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP {0, {}, PTHREAD_MUTEX_RECURSIVE}
typedef uintptr_t pthread_t;
typedef int pthread_id_np_t;
@@ -66,7 +66,10 @@ typedef struct pthread_spinlock_s {
typedef struct pthread_mutex_s {
uint32_t _nsync;
int32_t _pid;
union {
int32_t _pid;
_Atomic(int32_t) _futex;
};
_Atomic(uint64_t) _word;
} pthread_mutex_t;
@@ -92,7 +95,10 @@ typedef struct pthread_rwlock_s {
} pthread_rwlock_t;
typedef struct pthread_barrier_s {
void *_nsync;
int _count;
char _pshared;
_Atomic(int) _counter;
_Atomic(int) _waiters;
} pthread_barrier_t;
typedef struct pthread_attr_s {
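
The header diff above gives pthread_mutex_s an atomic _futex word overlapping _pid, in line with the commit note that normal-mode mutexes now use futexes across processes. As a minimal illustrative sketch (not part of this commit), a process-shared normal mutex guarding a counter in a MAP_SHARED mapping could be exercised like this:

#include <pthread.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

struct shared {
  pthread_mutex_t lock;
  int counter;
};

int main(void) {
  struct shared *s = mmap(NULL, sizeof(*s), PROT_READ | PROT_WRITE,
                          MAP_SHARED | MAP_ANONYMOUS, -1, 0);
  if (s == MAP_FAILED)
    return 1;
  pthread_mutexattr_t attr;
  pthread_mutexattr_init(&attr);
  pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
  pthread_mutex_init(&s->lock, &attr);
  pthread_mutexattr_destroy(&attr);
  for (int i = 0; i < 4; ++i) {
    if (!fork()) {
      for (int j = 0; j < 100000; ++j) {
        pthread_mutex_lock(&s->lock);
        s->counter++;  // serialized across the four child processes
        pthread_mutex_unlock(&s->lock);
      }
      _exit(0);
    }
  }
  while (wait(0) > 0) {}
  printf("counter = %d\n", s->counter);  // expect 400000
  pthread_mutex_destroy(&s->lock);
  munmap(s, sizeof(*s));
}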