Mirror of https://github.com/jart/cosmopolitan.git
Cleanup inline locking code
parent 96185e1ac0
commit 4427581a05
12 changed files with 48 additions and 74 deletions
@@ -267,12 +267,7 @@ LIBC_HDRS = $(filter %.h,$(LIBC_FILES)) $(LIBC_ISYSTEM)
LIBC_HDRS_H = $(filter %.h,$(LIBC_HDRS))
LIBC_INCS = $(filter %.inc,$(LIBC_FILES))
LIBC_CHECKS = $(LIBC_HDRS_H:%=o/$(MODE)/%.ok)

ifneq ($(MODE), llvm)
LIBC_FILES := $(wildcard libc/*)
else
LIBC_FILES := $(wildcard libc/*)
endif

.PHONY: o/$(MODE)/libc
o/$(MODE)/libc: o/$(MODE)/libc/calls \

@@ -27,7 +27,7 @@
 *
 * @return 0 on success, or error number on failure
 */
int pthread_yield(void) {
int pthread_yield_np(void) {
  if (IsXnuSilicon()) {
    __syslib->__pthread_yield_np();
  } else if (IsOpenbsd()) {
@@ -37,3 +37,5 @@ int pthread_yield(void) {
  }
  return 0;
}

__weak_reference(pthread_yield_np, pthread_yield);

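Reading the hunk above: the implementation is renamed from pthread_yield to pthread_yield_np, and the added __weak_reference line keeps the old name as a weak alias of the new symbol. A minimal sketch of what callers can rely on after this change (the wrapper function here is illustrative, not part of the commit):

#include "libc/thread/thread.h"

/* Both names resolve to the same implementation: pthread_yield_np() is the
   real symbol and pthread_yield() is its weak alias. */
void relax_a_little(void) {
  pthread_yield_np();
  pthread_yield();
}
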
libc/intrin/pthread_pause_np.c (new file, 30 lines)
@@ -0,0 +1,30 @@
/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│
│vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│
╞══════════════════════════════════════════════════════════════════════════════╡
│ Copyright 2023 Justine Alexandra Roberts Tunney │
│ │
│ Permission to use, copy, modify, and/or distribute this software for │
│ any purpose with or without fee is hereby granted, provided that the │
│ above copyright notice and this permission notice appear in all copies. │
│ │
│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │
│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │
│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │
│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │
│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │
│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
│ PERFORMANCE OF THIS SOFTWARE. │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/thread/thread.h"

/**
 * Yields hyperthread.
 */
void pthread_pause_np(void) {
#if defined(__GNUC__) && defined(__aarch64__)
  __asm__ volatile("yield");
#elif defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
  __asm__ volatile("pause");
#endif
}

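The new file turns the former header macro into a real function: a YIELD hint on AArch64, a PAUSE hint on x86, and an empty function elsewhere. Its typical use is between iterations of a spin-wait loop, as in this sketch, which is not part of the commit and assumes the cosmopolitan include paths (the flag and wait function names are illustrative):

#include "libc/intrin/atomic.h"
#include "libc/thread/thread.h"

static _Atomic(int) g_ready;  /* illustrative flag, not part of the diff */

/* Spin until another thread publishes g_ready, hinting the core each round
   so the sibling hyperthread is not starved while we busy-wait. */
static void wait_until_ready(void) {
  while (!atomic_load_explicit(&g_ready, memory_order_acquire)) {
    pthread_pause_np();
  }
}
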
@@ -24,7 +24,7 @@
 *
 * @return 0 on success, or errno on error
 */
errno_t(pthread_spin_destroy)(pthread_spinlock_t *spin) {
errno_t pthread_spin_destroy(pthread_spinlock_t *spin) {
  atomic_store_explicit(&spin->_lock, -1, memory_order_relaxed);
  return 0;
}

@@ -28,7 +28,7 @@
 * @see pthread_spin_destroy
 * @see pthread_spin_lock
 */
errno_t(pthread_spin_init)(pthread_spinlock_t *spin, int pshared) {
errno_t pthread_spin_init(pthread_spinlock_t *spin, int pshared) {
  atomic_store_explicit(&spin->_lock, 0, memory_order_relaxed);
  return 0;
}

@@ -16,9 +16,7 @@
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
│ PERFORMANCE OF THIS SOFTWARE. │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/assert.h"
#include "libc/intrin/atomic.h"
#include "libc/intrin/strace.internal.h"
#include "libc/thread/thread.h"

/**
@@ -39,24 +37,9 @@
 * @see pthread_spin_unlock
 * @see pthread_spin_init
 */
errno_t(pthread_spin_lock)(pthread_spinlock_t *spin) {
  int x;
#if defined(SYSDEBUG) && _LOCKTRACE
  for (;;) {
    x = atomic_exchange_explicit(&spin->_lock, 1, memory_order_acquire);
    if (!x) {
      LOCKTRACE("pthread_spin_lock(%t)", spin);
      break;
    }
    unassert(x == 1);
    LOCKTRACE("pthread_spin_lock(%t) trying...", spin);
errno_t pthread_spin_lock(pthread_spinlock_t *spin) {
  while (atomic_exchange_explicit(&spin->_lock, 1, memory_order_acquire)) {
    pthread_pause_np();
  }
#else
  for (;;) {
    x = atomic_exchange_explicit(&spin->_lock, 1, memory_order_acquire);
    if (!x) break;
    unassert(x == 1);
  }
#endif
  return 0;
}

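After the cleanup, pthread_spin_lock is a plain acquire-exchange loop that calls pthread_pause_np() between failed attempts, with the SYSDEBUG/LOCKTRACE variant dropped. As a usage reminder rather than part of the change, the function pairs with the usual POSIX spin-lock lifecycle; a small sketch, assuming the standard PTHREAD_PROCESS_PRIVATE constant and cosmopolitan's pthread API, with illustrative names:

#include "libc/thread/thread.h"

static pthread_spinlock_t g_lock;  /* illustrative */
static long g_hits;

static void *worker(void *arg) {
  for (int i = 0; i < 1000; ++i) {
    pthread_spin_lock(&g_lock);    /* spins, pausing between failed attempts */
    ++g_hits;
    pthread_spin_unlock(&g_lock);
  }
  return 0;
}

int main(void) {
  pthread_t th[4];
  pthread_spin_init(&g_lock, PTHREAD_PROCESS_PRIVATE);
  for (int i = 0; i < 4; ++i) pthread_create(&th[i], 0, worker, 0);
  for (int i = 0; i < 4; ++i) pthread_join(th[i], 0);
  pthread_spin_destroy(&g_lock);
  return g_hits != 4000;  /* every increment should be observed */
}
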
@@ -16,7 +16,6 @@
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
│ PERFORMANCE OF THIS SOFTWARE. │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/assert.h"
#include "libc/errno.h"
#include "libc/intrin/atomic.h"
#include "libc/thread/thread.h"

@@ -30,10 +29,10 @@
 * @return 0 on success, or errno on error
 * @raise EBUSY if lock is already held
 */
errno_t(pthread_spin_trylock)(pthread_spinlock_t *spin) {
  int x;
  x = atomic_exchange_explicit(&spin->_lock, 1, memory_order_acquire);
  if (!x) return 0;
  unassert(x == 1);
  return EBUSY;
errno_t pthread_spin_trylock(pthread_spinlock_t *spin) {
  if (!atomic_exchange_explicit(&spin->_lock, 1, memory_order_acquire)) {
    return 0;
  } else {
    return EBUSY;
  }
}

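The trylock path keeps the same single exchange but now reports contention purely through its return value. A short sketch of how a caller might branch on EBUSY, under the same assumptions as above (the helper name is illustrative):

#include "libc/errno.h"
#include "libc/thread/thread.h"

/* Attempt the lock once; do the work only if it was free. */
static int try_do_work(pthread_spinlock_t *lock) {
  if (pthread_spin_trylock(lock) == EBUSY) {
    return 0;  /* contended: caller can retry later or do something else */
  }
  /* ... critical section ... */
  pthread_spin_unlock(lock);
  return 1;
}
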
@@ -17,7 +17,6 @@
│ PERFORMANCE OF THIS SOFTWARE. │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/intrin/atomic.h"
#include "libc/intrin/strace.internal.h"
#include "libc/thread/thread.h"

/**
@@ -29,8 +28,7 @@
 * @return 0 on success, or errno on error
 * @see pthread_spin_lock
 */
errno_t(pthread_spin_unlock)(pthread_spinlock_t *spin) {
  LOCKTRACE("pthread_spin_unlock(%t)", spin);
errno_t pthread_spin_unlock(pthread_spinlock_t *spin) {
  atomic_store_explicit(&spin->_lock, 0, memory_order_release);
  return 0;
}

@@ -193,6 +193,7 @@ int pthread_spin_trylock(pthread_spinlock_t *) paramsnonnull();
int pthread_spin_unlock(pthread_spinlock_t *) paramsnonnull();
int pthread_testcancel_np(void);
int pthread_tryjoin_np(pthread_t, void **);
int pthread_yield_np(void);
int pthread_yield(void);
pthread_id_np_t pthread_getthreadid_np(void);
pthread_t pthread_self(void) pureconst;
@@ -201,6 +202,7 @@ void pthread_cleanup_pop(struct _pthread_cleanup_buffer *, int) paramsnonnull();
void pthread_cleanup_push(struct _pthread_cleanup_buffer *, void (*)(void *), void *) paramsnonnull((1));
void pthread_exit(void *) wontreturn;
void pthread_testcancel(void);
void pthread_pause_np(void);

/* clang-format on */

@@ -213,42 +215,6 @@ void pthread_testcancel(void);
    pthread_cleanup_pop(&_buffer, (execute)); \
  }

#if defined(__GNUC__) && defined(__aarch64__)
#define pthread_pause_np() __asm__ volatile("yield")
#elif defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
#define pthread_pause_np() __asm__ volatile("pause")
#else
#define pthread_pause_np() (void)0
#endif

#if (__GNUC__ + 0) * 100 + (__GNUC_MINOR__ + 0) >= 407 && \
    !defined(__STRICT_ANSI__) && !defined(MODE_DBG)
extern const errno_t EBUSY;
#define pthread_spin_lock(pSpin) \
  ({ \
    pthread_spinlock_t *_s = pSpin; \
    while (__atomic_test_and_set(&_s->_lock, __ATOMIC_ACQUIRE)) { \
      pthread_pause_np(); \
    } \
    0; \
  })
#define pthread_spin_unlock(pSpin) \
  ({ \
    pthread_spinlock_t *_s = pSpin; \
    __atomic_store_n(&_s->_lock, 0, __ATOMIC_RELEASE); \
    0; \
  })
#define pthread_spin_trylock(pSpin) \
  ({ \
    pthread_spinlock_t *_s = pSpin; \
    __atomic_test_and_set(&_s->_lock, __ATOMIC_ACQUIRE) ? EBUSY : 0; \
  })
#define pthread_spin_init(pSpin, multiprocess) \
  (__atomic_store_n(&(pSpin)->_lock, 0, __ATOMIC_RELAXED), 0)
#define pthread_spin_destroy(pSpin) \
  (__atomic_store_n(&(pSpin)->_lock, -1, __ATOMIC_RELAXED), 0)
#endif /* GCC 4.7+ */

COSMOPOLITAN_C_END_
#endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */
#endif /* COSMOPOLITAN_LIBC_THREAD_THREAD_H_ */

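The block removed above inlined the spin operations as GNU statement expressions, where a ({ ... }) compound yields the value of its last expression (0 or EBUSY here), shadowing the real functions on GCC 4.7+ builds outside strict-ANSI and debug modes. Purely to illustrate the construct being retired, here is a tiny self-contained example of that pattern with made-up names:

/* GNU C statement expression: the braces form an expression whose value is
   the last statement, which is how the removed macros returned 0 or EBUSY. */
#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
#define INCREMENT_AND_GET(p) \
  ({ \
    int *_p = (p); \
    *_p += 1; \
    *_p; \
  })
#endif
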
@@ -24,6 +24,7 @@
#include "libc/str/str.h"
#include "libc/sysv/consts/sa.h"
#include "libc/sysv/consts/sig.h"
#include "libc/testlib/ezbench.h"
#include "libc/testlib/testlib.h"
#include "libc/thread/thread.h"