diff --git a/libc/calls/clock_nanosleep.c b/libc/calls/clock_nanosleep.c index cae196e89..5415373a5 100644 --- a/libc/calls/clock_nanosleep.c +++ b/libc/calls/clock_nanosleep.c @@ -58,6 +58,8 @@ * @param clock may be * - `CLOCK_REALTIME` * - `CLOCK_MONOTONIC` + * - `CLOCK_REALTIME_COARSE` but is likely to sleep negative time + * - `CLOCK_MONOTONIC_COARSE` but is likely to sleep negative time * @param flags can be 0 for relative and `TIMER_ABSTIME` for absolute * @param req can be a relative or absolute time, depending on `flags` * @param rem shall be updated with the remainder of unslept time when diff --git a/libc/intrin/pthread_mutex_lock.c b/libc/intrin/pthread_mutex_lock.c index a71202200..818fec3f2 100644 --- a/libc/intrin/pthread_mutex_lock.c +++ b/libc/intrin/pthread_mutex_lock.c @@ -111,6 +111,37 @@ static errno_t pthread_mutex_lock_recursive(pthread_mutex_t *mutex, } } +#if PTHREAD_USE_NSYNC +static errno_t pthread_mutex_lock_recursive_nsync(pthread_mutex_t *mutex, + uint64_t word) { + int me = gettid(); + for (;;) { + if (MUTEX_OWNER(word) == me) { + if (MUTEX_TYPE(word) != PTHREAD_MUTEX_ERRORCHECK) { + if (MUTEX_DEPTH(word) < MUTEX_DEPTH_MAX) { + if (atomic_compare_exchange_weak_explicit( + &mutex->_word, &word, MUTEX_INC_DEPTH(word), + memory_order_relaxed, memory_order_relaxed)) + return 0; + continue; + } else { + return EAGAIN; + } + } else { + return EDEADLK; + } + } + _weaken(nsync_mu_lock)((nsync_mu *)mutex->_nsyncx); + word = MUTEX_UNLOCK(word); + word = MUTEX_LOCK(word); + word = MUTEX_SET_OWNER(word, me); + mutex->_word = word; + mutex->_pid = __pid; + return 0; + } +} +#endif + static errno_t pthread_mutex_lock_impl(pthread_mutex_t *mutex) { uint64_t word; @@ -141,8 +172,17 @@ static errno_t pthread_mutex_lock_impl(pthread_mutex_t *mutex) { return 0; } - // handle recursive and error checking mutexes +// handle recursive and error checking mutexes +#if PTHREAD_USE_NSYNC + if (_weaken(nsync_mu_lock) && + MUTEX_PSHARED(word) == 
PTHREAD_PROCESS_PRIVATE) { + return pthread_mutex_lock_recursive_nsync(mutex, word); + } else { + return pthread_mutex_lock_recursive(mutex, word); + } +#else return pthread_mutex_lock_recursive(mutex, word); +#endif } /** diff --git a/libc/intrin/pthread_mutex_trylock.c b/libc/intrin/pthread_mutex_trylock.c index e6b542973..39607de5f 100644 --- a/libc/intrin/pthread_mutex_trylock.c +++ b/libc/intrin/pthread_mutex_trylock.c @@ -74,6 +74,38 @@ static errno_t pthread_mutex_trylock_recursive(pthread_mutex_t *mutex, } } +static errno_t pthread_mutex_trylock_recursive_nsync(pthread_mutex_t *mutex, + uint64_t word) { + int me = gettid(); + for (;;) { + if (MUTEX_OWNER(word) == me) { + if (MUTEX_TYPE(word) != PTHREAD_MUTEX_ERRORCHECK) { + if (MUTEX_DEPTH(word) < MUTEX_DEPTH_MAX) { + if (atomic_compare_exchange_weak_explicit( + &mutex->_word, &word, MUTEX_INC_DEPTH(word), + memory_order_relaxed, memory_order_relaxed)) + return 0; + continue; + } else { + return EAGAIN; + } + } else { + return EDEADLK; + } + } + if (_weaken(nsync_mu_trylock)((nsync_mu *)mutex->_nsyncx)) { + word = MUTEX_UNLOCK(word); + word = MUTEX_LOCK(word); + word = MUTEX_SET_OWNER(word, me); + mutex->_word = word; + mutex->_pid = __pid; + return 0; + } else { + return EBUSY; + } + } +} + /** * Attempts acquiring lock. * @@ -119,5 +151,14 @@ errno_t pthread_mutex_trylock(pthread_mutex_t *mutex) { } // handle recursive and error checking mutexes +#if PTHREAD_USE_NSYNC + if (_weaken(nsync_mu_trylock) && + MUTEX_PSHARED(word) == PTHREAD_PROCESS_PRIVATE) { + return pthread_mutex_trylock_recursive_nsync(mutex, word); + } else { + return pthread_mutex_trylock_recursive(mutex, word); + } +#else return pthread_mutex_trylock_recursive(mutex, word); +#endif } diff --git a/libc/intrin/pthread_mutex_unlock.c b/libc/intrin/pthread_mutex_unlock.c index d0322b72f..ec9a90cae 100644 --- a/libc/intrin/pthread_mutex_unlock.c +++ b/libc/intrin/pthread_mutex_unlock.c @@ -17,6 +17,7 @@ │ PERFORMANCE OF THIS SOFTWARE. 
│ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "libc/calls/calls.h" +#include "libc/calls/state.internal.h" #include "libc/dce.h" #include "libc/errno.h" #include "libc/intrin/atomic.h" @@ -69,6 +70,35 @@ static errno_t pthread_mutex_unlock_recursive(pthread_mutex_t *mutex, } } +#if PTHREAD_USE_NSYNC +static errno_t pthread_mutex_unlock_recursive_nsync(pthread_mutex_t *mutex, + uint64_t word) { + int me = gettid(); + for (;;) { + + // we allow unlocking an initialized lock that wasn't locked, but we + // don't allow unlocking a lock held by another thread, or unlocking + // recursive locks from a forked child, since it should be re-init'd + if (MUTEX_OWNER(word) && (MUTEX_OWNER(word) != me || mutex->_pid != __pid)) + return EPERM; + + // check if this is a nested lock with signal safety + if (MUTEX_DEPTH(word)) { + if (atomic_compare_exchange_strong_explicit( + &mutex->_word, &word, MUTEX_DEC_DEPTH(word), memory_order_relaxed, + memory_order_relaxed)) + return 0; + continue; + } + + // actually unlock the mutex + mutex->_word = MUTEX_UNLOCK(word); + _weaken(nsync_mu_unlock)((nsync_mu *)mutex->_nsyncx); + return 0; + } +} +#endif + /** * Releases mutex. 
* @@ -81,6 +111,11 @@ static errno_t pthread_mutex_unlock_recursive(pthread_mutex_t *mutex, errno_t pthread_mutex_unlock(pthread_mutex_t *mutex) { uint64_t word; + if (__vforked) { + LOCKTRACE("skipping pthread_mutex_unlock(%t) due to vfork", mutex); + return 0; + } + LOCKTRACE("pthread_mutex_unlock(%t)", mutex); // get current state of lock @@ -111,5 +146,14 @@ errno_t pthread_mutex_unlock(pthread_mutex_t *mutex) { } // handle recursive and error checking mutexes +#if PTHREAD_USE_NSYNC + if (_weaken(nsync_mu_unlock) && + MUTEX_PSHARED(word) == PTHREAD_PROCESS_PRIVATE) { + return pthread_mutex_unlock_recursive_nsync(mutex, word); + } else { + return pthread_mutex_unlock_recursive(mutex, word); + } +#else return pthread_mutex_unlock_recursive(mutex, word); +#endif } diff --git a/libc/intrin/pthread_mutexattr_settype.c b/libc/intrin/pthread_mutexattr_settype.c index 96dc080de..70a421abe 100644 --- a/libc/intrin/pthread_mutexattr_settype.c +++ b/libc/intrin/pthread_mutexattr_settype.c @@ -25,7 +25,6 @@ * * @param type can be one of * - `PTHREAD_MUTEX_NORMAL` - * - `PTHREAD_MUTEX_DEFAULT` * - `PTHREAD_MUTEX_RECURSIVE` * - `PTHREAD_MUTEX_ERRORCHECK` * @return 0 on success, or error on failure diff --git a/libc/intrin/reservefd.c b/libc/intrin/reservefd.c index 84345b23d..47003fe3b 100644 --- a/libc/intrin/reservefd.c +++ b/libc/intrin/reservefd.c @@ -18,10 +18,10 @@ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "libc/calls/internal.h" #include "libc/calls/state.internal.h" -#include "libc/intrin/fds.h" #include "libc/intrin/atomic.h" #include "libc/intrin/cmpxchg.h" #include "libc/intrin/extend.h" +#include "libc/intrin/fds.h" #include "libc/macros.h" #include "libc/runtime/memtrack.internal.h" #include "libc/str/str.h" @@ -47,7 +47,7 @@ int __ensurefds_unlocked(int fd) { /** * Grows file descriptor array memory if needed. 
- * @asyncsignalsafe + * @asyncsignalsafe if signals are blocked */ int __ensurefds(int fd) { __fds_lock(); @@ -82,7 +82,7 @@ int __reservefd_unlocked(int start) { /** * Finds open file descriptor slot. - * @asyncsignalsafe + * @asyncsignalsafe if signals are blocked */ int __reservefd(int start) { int fd; diff --git a/libc/sock/socketpair-nt.c b/libc/sock/socketpair-nt.c index 8a6bdb625..833de4b82 100644 --- a/libc/sock/socketpair-nt.c +++ b/libc/sock/socketpair-nt.c @@ -18,6 +18,7 @@ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "libc/calls/internal.h" #include "libc/calls/state.internal.h" +#include "libc/calls/struct/sigset.internal.h" #include "libc/calls/syscall_support-nt.internal.h" #include "libc/nt/createfile.h" #include "libc/nt/enum/accessmask.h" @@ -33,7 +34,8 @@ #include "libc/sysv/errfuns.h" #ifdef __x86_64__ -textwindows int sys_socketpair_nt(int family, int type, int proto, int sv[2]) { +textwindows static int sys_socketpair_nt_impl(int family, int type, int proto, + int sv[2]) { uint32_t mode; int64_t hpipe, h1; char16_t pipename[64]; @@ -111,4 +113,12 @@ textwindows int sys_socketpair_nt(int family, int type, int proto, int sv[2]) { return rc; } +textwindows int sys_socketpair_nt(int family, int type, int proto, int sv[2]) { + int rc; + BLOCK_SIGNALS; + rc = sys_socketpair_nt_impl(family, type, proto, sv); + ALLOW_SIGNALS; + return rc; +} + #endif /* __x86_64__ */ diff --git a/libc/thread/pthread_cond_init.c b/libc/thread/pthread_cond_init.c index 150ef78a4..8928a9128 100644 --- a/libc/thread/pthread_cond_init.c +++ b/libc/thread/pthread_cond_init.c @@ -16,6 +16,7 @@ │ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │ │ PERFORMANCE OF THIS SOFTWARE. 
│ ╚─────────────────────────────────────────────────────────────────────────────*/ +#include "libc/dce.h" #include "libc/sysv/consts/clock.h" #include "libc/thread/thread.h" @@ -29,8 +30,11 @@ errno_t pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr) { *cond = (pthread_cond_t){0}; if (attr) { + cond->_footek = IsXnuSilicon() || attr->_pshared; cond->_pshared = attr->_pshared; cond->_clock = attr->_clock; + } else { + cond->_footek = IsXnuSilicon(); } return 0; } diff --git a/libc/thread/pthread_cond_signal.c b/libc/thread/pthread_cond_signal.c index ea3c23d42..146fc6b27 100644 --- a/libc/thread/pthread_cond_signal.c +++ b/libc/thread/pthread_cond_signal.c @@ -42,9 +42,14 @@ errno_t pthread_cond_signal(pthread_cond_t *cond) { #if PTHREAD_USE_NSYNC + // do nothing if pthread_cond_timedwait() hasn't been called yet + // this is because we dont know for certain if nsync is safe + if (!atomic_load_explicit(&cond->_waited, memory_order_acquire)) + return 0; + // favor *NSYNC if this is a process private condition variable // if using Mike Burrows' code isn't possible, use a naive impl - if (!cond->_pshared && !IsXnuSilicon()) { + if (!cond->_footek) { nsync_cv_signal((nsync_cv *)cond); return 0; } diff --git a/libc/thread/pthread_cond_timedwait.c b/libc/thread/pthread_cond_timedwait.c index 96f0cf8a5..9f3013cfb 100644 --- a/libc/thread/pthread_cond_timedwait.c +++ b/libc/thread/pthread_cond_timedwait.c @@ -20,6 +20,7 @@ #include "libc/calls/cp.internal.h" #include "libc/dce.h" #include "libc/errno.h" +#include "libc/intrin/atomic.h" #include "libc/sysv/consts/clock.h" #include "libc/thread/lock.h" #include "libc/thread/posixthread.internal.h" @@ -116,17 +117,30 @@ errno_t pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex, MUTEX_OWNER(muword) != gettid()) return EPERM; - // if condition variable is shared then mutex must be too - if (cond->_pshared) - if (MUTEX_PSHARED(muword) != PTHREAD_PROCESS_SHARED) +#if PTHREAD_USE_NSYNC + // 
the first time pthread_cond_timedwait() is called we learn if the + // associated mutex is normal and private. that means *NSYNC is safe + // this decision is permanent. you can't use a recursive mutex later + if (!atomic_load_explicit(&cond->_waited, memory_order_acquire)) { + if (!cond->_footek) + if (MUTEX_TYPE(muword) != PTHREAD_MUTEX_NORMAL || + MUTEX_PSHARED(muword) != PTHREAD_PROCESS_PRIVATE) + cond->_footek = true; + atomic_store_explicit(&cond->_waited, true, memory_order_release); + } else if (!cond->_footek) { + if (MUTEX_TYPE(muword) != PTHREAD_MUTEX_NORMAL || + MUTEX_PSHARED(muword) != PTHREAD_PROCESS_PRIVATE) return EINVAL; + } +#endif + // now perform the actual wait errno_t err; BEGIN_CANCELATION_POINT; #if PTHREAD_USE_NSYNC // favor *NSYNC if this is a process private condition variable // if using Mike Burrows' code isn't possible, use a naive impl - if (!cond->_pshared && !IsXnuSilicon()) { + if (!cond->_footek) { err = nsync_cv_wait_with_deadline( (nsync_cv *)cond, (nsync_mu *)mutex, cond->_clock, abstime ? 
*abstime : nsync_time_no_deadline, 0); diff --git a/libc/thread/thread.h b/libc/thread/thread.h index a840cb6eb..4418bb3cd 100644 --- a/libc/thread/thread.h +++ b/libc/thread/thread.h @@ -8,7 +8,6 @@ #define PTHREAD_BARRIER_SERIAL_THREAD 31337 -#define PTHREAD_MUTEX_DEFAULT 0 #define PTHREAD_MUTEX_NORMAL 0 #define PTHREAD_MUTEX_RECURSIVE 1 #define PTHREAD_MUTEX_ERRORCHECK 2 @@ -77,6 +76,7 @@ typedef struct pthread_mutex_s { }; /* this cleverly overlaps with NSYNC struct Dll *waiters; */ _PTHREAD_ATOMIC(uint64_t) _word; + long _nsyncx[2]; } pthread_mutex_t; typedef struct pthread_mutexattr_s { @@ -95,6 +95,8 @@ typedef struct pthread_cond_s { uint32_t _nsync; char _pshared; char _clock; + char _footek; + _PTHREAD_ATOMIC(char) _waited; }; }; _PTHREAD_ATOMIC(uint32_t) _sequence; diff --git a/test/libc/intrin/pthread_mutex_lock_test.c b/test/libc/intrin/pthread_mutex_lock_test.c index 0ee1dea05..4881733f5 100644 --- a/test/libc/intrin/pthread_mutex_lock_test.c +++ b/test/libc/intrin/pthread_mutex_lock_test.c @@ -95,7 +95,6 @@ TEST(pthread_mutex_lock, recursive) { } ASSERT_EQ(0, pthread_mutex_lock(&lock)); ASSERT_EQ(0, pthread_mutex_unlock(&lock)); - ASSERT_EQ(0, pthread_mutex_unlock(&lock)); ASSERT_EQ(0, pthread_mutex_destroy(&lock)); } diff --git a/test/libc/thread/footek_test.c b/test/libc/thread/footek_test.c index 9973d38e6..51d3bd1c9 100644 --- a/test/libc/thread/footek_test.c +++ b/test/libc/thread/footek_test.c @@ -1,4 +1,4 @@ -#define USE POSIX +#define USE POSIX_RECURSIVE #define ITERATIONS 100000 #define THREADS 30 diff --git a/test/libc/thread/pthread_cancel_deferred_cond_test.c b/test/libc/thread/pthread_cancel_deferred_cond_test.c index 7bf8e1045..76d9eb928 100644 --- a/test/libc/thread/pthread_cancel_deferred_cond_test.c +++ b/test/libc/thread/pthread_cancel_deferred_cond_test.c @@ -2,10 +2,11 @@ #include #include #include +#include "libc/stdio/stdio.h" int got_cleanup; -pthread_cond_t cv = PTHREAD_COND_INITIALIZER; -pthread_mutex_t mu = 
PTHREAD_MUTEX_INITIALIZER; +pthread_cond_t cv; +pthread_mutex_t mu; void cleanup(void* arg) { got_cleanup = 1; @@ -23,6 +24,12 @@ void* worker(void* arg) { int main(int argc, char* argv[]) { void* rc; pthread_t th; + pthread_mutexattr_t at; + pthread_mutexattr_init(&at); + pthread_mutexattr_settype(&at, PTHREAD_MUTEX_NORMAL); + pthread_mutex_init(&mu, &at); + pthread_mutexattr_destroy(&at); + pthread_cond_init(&cv, 0); if (pthread_create(&th, 0, worker, 0)) return 2; if (pthread_cancel(th)) @@ -37,4 +44,6 @@ int main(int argc, char* argv[]) { return 7; if (pthread_mutex_unlock(&mu)) return 8; + pthread_mutex_destroy(&mu); + pthread_cond_destroy(&cv); } diff --git a/test/libc/thread/pthread_cancel_test.c b/test/libc/thread/pthread_cancel_test.c index c43aacc04..790f74ad5 100644 --- a/test/libc/thread/pthread_cancel_test.c +++ b/test/libc/thread/pthread_cancel_test.c @@ -40,6 +40,12 @@ atomic_int gotcleanup; void SetUpOnce(void) { testlib_enable_tmp_setup_teardown(); + pthread_mutexattr_t at; + pthread_mutexattr_init(&at); + pthread_mutexattr_settype(&at, PTHREAD_MUTEX_NORMAL); + pthread_mutex_init(&mu, &at); + pthread_mutexattr_destroy(&at); + pthread_cond_init(&cv, 0); } void SetUp(void) { diff --git a/test/posix/mutex_async_signal_safety_test.c b/test/posix/mutex_async_signal_safety_test.c index 5102ab2fb..08cc268e8 100644 --- a/test/posix/mutex_async_signal_safety_test.c +++ b/test/posix/mutex_async_signal_safety_test.c @@ -8,6 +8,8 @@ // tests that recursive mutexes are implemented atomically // // glibc fails this test +// musl passes this test +// cosmo only guarantees this in process shared mode atomic_bool done; atomic_bool ready; @@ -45,6 +47,8 @@ int main() { _Exit(2); if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) _Exit(3); + if (pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED)) + _Exit(3); if (pthread_mutex_init(&lock, &attr)) _Exit(4); if (pthread_mutexattr_destroy(&attr)) diff --git a/third_party/dlmalloc/dlmalloc.c 
b/third_party/dlmalloc/dlmalloc.c index 28f516f0e..389fff109 100644 --- a/third_party/dlmalloc/dlmalloc.c +++ b/third_party/dlmalloc/dlmalloc.c @@ -31,7 +31,7 @@ #define FOOTERS 1 #define MSPACES 1 #define ONLY_MSPACES 1 // enables scalable multi-threaded malloc -#define USE_SPIN_LOCKS 1 // only profitable using sched_getcpu() +#define USE_SPIN_LOCKS 0 // only profitable using sched_getcpu() #else #define INSECURE 1 #define PROCEED_ON_ERROR 1 diff --git a/third_party/nsync/README.cosmo b/third_party/nsync/README.cosmo index 044532305..bf0f49919 100644 --- a/third_party/nsync/README.cosmo +++ b/third_party/nsync/README.cosmo @@ -23,6 +23,8 @@ LOCAL CHANGES - Double linked list API was so good that it's now in libc + - Max delay on sleep should be 20ms (not 4ms) on OpenBSD and NetBSD + - Support Apple's ulock futexes which are internal but nicer than GCD - Ensure resources such as POSIX semaphores are are released on fork. diff --git a/third_party/nsync/panic.c b/third_party/nsync/panic.c index 10f9eddf8..59755fdba 100644 --- a/third_party/nsync/panic.c +++ b/third_party/nsync/panic.c @@ -24,6 +24,8 @@ /* Aborts after printing the nul-terminated string s[]. 
*/ void nsync_panic_ (const char *s) { + if (1) + __builtin_trap(); tinyprint(2, "error: nsync panic: ", s, "cosmoaddr2line ", program_invocation_name, " ", DescribeBacktrace (__builtin_frame_address (0)), "\n", diff --git a/third_party/nsync/testing/BUILD.mk b/third_party/nsync/testing/BUILD.mk index b87bb80d3..d7e261430 100644 --- a/third_party/nsync/testing/BUILD.mk +++ b/third_party/nsync/testing/BUILD.mk @@ -8,6 +8,7 @@ THIRD_PARTY_NSYNC_TESTING_A = o/$(MODE)/third_party/nsync/testing/lib.a THIRD_PARTY_NSYNC_TESTING_FILES = $(wildcard third_party/nsync/testing/*) THIRD_PARTY_NSYNC_TESTING_SRCS = $(filter %.c,$(THIRD_PARTY_NSYNC_TESTING_FILES)) THIRD_PARTY_NSYNC_TESTING_HDRS = $(filter %.h,$(THIRD_PARTY_NSYNC_TESTING_FILES)) +THIRD_PARTY_NSYNC_TESTING_INCS = $(filter %.inc,$(THIRD_PARTY_NSYNC_TESTING_FILES)) THIRD_PARTY_NSYNC_TESTING_SRCS_TEST = $(filter %_test.c,$(THIRD_PARTY_NSYNC_TESTING_SRCS)) THIRD_PARTY_NSYNC_TESTING_OBJS = $(THIRD_PARTY_NSYNC_TESTING_SRCS:%.c=o/$(MODE)/%.o) THIRD_PARTY_NSYNC_TESTING_COMS = $(THIRD_PARTY_NSYNC_TESTING_SRCS_TEST:%.c=o/$(MODE)/%) @@ -54,7 +55,14 @@ o/$(MODE)/third_party/nsync/testing/%_test.dbg: \ o/$(MODE)/third_party/nsync/testing/mu_starvation_test.ok: private QUOTA = -L300 o/$(MODE)/third_party/nsync/testing/mu_starvation_test.runs: private QUOTA = -C128 -L300 o/$(MODE)/third_party/nsync/testing/mu_test.ok: private QUOTA = -L300 +o/$(MODE)/third_party/nsync/testing/mu2_test.ok: private QUOTA = -L300 +o/$(MODE)/third_party/nsync/testing/mu3_test.ok: private QUOTA = -L300 +o/$(MODE)/third_party/nsync/testing/cv_mu_timeout_stress_test.ok: private QUOTA = -L300 +o/$(MODE)/third_party/nsync/testing/cv_mu_timeout_stress2_test.ok: private QUOTA = -L300 +o/$(MODE)/third_party/nsync/testing/cv_mu_timeout_stress3_test.ok: private QUOTA = -L300 o/$(MODE)/third_party/nsync/testing/mu_test.runs: private QUOTA = -C128 -L300 +o/$(MODE)/third_party/nsync/testing/mu2_test.runs: private QUOTA = -C128 -L300 
+o/$(MODE)/third_party/nsync/testing/mu3_test.runs: private QUOTA = -C128 -L300 o/$(MODE)/third_party/nsync/testing/wait_test.ok: private QUOTA = -P65536 o/$(MODE)/third_party/nsync/testing/wait_test.runs: private QUOTA = -P65536 diff --git a/third_party/nsync/testing/cv2_test.c b/third_party/nsync/testing/cv2_test.c new file mode 100644 index 000000000..47cc96485 --- /dev/null +++ b/third_party/nsync/testing/cv2_test.c @@ -0,0 +1,24 @@ +/*-*- mode:c;indent-tabs-mode:t;c-basic-offset:8;tab-width:8;coding:utf-8 -*-│ +│ vi: set noet ft=c ts=8 sw=8 fenc=utf-8 :vi │ +╞══════════════════════════════════════════════════════════════════════════════╡ +│ Copyright 2016 Google Inc. │ +│ │ +│ Licensed under the Apache License, Version 2.0 (the "License"); │ +│ you may not use this file except in compliance with the License. │ +│ You may obtain a copy of the License at │ +│ │ +│ http://www.apache.org/licenses/LICENSE-2.0 │ +│ │ +│ Unless required by applicable law or agreed to in writing, software │ +│ distributed under the License is distributed on an "AS IS" BASIS, │ +│ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. │ +│ See the License for the specific language governing permissions and │ +│ limitations under the License. 
│ +╚─────────────────────────────────────────────────────────────────────────────*/ +#include "third_party/nsync/testing/cv_test.inc" + +int main (int argc, char *argv[]) { + testing_base tb = testing_new (argc, argv, 0); + TEST_RUN (tb, test_cv_deadline); + return (testing_base_exit (tb)); +} diff --git a/third_party/nsync/testing/cv3_test.c b/third_party/nsync/testing/cv3_test.c new file mode 100644 index 000000000..52e0e9839 --- /dev/null +++ b/third_party/nsync/testing/cv3_test.c @@ -0,0 +1,24 @@ +/*-*- mode:c;indent-tabs-mode:t;c-basic-offset:8;tab-width:8;coding:utf-8 -*-│ +│ vi: set noet ft=c ts=8 sw=8 fenc=utf-8 :vi │ +╞══════════════════════════════════════════════════════════════════════════════╡ +│ Copyright 2016 Google Inc. │ +│ │ +│ Licensed under the Apache License, Version 2.0 (the "License"); │ +│ you may not use this file except in compliance with the License. │ +│ You may obtain a copy of the License at │ +│ │ +│ http://www.apache.org/licenses/LICENSE-2.0 │ +│ │ +│ Unless required by applicable law or agreed to in writing, software │ +│ distributed under the License is distributed on an "AS IS" BASIS, │ +│ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. │ +│ See the License for the specific language governing permissions and │ +│ limitations under the License. 
│ +╚─────────────────────────────────────────────────────────────────────────────*/ +#include "third_party/nsync/testing/cv_test.inc" + +int main (int argc, char *argv[]) { + testing_base tb = testing_new (argc, argv, 0); + TEST_RUN (tb, test_cv_cancel); + return (testing_base_exit (tb)); +} diff --git a/third_party/nsync/testing/cv_mu_timeout_stress2_test.c b/third_party/nsync/testing/cv_mu_timeout_stress2_test.c new file mode 100644 index 000000000..94127460d --- /dev/null +++ b/third_party/nsync/testing/cv_mu_timeout_stress2_test.c @@ -0,0 +1,24 @@ +/*-*- mode:c;indent-tabs-mode:t;c-basic-offset:8;tab-width:8;coding:utf-8 -*-│ +│ vi: set noet ft=c ts=8 sw=8 fenc=utf-8 :vi │ +╞══════════════════════════════════════════════════════════════════════════════╡ +│ Copyright 2016 Google Inc. │ +│ │ +│ Licensed under the Apache License, Version 2.0 (the "License"); │ +│ you may not use this file except in compliance with the License. │ +│ You may obtain a copy of the License at │ +│ │ +│ http://www.apache.org/licenses/LICENSE-2.0 │ +│ │ +│ Unless required by applicable law or agreed to in writing, software │ +│ distributed under the License is distributed on an "AS IS" BASIS, │ +│ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. │ +│ See the License for the specific language governing permissions and │ +│ limitations under the License. 
│ +╚─────────────────────────────────────────────────────────────────────────────*/ +#include "third_party/nsync/testing/cv_mu_timeout_stress_test.inc" + +int main (int argc, char *argv[]) { + testing_base tb = testing_new (argc, argv, 0); + TEST_RUN (tb, test_mu_timeout_stress); + return (testing_base_exit (tb)); +} diff --git a/third_party/nsync/testing/cv_mu_timeout_stress3_test.c b/third_party/nsync/testing/cv_mu_timeout_stress3_test.c new file mode 100644 index 000000000..8b74d34be --- /dev/null +++ b/third_party/nsync/testing/cv_mu_timeout_stress3_test.c @@ -0,0 +1,24 @@ +/*-*- mode:c;indent-tabs-mode:t;c-basic-offset:8;tab-width:8;coding:utf-8 -*-│ +│ vi: set noet ft=c ts=8 sw=8 fenc=utf-8 :vi │ +╞══════════════════════════════════════════════════════════════════════════════╡ +│ Copyright 2016 Google Inc. │ +│ │ +│ Licensed under the Apache License, Version 2.0 (the "License"); │ +│ you may not use this file except in compliance with the License. │ +│ You may obtain a copy of the License at │ +│ │ +│ http://www.apache.org/licenses/LICENSE-2.0 │ +│ │ +│ Unless required by applicable law or agreed to in writing, software │ +│ distributed under the License is distributed on an "AS IS" BASIS, │ +│ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. │ +│ See the License for the specific language governing permissions and │ +│ limitations under the License. 
│ +╚─────────────────────────────────────────────────────────────────────────────*/ +#include "third_party/nsync/testing/cv_mu_timeout_stress_test.inc" + +int main (int argc, char *argv[]) { + testing_base tb = testing_new (argc, argv, 0); + TEST_RUN (tb, test_mu_cv_timeout_stress); + return (testing_base_exit (tb)); +} diff --git a/third_party/nsync/testing/cv_mu_timeout_stress_test.c b/third_party/nsync/testing/cv_mu_timeout_stress_test.c new file mode 100644 index 000000000..6c9cf3a63 --- /dev/null +++ b/third_party/nsync/testing/cv_mu_timeout_stress_test.c @@ -0,0 +1,24 @@ +/*-*- mode:c;indent-tabs-mode:t;c-basic-offset:8;tab-width:8;coding:utf-8 -*-│ +│ vi: set noet ft=c ts=8 sw=8 fenc=utf-8 :vi │ +╞══════════════════════════════════════════════════════════════════════════════╡ +│ Copyright 2016 Google Inc. │ +│ │ +│ Licensed under the Apache License, Version 2.0 (the "License"); │ +│ you may not use this file except in compliance with the License. │ +│ You may obtain a copy of the License at │ +│ │ +│ http://www.apache.org/licenses/LICENSE-2.0 │ +│ │ +│ Unless required by applicable law or agreed to in writing, software │ +│ distributed under the License is distributed on an "AS IS" BASIS, │ +│ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. │ +│ See the License for the specific language governing permissions and │ +│ limitations under the License. 
│ +╚─────────────────────────────────────────────────────────────────────────────*/ +#include "third_party/nsync/testing/cv_mu_timeout_stress_test.inc" + +int main (int argc, char *argv[]) { + testing_base tb = testing_new (argc, argv, 0); + TEST_RUN (tb, test_cv_timeout_stress); + return (testing_base_exit (tb)); +} diff --git a/third_party/nsync/testing/cv_mu_timeout_stress_test_.c b/third_party/nsync/testing/cv_mu_timeout_stress_test.inc similarity index 98% rename from third_party/nsync/testing/cv_mu_timeout_stress_test_.c rename to third_party/nsync/testing/cv_mu_timeout_stress_test.inc index 2211fcce4..81a6b522a 100644 --- a/third_party/nsync/testing/cv_mu_timeout_stress_test_.c +++ b/third_party/nsync/testing/cv_mu_timeout_stress_test.inc @@ -23,6 +23,7 @@ #include "third_party/nsync/mu_wait.h" #include "third_party/nsync/testing/closure.h" #include "third_party/nsync/testing/smprintf.h" +#include "libc/dce.h" #include "third_party/nsync/testing/testing.h" /* A cv_stress_data represents the data used by the threads of the tests below. */ @@ -59,8 +60,8 @@ typedef struct cv_stress_data_s { /* The delays in cv_stress_inc_loop(), cv_stress_reader_loop(), mu_stress_inc_loop(), and mu_stress_reader_loop() are uniformly distributed from 0 to STRESS_MAX_DELAY_MICROS-1 microseconds. */ -#define STRESS_MAX_DELAY_MICROS (4000) /* maximum delay */ -#define STRESS_MEAN_DELAY_MICROS (STRESS_MAX_DELAY_MICROS / 2) /* mean delay */ +#define STRESS_MAX_DELAY_MICROS (IsNetbsd() || IsOpenbsd() ? 
20000 : 4000) /* maximum delay */ +#define STRESS_MEAN_DELAY_MICROS (STRESS_MAX_DELAY_MICROS / 2) /* mean delay */ #define STRESS_EXPECT_TIMEOUTS_PER_SEC (1000000 / STRESS_MEAN_DELAY_MICROS) /* expect timeouts/s*/ /* Acquire s.mu, then increment s.count n times, each time @@ -550,11 +551,3 @@ static void test_mu_cv_timeout_stress (testing t) { loop_count *= 2; } while (!run_stress_test (&s, t, "test_mu_cv_timeout_stress")); } - -int main (int argc, char *argv[]) { - testing_base tb = testing_new (argc, argv, 0); - TEST_RUN (tb, test_cv_timeout_stress); - TEST_RUN (tb, test_mu_timeout_stress); - TEST_RUN (tb, test_mu_cv_timeout_stress); - return (testing_base_exit (tb)); -} diff --git a/third_party/nsync/testing/cv_test.c b/third_party/nsync/testing/cv_test.c index ee3c2505c..09fb43d88 100644 --- a/third_party/nsync/testing/cv_test.c +++ b/third_party/nsync/testing/cv_test.c @@ -15,766 +15,7 @@ │ See the License for the specific language governing permissions and │ │ limitations under the License. │ ╚─────────────────────────────────────────────────────────────────────────────*/ -#include "third_party/nsync/cv.h" -#include "libc/errno.h" -#include "libc/mem/mem.h" -#include "libc/runtime/runtime.h" -#include "libc/stdio/stdio.h" -#include "libc/str/str.h" -#include "third_party/nsync/debug.h" -#include "third_party/nsync/mu.h" -#include "third_party/nsync/mu_wait.h" -#include "third_party/nsync/note.h" -#include "third_party/nsync/testing/closure.h" -#include "third_party/nsync/testing/smprintf.h" -#include "third_party/nsync/testing/testing.h" -#include "third_party/nsync/testing/time_extra.h" -#include "third_party/nsync/time.h" - -/* --------------------------- */ - -/* A cv_queue represents a FIFO queue with up to limit elements. - The storage for the queue expands as necessary up to limit. 
*/ -typedef struct cv_queue_s { - int limit; /* max value of count---should not be changed after initialization */ - nsync_cv non_empty; /* signalled when count transitions from zero to non-zero */ - nsync_cv non_full; /* signalled when count transitions from limit to less than limit */ - nsync_mu mu; /* protects fields below */ - int pos; /* index of first in-use element */ - int count; /* number of elements in use */ - void *data[1]; /* in use elements are data[pos, ..., (pos+count-1)%limit] */ -} cv_queue; - -/* Return a pointer to new cv_queue. */ -static cv_queue *cv_queue_new (int limit) { - cv_queue *q; - int size = offsetof (struct cv_queue_s, data) + sizeof (q->data[0]) * limit; - q = (cv_queue *) malloc (size); - bzero ((void *) q, size); - q->limit = limit; - return (q); -} - -/* Add v to the end of the FIFO *q and return non-zero, or if the FIFO already - has limit elements and continues to do so until abs_deadline, do nothing and - return 0. */ -static int cv_queue_put (cv_queue *q, void *v, nsync_time abs_deadline) { - int added = 0; - int wake = 0; - nsync_mu_lock (&q->mu); - while (q->count == q->limit && - nsync_cv_wait_with_deadline (&q->non_full, &q->mu, NSYNC_CLOCK, abs_deadline, NULL) == 0) { - } - if (q->count != q->limit) { - int i = q->pos + q->count; - if (q->limit <= i) { - i -= q->limit; - } - q->data[i] = v; - if (q->count == 0) { - wake = 1; - } - q->count++; - added = 1; - } - nsync_mu_unlock (&q->mu); - if (wake) { - nsync_cv_broadcast (&q->non_empty); - } - return (added); -} - -/* Remove the first value from the front of the FIFO *q and return it, - or if the FIFO is empty and continues to be so until abs_deadline, - do nothing and return NULL. 
*/ -static void *cv_queue_get (cv_queue *q, nsync_time abs_deadline) { - void *v = NULL; - nsync_mu_lock (&q->mu); - while (q->count == 0 && - nsync_cv_wait_with_deadline (&q->non_empty, &q->mu, NSYNC_CLOCK, abs_deadline, NULL) == 0) { - } - if (q->count != 0) { - v = q->data[q->pos]; - q->data[q->pos] = NULL; - if (q->count == q->limit) { - nsync_cv_broadcast (&q->non_full); - } - q->pos++; - q->count--; - if (q->pos == q->limit) { - q->pos = 0; - } - } - nsync_mu_unlock (&q->mu); - return (v); -} - -/* --------------------------- */ - -static char ptr_to_int_c; -#define INT_TO_PTR(x) ((x) + &ptr_to_int_c) -#define PTR_TO_INT(p) (((char *) (p)) - &ptr_to_int_c) - -/* Put count integers on *q, in the sequence start*3, (start+1)*3, (start+2)*3, .... */ -static void producer_cv_n (testing t, cv_queue *q, int start, int count) { - int i; - for (i = 0; i != count; i++) { - if (!cv_queue_put (q, INT_TO_PTR ((start+i)*3), nsync_time_no_deadline)) { - TEST_FATAL (t, ("cv_queue_put() returned 0 with no deadline")); - } - } -} -CLOSURE_DECL_BODY4 (producer_cv_n, testing, cv_queue *, int, int) - -/* Get count integers from *q, and check that they are in the - sequence start*3, (start+1)*3, (start+2)*3, .... */ -static void consumer_cv_n (testing t, cv_queue *q, int start, int count) { - int i; - for (i = 0; i != count; i++) { - void *v = cv_queue_get (q, nsync_time_no_deadline); - int x; - if (v == NULL) { - TEST_FATAL (t, ("cv_queue_get() returned NULL with no deadline")); - } - x = PTR_TO_INT (v); - if (x != (start+i)*3) { - TEST_FATAL (t, ("cv_queue_get() returned bad value; want %d, got %d", - (start+i)*3, x)); - } - } -} - -/* CV_PRODUCER_CONSUMER_N is the number of elements passed from producer to consumer in the - test_cv_producer_consumer*() tests below. */ -#define CV_PRODUCER_CONSUMER_N 100000 - -/* Send a stream of integers from a producer thread to - a consumer thread via a queue with limit 10**0. 
*/ -static void test_cv_producer_consumer0 (testing t) { - cv_queue *q = cv_queue_new (1); - closure_fork (closure_producer_cv_n (&producer_cv_n, t, q, 0, CV_PRODUCER_CONSUMER_N)); - consumer_cv_n (t, q, 0, CV_PRODUCER_CONSUMER_N); - free (q); -} - -/* Send a stream of integers from a producer thread to - a consumer thread via a queue with limit 10**1. */ -static void test_cv_producer_consumer1 (testing t) { - cv_queue *q = cv_queue_new (10); - closure_fork (closure_producer_cv_n (&producer_cv_n, t, q, 0, CV_PRODUCER_CONSUMER_N)); - consumer_cv_n (t, q, 0, CV_PRODUCER_CONSUMER_N); - free (q); -} - -/* Send a stream of integers from a producer thread to - a consumer thread via a queue with limit 10**2. */ -static void test_cv_producer_consumer2 (testing t) { - cv_queue *q = cv_queue_new (100); - closure_fork (closure_producer_cv_n (&producer_cv_n, t, q, 0, CV_PRODUCER_CONSUMER_N)); - consumer_cv_n (t, q, 0, CV_PRODUCER_CONSUMER_N); - free (q); -} - -/* Send a stream of integers from a producer thread to - a consumer thread via a queue with limit 10**3. */ -static void test_cv_producer_consumer3 (testing t) { - cv_queue *q = cv_queue_new (1000); - closure_fork (closure_producer_cv_n (&producer_cv_n, t, q, 0, CV_PRODUCER_CONSUMER_N)); - consumer_cv_n (t, q, 0, CV_PRODUCER_CONSUMER_N); - free (q); -} - -/* Send a stream of integers from a producer thread to - a consumer thread via a queue with limit 10**4. */ -static void test_cv_producer_consumer4 (testing t) { - cv_queue *q = cv_queue_new (10 * 1000); - closure_fork (closure_producer_cv_n (&producer_cv_n, t, q, 0, CV_PRODUCER_CONSUMER_N)); - consumer_cv_n (t, q, 0, CV_PRODUCER_CONSUMER_N); - free (q); -} - -/* Send a stream of integers from a producer thread to - a consumer thread via a queue with limit 10**5. 
*/ -static void test_cv_producer_consumer5 (testing t) { - cv_queue *q = cv_queue_new (100 * 1000); - closure_fork (closure_producer_cv_n (&producer_cv_n, t, q, 0, CV_PRODUCER_CONSUMER_N)); - consumer_cv_n (t, q, 0, CV_PRODUCER_CONSUMER_N); - free (q); -} - -/* Send a stream of integers from a producer thread to - a consumer thread via a queue with limit 10**6. */ -static void test_cv_producer_consumer6 (testing t) { - cv_queue *q = cv_queue_new (1000 * 1000); - closure_fork (closure_producer_cv_n (&producer_cv_n, t, q, 0, CV_PRODUCER_CONSUMER_N)); - consumer_cv_n (t, q, 0, CV_PRODUCER_CONSUMER_N); - free (q); -} - -/* The following values control how aggressively we police the timeout. */ -#define TOO_EARLY_MS 1 -#define TOO_LATE_MS 100 /* longer, to accommodate scheduling delays */ -#define TOO_LATE_ALLOWED 25 /* number of iterations permitted to violate too_late */ - -/* Check timeouts on a CV wait_with_deadline(). */ -static void test_cv_deadline (testing t) { - int too_late_violations; - nsync_mu mu; - nsync_cv cv; - int i; - nsync_time too_early; - nsync_time too_late; - - nsync_mu_init (&mu); - nsync_cv_init (&cv); - too_early = nsync_time_ms (TOO_EARLY_MS); - too_late = nsync_time_ms (TOO_LATE_MS); - too_late_violations = 0; - nsync_mu_lock (&mu); - for (i = 0; i != 50; i++) { - nsync_time end_time; - nsync_time start_time; - nsync_time expected_end_time; - start_time = nsync_time_now (NSYNC_CLOCK); - expected_end_time = nsync_time_add (start_time, nsync_time_ms (87)); - if (nsync_cv_wait_with_deadline (&cv, &mu, NSYNC_CLOCK, expected_end_time, - NULL) != ETIMEDOUT) { - TEST_FATAL (t, ("nsync_cv_wait() returned non-expired for a timeout")); - } - end_time = nsync_time_now (NSYNC_CLOCK); - if (nsync_time_cmp (end_time, nsync_time_sub (expected_end_time, too_early)) < 0) { - char *elapsed_str = nsync_time_str (nsync_time_sub (expected_end_time, end_time), 2); - TEST_ERROR (t, ("nsync_cv_wait() returned %s too early", elapsed_str)); - free (elapsed_str); - } - 
if (nsync_time_cmp (nsync_time_add (expected_end_time, too_late), end_time) < 0) { - too_late_violations++; - } - } - nsync_mu_unlock (&mu); - if (too_late_violations > TOO_LATE_ALLOWED) { - TEST_ERROR (t, ("nsync_cv_wait() returned too late %d times", too_late_violations)); - } -} - -/* Check cancellations with nsync_cv_wait_with_deadline(). */ -static void test_cv_cancel (testing t) { - nsync_time future_time; - int too_late_violations; - nsync_mu mu; - nsync_cv cv; - int i; - nsync_time too_early; - nsync_time too_late; - - nsync_mu_init (&mu); - nsync_cv_init (&cv); - too_early = nsync_time_ms (TOO_EARLY_MS); - too_late = nsync_time_ms (TOO_LATE_MS); - - /* The loops below cancel after 87 milliseconds, like the timeout tests above. */ - - future_time = nsync_time_add (nsync_time_now (NSYNC_CLOCK), nsync_time_ms (3600000)); /* test cancels with timeout */ - - too_late_violations = 0; - nsync_mu_lock (&mu); - for (i = 0; i != 50; i++) { - int x; - nsync_note cancel; - nsync_time end_time; - nsync_time start_time; - nsync_time expected_end_time; - start_time = nsync_time_now (NSYNC_CLOCK); - expected_end_time = nsync_time_add (start_time, nsync_time_ms (87)); - - cancel = nsync_note_new (NULL, NSYNC_CLOCK, expected_end_time); - - x = nsync_cv_wait_with_deadline (&cv, &mu, NSYNC_CLOCK, future_time, cancel); - if (x != ECANCELED) { - TEST_FATAL (t, ("nsync_cv_wait() returned non-cancelled (%d) for " - "a cancellation; expected %d", - x, ECANCELED)); - } - end_time = nsync_time_now (NSYNC_CLOCK); - if (nsync_time_cmp (end_time, nsync_time_sub (expected_end_time, too_early)) < 0) { - char *elapsed_str = nsync_time_str (nsync_time_sub (expected_end_time, end_time), 2); - TEST_ERROR (t, ("nsync_cv_wait() returned %s too early", elapsed_str)); - free (elapsed_str); - } - if (nsync_time_cmp (nsync_time_add (expected_end_time, too_late), end_time) < 0) { - too_late_violations++; - } - - /* Check that an already cancelled wait returns immediately. 
*/ - start_time = nsync_time_now (NSYNC_CLOCK); - - x = nsync_cv_wait_with_deadline (&cv, &mu, NSYNC_CLOCK, nsync_time_no_deadline, cancel); - if (x != ECANCELED) { - TEST_FATAL (t, ("nsync_cv_wait() returned non-cancelled (%d) for " - "a cancellation; expected %d", - x, ECANCELED)); - } - end_time = nsync_time_now (NSYNC_CLOCK); - if (nsync_time_cmp (end_time, start_time) < 0) { - char *elapsed_str = nsync_time_str (nsync_time_sub (expected_end_time, end_time), 2); - TEST_ERROR (t, ("nsync_cv_wait() returned %s too early", elapsed_str)); - free (elapsed_str); - } - if (nsync_time_cmp (nsync_time_add (start_time, too_late), end_time) < 0) { - too_late_violations++; - } - nsync_note_notify (cancel); - - nsync_note_free (cancel); - } - nsync_mu_unlock (&mu); - if (too_late_violations > TOO_LATE_ALLOWED) { - TEST_ERROR (t, ("nsync_cv_wait() returned too late %d times", too_late_violations)); - } -} - -/* --------------------------- */ - -/* Names of debug results for test_cv_debug. */ -static const char *result_name[] = { - "init_mu0", - "init_cv0", - "init_mu1", - "init_cv1", - "init_mu2", - "init_cv2", - "held_mu", - "wait0_mu", - "wait0_cv", - "wait1_mu", - "wait1_cv", - "wait2_mu", - "wait2_cv", - "wait3_mu", - "wait3_cv", - "rheld1_mu", - "rheld2_mu", - "rheld1again_mu", - NULL /* sentinel */ -}; - -/* state for test_cv_debug() */ -struct debug_state { - nsync_mu mu; /* protects flag field */ - nsync_cv cv; /* signalled when flag becomes zero */ - int flag; /* 0 => threads proceed; non-zero => threads block */ - - /* result[] is an array of nul-terminated string values, accessed via - name (in result_name[]) via slot(). Entries accessed from multiple - threads are protected by result_mu. 
*/ - char *result[sizeof (result_name) / sizeof (result_name[0])]; - nsync_mu result_mu; -}; - -/* Return a pointer to the slot in s->result[] associated with the - nul-terminated name[] */ -static char **slot (struct debug_state *s, const char *name) { - int i = 0; - while (result_name[i] != NULL && strcmp (result_name[i], name) != 0) { - i++; - } - if (result_name[i] == NULL) { /* caller gave non-existent name */ - abort (); - } - return (&s->result[i]); -} - -/* Check that the strings associated with nul-terminated strings name0[] and - name1[] have the same values in s->result[]. */ -static void check_same (testing t, struct debug_state *s, - const char *name0, const char *name1) { - if (strcmp (*slot (s, name0), *slot (s, name1)) != 0) { - TEST_ERROR (t, ("nsync_mu_debug_state() %s state != %s state (%s vs. %s)", - name0, name1, *slot (s, name0), *slot (s, name1))); - } -} - -/* Check that the strings associated with nul-terminated strings name0[] and - name1[] have different values in s->result[]. */ -static void check_different (testing t, struct debug_state *s, - const char *name0, const char *name1) { - if (strcmp (*slot (s, name0), *slot (s, name1)) == 0) { - TEST_ERROR (t, ("nsync_mu_debug_state() %s state == %s state", - name0, name1)); - } -} - -/* Return whether the integer at address v is zero. */ -static int int_is_zero (const void *v) { - return (*(int *)v == 0); -} - -/* Acquire and release s->mu in write mode, waiting for s->flag==0 - using nsync_mu_wait(). */ -static void debug_thread_writer (struct debug_state *s) { - nsync_mu_lock (&s->mu); - nsync_mu_wait (&s->mu, &int_is_zero, &s->flag, NULL); - nsync_mu_unlock (&s->mu); -} - -/* Acquire and release s->mu in write mode, waiting for s->flag==0 - using nsync_cv_wait(). 
*/ -static void debug_thread_writer_cv (struct debug_state *s) { - nsync_mu_lock (&s->mu); - while (s->flag != 0) { - nsync_cv_wait (&s->cv, &s->mu); - } - nsync_mu_unlock (&s->mu); -} - -/* Acquire and release s->mu in read mode, waiting for s->flag==0 - using nsync_mu_wait(). - If name!=NULL, record state of s->mu while held using name[]. */ -static void debug_thread_reader (struct debug_state *s, - const char *name) { - nsync_mu_rlock (&s->mu); - nsync_mu_wait (&s->mu, &int_is_zero, &s->flag, NULL); - if (name != NULL) { - int len = 1024; - nsync_mu_lock (&s->result_mu); - *slot (s, name) = nsync_mu_debug_state_and_waiters ( - &s->mu, (char *) malloc (len), len); - nsync_mu_unlock (&s->result_mu); - } - nsync_mu_runlock (&s->mu); -} - -/* Acquire and release s->mu in read mode, waiting for s->flag==0 - using nsync_cv_wait(). - If name!=NULL, record state of s->mu while held using name[]. */ -static void debug_thread_reader_cv (struct debug_state *s, - const char *name) { - nsync_mu_rlock (&s->mu); - while (s->flag != 0) { - nsync_cv_wait (&s->cv, &s->mu); - } - if (name != NULL) { - int len = 1024; - nsync_mu_lock (&s->result_mu); - *slot (s, name) = nsync_mu_debug_state_and_waiters ( - &s->mu, (char *) malloc (len), len); - nsync_mu_unlock (&s->result_mu); - } - nsync_mu_runlock (&s->mu); -} - -CLOSURE_DECL_BODY1 (debug_thread, struct debug_state *) -CLOSURE_DECL_BODY2 (debug_thread_reader, struct debug_state *, const char *) - -/* Check that nsync_mu_debug_state() and nsync_cv_debug_state() - and their variants yield reasonable results. - - The specification of those routines is intentionally loose, - so this do not check much, but the various possibilities can be - examined using the verbose testing flag (-v). 
*/ -static void test_cv_debug (testing t) { - int i; - int len = 1024; - char *tmp; - char *buf; - int buflen; - struct debug_state xs; - struct debug_state *s = &xs; - bzero ((void *) s, sizeof (*s)); - - /* Use nsync_*_debugger to check that they work. */ - tmp = nsync_mu_debugger (&s->mu); - buflen = strlen (tmp)+1; - buf = (char *) malloc (buflen); - snprintf (buf, buflen, "%s", tmp); - *slot (s, "init_mu0") = buf; - - tmp = nsync_cv_debugger (&s->cv); - buflen = strlen (tmp)+1; - buf = (char *) malloc (buflen); - snprintf (buf, buflen, "%s", tmp); - *slot (s, "init_cv0") = buf; - - /* Get the same information via the other routines */ - *slot (s, "init_mu1") = nsync_mu_debug_state ( - &s->mu, (char *) malloc (len), len); - *slot (s, "init_cv1") = nsync_cv_debug_state ( - &s->cv, (char *) malloc (len), len); - *slot (s, "init_mu2") = nsync_mu_debug_state_and_waiters ( - &s->mu, (char *) malloc (len), len); - *slot (s, "init_cv2") = nsync_cv_debug_state_and_waiters ( - &s->cv, (char *) malloc (len), len); - - nsync_mu_lock (&s->mu); - *slot (s, "held_mu") = nsync_mu_debug_state_and_waiters ( - &s->mu, (char *) malloc (len), len); - nsync_mu_unlock (&s->mu); - - /* set up several threads waiting on the mutex */ - nsync_mu_lock (&s->mu); - s->flag = 1; /* so thread will block on conditions */ - closure_fork (closure_debug_thread (&debug_thread_writer, s)); - closure_fork (closure_debug_thread (&debug_thread_writer, s)); - closure_fork (closure_debug_thread (&debug_thread_writer, s)); - closure_fork (closure_debug_thread_reader (&debug_thread_reader, s, NULL)); - closure_fork (closure_debug_thread (&debug_thread_writer_cv, s)); - closure_fork (closure_debug_thread (&debug_thread_writer_cv, s)); - closure_fork (closure_debug_thread (&debug_thread_writer_cv, s)); - closure_fork (closure_debug_thread_reader (&debug_thread_reader_cv, s, NULL)); - nsync_time_sleep (NSYNC_CLOCK, nsync_time_ms (500)); - *slot (s, "wait0_mu") = nsync_mu_debug_state_and_waiters ( - &s->mu, 
(char *) malloc (len), len); - *slot (s, "wait0_cv") = nsync_cv_debug_state_and_waiters ( - &s->cv, (char *) malloc (len), len); - - /* allow the threads to proceed to their conditional waits */ - nsync_mu_unlock (&s->mu); - nsync_time_sleep (NSYNC_CLOCK, nsync_time_ms (500)); - *slot (s, "wait1_mu") = nsync_mu_debug_state_and_waiters ( - &s->mu, (char *) malloc (len), len); - *slot (s, "wait1_cv") = nsync_cv_debug_state_and_waiters ( - &s->cv, (char *) malloc (len), len); - - nsync_mu_lock (&s->mu); - /* move cv waiters to mutex queue */ - nsync_cv_broadcast (&s->cv); - *slot (s, "wait2_mu") = nsync_mu_debug_state_and_waiters ( - &s->mu, (char *) malloc (len), len); - *slot (s, "wait2_cv") = nsync_cv_debug_state_and_waiters ( - &s->cv, (char *) malloc (len), len); - - /* allow all threads to proceed and exit */ - s->flag = 0; - nsync_mu_unlock (&s->mu); - nsync_time_sleep (NSYNC_CLOCK, nsync_time_ms (500)); - *slot (s, "wait3_mu") = nsync_mu_debug_state_and_waiters ( - &s->mu, (char *) malloc (len), len); - *slot (s, "wait3_cv") = nsync_cv_debug_state_and_waiters ( - &s->cv, (char *) malloc (len), len); - - /* Test with more than one reader */ - nsync_mu_rlock (&s->mu); - *slot (s, "rheld1_mu") = nsync_mu_debug_state_and_waiters ( - &s->mu, (char *) malloc (len), len); - closure_fork (closure_debug_thread_reader ( - &debug_thread_reader, s, "rheld2_mu")); - nsync_time_sleep (NSYNC_CLOCK, nsync_time_ms (500)); - *slot (s, "rheld1again_mu") = nsync_mu_debug_state_and_waiters ( - &s->mu, (char *) malloc (len), len); - nsync_mu_runlock (&s->mu); - - check_same (t, s, "init_mu0", "init_mu1"); - check_same (t, s, "init_mu0", "init_mu2"); - check_same (t, s, "init_cv0", "init_cv1"); - check_same (t, s, "init_cv0", "init_cv2"); - check_different (t, s, "init_mu0", "held_mu"); - check_different (t, s, "rheld1_mu", "held_mu"); - /* Must acquire result_mu, because the "rheld2_mu" slot is accessed - from the debug_thread_reader() thread created above. 
*/ - nsync_mu_lock (&s->result_mu); - check_different (t, s, "rheld1_mu", "rheld2_mu"); - nsync_mu_unlock (&s->result_mu); - check_different (t, s, "init_mu0", "init_cv0"); - - for (i = 0; result_name[i] != NULL; i++) { - if (testing_verbose (t)) { - const char *str = *slot (s, result_name[i]); - TEST_LOG (t, ("%-16s %s\n", result_name[i], str)); - } - if (strlen (s->result[i]) == 0) { - TEST_ERROR (t, ("nsync_mu_debug_state() %s empty", - result_name[i])); - } - free (s->result[i]); - } -} - -/* --------------------------- */ - -/* Max number of waiter threads used in transfer test. - The last uses a conditional critical section, and others - use a condition variable. */ -#define TRANSFER_MAX_WAITERS 8 - -/* A struct cv_transfer is used to test cv-to-mu thread transfer. - There are up to TRANSFER_MAX_WAITERS waiter threads, and a wakeup thread. - Some threads wait using conditional critical sections, - and others using a condition variable. */ -struct cv_transfer { - nsync_mu mu; - - nsync_cv cv; /* signalled each time a cond[] element becomes non-zero */ - /* Thread i waits for cond[i] to be non-zero; under mu. */ - int cond[TRANSFER_MAX_WAITERS]; - - nsync_mu control_mu; /* protects fields below */ - nsync_cv done_cv; /* signalled each time an element of done[] becomes non-zero */ - int ready[TRANSFER_MAX_WAITERS]; /* set by waiters as they wait */ - int done[TRANSFER_MAX_WAITERS]; /* set by completed waiters: to 1 by readers, and to 2 by writers */ -}; - -/* Return whether *(int *)v != 0. Used as a condition for nsync_mu_wait(). */ -static int int_is_non_zero (const void *v) { - return (0 != *(const int *)v); -} - -/* Return when *pi becomes non-zero, where *pi is protected by *mu. - Acquires and releases *mu. */ -static void transfer_await_nonzero (nsync_mu *mu, int *pi) { - nsync_mu_lock (mu); - nsync_mu_wait (mu, &int_is_non_zero, pi, NULL); - nsync_mu_unlock (mu); -} - -/* Set *pi to x value, where *pi is protected by *mu. - Acquires and releases *mu. 
*/ -static void transfer_set (nsync_mu *mu, int *pi, int x) { - nsync_mu_lock (mu); - *pi = x; - nsync_mu_unlock (mu); -} - -/* Lock and unlock routines for writers (index 0), and readers (index 1). */ -static const struct { - void (*lock) (nsync_mu *); - void (*unlock) (nsync_mu *); -} lock_type[2] = { - { &nsync_mu_lock, &nsync_mu_unlock }, - { &nsync_mu_rlock, &nsync_mu_runlock }, -}; - -/* Signal and broadcast routines */ -typedef void (*wakeup_func_type) (nsync_cv *); -static wakeup_func_type wakeup_func[2] = { &nsync_cv_broadcast, &nsync_cv_signal }; - -/* Acquire cvt->mu in write or read mode (depending on "reader"), - set cvt->ready[i], wait for cvt->cond[i] to become non-zero (using - a condition variable if use_cv!=0), then release cvt->mu, and - set cvt->done[i]. - Used as the body of waiter threads created by test_cv_transfer(). */ -static void transfer_waiter_thread (struct cv_transfer *cvt, int i, int reader, int use_cv) { - (*lock_type[reader].lock) (&cvt->mu); - transfer_set (&cvt->control_mu, &cvt->ready[i], 1); - if (use_cv) { - while (!cvt->cond[i]) { - nsync_cv_wait (&cvt->cv, &cvt->mu); - } - } else { - nsync_mu_wait (&cvt->mu, &int_is_non_zero, &cvt->cond[i], NULL); - } - (*lock_type[reader].unlock) (&cvt->mu); - - transfer_set (&cvt->control_mu, &cvt->done[i], reader? 1 : 2); - nsync_cv_broadcast (&cvt->done_cv); -} - -/* Return whether all the elements a[0..n-1] are less than x. */ -static int are_all_below (int a[], int n, int x) { - int i; - for (i = 0; i != n && a[i] < x; i++) { - } - return (i == n); -} - -CLOSURE_DECL_BODY4 (transfer_thread, struct cv_transfer *, int, int, int) - -/* Test cv-to-mutex queue transfer. (See the code in cv.c, wake_waiters().) 
- - The queue transfer needs to work regardless of: - - whether the mutex is also being used with conditional critical sections, - - whether reader locks are used, - - whether the waker signals from within the critical section (as it would in - a traditional monitor), or after that critical section, and - - the number of threads that might be awoken. */ -static void test_cv_transfer (testing t) { - int waiters; /* number of waiters (in [2, TRANSFER_MAX_WAITERS]). */ - int cv_writers; /* number of cv_writers: -1 means all */ - int ccs_reader; /* ccs waiter is a reader */ - int wakeup_type; /* bits: use_signal and after_region */ - enum { use_signal = 0x1 }; /* use signal rather than broadcast */ - enum { after_region = 0x2 }; /* perform wakeup after region, rather than within */ - struct cv_transfer Xcvt; - struct cv_transfer *cvt = &Xcvt; /* So all accesses are of form cvt-> */ - int i; - - /* for all settings of all of wakeup_type, ccs_reader, cv_writers, - and various different numbers of waiters */ - for (waiters = 2; waiters <= TRANSFER_MAX_WAITERS; waiters <<= 1) { - for (wakeup_type = 0; wakeup_type != 4; wakeup_type++) { - for (cv_writers = -1; cv_writers != 3; cv_writers++) { - for (ccs_reader = 0; ccs_reader != 2; ccs_reader++) { - if (testing_verbose (t)) { - TEST_LOG (t, ("transfer waiters %d wakeup_type %d cv_writers %d ccs_reader %d\n", - waiters, wakeup_type, cv_writers, ccs_reader)); - } - bzero ((void *) cvt, sizeof (*cvt)); - - /* Start the waiter threads that use condition variables. */ - for (i = 0; i < waiters-1; i++) { - int is_reader = (cv_writers != -1 && i < waiters-1-cv_writers); - closure_fork (closure_transfer_thread (&transfer_waiter_thread, cvt, i, - is_reader, 1/*use_cv*/)); - transfer_await_nonzero (&cvt->control_mu, &cvt->ready[i]); - } - /* Start the waiter thread that uses conditional critical sections. 
*/ - closure_fork (closure_transfer_thread (&transfer_waiter_thread, cvt, i, - ccs_reader, 0/*use_cv*/)); - /* Wait for all waiters to enter their regions. */ - for (i = 0; i != waiters; i++) { - transfer_await_nonzero (&cvt->control_mu, &cvt->ready[i]); - } - - nsync_mu_lock (&cvt->mu); - /* At this point, all the waiter threads are in waiting: - they have set their ready[] flags, and have released cvt->mu. */ - - /* Mark all the condition-variable as runnable, - and signal at least one of them. - This may wake more than one, depending on - the presence of readers, and the use of - signal vs broadcast. */ - for (i = 0; i != waiters-1; i++) { - cvt->cond[i] = 1; - } - if ((wakeup_type & after_region) == 0) { - (*wakeup_func[wakeup_type & use_signal]) (&cvt->cv); - } - nsync_mu_unlock (&cvt->mu); - if ((wakeup_type & after_region) != 0) { - for (i = 0; i != waiters-1; i++) { - (*wakeup_func[wakeup_type & use_signal]) (&cvt->cv); - } - } - - /* Wait for at least one woken waiter to proceed, - and at least one writer if there is one. */ - nsync_mu_lock (&cvt->control_mu); - while (are_all_below (&cvt->done[0], waiters-1, cv_writers!=0? 2 : 1)) { - nsync_cv_wait (&cvt->done_cv, &cvt->control_mu); - } - nsync_mu_unlock (&cvt->control_mu); - - /* Wake all remaining threads. */ - nsync_cv_broadcast (&cvt->cv); - transfer_set (&cvt->mu, &cvt->cond[waiters-1], 1); - - /* And wait for all to finish. 
*/ - for (i = 0; i != waiters; i++) { - transfer_await_nonzero (&cvt->control_mu, &cvt->done[i]); - } - - if (testing_verbose (t)) { - TEST_LOG (t, ("transfer waiters %d wakeup_type %d cv_writers %d ccs_reader %d complete\n", - waiters, wakeup_type, cv_writers, ccs_reader)); - } - } - } - } - } -} - - -/* --------------------------- */ +#include "third_party/nsync/testing/cv_test.inc" int main (int argc, char *argv[]) { testing_base tb = testing_new (argc, argv, 0); @@ -785,8 +26,6 @@ int main (int argc, char *argv[]) { TEST_RUN (tb, test_cv_producer_consumer4); TEST_RUN (tb, test_cv_producer_consumer5); TEST_RUN (tb, test_cv_producer_consumer6); - TEST_RUN (tb, test_cv_deadline); - TEST_RUN (tb, test_cv_cancel); TEST_RUN (tb, test_cv_debug); TEST_RUN (tb, test_cv_transfer); return (testing_base_exit (tb)); diff --git a/third_party/nsync/testing/cv_test.inc b/third_party/nsync/testing/cv_test.inc new file mode 100644 index 000000000..6a1f656b3 --- /dev/null +++ b/third_party/nsync/testing/cv_test.inc @@ -0,0 +1,774 @@ +/*-*- mode:c;indent-tabs-mode:t;c-basic-offset:8;tab-width:8;coding:utf-8 -*-│ +│ vi: set noet ft=c ts=8 sw=8 fenc=utf-8 :vi │ +╞══════════════════════════════════════════════════════════════════════════════╡ +│ Copyright 2016 Google Inc. │ +│ │ +│ Licensed under the Apache License, Version 2.0 (the "License"); │ +│ you may not use this file except in compliance with the License. │ +│ You may obtain a copy of the License at │ +│ │ +│ http://www.apache.org/licenses/LICENSE-2.0 │ +│ │ +│ Unless required by applicable law or agreed to in writing, software │ +│ distributed under the License is distributed on an "AS IS" BASIS, │ +│ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. │ +│ See the License for the specific language governing permissions and │ +│ limitations under the License. 
│ +╚─────────────────────────────────────────────────────────────────────────────*/ +#include "third_party/nsync/cv.h" +#include "libc/errno.h" +#include "libc/mem/mem.h" +#include "libc/runtime/runtime.h" +#include "libc/stdio/stdio.h" +#include "libc/str/str.h" +#include "third_party/nsync/debug.h" +#include "third_party/nsync/mu.h" +#include "third_party/nsync/mu_wait.h" +#include "third_party/nsync/note.h" +#include "third_party/nsync/testing/closure.h" +#include "third_party/nsync/testing/smprintf.h" +#include "third_party/nsync/testing/testing.h" +#include "third_party/nsync/testing/time_extra.h" +#include "third_party/nsync/time.h" + +/* --------------------------- */ + +/* A cv_queue represents a FIFO queue with up to limit elements. + The storage for the queue expands as necessary up to limit. */ +typedef struct cv_queue_s { + int limit; /* max value of count---should not be changed after initialization */ + nsync_cv non_empty; /* signalled when count transitions from zero to non-zero */ + nsync_cv non_full; /* signalled when count transitions from limit to less than limit */ + nsync_mu mu; /* protects fields below */ + int pos; /* index of first in-use element */ + int count; /* number of elements in use */ + void *data[1]; /* in use elements are data[pos, ..., (pos+count-1)%limit] */ +} cv_queue; + +/* Return a pointer to new cv_queue. */ +static cv_queue *cv_queue_new (int limit) { + cv_queue *q; + int size = offsetof (struct cv_queue_s, data) + sizeof (q->data[0]) * limit; + q = (cv_queue *) malloc (size); + bzero ((void *) q, size); + q->limit = limit; + return (q); +} + +/* Add v to the end of the FIFO *q and return non-zero, or if the FIFO already + has limit elements and continues to do so until abs_deadline, do nothing and + return 0. 
*/ +static int cv_queue_put (cv_queue *q, void *v, nsync_time abs_deadline) { + int added = 0; + int wake = 0; + nsync_mu_lock (&q->mu); + while (q->count == q->limit && + nsync_cv_wait_with_deadline (&q->non_full, &q->mu, NSYNC_CLOCK, abs_deadline, NULL) == 0) { + } + if (q->count != q->limit) { + int i = q->pos + q->count; + if (q->limit <= i) { + i -= q->limit; + } + q->data[i] = v; + if (q->count == 0) { + wake = 1; + } + q->count++; + added = 1; + } + nsync_mu_unlock (&q->mu); + if (wake) { + nsync_cv_broadcast (&q->non_empty); + } + return (added); +} + +/* Remove the first value from the front of the FIFO *q and return it, + or if the FIFO is empty and continues to be so until abs_deadline, + do nothing and return NULL. */ +static void *cv_queue_get (cv_queue *q, nsync_time abs_deadline) { + void *v = NULL; + nsync_mu_lock (&q->mu); + while (q->count == 0 && + nsync_cv_wait_with_deadline (&q->non_empty, &q->mu, NSYNC_CLOCK, abs_deadline, NULL) == 0) { + } + if (q->count != 0) { + v = q->data[q->pos]; + q->data[q->pos] = NULL; + if (q->count == q->limit) { + nsync_cv_broadcast (&q->non_full); + } + q->pos++; + q->count--; + if (q->pos == q->limit) { + q->pos = 0; + } + } + nsync_mu_unlock (&q->mu); + return (v); +} + +/* --------------------------- */ + +static char ptr_to_int_c; +#define INT_TO_PTR(x) ((x) + &ptr_to_int_c) +#define PTR_TO_INT(p) (((char *) (p)) - &ptr_to_int_c) + +/* Put count integers on *q, in the sequence start*3, (start+1)*3, (start+2)*3, .... */ +static void producer_cv_n (testing t, cv_queue *q, int start, int count) { + int i; + for (i = 0; i != count; i++) { + if (!cv_queue_put (q, INT_TO_PTR ((start+i)*3), nsync_time_no_deadline)) { + TEST_FATAL (t, ("cv_queue_put() returned 0 with no deadline")); + } + } +} +CLOSURE_DECL_BODY4 (producer_cv_n, testing, cv_queue *, int, int) + +/* Get count integers from *q, and check that they are in the + sequence start*3, (start+1)*3, (start+2)*3, .... 
*/ +static void consumer_cv_n (testing t, cv_queue *q, int start, int count) { + int i; + for (i = 0; i != count; i++) { + void *v = cv_queue_get (q, nsync_time_no_deadline); + int x; + if (v == NULL) { + TEST_FATAL (t, ("cv_queue_get() returned NULL with no deadline")); + } + x = PTR_TO_INT (v); + if (x != (start+i)*3) { + TEST_FATAL (t, ("cv_queue_get() returned bad value; want %d, got %d", + (start+i)*3, x)); + } + } +} + +/* CV_PRODUCER_CONSUMER_N is the number of elements passed from producer to consumer in the + test_cv_producer_consumer*() tests below. */ +#define CV_PRODUCER_CONSUMER_N 100000 + +/* Send a stream of integers from a producer thread to + a consumer thread via a queue with limit 10**0. */ +static void test_cv_producer_consumer0 (testing t) { + cv_queue *q = cv_queue_new (1); + closure_fork (closure_producer_cv_n (&producer_cv_n, t, q, 0, CV_PRODUCER_CONSUMER_N)); + consumer_cv_n (t, q, 0, CV_PRODUCER_CONSUMER_N); + free (q); +} + +/* Send a stream of integers from a producer thread to + a consumer thread via a queue with limit 10**1. */ +static void test_cv_producer_consumer1 (testing t) { + cv_queue *q = cv_queue_new (10); + closure_fork (closure_producer_cv_n (&producer_cv_n, t, q, 0, CV_PRODUCER_CONSUMER_N)); + consumer_cv_n (t, q, 0, CV_PRODUCER_CONSUMER_N); + free (q); +} + +/* Send a stream of integers from a producer thread to + a consumer thread via a queue with limit 10**2. */ +static void test_cv_producer_consumer2 (testing t) { + cv_queue *q = cv_queue_new (100); + closure_fork (closure_producer_cv_n (&producer_cv_n, t, q, 0, CV_PRODUCER_CONSUMER_N)); + consumer_cv_n (t, q, 0, CV_PRODUCER_CONSUMER_N); + free (q); +} + +/* Send a stream of integers from a producer thread to + a consumer thread via a queue with limit 10**3. 
*/ +static void test_cv_producer_consumer3 (testing t) { + cv_queue *q = cv_queue_new (1000); + closure_fork (closure_producer_cv_n (&producer_cv_n, t, q, 0, CV_PRODUCER_CONSUMER_N)); + consumer_cv_n (t, q, 0, CV_PRODUCER_CONSUMER_N); + free (q); +} + +/* Send a stream of integers from a producer thread to + a consumer thread via a queue with limit 10**4. */ +static void test_cv_producer_consumer4 (testing t) { + cv_queue *q = cv_queue_new (10 * 1000); + closure_fork (closure_producer_cv_n (&producer_cv_n, t, q, 0, CV_PRODUCER_CONSUMER_N)); + consumer_cv_n (t, q, 0, CV_PRODUCER_CONSUMER_N); + free (q); +} + +/* Send a stream of integers from a producer thread to + a consumer thread via a queue with limit 10**5. */ +static void test_cv_producer_consumer5 (testing t) { + cv_queue *q = cv_queue_new (100 * 1000); + closure_fork (closure_producer_cv_n (&producer_cv_n, t, q, 0, CV_PRODUCER_CONSUMER_N)); + consumer_cv_n (t, q, 0, CV_PRODUCER_CONSUMER_N); + free (q); +} + +/* Send a stream of integers from a producer thread to + a consumer thread via a queue with limit 10**6. */ +static void test_cv_producer_consumer6 (testing t) { + cv_queue *q = cv_queue_new (1000 * 1000); + closure_fork (closure_producer_cv_n (&producer_cv_n, t, q, 0, CV_PRODUCER_CONSUMER_N)); + consumer_cv_n (t, q, 0, CV_PRODUCER_CONSUMER_N); + free (q); +} + +/* The following values control how aggressively we police the timeout. */ +#define TOO_EARLY_MS 1 +#define TOO_LATE_MS 100 /* longer, to accommodate scheduling delays */ +#define TOO_LATE_ALLOWED 25 /* number of iterations permitted to violate too_late */ + +/* Check timeouts on a CV wait_with_deadline(). 
*/ +static void test_cv_deadline (testing t) { + int too_late_violations; + nsync_mu mu; + nsync_cv cv; + int i; + nsync_time too_early; + nsync_time too_late; + + nsync_mu_init (&mu); + nsync_cv_init (&cv); + too_early = nsync_time_ms (TOO_EARLY_MS); + too_late = nsync_time_ms (TOO_LATE_MS); + too_late_violations = 0; + nsync_mu_lock (&mu); + for (i = 0; i != 50; i++) { + nsync_time end_time; + nsync_time start_time; + nsync_time expected_end_time; + start_time = nsync_time_now (NSYNC_CLOCK); + expected_end_time = nsync_time_add (start_time, nsync_time_ms (87)); + if (nsync_cv_wait_with_deadline (&cv, &mu, NSYNC_CLOCK, expected_end_time, + NULL) != ETIMEDOUT) { + TEST_FATAL (t, ("nsync_cv_wait() returned non-expired for a timeout")); + } + end_time = nsync_time_now (NSYNC_CLOCK); + if (nsync_time_cmp (end_time, nsync_time_sub (expected_end_time, too_early)) < 0) { + char *elapsed_str = nsync_time_str (nsync_time_sub (expected_end_time, end_time), 2); + TEST_ERROR (t, ("nsync_cv_wait() returned %s too early", elapsed_str)); + free (elapsed_str); + } + if (nsync_time_cmp (nsync_time_add (expected_end_time, too_late), end_time) < 0) { + too_late_violations++; + } + } + nsync_mu_unlock (&mu); + if (too_late_violations > TOO_LATE_ALLOWED) { + TEST_ERROR (t, ("nsync_cv_wait() returned too late %d times", too_late_violations)); + } +} + +/* Check cancellations with nsync_cv_wait_with_deadline(). */ +static void test_cv_cancel (testing t) { + nsync_time future_time; + int too_late_violations; + nsync_mu mu; + nsync_cv cv; + int i; + nsync_time too_early; + nsync_time too_late; + + nsync_mu_init (&mu); + nsync_cv_init (&cv); + too_early = nsync_time_ms (TOO_EARLY_MS); + too_late = nsync_time_ms (TOO_LATE_MS); + + /* The loops below cancel after 87 milliseconds, like the timeout tests above. 
*/ + + future_time = nsync_time_add (nsync_time_now (NSYNC_CLOCK), nsync_time_ms (3600000)); /* test cancels with timeout */ + + too_late_violations = 0; + nsync_mu_lock (&mu); + for (i = 0; i != 50; i++) { + int x; + nsync_note cancel; + nsync_time end_time; + nsync_time start_time; + nsync_time expected_end_time; + start_time = nsync_time_now (NSYNC_CLOCK); + expected_end_time = nsync_time_add (start_time, nsync_time_ms (87)); + + cancel = nsync_note_new (NULL, NSYNC_CLOCK, expected_end_time); + + x = nsync_cv_wait_with_deadline (&cv, &mu, NSYNC_CLOCK, future_time, cancel); + if (x != ECANCELED) { + TEST_FATAL (t, ("nsync_cv_wait() returned non-cancelled (%d) for " + "a cancellation; expected %d", + x, ECANCELED)); + } + end_time = nsync_time_now (NSYNC_CLOCK); + if (nsync_time_cmp (end_time, nsync_time_sub (expected_end_time, too_early)) < 0) { + char *elapsed_str = nsync_time_str (nsync_time_sub (expected_end_time, end_time), 2); + TEST_ERROR (t, ("nsync_cv_wait() returned %s too early", elapsed_str)); + free (elapsed_str); + } + if (nsync_time_cmp (nsync_time_add (expected_end_time, too_late), end_time) < 0) { + too_late_violations++; + } + + /* Check that an already cancelled wait returns immediately. 
*/ + start_time = nsync_time_now (NSYNC_CLOCK); + + x = nsync_cv_wait_with_deadline (&cv, &mu, NSYNC_CLOCK, nsync_time_no_deadline, cancel); + if (x != ECANCELED) { + TEST_FATAL (t, ("nsync_cv_wait() returned non-cancelled (%d) for " + "a cancellation; expected %d", + x, ECANCELED)); + } + end_time = nsync_time_now (NSYNC_CLOCK); + if (nsync_time_cmp (end_time, start_time) < 0) { + char *elapsed_str = nsync_time_str (nsync_time_sub (expected_end_time, end_time), 2); + TEST_ERROR (t, ("nsync_cv_wait() returned %s too early", elapsed_str)); + free (elapsed_str); + } + if (nsync_time_cmp (nsync_time_add (start_time, too_late), end_time) < 0) { + too_late_violations++; + } + nsync_note_notify (cancel); + + nsync_note_free (cancel); + } + nsync_mu_unlock (&mu); + if (too_late_violations > TOO_LATE_ALLOWED) { + TEST_ERROR (t, ("nsync_cv_wait() returned too late %d times", too_late_violations)); + } +} + +/* --------------------------- */ + +/* Names of debug results for test_cv_debug. */ +static const char *result_name[] = { + "init_mu0", + "init_cv0", + "init_mu1", + "init_cv1", + "init_mu2", + "init_cv2", + "held_mu", + "wait0_mu", + "wait0_cv", + "wait1_mu", + "wait1_cv", + "wait2_mu", + "wait2_cv", + "wait3_mu", + "wait3_cv", + "rheld1_mu", + "rheld2_mu", + "rheld1again_mu", + NULL /* sentinel */ +}; + +/* state for test_cv_debug() */ +struct debug_state { + nsync_mu mu; /* protects flag field */ + nsync_cv cv; /* signalled when flag becomes zero */ + int flag; /* 0 => threads proceed; non-zero => threads block */ + + /* result[] is an array of nul-terminated string values, accessed via + name (in result_name[]) via slot(). Entries accessed from multiple + threads are protected by result_mu. 
*/ + char *result[sizeof (result_name) / sizeof (result_name[0])]; + nsync_mu result_mu; +}; + +/* Return a pointer to the slot in s->result[] associated with the + nul-terminated name[] */ +static char **slot (struct debug_state *s, const char *name) { + int i = 0; + while (result_name[i] != NULL && strcmp (result_name[i], name) != 0) { + i++; + } + if (result_name[i] == NULL) { /* caller gave non-existent name */ + abort (); + } + return (&s->result[i]); +} + +/* Check that the strings associated with nul-terminated strings name0[] and + name1[] have the same values in s->result[]. */ +static void check_same (testing t, struct debug_state *s, + const char *name0, const char *name1) { + if (strcmp (*slot (s, name0), *slot (s, name1)) != 0) { + TEST_ERROR (t, ("nsync_mu_debug_state() %s state != %s state (%s vs. %s)", + name0, name1, *slot (s, name0), *slot (s, name1))); + } +} + +/* Check that the strings associated with nul-terminated strings name0[] and + name1[] have different values in s->result[]. */ +static void check_different (testing t, struct debug_state *s, + const char *name0, const char *name1) { + if (strcmp (*slot (s, name0), *slot (s, name1)) == 0) { + TEST_ERROR (t, ("nsync_mu_debug_state() %s state == %s state", + name0, name1)); + } +} + +/* Return whether the integer at address v is zero. */ +static int int_is_zero (const void *v) { + return (*(int *)v == 0); +} + +/* Acquire and release s->mu in write mode, waiting for s->flag==0 + using nsync_mu_wait(). */ +static void debug_thread_writer (struct debug_state *s) { + nsync_mu_lock (&s->mu); + nsync_mu_wait (&s->mu, &int_is_zero, &s->flag, NULL); + nsync_mu_unlock (&s->mu); +} + +/* Acquire and release s->mu in write mode, waiting for s->flag==0 + using nsync_cv_wait(). 
*/
+static void debug_thread_writer_cv (struct debug_state *s) {
+	nsync_mu_lock (&s->mu);
+	while (s->flag != 0) {
+		nsync_cv_wait (&s->cv, &s->mu);
+	}
+	nsync_mu_unlock (&s->mu);
+}
+
+/* Acquire and release s->mu in read mode, waiting for s->flag==0
+   using nsync_mu_wait().
+   If name!=NULL, record state of s->mu while held using name[]. */
+static void debug_thread_reader (struct debug_state *s,
+				 const char *name) {
+	nsync_mu_rlock (&s->mu);
+	nsync_mu_wait (&s->mu, &int_is_zero, &s->flag, NULL);
+	if (name != NULL) {
+		int len = 1024;
+		nsync_mu_lock (&s->result_mu);
+		*slot (s, name) = nsync_mu_debug_state_and_waiters (
+			&s->mu, (char *) malloc (len), len);
+		nsync_mu_unlock (&s->result_mu);
+	}
+	nsync_mu_runlock (&s->mu);
+}
+
+/* Acquire and release s->mu in read mode, waiting for s->flag==0
+   using nsync_cv_wait().
+   If name!=NULL, record state of s->mu while held using name[]. */
+static void debug_thread_reader_cv (struct debug_state *s,
+				    const char *name) {
+	nsync_mu_rlock (&s->mu);
+	while (s->flag != 0) {
+		nsync_cv_wait (&s->cv, &s->mu);
+	}
+	if (name != NULL) {
+		int len = 1024;
+		nsync_mu_lock (&s->result_mu);
+		*slot (s, name) = nsync_mu_debug_state_and_waiters (
+			&s->mu, (char *) malloc (len), len);
+		nsync_mu_unlock (&s->result_mu);
+	}
+	nsync_mu_runlock (&s->mu);
+}
+
+CLOSURE_DECL_BODY1 (debug_thread, struct debug_state *)
+CLOSURE_DECL_BODY2 (debug_thread_reader, struct debug_state *, const char *)
+
+/* Check that nsync_mu_debug_state() and nsync_cv_debug_state()
+   and their variants yield reasonable results.
+
+   The specification of those routines is intentionally loose,
+   so this does not check much, but the various possibilities can be
+   examined using the verbose testing flag (-v). 
*/ +static void test_cv_debug (testing t) { + int i; + int len = 1024; + char *tmp; + char *buf; + int buflen; + struct debug_state xs; + struct debug_state *s = &xs; + bzero ((void *) s, sizeof (*s)); + + /* Use nsync_*_debugger to check that they work. */ + tmp = nsync_mu_debugger (&s->mu); + buflen = strlen (tmp)+1; + buf = (char *) malloc (buflen); + snprintf (buf, buflen, "%s", tmp); + *slot (s, "init_mu0") = buf; + + tmp = nsync_cv_debugger (&s->cv); + buflen = strlen (tmp)+1; + buf = (char *) malloc (buflen); + snprintf (buf, buflen, "%s", tmp); + *slot (s, "init_cv0") = buf; + + /* Get the same information via the other routines */ + *slot (s, "init_mu1") = nsync_mu_debug_state ( + &s->mu, (char *) malloc (len), len); + *slot (s, "init_cv1") = nsync_cv_debug_state ( + &s->cv, (char *) malloc (len), len); + *slot (s, "init_mu2") = nsync_mu_debug_state_and_waiters ( + &s->mu, (char *) malloc (len), len); + *slot (s, "init_cv2") = nsync_cv_debug_state_and_waiters ( + &s->cv, (char *) malloc (len), len); + + nsync_mu_lock (&s->mu); + *slot (s, "held_mu") = nsync_mu_debug_state_and_waiters ( + &s->mu, (char *) malloc (len), len); + nsync_mu_unlock (&s->mu); + + /* set up several threads waiting on the mutex */ + nsync_mu_lock (&s->mu); + s->flag = 1; /* so thread will block on conditions */ + closure_fork (closure_debug_thread (&debug_thread_writer, s)); + closure_fork (closure_debug_thread (&debug_thread_writer, s)); + closure_fork (closure_debug_thread (&debug_thread_writer, s)); + closure_fork (closure_debug_thread_reader (&debug_thread_reader, s, NULL)); + closure_fork (closure_debug_thread (&debug_thread_writer_cv, s)); + closure_fork (closure_debug_thread (&debug_thread_writer_cv, s)); + closure_fork (closure_debug_thread (&debug_thread_writer_cv, s)); + closure_fork (closure_debug_thread_reader (&debug_thread_reader_cv, s, NULL)); + nsync_time_sleep (NSYNC_CLOCK, nsync_time_ms (500)); + *slot (s, "wait0_mu") = nsync_mu_debug_state_and_waiters ( + &s->mu, 
(char *) malloc (len), len); + *slot (s, "wait0_cv") = nsync_cv_debug_state_and_waiters ( + &s->cv, (char *) malloc (len), len); + + /* allow the threads to proceed to their conditional waits */ + nsync_mu_unlock (&s->mu); + nsync_time_sleep (NSYNC_CLOCK, nsync_time_ms (500)); + *slot (s, "wait1_mu") = nsync_mu_debug_state_and_waiters ( + &s->mu, (char *) malloc (len), len); + *slot (s, "wait1_cv") = nsync_cv_debug_state_and_waiters ( + &s->cv, (char *) malloc (len), len); + + nsync_mu_lock (&s->mu); + /* move cv waiters to mutex queue */ + nsync_cv_broadcast (&s->cv); + *slot (s, "wait2_mu") = nsync_mu_debug_state_and_waiters ( + &s->mu, (char *) malloc (len), len); + *slot (s, "wait2_cv") = nsync_cv_debug_state_and_waiters ( + &s->cv, (char *) malloc (len), len); + + /* allow all threads to proceed and exit */ + s->flag = 0; + nsync_mu_unlock (&s->mu); + nsync_time_sleep (NSYNC_CLOCK, nsync_time_ms (500)); + *slot (s, "wait3_mu") = nsync_mu_debug_state_and_waiters ( + &s->mu, (char *) malloc (len), len); + *slot (s, "wait3_cv") = nsync_cv_debug_state_and_waiters ( + &s->cv, (char *) malloc (len), len); + + /* Test with more than one reader */ + nsync_mu_rlock (&s->mu); + *slot (s, "rheld1_mu") = nsync_mu_debug_state_and_waiters ( + &s->mu, (char *) malloc (len), len); + closure_fork (closure_debug_thread_reader ( + &debug_thread_reader, s, "rheld2_mu")); + nsync_time_sleep (NSYNC_CLOCK, nsync_time_ms (500)); + *slot (s, "rheld1again_mu") = nsync_mu_debug_state_and_waiters ( + &s->mu, (char *) malloc (len), len); + nsync_mu_runlock (&s->mu); + + check_same (t, s, "init_mu0", "init_mu1"); + check_same (t, s, "init_mu0", "init_mu2"); + check_same (t, s, "init_cv0", "init_cv1"); + check_same (t, s, "init_cv0", "init_cv2"); + check_different (t, s, "init_mu0", "held_mu"); + check_different (t, s, "rheld1_mu", "held_mu"); + /* Must acquire result_mu, because the "rheld2_mu" slot is accessed + from the debug_thread_reader() thread created above. 
*/ + nsync_mu_lock (&s->result_mu); + check_different (t, s, "rheld1_mu", "rheld2_mu"); + nsync_mu_unlock (&s->result_mu); + check_different (t, s, "init_mu0", "init_cv0"); + + for (i = 0; result_name[i] != NULL; i++) { + if (testing_verbose (t)) { + const char *str = *slot (s, result_name[i]); + TEST_LOG (t, ("%-16s %s\n", result_name[i], str)); + } + if (strlen (s->result[i]) == 0) { + TEST_ERROR (t, ("nsync_mu_debug_state() %s empty", + result_name[i])); + } + free (s->result[i]); + } +} + +/* --------------------------- */ + +/* Max number of waiter threads used in transfer test. + The last uses a conditional critical section, and others + use a condition variable. */ +#define TRANSFER_MAX_WAITERS 8 + +/* A struct cv_transfer is used to test cv-to-mu thread transfer. + There are up to TRANSFER_MAX_WAITERS waiter threads, and a wakeup thread. + Some threads wait using conditional critical sections, + and others using a condition variable. */ +struct cv_transfer { + nsync_mu mu; + + nsync_cv cv; /* signalled each time a cond[] element becomes non-zero */ + /* Thread i waits for cond[i] to be non-zero; under mu. */ + int cond[TRANSFER_MAX_WAITERS]; + + nsync_mu control_mu; /* protects fields below */ + nsync_cv done_cv; /* signalled each time an element of done[] becomes non-zero */ + int ready[TRANSFER_MAX_WAITERS]; /* set by waiters as they wait */ + int done[TRANSFER_MAX_WAITERS]; /* set by completed waiters: to 1 by readers, and to 2 by writers */ +}; + +/* Return whether *(int *)v != 0. Used as a condition for nsync_mu_wait(). */ +static int int_is_non_zero (const void *v) { + return (0 != *(const int *)v); +} + +/* Return when *pi becomes non-zero, where *pi is protected by *mu. + Acquires and releases *mu. */ +static void transfer_await_nonzero (nsync_mu *mu, int *pi) { + nsync_mu_lock (mu); + nsync_mu_wait (mu, &int_is_non_zero, pi, NULL); + nsync_mu_unlock (mu); +} + +/* Set *pi to x value, where *pi is protected by *mu. + Acquires and releases *mu. 
*/ +static void transfer_set (nsync_mu *mu, int *pi, int x) { + nsync_mu_lock (mu); + *pi = x; + nsync_mu_unlock (mu); +} + +/* Lock and unlock routines for writers (index 0), and readers (index 1). */ +static const struct { + void (*lock) (nsync_mu *); + void (*unlock) (nsync_mu *); +} lock_type[2] = { + { &nsync_mu_lock, &nsync_mu_unlock }, + { &nsync_mu_rlock, &nsync_mu_runlock }, +}; + +/* Signal and broadcast routines */ +typedef void (*wakeup_func_type) (nsync_cv *); +static wakeup_func_type wakeup_func[2] = { &nsync_cv_broadcast, &nsync_cv_signal }; + +/* Acquire cvt->mu in write or read mode (depending on "reader"), + set cvt->ready[i], wait for cvt->cond[i] to become non-zero (using + a condition variable if use_cv!=0), then release cvt->mu, and + set cvt->done[i]. + Used as the body of waiter threads created by test_cv_transfer(). */ +static void transfer_waiter_thread (struct cv_transfer *cvt, int i, int reader, int use_cv) { + (*lock_type[reader].lock) (&cvt->mu); + transfer_set (&cvt->control_mu, &cvt->ready[i], 1); + if (use_cv) { + while (!cvt->cond[i]) { + nsync_cv_wait (&cvt->cv, &cvt->mu); + } + } else { + nsync_mu_wait (&cvt->mu, &int_is_non_zero, &cvt->cond[i], NULL); + } + (*lock_type[reader].unlock) (&cvt->mu); + + transfer_set (&cvt->control_mu, &cvt->done[i], reader? 1 : 2); + nsync_cv_broadcast (&cvt->done_cv); +} + +/* Return whether all the elements a[0..n-1] are less than x. */ +static int are_all_below (int a[], int n, int x) { + int i; + for (i = 0; i != n && a[i] < x; i++) { + } + return (i == n); +} + +CLOSURE_DECL_BODY4 (transfer_thread, struct cv_transfer *, int, int, int) + +/* Test cv-to-mutex queue transfer. (See the code in cv.c, wake_waiters().) 
+ + The queue transfer needs to work regardless of: + - whether the mutex is also being used with conditional critical sections, + - whether reader locks are used, + - whether the waker signals from within the critical section (as it would in + a traditional monitor), or after that critical section, and + - the number of threads that might be awoken. */ +static void test_cv_transfer (testing t) { + int waiters; /* number of waiters (in [2, TRANSFER_MAX_WAITERS]). */ + int cv_writers; /* number of cv_writers: -1 means all */ + int ccs_reader; /* ccs waiter is a reader */ + int wakeup_type; /* bits: use_signal and after_region */ + enum { use_signal = 0x1 }; /* use signal rather than broadcast */ + enum { after_region = 0x2 }; /* perform wakeup after region, rather than within */ + struct cv_transfer Xcvt; + struct cv_transfer *cvt = &Xcvt; /* So all accesses are of form cvt-> */ + int i; + + /* for all settings of all of wakeup_type, ccs_reader, cv_writers, + and various different numbers of waiters */ + for (waiters = 2; waiters <= TRANSFER_MAX_WAITERS; waiters <<= 1) { + for (wakeup_type = 0; wakeup_type != 4; wakeup_type++) { + for (cv_writers = -1; cv_writers != 3; cv_writers++) { + for (ccs_reader = 0; ccs_reader != 2; ccs_reader++) { + if (testing_verbose (t)) { + TEST_LOG (t, ("transfer waiters %d wakeup_type %d cv_writers %d ccs_reader %d\n", + waiters, wakeup_type, cv_writers, ccs_reader)); + } + bzero ((void *) cvt, sizeof (*cvt)); + + /* Start the waiter threads that use condition variables. */ + for (i = 0; i < waiters-1; i++) { + int is_reader = (cv_writers != -1 && i < waiters-1-cv_writers); + closure_fork (closure_transfer_thread (&transfer_waiter_thread, cvt, i, + is_reader, 1/*use_cv*/)); + transfer_await_nonzero (&cvt->control_mu, &cvt->ready[i]); + } + /* Start the waiter thread that uses conditional critical sections. 
*/ + closure_fork (closure_transfer_thread (&transfer_waiter_thread, cvt, i, + ccs_reader, 0/*use_cv*/)); + /* Wait for all waiters to enter their regions. */ + for (i = 0; i != waiters; i++) { + transfer_await_nonzero (&cvt->control_mu, &cvt->ready[i]); + } + + nsync_mu_lock (&cvt->mu); + /* At this point, all the waiter threads are in waiting: + they have set their ready[] flags, and have released cvt->mu. */ + + /* Mark all the condition-variable as runnable, + and signal at least one of them. + This may wake more than one, depending on + the presence of readers, and the use of + signal vs broadcast. */ + for (i = 0; i != waiters-1; i++) { + cvt->cond[i] = 1; + } + if ((wakeup_type & after_region) == 0) { + (*wakeup_func[wakeup_type & use_signal]) (&cvt->cv); + } + nsync_mu_unlock (&cvt->mu); + if ((wakeup_type & after_region) != 0) { + for (i = 0; i != waiters-1; i++) { + (*wakeup_func[wakeup_type & use_signal]) (&cvt->cv); + } + } + + /* Wait for at least one woken waiter to proceed, + and at least one writer if there is one. */ + nsync_mu_lock (&cvt->control_mu); + while (are_all_below (&cvt->done[0], waiters-1, cv_writers!=0? 2 : 1)) { + nsync_cv_wait (&cvt->done_cv, &cvt->control_mu); + } + nsync_mu_unlock (&cvt->control_mu); + + /* Wake all remaining threads. */ + nsync_cv_broadcast (&cvt->cv); + transfer_set (&cvt->mu, &cvt->cond[waiters-1], 1); + + /* And wait for all to finish. 
*/ + for (i = 0; i != waiters; i++) { + transfer_await_nonzero (&cvt->control_mu, &cvt->done[i]); + } + + if (testing_verbose (t)) { + TEST_LOG (t, ("transfer waiters %d wakeup_type %d cv_writers %d ccs_reader %d complete\n", + waiters, wakeup_type, cv_writers, ccs_reader)); + } + } + } + } + } +} diff --git a/third_party/nsync/testing/mu2_test.c b/third_party/nsync/testing/mu2_test.c new file mode 100644 index 000000000..938100063 --- /dev/null +++ b/third_party/nsync/testing/mu2_test.c @@ -0,0 +1,27 @@ +/*-*- mode:c;indent-tabs-mode:t;c-basic-offset:8;tab-width:8;coding:utf-8 -*-│ +│ vi: set noet ft=c ts=8 sw=8 fenc=utf-8 :vi │ +╞══════════════════════════════════════════════════════════════════════════════╡ +│ Copyright 2016 Google Inc. │ +│ │ +│ Licensed under the Apache License, Version 2.0 (the "License"); │ +│ you may not use this file except in compliance with the License. │ +│ You may obtain a copy of the License at │ +│ │ +│ http://www.apache.org/licenses/LICENSE-2.0 │ +│ │ +│ Unless required by applicable law or agreed to in writing, software │ +│ distributed under the License is distributed on an "AS IS" BASIS, │ +│ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. │ +│ See the License for the specific language governing permissions and │ +│ limitations under the License. 
│ +╚─────────────────────────────────────────────────────────────────────────────*/ +#include "third_party/nsync/testing/mu_test.inc" + +int main (int argc, char *argv[]) { + testing_base tb = testing_new (argc, argv, 0); + + TEST_RUN (tb, test_mutex_nthread); + TEST_RUN (tb, test_xmutex_nthread); + + return (testing_base_exit (tb)); +} diff --git a/third_party/nsync/testing/mu3_test.c b/third_party/nsync/testing/mu3_test.c new file mode 100644 index 000000000..2aac65baa --- /dev/null +++ b/third_party/nsync/testing/mu3_test.c @@ -0,0 +1,27 @@ +/*-*- mode:c;indent-tabs-mode:t;c-basic-offset:8;tab-width:8;coding:utf-8 -*-│ +│ vi: set noet ft=c ts=8 sw=8 fenc=utf-8 :vi │ +╞══════════════════════════════════════════════════════════════════════════════╡ +│ Copyright 2016 Google Inc. │ +│ │ +│ Licensed under the Apache License, Version 2.0 (the "License"); │ +│ you may not use this file except in compliance with the License. │ +│ You may obtain a copy of the License at │ +│ │ +│ http://www.apache.org/licenses/LICENSE-2.0 │ +│ │ +│ Unless required by applicable law or agreed to in writing, software │ +│ distributed under the License is distributed on an "AS IS" BASIS, │ +│ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. │ +│ See the License for the specific language governing permissions and │ +│ limitations under the License. 
│ +╚─────────────────────────────────────────────────────────────────────────────*/ +#include "third_party/nsync/testing/mu_test.inc" + +int main (int argc, char *argv[]) { + testing_base tb = testing_new (argc, argv, 0); + + TEST_RUN (tb, test_rwmutex_nthread); + TEST_RUN (tb, test_try_mu_nthread); + + return (testing_base_exit (tb)); +} diff --git a/third_party/nsync/testing/mu_test.c b/third_party/nsync/testing/mu_test.c index de7b84b76..fb4713f31 100644 --- a/third_party/nsync/testing/mu_test.c +++ b/third_party/nsync/testing/mu_test.c @@ -15,1101 +15,13 @@ │ See the License for the specific language governing permissions and │ │ limitations under the License. │ ╚─────────────────────────────────────────────────────────────────────────────*/ -#include "third_party/nsync/mu.h" -#include "libc/calls/calls.h" -#include "libc/str/str.h" -#include "libc/thread/thread.h" -#include "third_party/nsync/time.h" -#include "third_party/nsync/cv.h" -#include "third_party/nsync/mu_wait.h" -#include "third_party/nsync/testing/closure.h" -#include "third_party/nsync/testing/smprintf.h" -#include "third_party/nsync/testing/testing.h" -#include "third_party/nsync/testing/time_extra.h" - -/* The state shared between the threads in each of the tests below. */ -typedef struct test_data_s { - testing t; - int n_threads; /* Number of test threads; constant after init. */ - int loop_count; /* Iteration count for each test thread; constant after init */ - - /* mu_in_use protects i, id, loop_count, and finished_threads. */ - void *mu_in_use; /* points at mu, mutex, or rwmutex depending on which is in use. */ - void (*lock) (void *); /* operations on mu_in_use */ - void (*unlock) (void *); - - nsync_mu mu; - pthread_mutex_t mutex; - pthread_rwlock_t rwmutex; - - int i; /* counter incremented by test loops. */ - volatile int id; /* id of current lock-holding thread in some tests. */ - - nsync_cv done; /* Signalled when finished_threads==n_threads. 
*/ - int finished_threads; /* Count of threads that have finished. */ -} test_data; - -/* Indicate that a thread has finished its operations on test_data - by incrementing td.finished_threads, and signal td.done when it reaches td.n_threads. - See test_data_wait_for_all_threads(). */ -static void test_data_thread_finished (test_data *td) { - (*td->lock) (td->mu_in_use); - td->finished_threads++; - if (td->finished_threads == td->n_threads) { - nsync_cv_broadcast (&td->done); - } - (*td->unlock) (td->mu_in_use); -} - -/* Wait until all td.n_threads have called test_data_thread_finished(), - and then return. */ -static void test_data_wait_for_all_threads (test_data *td) { - (*td->lock) (td->mu_in_use); - while (td->finished_threads != td->n_threads) { - nsync_cv_wait_with_deadline_generic (&td->done, td->mu_in_use, - td->lock, td->unlock, - NSYNC_CLOCK, - nsync_time_no_deadline, NULL); - } - (*td->unlock) (td->mu_in_use); -} - -/* --------------------------------------- */ - -/* The body of each thread executed by test_mu_nthread() - and test_mutex_nthread. - *td represents the test data that the threads share, and id is an integer - unique to each test thread. */ -static void counting_loop (test_data *td, int id) { - int n = td->loop_count; - int i = 0; - for (i = 0; i != n; i++) { - (*td->lock) (td->mu_in_use); - td->id = id; - td->i++; - if (td->id != id) { - testing_panic ("td->id != id"); - } - (*td->unlock) (td->mu_in_use); - } - test_data_thread_finished (td); -} - -CLOSURE_DECL_BODY2 (counting, test_data *, int) - -/* Versions of nsync_mu_lock() and nsync_mu_unlock() that take "void *" - arguments, to avoid call through a function pointer of a different type, - which is undefined. 
*/ -static void void_mu_lock (void *mu) { - nsync_mu_lock ((nsync_mu *) mu); -} -static void void_mu_unlock (void *mu) { - nsync_mu_unlock((nsync_mu *) mu); -} - -/* Create a few threads, each of which increments an - integer a fixed number of times, using an nsync_mu for mutual exclusion. - It checks that the integer is incremented the correct number of times. */ -static void test_mu_nthread (testing t) { - int loop_count = 100000; - nsync_time deadline; - deadline = nsync_time_add (nsync_time_now (NSYNC_CLOCK), nsync_time_ms (1500)); - do { - int i; - test_data td; - bzero ((void *) &td, sizeof (td)); - td.t = t; - td.n_threads = 5; - td.loop_count = loop_count; - td.mu_in_use = &td.mu; - td.lock = &void_mu_lock; - td.unlock = &void_mu_unlock; - for (i = 0; i != td.n_threads; i++) { - closure_fork (closure_counting (&counting_loop, &td, i)); - } - test_data_wait_for_all_threads (&td); - if (td.i != td.n_threads*td.loop_count) { - TEST_FATAL (t, ("test_mu_nthread final count inconsistent: want %d, got %d", - td.n_threads*td.loop_count, td.i)); - } - loop_count *= 2; - } while (nsync_time_cmp (nsync_time_now (NSYNC_CLOCK), deadline) < 0); -} - -/* void pthread_mutex_lock */ -static void void_pthread_mutex_lock (void *mu) { - pthread_mutex_lock ((pthread_mutex_t *) mu); -} - -/* void pthread_mutex_unlock */ -static void void_pthread_mutex_unlock (void *mu) { - pthread_mutex_unlock ((pthread_mutex_t *) mu); -} - -/* Create a few threads, each of which increments an - integer a fixed number of times, using a pthread_mutex_t for mutual exclusion. - It checks that the integer is incremented the correct number of times. 
*/ -static void test_mutex_nthread (testing t) { - int loop_count = 100000; - nsync_time deadline; - deadline = nsync_time_add (nsync_time_now (NSYNC_CLOCK), nsync_time_ms (1500)); - do { - int i; - test_data td; - bzero ((void *) &td, sizeof (td)); - td.t = t; - td.n_threads = 5; - td.loop_count = loop_count; - td.mu_in_use = &td.mutex; - td.lock = &void_pthread_mutex_lock; - td.unlock = &void_pthread_mutex_unlock; - pthread_mutex_init (&td.mutex, NULL); - for (i = 0; i != td.n_threads; i++) { - closure_fork (closure_counting (&counting_loop, &td, i)); - } - test_data_wait_for_all_threads (&td); - if (td.i != td.n_threads*td.loop_count) { - TEST_FATAL (t, ("test_mutex_nthread final count inconsistent: want %d, got %d", - td.n_threads*td.loop_count, td.i)); - } - pthread_mutex_destroy (&td.mutex); - loop_count *= 2; - } while (nsync_time_cmp (nsync_time_now (NSYNC_CLOCK), deadline) < 0); -} - -/* Create a few threads, each of which increments an integer a fixed - number of times, using a recursive pthread_mutex_t for mutual exclusion. - It checks that the integer is incremented the correct number of times. 
*/ -static void test_xmutex_nthread (testing t) { - int loop_count = 100000; - nsync_time deadline; - deadline = nsync_time_add (nsync_time_now (NSYNC_CLOCK), nsync_time_ms (1500)); - do { - int i; - test_data td; - pthread_mutexattr_t attr; - bzero ((void *) &td, sizeof (td)); - td.t = t; - td.n_threads = 5; - td.loop_count = loop_count; - td.mu_in_use = &td.mutex; - td.lock = &void_pthread_mutex_lock; - td.unlock = &void_pthread_mutex_unlock; - pthread_mutexattr_init (&attr); - pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE); - pthread_mutex_init (&td.mutex, &attr); - pthread_mutexattr_destroy (&attr); - for (i = 0; i != td.n_threads; i++) { - closure_fork (closure_counting (&counting_loop, &td, i)); - } - test_data_wait_for_all_threads (&td); - if (td.i != td.n_threads*td.loop_count) { - TEST_FATAL (t, ("test_mutex_nthread final count inconsistent: want %d, got %d", - td.n_threads*td.loop_count, td.i)); - } - pthread_mutex_destroy (&td.mutex); - loop_count *= 2; - } while (nsync_time_cmp (nsync_time_now (NSYNC_CLOCK), deadline) < 0); -} - -/* void pthread_rwlock_wrlock */ -static void void_pthread_rwlock_wrlock (void *mu) { - pthread_rwlock_wrlock ((pthread_rwlock_t *) mu); -} - -/* void pthread_rwlock_unlock */ -static void void_pthread_rwlock_unlock (void *mu) { - pthread_rwlock_unlock ((pthread_rwlock_t *) mu); -} - -/* Create a few threads, each of which increments an - integer a fixed number of times, using a pthread_rwlock_t for mutual exclusion. - It checks that the integer is incremented the correct number of times. 
*/ -static void test_rwmutex_nthread (testing t) { - int loop_count = 100000; - nsync_time deadline; - deadline = nsync_time_add (nsync_time_now (NSYNC_CLOCK), nsync_time_ms (1500)); - do { - int i; - test_data td; - bzero ((void *) &td, sizeof (td)); - td.t = t; - td.n_threads = 5; - td.loop_count = loop_count; - td.mu_in_use = &td.rwmutex; - td.lock = &void_pthread_rwlock_wrlock; - td.unlock = &void_pthread_rwlock_unlock; - pthread_rwlock_init (&td.rwmutex, NULL); - for (i = 0; i != td.n_threads; i++) { - closure_fork (closure_counting (&counting_loop, &td, i)); - } - test_data_wait_for_all_threads (&td); - if (td.i != td.n_threads*td.loop_count) { - TEST_FATAL (t, ("test_mutex_nthread final count inconsistent: want %d, got %d", - td.n_threads*td.loop_count, td.i)); - } - pthread_rwlock_destroy (&td.rwmutex); - loop_count *= 2; - } while (nsync_time_cmp (nsync_time_now (NSYNC_CLOCK), deadline) < 0); -} - -/* --------------------------------------- */ - -/* The body of each thread executed by test_try_mu_nthread(). - *td represents the test data that the threads share, and id is an integer - unique to each test thread. */ -static void counting_loop_try_mu (test_data *td, int id) { - int i; - int n = td->loop_count; - for (i = 0; i != n; i++) { - while (!nsync_mu_trylock (&td->mu)) { - pthread_yield (); - } - td->id = id; - td->i++; - if (td->id != id) { - testing_panic ("td->id != id"); - } - n = td->loop_count; - nsync_mu_unlock (&td->mu); - } - test_data_thread_finished (td); -} - -/* Test that acquiring an nsync_mu with nsync_mu_trylock() - using several threads provides mutual exclusion. 
*/ -static void test_try_mu_nthread (testing t) { - int loop_count = 100000; - nsync_time deadline; - deadline = nsync_time_add (nsync_time_now (NSYNC_CLOCK), nsync_time_ms (1500)); - do { - int i; - test_data td; - bzero ((void *) &td, sizeof (td)); - td.t = t; - td.n_threads = 5; - td.loop_count = loop_count; - td.mu_in_use = &td.mu; - td.lock = &void_mu_lock; - td.unlock = &void_mu_unlock; - for (i = 0; i != td.n_threads; i++) { - closure_fork (closure_counting (&counting_loop_try_mu, &td, i)); - } - test_data_wait_for_all_threads (&td); - if (td.i != td.n_threads*td.loop_count) { - TEST_FATAL (t, ("test_try_mu_nthread final count inconsistent: want %d, got %d", - td.n_threads*td.loop_count, td.i)); - } - loop_count *= 2; - } while (nsync_time_cmp (nsync_time_now (NSYNC_CLOCK), deadline) < 0); -} - -/* --------------------------------------- */ - -/* An integer protected by a mutex, and with an associated - condition variable that is signalled when the counter reaches 0. */ -typedef struct counter_s { - nsync_mu mu; /* protects value */ - int value; - nsync_cv cv; /* signalled when value becomes 0 */ -} counter; - -/* Return a counter with initial value "initial". */ -static counter *counter_new (int initial) { - counter *c = (counter *) malloc (sizeof (*c)); - bzero ((void *) c, sizeof (*c)); - c->value = initial; - return (c); -} - -/* Increment *c by "increment". */ -static void counter_inc (counter *c, int increment) { - if (increment != 0) { - nsync_mu_lock (&c->mu); - c->value += increment; - if (c->value == 0) { - nsync_cv_broadcast (&c->cv); - } - nsync_mu_unlock (&c->mu); - } -} - -/* Wait on *c's condition variable until the counter - becomes 0, or abs_deadline is reached. 
*/ -static int counter_wait_for_zero_with_deadline (counter *c, nsync_time abs_deadline) { - int value; - nsync_mu_rlock (&c->mu); - while (c->value != 0 && - nsync_cv_wait_with_deadline (&c->cv, &c->mu, NSYNC_CLOCK, abs_deadline, NULL) == 0) { - } - value = c->value; - nsync_mu_runlock (&c->mu); - return (value); -} - -/* Wait on *c's condition variable until the counter becomes 0. */ -static void counter_wait_for_zero (counter *c) { - int value = counter_wait_for_zero_with_deadline (c, nsync_time_no_deadline); - if (value != 0) { - testing_panic (smprintf ("wait_for_zero() about to return with " - "non-zero value %d", value)); - } -} - -/* Return the current value of *c. */ -static int counter_value (counter *c) { - int value; - nsync_mu_rlock (&c->mu); - value = c->value; - nsync_mu_runlock (&c->mu); - return (value); -} - -/* --------------------------------------- */ - -CLOSURE_DECL_BODY9 (attempt_trylock, testing , const char *, int, nsync_mu *, - int, int, int *, int, counter *) - -/* Call nsync_mu_trylock(), and compares the result to expected_acquire. - If the lock was acquired, then: - - if expected_value != -1, compare *value against expected_value. - - increment *value. - - if release is non-zero, release the lock before returning. - In any case, the counter *done is decremented. 
*/ -static void attempt_trylock (testing t, const char *id, int verbose, - nsync_mu *mu, int expected_acquire, int release, - int *value, int expected_value, counter *done) { - int acquired = nsync_mu_trylock (mu); - if (acquired != expected_acquire) { - testing_panic (smprintf ("attempt_trylock %s: expected " - "nsync_mu_trylock() to return %d but got %d", - id, expected_acquire, acquired)); - } - if (verbose) { - TEST_LOG (t, ("attempt_trylock %s %d\n", id, acquired)); - } - if (acquired) { - nsync_mu_assert_held (mu); - if (expected_value != -1 && *value != expected_value) { - testing_panic (smprintf ("attempt_trylock %s expected " - "value %d, *value=%d", - id, expected_value, *value)); - } - (*value)++; - if (verbose) { - TEST_LOG (t, ("attempt_trylock %s incremented value to %d\n", id, *value)); - } - if (release) { - nsync_mu_unlock (mu); - } - } - counter_inc (done, -1); -} - -/* Call nsync_mu_rtrylock(), and compare the result to expected_acquire. - If the lock was acquired, then: - - if expected_value != -1, compare *value against expected_value. - - if release is non-zero, release the lock before returning. - In any case, decrement *done. 
*/ -static void attempt_rtrylock (testing t, const char *id, int verbose, - nsync_mu *mu, int expected_acquire, int release, - int *value, int expected_value, counter *done) { - int acquired = nsync_mu_rtrylock (mu); - if (acquired != expected_acquire) { - testing_panic (smprintf ("attempt_rtrylock %s: expected " - "nsync_mu_rtrylock() to return %d but got %d", - id, expected_acquire, acquired)); - } - if (verbose) { - TEST_LOG (t, ("attempt_rtrylock %s %d\n", id, acquired)); - } - if (acquired) { - nsync_mu_rassert_held (mu); - if (expected_value != -1 && *value != expected_value) { - testing_panic (smprintf ("attempt_rtrylock %s expected " - "value %d, *value=%d", - id, expected_value, *value)); - } - if (release) { - nsync_mu_runlock (mu); - } - } - counter_inc (done, -1); -} - -CLOSURE_DECL_BODY9 (lock_unlock, testing, const char *, int, nsync_mu *, - int *, int, nsync_time, counter *, counter *) - -/* First acquire *mu, then: - - if expected_value != -1, compare *value against expected_value. - - increment *value. - - sleep for "sleep". - Then release *mu and decrement *done. */ -static void lock_unlock (testing t, const char *id, int verbose, nsync_mu *mu, int *value, - int expected_value, nsync_time sleep, counter *sleeping, counter *done) { - if (verbose) { - TEST_LOG (t, ("lock_unlock %s\n", id)); - } - if (sleeping != NULL) { - counter_inc (sleeping, -1); - } - nsync_mu_lock (mu); - nsync_mu_assert_held (mu); - if (expected_value != -1 && *value != expected_value) { - testing_panic (smprintf ("lock_unlock %s expected " - "value %d, *value=%d", - id, expected_value, *value)); - } - (*value)++; - if (verbose) { - TEST_LOG (t, ("lock_unlock %s incremented value to %d\n", id, *value)); - } - nsync_time_sleep (NSYNC_CLOCK, sleep); - nsync_mu_unlock (mu); - counter_inc (done, -1); -} - -/* First acquire *mu in read mode, then: - - if expected_value != -1, compare *value against expected_value. - - sleep for "sleep". - Then release *mu and decrement *done. 
*/ -static void rlock_runlock (testing t, const char *id, int verbose, nsync_mu *mu, - int *value, int expected_value, nsync_time sleep, - counter *sleeping, counter *done) { - if (verbose) { - TEST_LOG (t, ("rlock_runlock %s\n", id)); - } - if (sleeping != NULL) { - counter_inc (sleeping, -1); - } - nsync_mu_rlock (mu); - nsync_mu_rassert_held (mu); - if (expected_value != -1 && *value != expected_value) { - testing_panic (smprintf ("rlock_runlock %s expected " - "value %d, *value=%d", id, expected_value, *value)); - } - nsync_time_sleep (NSYNC_CLOCK, sleep); - nsync_mu_runlock (mu); - counter_inc (done, -1); -} - -/* Check that the time since start_time is between expected_duration-1ms. - If the time exceeds expected_duration+slop_duration, return 1, else 0. */ -static int check_times (testing t, const char *id, nsync_time start_time, - nsync_time expected_duration, nsync_time slop_duration) { - int exceeds_count = 0; - nsync_time now; - nsync_time measured_duration; - now = nsync_time_now (NSYNC_CLOCK); - measured_duration = nsync_time_sub (now, start_time); - if (nsync_time_cmp (measured_duration, - nsync_time_sub (expected_duration, nsync_time_ms (5))) < 0) { - char *m_str = nsync_time_str (measured_duration, 2); - char *e_str = nsync_time_str (expected_duration, 2); - TEST_ERROR (t, ("check_times %s too short a delay: %s instead of %s", - id, m_str, e_str)); - free (m_str); - free (e_str); - } - if (nsync_time_cmp (nsync_time_add (expected_duration, slop_duration), measured_duration) < 0) { - exceeds_count++; - } - return (exceeds_count); -} - -/* Check the operation of nsync_mu as a reader/writer lock. 
*/ -static void test_rlock (testing t) { - int loop; - int i; - int max_write_wait_exceeded; - int max_read_wait_exceeded; - nsync_time time_unit; - nsync_time slop_duration; - nsync_time delay_duration; - nsync_time writer_duration; - nsync_time reader_duration; - static const int loop_count = 5; - static const int read_threads = 3; - static const int limit = 3; - static const int verbose = 0; - max_write_wait_exceeded = 0; - max_read_wait_exceeded = 0; - - time_unit = nsync_time_ms (100); - slop_duration = nsync_time_add (nsync_time_add (time_unit, time_unit), time_unit); - delay_duration = time_unit; - writer_duration = time_unit; - reader_duration = nsync_time_add (time_unit, time_unit); - - max_write_wait_exceeded = 0; - max_read_wait_exceeded = 0; - for (loop = 0; loop != loop_count; loop++) { - counter *lock_unlock_sleeping; - counter *rlock_runlock_sleeping; - counter *lock_unlock_done; - counter *rlock_runlock_done; - nsync_time read_start_time; - nsync_mu mu; - int value = 0; - counter *thread_done; - - nsync_time start_time; - nsync_mu_init (&mu); - start_time = nsync_time_now (NSYNC_CLOCK); - - /* ------------------------------------ */ - /* Acquire lock with nsync_mu_rtrylock(). This thread will - hold a read lock until the next line with =====. */ - thread_done = counter_new (1); - attempt_rtrylock (t, "a", verbose, &mu, 1, 0, &value, 0, thread_done); - counter_wait_for_zero (thread_done); - - nsync_mu_rassert_held (&mu); - - counter_inc (thread_done, 1); - /* Can get read lock holding read lock. */ - closure_fork (closure_attempt_trylock (&attempt_rtrylock, - t, "b", verbose, &mu, 1, 1, &value, 0, thread_done)); - counter_wait_for_zero (thread_done); - - nsync_mu_rassert_held (&mu); - - counter_inc (thread_done, 1); - /* Can't get write lock holding read lock. 
*/ - closure_fork (closure_attempt_trylock (&attempt_trylock, t, "c", verbose, - &mu, 0, 1, &value, -1, thread_done)); - counter_wait_for_zero (thread_done); - - if (!nsync_mu_is_reader (&mu)) { - TEST_FATAL(t, ("expected mu held in reader mode")); - } - - counter_inc (thread_done, 1); - closure_fork (closure_lock_unlock (&rlock_runlock, t, "d", verbose, - &mu, &value, 0, nsync_time_zero /*no delay*/, - NULL, thread_done)); - counter_wait_for_zero (thread_done); - - nsync_mu_rassert_held (&mu); - - lock_unlock_done = counter_new (1); - lock_unlock_sleeping = counter_new (1); - closure_fork (closure_lock_unlock (&lock_unlock, t, "e", verbose, - &mu, &value, 0, writer_duration, - lock_unlock_sleeping, lock_unlock_done)); - - counter_wait_for_zero (lock_unlock_sleeping); - nsync_time_sleep (NSYNC_CLOCK, delay_duration); /* give time for lock_unlock() thread to wait. */ - - nsync_mu_rassert_held (&mu); - - rlock_runlock_done = counter_new (read_threads); - rlock_runlock_sleeping = counter_new (read_threads); - for (i = 0; i != read_threads; i++) { - /* read lock will be acquired after lock_unlock() completes */ - closure_fork (closure_lock_unlock (&rlock_runlock, t, "f", verbose, - &mu, &value, 1, reader_duration, - rlock_runlock_sleeping, - rlock_runlock_done)); - } - - nsync_mu_rassert_held (&mu); - - counter_wait_for_zero (rlock_runlock_sleeping); - nsync_time_sleep (NSYNC_CLOCK, delay_duration); /* time for rlock_runlock() threads to wait. */ - - nsync_mu_rassert_held (&mu); - - if (counter_value (lock_unlock_done) == 0) { - TEST_FATAL (t, ("thread was able to acquire write lock while read lock held")); - } - if (counter_value (rlock_runlock_done) == 0) { - TEST_FATAL (t, ("thread was able to acquire read lock with " - "other reader and waiting writer")); - } - - nsync_mu_rassert_held (&mu); - - counter_inc (thread_done, 1); - /* Still can't get write lock. 
*/ - closure_fork (closure_attempt_trylock (&attempt_trylock, t, "g", verbose, - &mu, 0, 1, &value, -1, thread_done)); - counter_wait_for_zero (thread_done); - - counter_inc (thread_done, 1); - /* Now can't get read lock because a writer is waiting. */ - closure_fork (closure_attempt_trylock (&attempt_rtrylock, t, "h", verbose, - &mu, 0, 1, &value, -1, thread_done)); - counter_wait_for_zero (thread_done); - - nsync_mu_runlock (&mu); - /* ==================================== */ - - read_start_time = nsync_time_now (NSYNC_CLOCK); - counter_wait_for_zero (lock_unlock_done); /* Now can get write lock. */ - max_write_wait_exceeded += check_times (t, "i", start_time, - nsync_time_add (nsync_time_add (delay_duration, delay_duration), writer_duration), - slop_duration); - - counter_wait_for_zero (rlock_runlock_done); /* And now an get read lock again. */ - max_read_wait_exceeded += check_times (t, "j", read_start_time, - reader_duration, slop_duration); - - free (thread_done); - free (lock_unlock_done); - free (rlock_runlock_done); - free (lock_unlock_sleeping); - free (rlock_runlock_sleeping); - } - if (verbose) { - TEST_LOG (t, ("read lock max_write_wait_exceeded %d max_read_wait_exceeded %d\n", - max_write_wait_exceeded, max_read_wait_exceeded)); - } - if (max_write_wait_exceeded > limit) { - TEST_ERROR (t, ("lock_unlock() took too long %d " - "(more than %d) times out of %d", - max_write_wait_exceeded, limit, loop_count)); - } - if (max_read_wait_exceeded > limit) { - TEST_ERROR (t, ("rlock_runlock() took too long %d " - "(more than %d) times out of %d", - max_read_wait_exceeded, limit, loop_count)); - } - - max_write_wait_exceeded = 0; - max_read_wait_exceeded = 0; - for (loop = 0; loop != loop_count; loop++) { - counter *lock_unlock_sleeping; - counter *rlock_runlock_sleeping; - counter *lock_unlock_done; - counter *rlock_runlock_done; - nsync_time read_start_time; - nsync_mu mu; - int value = 0; - counter *thread_done; - - nsync_time start_time; - - nsync_mu_init 
(&mu); - start_time = nsync_time_now (NSYNC_CLOCK); - - /* ------------------------------------ */ - /* Acquire lock with nsync_mu_trylock(). This thread will hold - a write lock until the next line with =====. */ - thread_done = counter_new (1); - attempt_trylock (t, "A", verbose, &mu, 1, 0, &value, 0, thread_done); - counter_wait_for_zero (thread_done); - - nsync_mu_assert_held (&mu); - nsync_mu_rassert_held (&mu); - - counter_inc (thread_done, 1); - /* Can't get read lock while holding write lock. */ - closure_fork (closure_attempt_trylock (&attempt_rtrylock, t, "B", verbose, - &mu, 0, 1, &value, -1, thread_done)); - counter_wait_for_zero (thread_done); - - if (nsync_mu_is_reader (&mu)) { - TEST_FATAL (t, ("expected mu held in write mode")); - } - nsync_mu_assert_held (&mu); - nsync_mu_rassert_held (&mu); - - counter_inc (thread_done, 1); - /* Can't get write lock while holding write lock. */ - closure_fork (closure_attempt_trylock (&attempt_trylock, t, "C", verbose, - &mu, 0, 1, &value, -1, thread_done)); - counter_wait_for_zero (thread_done); - - nsync_mu_assert_held (&mu); - nsync_mu_rassert_held (&mu); - - lock_unlock_done = counter_new (1); - lock_unlock_sleeping = counter_new (1); - closure_fork (closure_lock_unlock (&lock_unlock, t, "D", verbose, - &mu, &value, 1, writer_duration, - lock_unlock_sleeping, lock_unlock_done)); - - counter_wait_for_zero (lock_unlock_sleeping); - nsync_time_sleep (NSYNC_CLOCK, delay_duration); /* give time for lock_unlock() thread to wait. 
*/ - - nsync_mu_assert_held (&mu); - nsync_mu_rassert_held (&mu); - - rlock_runlock_done = counter_new (read_threads); - rlock_runlock_sleeping = counter_new (read_threads); - for (i = 0; i != read_threads; i++) { - /* not guaranteed will complete after lock_unlock() above */ - closure_fork (closure_lock_unlock (&rlock_runlock, t, "E", verbose, - &mu, &value, -1, reader_duration, - rlock_runlock_sleeping, - rlock_runlock_done)); - } - - nsync_mu_assert_held (&mu); - nsync_mu_rassert_held (&mu); - - counter_wait_for_zero (rlock_runlock_sleeping); - nsync_time_sleep (NSYNC_CLOCK, delay_duration); /* time for rlock_runlock() threads to wait. */ - - nsync_mu_assert_held (&mu); - nsync_mu_rassert_held (&mu); - - if (counter_value (lock_unlock_done) == 0) { - TEST_FATAL (t, ("thread was able to acquire write lock " - "while other write lock held")); - } - if (counter_value (rlock_runlock_done) == 0) { - TEST_FATAL (t, ("thread was able to acquire read lock " - "while write lock held")); - } - - nsync_mu_assert_held (&mu); - nsync_mu_rassert_held (&mu); - - counter_inc (thread_done, 1); - /* Still can't get read lock while holding write lock. */ - closure_fork (closure_attempt_trylock (&attempt_rtrylock, t, "F", verbose, - &mu, 0, 1, &value, -1, thread_done)); - counter_wait_for_zero (thread_done); - - nsync_mu_assert_held (&mu); - nsync_mu_rassert_held (&mu); - - counter_inc (thread_done, 1); - /* Still can't get write lock while holding write lock. */ - closure_fork (closure_attempt_trylock (&attempt_trylock, t, "G", verbose, - &mu, 0, 1, &value, -1, thread_done)); - counter_wait_for_zero (thread_done); - - nsync_mu_assert_held (&mu); - nsync_mu_rassert_held (&mu); - - nsync_mu_unlock (&mu); - /* ==================================== */ - - read_start_time = nsync_time_now (NSYNC_CLOCK); - counter_wait_for_zero (lock_unlock_done); /* Now can get write lock. 
*/ - max_write_wait_exceeded += check_times (t, "H", start_time, - nsync_time_add (nsync_time_add (delay_duration, delay_duration), writer_duration), - slop_duration); - - counter_wait_for_zero (rlock_runlock_done); /* And now can get read lock again. */ - max_read_wait_exceeded += check_times (t, "I", read_start_time, - reader_duration, slop_duration); - - free (thread_done); - free (lock_unlock_done); - free (rlock_runlock_done); - free (lock_unlock_sleeping); - free (rlock_runlock_sleeping); - } - if (verbose) { - TEST_LOG (t, ("write lock max_write_wait_exceeded %d " - "max_read_wait_exceeded %d\n", - max_write_wait_exceeded, max_read_wait_exceeded)); - } - if (max_write_wait_exceeded > limit) { - TEST_ERROR (t, ("lock_unlock() took too long %d (more than %d) " - "times out of %d", - max_write_wait_exceeded, limit, loop_count)); - } - if (max_read_wait_exceeded > limit) { - TEST_ERROR (t, ("rlock_runlock() took too long %d (more than %d) " - "times out of %d", - max_read_wait_exceeded, limit, loop_count)); - } -} - -/* --------------------------------------- */ - -/* Measure the performance of an uncontended nsync_mu. */ -static void benchmark_mu_uncontended (testing t) { - int i; - int n = testing_n (t); - nsync_mu mu; - nsync_mu_init (&mu); - for (i = 0; i != n; i++) { - nsync_mu_lock (&mu); - nsync_mu_unlock (&mu); - } -} - -/* Return whether int *value is one. */ -static int int_is_1 (const void *value) { return (*(const int *)value == 1); } - -/* Return whether int *value is two. */ -static int int_is_2 (const void *value) { return (*(const int *)value == 2); } - -/* Return whether int *value is three. */ -static int int_is_3 (const void *value) { return (*(const int *)value == 3); } - -/* Set *value to 1, wait for it to become 2, then set it to 3. 
*value is under - *mu */ -static void waiter (nsync_mu *mu, int *value) { - nsync_mu_lock (mu); - *value = 1; - nsync_mu_wait (mu, &int_is_2, value, NULL); - *value = 3; - nsync_mu_unlock (mu); -} - -CLOSURE_DECL_BODY2 (waiter, nsync_mu *, int *) - -/* Measure the performance of an uncontended nsync_mu - with a blocked waiter. */ -static void benchmark_mu_uncontended_waiter (testing t) { - int i; - int n = testing_n (t); - nsync_mu mu; - int value = 0; - nsync_mu_init (&mu); - closure_fork (closure_waiter (&waiter, &mu, &value)); - nsync_mu_lock (&mu); - nsync_mu_wait (&mu, &int_is_1, &value, NULL); - nsync_mu_unlock (&mu); - for (i = 0; i != n; i++) { - nsync_mu_lock (&mu); - nsync_mu_unlock (&mu); - } - nsync_mu_lock (&mu); - value = 2; - nsync_mu_wait (&mu, &int_is_3, &value, NULL); - nsync_mu_unlock (&mu); -} - -/* Measure the performance of an uncontended nsync_mu - with a blocked waiter using nsync_mu_unlock_without_wakeup. */ -static void benchmark_mu_uncontended_no_wakeup (testing t) { - int i; - int n = testing_n (t); - nsync_mu mu; - int value = 0; - nsync_mu_init (&mu); - closure_fork (closure_waiter (&waiter, &mu, &value)); - nsync_mu_lock (&mu); - nsync_mu_wait (&mu, &int_is_1, &value, NULL); - nsync_mu_unlock (&mu); - for (i = 0; i != n; i++) { - nsync_mu_lock (&mu); - nsync_mu_unlock_without_wakeup (&mu); - } - nsync_mu_lock (&mu); - value = 2; - nsync_mu_wait (&mu, &int_is_3, &value, NULL); - nsync_mu_unlock (&mu); -} - -/* Measure the performance of an uncontended - nsync_mu in read mode. */ -static void benchmark_rmu_uncontended (testing t) { - int i; - int n = testing_n (t); - nsync_mu mu; - nsync_mu_init (&mu); - for (i = 0; i != n; i++) { - nsync_mu_rlock (&mu); - nsync_mu_runlock (&mu); - } -} - -/* Measure the performance of an uncontended nsync_mu - in read mode with a blocked waiter. 
*/ -static void benchmark_rmu_uncontended_waiter (testing t) { - int i; - int n = testing_n (t); - nsync_mu mu; - int value = 0; - nsync_mu_init (&mu); - closure_fork (closure_waiter (&waiter, &mu, &value)); - nsync_mu_lock (&mu); - nsync_mu_wait (&mu, &int_is_1, &value, NULL); - nsync_mu_unlock (&mu); - for (i = 0; i != n; i++) { - nsync_mu_rlock (&mu); - nsync_mu_runlock (&mu); - } - nsync_mu_lock (&mu); - value = 2; - nsync_mu_wait (&mu, &int_is_3, &value, NULL); - nsync_mu_unlock (&mu); -} - -/* Measure the performance of an uncontended pthread_mutex_t. */ -static void benchmark_mutex_uncontended (testing t) { - int i; - int n = testing_n (t); - pthread_mutex_t mu; - pthread_mutex_init (&mu, NULL); - for (i = 0; i != n; i++) { - pthread_mutex_lock (&mu); - pthread_mutex_unlock (&mu); - } - pthread_mutex_destroy (&mu); -} - -/* Measure the performance of an uncontended pthread_rwlock_t. */ -static void benchmark_wmutex_uncontended (testing t) { - int i; - int n = testing_n (t); - pthread_rwlock_t mu; - pthread_rwlock_init (&mu, NULL); - for (i = 0; i != n; i++) { - pthread_rwlock_wrlock (&mu); - pthread_rwlock_unlock (&mu); - } - pthread_rwlock_destroy (&mu); -} - -/* Measure the performance of an uncontended - pthread_rwlock_t in read mode. */ -static void benchmark_rmutex_uncontended (testing t) { - int i; - int n = testing_n (t); - pthread_rwlock_t mu; - pthread_rwlock_init (&mu, NULL); - for (i = 0; i != n; i++) { - pthread_rwlock_rdlock (&mu); - pthread_rwlock_unlock (&mu); - } - pthread_rwlock_destroy (&mu); -} - -/* --------------------------------------- - Benchmarks for contended locks. */ - -/* It's hard to write these as benchmark functions, since we wish to measure - throughput over an extended period (a second or two), rather than get the - latency of a few iterations. */ - -/* A contended_state represents state shared between threads - in the contended benchmarks. 
*/ -typedef struct contended_state_s { - testing t; - - /* locks to test */ - nsync_mu mu; - pthread_mutex_t mutex; - pthread_rwlock_t rwmutex; - int count; /* counter protected by a lock above */ - - nsync_mu start_done_mu; - int start; /* whether threads should start, under start_done_mu */ - int not_yet_done; /* threads not yet complete, under start_done_mu */ -} contended_state; - -static int contended_state_may_start (const void *v) { - return (((const contended_state *)v)->start); -} - -static int contended_state_all_done (const void *v) { - return (((const contended_state *)v)->not_yet_done == 0); -} - -/* Wait for cs.start to become non-zero, then loop, acquiring and - releasing mu on each iteration until cs.deadline is reached, then decrement - cs.not_yet_done. */ -static void contended_state_contend_loop (contended_state *cs, - void *mu, void (*lock) (void *), - void (*unlock) (void *)) { - int n = testing_n (cs->t); - int j; - int i; - nsync_mu_rlock (&cs->start_done_mu); - nsync_mu_wait (&cs->start_done_mu, &contended_state_may_start, cs, NULL); - nsync_mu_runlock (&cs->start_done_mu); - - for (j = 0; j < n; j += 10000) { - for (i = 0; i != 10000; i++) { - (*lock) (mu); - cs->count++; - (*unlock) (mu); - } - } - - nsync_mu_lock (&cs->start_done_mu); - cs->not_yet_done--; - nsync_mu_unlock (&cs->start_done_mu); -} - -typedef void (*func_any) (void *); -CLOSURE_DECL_BODY4 (contended_state_contend_loop, contended_state *, void *, func_any, func_any) - -/* Start the threads in a contended test, wait for them to finish, - and print the number of iterations achieved. 
*/ -static void contended_state_run_test (contended_state *cs, testing t, - void *mu, void (*lock) (void *), - void (*unlock) (void *)) { - int i; - cs->t = t; - cs->not_yet_done = 4; /* number of threads */ - cs->start = 0; - cs->count = 0; - for (i = 0; i != cs->not_yet_done; i++) { - closure_fork (closure_contended_state_contend_loop ( - &contended_state_contend_loop, cs, mu, lock, unlock)); - } - nsync_mu_lock (&cs->start_done_mu); - cs->start = 1; - nsync_mu_wait (&cs->start_done_mu, &contended_state_all_done, cs, NULL); - nsync_mu_unlock (&cs->start_done_mu); -} - -/* Measure the performance of highly contended - nsync_mu locks, with small critical sections. */ -static void benchmark_mu_contended (testing t) { - contended_state cs; - bzero ((void *) &cs, sizeof (cs)); - contended_state_run_test (&cs, t, &cs.mu, (void (*) (void*))&nsync_mu_lock, - (void (*) (void*))&nsync_mu_unlock); -} - -/* Measure the performance of highly contended - pthread_mutex_t locks, with small critical sections. */ -static void benchmark_mutex_contended (testing t) { - contended_state cs; - bzero ((void *) &cs, sizeof (cs)); - pthread_mutex_init (&cs.mutex, NULL); - contended_state_run_test (&cs, t, &cs.mutex, &void_pthread_mutex_lock, - &void_pthread_mutex_unlock); - pthread_mutex_destroy (&cs.mutex); -} - -/* Measure the performance of highly contended recursive - pthread_mutex_t locks, with small critical sections. */ -static void benchmark_xmutex_contended (testing t) { - contended_state cs; - pthread_mutexattr_t attr; - bzero ((void *) &cs, sizeof (cs)); - pthread_mutexattr_init (&attr); - pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE); - pthread_mutex_init (&cs.mutex, &attr); - pthread_mutexattr_destroy (&attr); - contended_state_run_test (&cs, t, &cs.mutex, &void_pthread_mutex_lock, - &void_pthread_mutex_unlock); - pthread_mutex_destroy (&cs.mutex); -} - -/* Measure the performance of highly contended - pthread_rwlock_t locks, with small critical sections. 
*/ -static void benchmark_wmutex_contended (testing t) { - contended_state cs; - bzero ((void *) &cs, sizeof (cs)); - pthread_rwlock_init (&cs.rwmutex, NULL); - contended_state_run_test (&cs, t, &cs.rwmutex, &void_pthread_rwlock_wrlock, - &void_pthread_rwlock_unlock); - pthread_rwlock_destroy (&cs.rwmutex); -} +#include "third_party/nsync/testing/mu_test.inc" int main (int argc, char *argv[]) { testing_base tb = testing_new (argc, argv, 0); TEST_RUN (tb, test_rlock); TEST_RUN (tb, test_mu_nthread); - TEST_RUN (tb, test_mutex_nthread); - TEST_RUN (tb, test_xmutex_nthread); - TEST_RUN (tb, test_rwmutex_nthread); - TEST_RUN (tb, test_try_mu_nthread); BENCHMARK_RUN (tb, benchmark_mu_contended); BENCHMARK_RUN (tb, benchmark_mutex_contended); @@ -1119,6 +31,7 @@ int main (int argc, char *argv[]) { BENCHMARK_RUN (tb, benchmark_mu_uncontended); BENCHMARK_RUN (tb, benchmark_rmu_uncontended); BENCHMARK_RUN (tb, benchmark_mutex_uncontended); + BENCHMARK_RUN (tb, benchmark_xmutex_uncontended); BENCHMARK_RUN (tb, benchmark_wmutex_uncontended); BENCHMARK_RUN (tb, benchmark_rmutex_uncontended); BENCHMARK_RUN (tb, benchmark_mu_uncontended_waiter); diff --git a/third_party/nsync/testing/mu_test.inc b/third_party/nsync/testing/mu_test.inc new file mode 100644 index 000000000..086520f2c --- /dev/null +++ b/third_party/nsync/testing/mu_test.inc @@ -0,0 +1,1119 @@ +/*-*- mode:c;indent-tabs-mode:t;c-basic-offset:8;tab-width:8;coding:utf-8 -*-│ +│ vi: set noet ft=c ts=8 sw=8 fenc=utf-8 :vi │ +╞══════════════════════════════════════════════════════════════════════════════╡ +│ Copyright 2016 Google Inc. │ +│ │ +│ Licensed under the Apache License, Version 2.0 (the "License"); │ +│ you may not use this file except in compliance with the License. 
│ +│ You may obtain a copy of the License at │ +│ │ +│ http://www.apache.org/licenses/LICENSE-2.0 │ +│ │ +│ Unless required by applicable law or agreed to in writing, software │ +│ distributed under the License is distributed on an "AS IS" BASIS, │ +│ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. │ +│ See the License for the specific language governing permissions and │ +│ limitations under the License. │ +╚─────────────────────────────────────────────────────────────────────────────*/ +#include "third_party/nsync/mu.h" +#include "libc/calls/calls.h" +#include "libc/str/str.h" +#include "libc/thread/thread.h" +#include "third_party/nsync/time.h" +#include "third_party/nsync/cv.h" +#include "third_party/nsync/mu_wait.h" +#include "third_party/nsync/testing/closure.h" +#include "third_party/nsync/testing/smprintf.h" +#include "third_party/nsync/testing/testing.h" +#include "third_party/nsync/testing/time_extra.h" + +/* The state shared between the threads in each of the tests below. */ +typedef struct test_data_s { + testing t; + int n_threads; /* Number of test threads; constant after init. */ + int loop_count; /* Iteration count for each test thread; constant after init */ + + /* mu_in_use protects i, id, loop_count, and finished_threads. */ + void *mu_in_use; /* points at mu, mutex, or rwmutex depending on which is in use. */ + void (*lock) (void *); /* operations on mu_in_use */ + void (*unlock) (void *); + + nsync_mu mu; + pthread_mutex_t mutex; + pthread_rwlock_t rwmutex; + + int i; /* counter incremented by test loops. */ + volatile int id; /* id of current lock-holding thread in some tests. */ + + nsync_cv done; /* Signalled when finished_threads==n_threads. */ + int finished_threads; /* Count of threads that have finished. */ +} test_data; + +/* Indicate that a thread has finished its operations on test_data + by incrementing td.finished_threads, and signal td.done when it reaches td.n_threads. 
+ See test_data_wait_for_all_threads(). */ +static void test_data_thread_finished (test_data *td) { + (*td->lock) (td->mu_in_use); + td->finished_threads++; + if (td->finished_threads == td->n_threads) { + nsync_cv_broadcast (&td->done); + } + (*td->unlock) (td->mu_in_use); +} + +/* Wait until all td.n_threads have called test_data_thread_finished(), + and then return. */ +static void test_data_wait_for_all_threads (test_data *td) { + (*td->lock) (td->mu_in_use); + while (td->finished_threads != td->n_threads) { + nsync_cv_wait_with_deadline_generic (&td->done, td->mu_in_use, + td->lock, td->unlock, + NSYNC_CLOCK, + nsync_time_no_deadline, NULL); + } + (*td->unlock) (td->mu_in_use); +} + +/* --------------------------------------- */ + +/* The body of each thread executed by test_mu_nthread() + and test_mutex_nthread. + *td represents the test data that the threads share, and id is an integer + unique to each test thread. */ +static void counting_loop (test_data *td, int id) { + int n = td->loop_count; + int i = 0; + for (i = 0; i != n; i++) { + (*td->lock) (td->mu_in_use); + td->id = id; + td->i++; + if (td->id != id) { + testing_panic ("td->id != id"); + } + (*td->unlock) (td->mu_in_use); + } + test_data_thread_finished (td); +} + +CLOSURE_DECL_BODY2 (counting, test_data *, int) + +/* Versions of nsync_mu_lock() and nsync_mu_unlock() that take "void *" + arguments, to avoid call through a function pointer of a different type, + which is undefined. */ +static void void_mu_lock (void *mu) { + nsync_mu_lock ((nsync_mu *) mu); +} +static void void_mu_unlock (void *mu) { + nsync_mu_unlock((nsync_mu *) mu); +} + +/* Create a few threads, each of which increments an + integer a fixed number of times, using an nsync_mu for mutual exclusion. + It checks that the integer is incremented the correct number of times. 
*/ +static void test_mu_nthread (testing t) { + int loop_count = 100000; + nsync_time deadline; + deadline = nsync_time_add (nsync_time_now (NSYNC_CLOCK), nsync_time_ms (1500)); + do { + int i; + test_data td; + bzero ((void *) &td, sizeof (td)); + td.t = t; + td.n_threads = 5; + td.loop_count = loop_count; + td.mu_in_use = &td.mu; + td.lock = &void_mu_lock; + td.unlock = &void_mu_unlock; + for (i = 0; i != td.n_threads; i++) { + closure_fork (closure_counting (&counting_loop, &td, i)); + } + test_data_wait_for_all_threads (&td); + if (td.i != td.n_threads*td.loop_count) { + TEST_FATAL (t, ("test_mu_nthread final count inconsistent: want %d, got %d", + td.n_threads*td.loop_count, td.i)); + } + loop_count *= 2; + } while (nsync_time_cmp (nsync_time_now (NSYNC_CLOCK), deadline) < 0); +} + +/* void pthread_mutex_lock */ +static void void_pthread_mutex_lock (void *mu) { + pthread_mutex_lock ((pthread_mutex_t *) mu); +} + +/* void pthread_mutex_unlock */ +static void void_pthread_mutex_unlock (void *mu) { + pthread_mutex_unlock ((pthread_mutex_t *) mu); +} + +/* Create a few threads, each of which increments an + integer a fixed number of times, using a pthread_mutex_t for mutual exclusion. + It checks that the integer is incremented the correct number of times. 
*/ +static void test_mutex_nthread (testing t) { + int loop_count = 100000; + nsync_time deadline; + deadline = nsync_time_add (nsync_time_now (NSYNC_CLOCK), nsync_time_ms (1500)); + do { + int i; + test_data td; + bzero ((void *) &td, sizeof (td)); + td.t = t; + td.n_threads = 5; + td.loop_count = loop_count; + td.mu_in_use = &td.mutex; + td.lock = &void_pthread_mutex_lock; + td.unlock = &void_pthread_mutex_unlock; + pthread_mutex_init (&td.mutex, NULL); + for (i = 0; i != td.n_threads; i++) { + closure_fork (closure_counting (&counting_loop, &td, i)); + } + test_data_wait_for_all_threads (&td); + if (td.i != td.n_threads*td.loop_count) { + TEST_FATAL (t, ("test_mutex_nthread final count inconsistent: want %d, got %d", + td.n_threads*td.loop_count, td.i)); + } + pthread_mutex_destroy (&td.mutex); + loop_count *= 2; + } while (nsync_time_cmp (nsync_time_now (NSYNC_CLOCK), deadline) < 0); +} + +/* Create a few threads, each of which increments an integer a fixed + number of times, using a recursive pthread_mutex_t for mutual exclusion. + It checks that the integer is incremented the correct number of times. 
*/
+static void test_xmutex_nthread (testing t) {
+	int loop_count = 100000;
+	nsync_time deadline;
+	deadline = nsync_time_add (nsync_time_now (NSYNC_CLOCK), nsync_time_ms (1500));
+	do {
+		int i;
+		test_data td;
+		pthread_mutexattr_t attr;
+		bzero ((void *) &td, sizeof (td));
+		td.t = t;
+		td.n_threads = 5;
+		td.loop_count = loop_count;
+		td.mu_in_use = &td.mutex;
+		td.lock = &void_pthread_mutex_lock;
+		td.unlock = &void_pthread_mutex_unlock;
+		pthread_mutexattr_init (&attr);
+		pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE);
+		pthread_mutex_init (&td.mutex, &attr);
+		pthread_mutexattr_destroy (&attr);
+		for (i = 0; i != td.n_threads; i++) {
+			closure_fork (closure_counting (&counting_loop, &td, i));
+		}
+		test_data_wait_for_all_threads (&td);
+		if (td.i != td.n_threads*td.loop_count) {
+			TEST_FATAL (t, ("test_xmutex_nthread final count inconsistent: want %d, got %d",
+					td.n_threads*td.loop_count, td.i));
+		}
+		pthread_mutex_destroy (&td.mutex);
+		loop_count *= 2;
+	} while (nsync_time_cmp (nsync_time_now (NSYNC_CLOCK), deadline) < 0);
+}
+
+/* void pthread_rwlock_wrlock */
+static void void_pthread_rwlock_wrlock (void *mu) {
+	pthread_rwlock_wrlock ((pthread_rwlock_t *) mu);
+}
+
+/* void pthread_rwlock_unlock */
+static void void_pthread_rwlock_unlock (void *mu) {
+	pthread_rwlock_unlock ((pthread_rwlock_t *) mu);
+}
+
+/* Create a few threads, each of which increments an
+   integer a fixed number of times, using a pthread_rwlock_t for mutual exclusion.
+   It checks that the integer is incremented the correct number of times.
*/ +static void test_rwmutex_nthread (testing t) { + int loop_count = 100000; + nsync_time deadline; + deadline = nsync_time_add (nsync_time_now (NSYNC_CLOCK), nsync_time_ms (1500)); + do { + int i; + test_data td; + bzero ((void *) &td, sizeof (td)); + td.t = t; + td.n_threads = 5; + td.loop_count = loop_count; + td.mu_in_use = &td.rwmutex; + td.lock = &void_pthread_rwlock_wrlock; + td.unlock = &void_pthread_rwlock_unlock; + pthread_rwlock_init (&td.rwmutex, NULL); + for (i = 0; i != td.n_threads; i++) { + closure_fork (closure_counting (&counting_loop, &td, i)); + } + test_data_wait_for_all_threads (&td); + if (td.i != td.n_threads*td.loop_count) { + TEST_FATAL (t, ("test_mutex_nthread final count inconsistent: want %d, got %d", + td.n_threads*td.loop_count, td.i)); + } + pthread_rwlock_destroy (&td.rwmutex); + loop_count *= 2; + } while (nsync_time_cmp (nsync_time_now (NSYNC_CLOCK), deadline) < 0); +} + +/* --------------------------------------- */ + +/* The body of each thread executed by test_try_mu_nthread(). + *td represents the test data that the threads share, and id is an integer + unique to each test thread. */ +static void counting_loop_try_mu (test_data *td, int id) { + int i; + int n = td->loop_count; + for (i = 0; i != n; i++) { + while (!nsync_mu_trylock (&td->mu)) { + pthread_yield (); + } + td->id = id; + td->i++; + if (td->id != id) { + testing_panic ("td->id != id"); + } + n = td->loop_count; + nsync_mu_unlock (&td->mu); + } + test_data_thread_finished (td); +} + +/* Test that acquiring an nsync_mu with nsync_mu_trylock() + using several threads provides mutual exclusion. 
*/ +static void test_try_mu_nthread (testing t) { + int loop_count = 100000; + nsync_time deadline; + deadline = nsync_time_add (nsync_time_now (NSYNC_CLOCK), nsync_time_ms (1500)); + do { + int i; + test_data td; + bzero ((void *) &td, sizeof (td)); + td.t = t; + td.n_threads = 5; + td.loop_count = loop_count; + td.mu_in_use = &td.mu; + td.lock = &void_mu_lock; + td.unlock = &void_mu_unlock; + for (i = 0; i != td.n_threads; i++) { + closure_fork (closure_counting (&counting_loop_try_mu, &td, i)); + } + test_data_wait_for_all_threads (&td); + if (td.i != td.n_threads*td.loop_count) { + TEST_FATAL (t, ("test_try_mu_nthread final count inconsistent: want %d, got %d", + td.n_threads*td.loop_count, td.i)); + } + loop_count *= 2; + } while (nsync_time_cmp (nsync_time_now (NSYNC_CLOCK), deadline) < 0); +} + +/* --------------------------------------- */ + +/* An integer protected by a mutex, and with an associated + condition variable that is signalled when the counter reaches 0. */ +typedef struct counter_s { + nsync_mu mu; /* protects value */ + int value; + nsync_cv cv; /* signalled when value becomes 0 */ +} counter; + +/* Return a counter with initial value "initial". */ +static counter *counter_new (int initial) { + counter *c = (counter *) malloc (sizeof (*c)); + bzero ((void *) c, sizeof (*c)); + c->value = initial; + return (c); +} + +/* Increment *c by "increment". */ +static void counter_inc (counter *c, int increment) { + if (increment != 0) { + nsync_mu_lock (&c->mu); + c->value += increment; + if (c->value == 0) { + nsync_cv_broadcast (&c->cv); + } + nsync_mu_unlock (&c->mu); + } +} + +/* Wait on *c's condition variable until the counter + becomes 0, or abs_deadline is reached. 
*/ +static int counter_wait_for_zero_with_deadline (counter *c, nsync_time abs_deadline) { + int value; + nsync_mu_rlock (&c->mu); + while (c->value != 0 && + nsync_cv_wait_with_deadline (&c->cv, &c->mu, NSYNC_CLOCK, abs_deadline, NULL) == 0) { + } + value = c->value; + nsync_mu_runlock (&c->mu); + return (value); +} + +/* Wait on *c's condition variable until the counter becomes 0. */ +static void counter_wait_for_zero (counter *c) { + int value = counter_wait_for_zero_with_deadline (c, nsync_time_no_deadline); + if (value != 0) { + testing_panic (smprintf ("wait_for_zero() about to return with " + "non-zero value %d", value)); + } +} + +/* Return the current value of *c. */ +static int counter_value (counter *c) { + int value; + nsync_mu_rlock (&c->mu); + value = c->value; + nsync_mu_runlock (&c->mu); + return (value); +} + +/* --------------------------------------- */ + +CLOSURE_DECL_BODY9 (attempt_trylock, testing , const char *, int, nsync_mu *, + int, int, int *, int, counter *) + +/* Call nsync_mu_trylock(), and compares the result to expected_acquire. + If the lock was acquired, then: + - if expected_value != -1, compare *value against expected_value. + - increment *value. + - if release is non-zero, release the lock before returning. + In any case, the counter *done is decremented. 
*/ +static void attempt_trylock (testing t, const char *id, int verbose, + nsync_mu *mu, int expected_acquire, int release, + int *value, int expected_value, counter *done) { + int acquired = nsync_mu_trylock (mu); + if (acquired != expected_acquire) { + testing_panic (smprintf ("attempt_trylock %s: expected " + "nsync_mu_trylock() to return %d but got %d", + id, expected_acquire, acquired)); + } + if (verbose) { + TEST_LOG (t, ("attempt_trylock %s %d\n", id, acquired)); + } + if (acquired) { + nsync_mu_assert_held (mu); + if (expected_value != -1 && *value != expected_value) { + testing_panic (smprintf ("attempt_trylock %s expected " + "value %d, *value=%d", + id, expected_value, *value)); + } + (*value)++; + if (verbose) { + TEST_LOG (t, ("attempt_trylock %s incremented value to %d\n", id, *value)); + } + if (release) { + nsync_mu_unlock (mu); + } + } + counter_inc (done, -1); +} + +/* Call nsync_mu_rtrylock(), and compare the result to expected_acquire. + If the lock was acquired, then: + - if expected_value != -1, compare *value against expected_value. + - if release is non-zero, release the lock before returning. + In any case, decrement *done. 
*/ +static void attempt_rtrylock (testing t, const char *id, int verbose, + nsync_mu *mu, int expected_acquire, int release, + int *value, int expected_value, counter *done) { + int acquired = nsync_mu_rtrylock (mu); + if (acquired != expected_acquire) { + testing_panic (smprintf ("attempt_rtrylock %s: expected " + "nsync_mu_rtrylock() to return %d but got %d", + id, expected_acquire, acquired)); + } + if (verbose) { + TEST_LOG (t, ("attempt_rtrylock %s %d\n", id, acquired)); + } + if (acquired) { + nsync_mu_rassert_held (mu); + if (expected_value != -1 && *value != expected_value) { + testing_panic (smprintf ("attempt_rtrylock %s expected " + "value %d, *value=%d", + id, expected_value, *value)); + } + if (release) { + nsync_mu_runlock (mu); + } + } + counter_inc (done, -1); +} + +CLOSURE_DECL_BODY9 (lock_unlock, testing, const char *, int, nsync_mu *, + int *, int, nsync_time, counter *, counter *) + +/* First acquire *mu, then: + - if expected_value != -1, compare *value against expected_value. + - increment *value. + - sleep for "sleep". + Then release *mu and decrement *done. */ +static void lock_unlock (testing t, const char *id, int verbose, nsync_mu *mu, int *value, + int expected_value, nsync_time sleep, counter *sleeping, counter *done) { + if (verbose) { + TEST_LOG (t, ("lock_unlock %s\n", id)); + } + if (sleeping != NULL) { + counter_inc (sleeping, -1); + } + nsync_mu_lock (mu); + nsync_mu_assert_held (mu); + if (expected_value != -1 && *value != expected_value) { + testing_panic (smprintf ("lock_unlock %s expected " + "value %d, *value=%d", + id, expected_value, *value)); + } + (*value)++; + if (verbose) { + TEST_LOG (t, ("lock_unlock %s incremented value to %d\n", id, *value)); + } + nsync_time_sleep (NSYNC_CLOCK, sleep); + nsync_mu_unlock (mu); + counter_inc (done, -1); +} + +/* First acquire *mu in read mode, then: + - if expected_value != -1, compare *value against expected_value. + - sleep for "sleep". + Then release *mu and decrement *done. 
*/ +static void rlock_runlock (testing t, const char *id, int verbose, nsync_mu *mu, + int *value, int expected_value, nsync_time sleep, + counter *sleeping, counter *done) { + if (verbose) { + TEST_LOG (t, ("rlock_runlock %s\n", id)); + } + if (sleeping != NULL) { + counter_inc (sleeping, -1); + } + nsync_mu_rlock (mu); + nsync_mu_rassert_held (mu); + if (expected_value != -1 && *value != expected_value) { + testing_panic (smprintf ("rlock_runlock %s expected " + "value %d, *value=%d", id, expected_value, *value)); + } + nsync_time_sleep (NSYNC_CLOCK, sleep); + nsync_mu_runlock (mu); + counter_inc (done, -1); +} + +/* Check that the time since start_time is between expected_duration-1ms. + If the time exceeds expected_duration+slop_duration, return 1, else 0. */ +static int check_times (testing t, const char *id, nsync_time start_time, + nsync_time expected_duration, nsync_time slop_duration) { + int exceeds_count = 0; + nsync_time now; + nsync_time measured_duration; + now = nsync_time_now (NSYNC_CLOCK); + measured_duration = nsync_time_sub (now, start_time); + if (nsync_time_cmp (measured_duration, + nsync_time_sub (expected_duration, nsync_time_ms (5))) < 0) { + char *m_str = nsync_time_str (measured_duration, 2); + char *e_str = nsync_time_str (expected_duration, 2); + TEST_ERROR (t, ("check_times %s too short a delay: %s instead of %s", + id, m_str, e_str)); + free (m_str); + free (e_str); + } + if (nsync_time_cmp (nsync_time_add (expected_duration, slop_duration), measured_duration) < 0) { + exceeds_count++; + } + return (exceeds_count); +} + +/* Check the operation of nsync_mu as a reader/writer lock. 
*/ +static void test_rlock (testing t) { + int loop; + int i; + int max_write_wait_exceeded; + int max_read_wait_exceeded; + nsync_time time_unit; + nsync_time slop_duration; + nsync_time delay_duration; + nsync_time writer_duration; + nsync_time reader_duration; + static const int loop_count = 5; + static const int read_threads = 3; + static const int limit = 3; + static const int verbose = 0; + max_write_wait_exceeded = 0; + max_read_wait_exceeded = 0; + + time_unit = nsync_time_ms (100); + slop_duration = nsync_time_add (nsync_time_add (time_unit, time_unit), time_unit); + delay_duration = time_unit; + writer_duration = time_unit; + reader_duration = nsync_time_add (time_unit, time_unit); + + max_write_wait_exceeded = 0; + max_read_wait_exceeded = 0; + for (loop = 0; loop != loop_count; loop++) { + counter *lock_unlock_sleeping; + counter *rlock_runlock_sleeping; + counter *lock_unlock_done; + counter *rlock_runlock_done; + nsync_time read_start_time; + nsync_mu mu; + int value = 0; + counter *thread_done; + + nsync_time start_time; + nsync_mu_init (&mu); + start_time = nsync_time_now (NSYNC_CLOCK); + + /* ------------------------------------ */ + /* Acquire lock with nsync_mu_rtrylock(). This thread will + hold a read lock until the next line with =====. */ + thread_done = counter_new (1); + attempt_rtrylock (t, "a", verbose, &mu, 1, 0, &value, 0, thread_done); + counter_wait_for_zero (thread_done); + + nsync_mu_rassert_held (&mu); + + counter_inc (thread_done, 1); + /* Can get read lock holding read lock. */ + closure_fork (closure_attempt_trylock (&attempt_rtrylock, + t, "b", verbose, &mu, 1, 1, &value, 0, thread_done)); + counter_wait_for_zero (thread_done); + + nsync_mu_rassert_held (&mu); + + counter_inc (thread_done, 1); + /* Can't get write lock holding read lock. 
*/ + closure_fork (closure_attempt_trylock (&attempt_trylock, t, "c", verbose, + &mu, 0, 1, &value, -1, thread_done)); + counter_wait_for_zero (thread_done); + + if (!nsync_mu_is_reader (&mu)) { + TEST_FATAL(t, ("expected mu held in reader mode")); + } + + counter_inc (thread_done, 1); + closure_fork (closure_lock_unlock (&rlock_runlock, t, "d", verbose, + &mu, &value, 0, nsync_time_zero /*no delay*/, + NULL, thread_done)); + counter_wait_for_zero (thread_done); + + nsync_mu_rassert_held (&mu); + + lock_unlock_done = counter_new (1); + lock_unlock_sleeping = counter_new (1); + closure_fork (closure_lock_unlock (&lock_unlock, t, "e", verbose, + &mu, &value, 0, writer_duration, + lock_unlock_sleeping, lock_unlock_done)); + + counter_wait_for_zero (lock_unlock_sleeping); + nsync_time_sleep (NSYNC_CLOCK, delay_duration); /* give time for lock_unlock() thread to wait. */ + + nsync_mu_rassert_held (&mu); + + rlock_runlock_done = counter_new (read_threads); + rlock_runlock_sleeping = counter_new (read_threads); + for (i = 0; i != read_threads; i++) { + /* read lock will be acquired after lock_unlock() completes */ + closure_fork (closure_lock_unlock (&rlock_runlock, t, "f", verbose, + &mu, &value, 1, reader_duration, + rlock_runlock_sleeping, + rlock_runlock_done)); + } + + nsync_mu_rassert_held (&mu); + + counter_wait_for_zero (rlock_runlock_sleeping); + nsync_time_sleep (NSYNC_CLOCK, delay_duration); /* time for rlock_runlock() threads to wait. */ + + nsync_mu_rassert_held (&mu); + + if (counter_value (lock_unlock_done) == 0) { + TEST_FATAL (t, ("thread was able to acquire write lock while read lock held")); + } + if (counter_value (rlock_runlock_done) == 0) { + TEST_FATAL (t, ("thread was able to acquire read lock with " + "other reader and waiting writer")); + } + + nsync_mu_rassert_held (&mu); + + counter_inc (thread_done, 1); + /* Still can't get write lock. 
*/ + closure_fork (closure_attempt_trylock (&attempt_trylock, t, "g", verbose, + &mu, 0, 1, &value, -1, thread_done)); + counter_wait_for_zero (thread_done); + + counter_inc (thread_done, 1); + /* Now can't get read lock because a writer is waiting. */ + closure_fork (closure_attempt_trylock (&attempt_rtrylock, t, "h", verbose, + &mu, 0, 1, &value, -1, thread_done)); + counter_wait_for_zero (thread_done); + + nsync_mu_runlock (&mu); + /* ==================================== */ + + read_start_time = nsync_time_now (NSYNC_CLOCK); + counter_wait_for_zero (lock_unlock_done); /* Now can get write lock. */ + max_write_wait_exceeded += check_times (t, "i", start_time, + nsync_time_add (nsync_time_add (delay_duration, delay_duration), writer_duration), + slop_duration); + + counter_wait_for_zero (rlock_runlock_done); /* And now an get read lock again. */ + max_read_wait_exceeded += check_times (t, "j", read_start_time, + reader_duration, slop_duration); + + free (thread_done); + free (lock_unlock_done); + free (rlock_runlock_done); + free (lock_unlock_sleeping); + free (rlock_runlock_sleeping); + } + if (verbose) { + TEST_LOG (t, ("read lock max_write_wait_exceeded %d max_read_wait_exceeded %d\n", + max_write_wait_exceeded, max_read_wait_exceeded)); + } + if (max_write_wait_exceeded > limit) { + TEST_ERROR (t, ("lock_unlock() took too long %d " + "(more than %d) times out of %d", + max_write_wait_exceeded, limit, loop_count)); + } + if (max_read_wait_exceeded > limit) { + TEST_ERROR (t, ("rlock_runlock() took too long %d " + "(more than %d) times out of %d", + max_read_wait_exceeded, limit, loop_count)); + } + + max_write_wait_exceeded = 0; + max_read_wait_exceeded = 0; + for (loop = 0; loop != loop_count; loop++) { + counter *lock_unlock_sleeping; + counter *rlock_runlock_sleeping; + counter *lock_unlock_done; + counter *rlock_runlock_done; + nsync_time read_start_time; + nsync_mu mu; + int value = 0; + counter *thread_done; + + nsync_time start_time; + + nsync_mu_init 
(&mu); + start_time = nsync_time_now (NSYNC_CLOCK); + + /* ------------------------------------ */ + /* Acquire lock with nsync_mu_trylock(). This thread will hold + a write lock until the next line with =====. */ + thread_done = counter_new (1); + attempt_trylock (t, "A", verbose, &mu, 1, 0, &value, 0, thread_done); + counter_wait_for_zero (thread_done); + + nsync_mu_assert_held (&mu); + nsync_mu_rassert_held (&mu); + + counter_inc (thread_done, 1); + /* Can't get read lock while holding write lock. */ + closure_fork (closure_attempt_trylock (&attempt_rtrylock, t, "B", verbose, + &mu, 0, 1, &value, -1, thread_done)); + counter_wait_for_zero (thread_done); + + if (nsync_mu_is_reader (&mu)) { + TEST_FATAL (t, ("expected mu held in write mode")); + } + nsync_mu_assert_held (&mu); + nsync_mu_rassert_held (&mu); + + counter_inc (thread_done, 1); + /* Can't get write lock while holding write lock. */ + closure_fork (closure_attempt_trylock (&attempt_trylock, t, "C", verbose, + &mu, 0, 1, &value, -1, thread_done)); + counter_wait_for_zero (thread_done); + + nsync_mu_assert_held (&mu); + nsync_mu_rassert_held (&mu); + + lock_unlock_done = counter_new (1); + lock_unlock_sleeping = counter_new (1); + closure_fork (closure_lock_unlock (&lock_unlock, t, "D", verbose, + &mu, &value, 1, writer_duration, + lock_unlock_sleeping, lock_unlock_done)); + + counter_wait_for_zero (lock_unlock_sleeping); + nsync_time_sleep (NSYNC_CLOCK, delay_duration); /* give time for lock_unlock() thread to wait. 
*/ + + nsync_mu_assert_held (&mu); + nsync_mu_rassert_held (&mu); + + rlock_runlock_done = counter_new (read_threads); + rlock_runlock_sleeping = counter_new (read_threads); + for (i = 0; i != read_threads; i++) { + /* not guaranteed will complete after lock_unlock() above */ + closure_fork (closure_lock_unlock (&rlock_runlock, t, "E", verbose, + &mu, &value, -1, reader_duration, + rlock_runlock_sleeping, + rlock_runlock_done)); + } + + nsync_mu_assert_held (&mu); + nsync_mu_rassert_held (&mu); + + counter_wait_for_zero (rlock_runlock_sleeping); + nsync_time_sleep (NSYNC_CLOCK, delay_duration); /* time for rlock_runlock() threads to wait. */ + + nsync_mu_assert_held (&mu); + nsync_mu_rassert_held (&mu); + + if (counter_value (lock_unlock_done) == 0) { + TEST_FATAL (t, ("thread was able to acquire write lock " + "while other write lock held")); + } + if (counter_value (rlock_runlock_done) == 0) { + TEST_FATAL (t, ("thread was able to acquire read lock " + "while write lock held")); + } + + nsync_mu_assert_held (&mu); + nsync_mu_rassert_held (&mu); + + counter_inc (thread_done, 1); + /* Still can't get read lock while holding write lock. */ + closure_fork (closure_attempt_trylock (&attempt_rtrylock, t, "F", verbose, + &mu, 0, 1, &value, -1, thread_done)); + counter_wait_for_zero (thread_done); + + nsync_mu_assert_held (&mu); + nsync_mu_rassert_held (&mu); + + counter_inc (thread_done, 1); + /* Still can't get write lock while holding write lock. */ + closure_fork (closure_attempt_trylock (&attempt_trylock, t, "G", verbose, + &mu, 0, 1, &value, -1, thread_done)); + counter_wait_for_zero (thread_done); + + nsync_mu_assert_held (&mu); + nsync_mu_rassert_held (&mu); + + nsync_mu_unlock (&mu); + /* ==================================== */ + + read_start_time = nsync_time_now (NSYNC_CLOCK); + counter_wait_for_zero (lock_unlock_done); /* Now can get write lock. 
*/ + max_write_wait_exceeded += check_times (t, "H", start_time, + nsync_time_add (nsync_time_add (delay_duration, delay_duration), writer_duration), + slop_duration); + + counter_wait_for_zero (rlock_runlock_done); /* And now can get read lock again. */ + max_read_wait_exceeded += check_times (t, "I", read_start_time, + reader_duration, slop_duration); + + free (thread_done); + free (lock_unlock_done); + free (rlock_runlock_done); + free (lock_unlock_sleeping); + free (rlock_runlock_sleeping); + } + if (verbose) { + TEST_LOG (t, ("write lock max_write_wait_exceeded %d " + "max_read_wait_exceeded %d\n", + max_write_wait_exceeded, max_read_wait_exceeded)); + } + if (max_write_wait_exceeded > limit) { + TEST_ERROR (t, ("lock_unlock() took too long %d (more than %d) " + "times out of %d", + max_write_wait_exceeded, limit, loop_count)); + } + if (max_read_wait_exceeded > limit) { + TEST_ERROR (t, ("rlock_runlock() took too long %d (more than %d) " + "times out of %d", + max_read_wait_exceeded, limit, loop_count)); + } +} + +/* --------------------------------------- */ + +/* Measure the performance of an uncontended nsync_mu. */ +static void benchmark_mu_uncontended (testing t) { + int i; + int n = testing_n (t); + nsync_mu mu; + nsync_mu_init (&mu); + for (i = 0; i != n; i++) { + nsync_mu_lock (&mu); + nsync_mu_unlock (&mu); + } +} + +/* Return whether int *value is one. */ +static int int_is_1 (const void *value) { return (*(const int *)value == 1); } + +/* Return whether int *value is two. */ +static int int_is_2 (const void *value) { return (*(const int *)value == 2); } + +/* Return whether int *value is three. */ +static int int_is_3 (const void *value) { return (*(const int *)value == 3); } + +/* Set *value to 1, wait for it to become 2, then set it to 3. 
*value is under + *mu */ +static void waiter (nsync_mu *mu, int *value) { + nsync_mu_lock (mu); + *value = 1; + nsync_mu_wait (mu, &int_is_2, value, NULL); + *value = 3; + nsync_mu_unlock (mu); +} + +CLOSURE_DECL_BODY2 (waiter, nsync_mu *, int *) + +/* Measure the performance of an uncontended nsync_mu + with a blocked waiter. */ +static void benchmark_mu_uncontended_waiter (testing t) { + int i; + int n = testing_n (t); + nsync_mu mu; + int value = 0; + nsync_mu_init (&mu); + closure_fork (closure_waiter (&waiter, &mu, &value)); + nsync_mu_lock (&mu); + nsync_mu_wait (&mu, &int_is_1, &value, NULL); + nsync_mu_unlock (&mu); + for (i = 0; i != n; i++) { + nsync_mu_lock (&mu); + nsync_mu_unlock (&mu); + } + nsync_mu_lock (&mu); + value = 2; + nsync_mu_wait (&mu, &int_is_3, &value, NULL); + nsync_mu_unlock (&mu); +} + +/* Measure the performance of an uncontended nsync_mu + with a blocked waiter using nsync_mu_unlock_without_wakeup. */ +static void benchmark_mu_uncontended_no_wakeup (testing t) { + int i; + int n = testing_n (t); + nsync_mu mu; + int value = 0; + nsync_mu_init (&mu); + closure_fork (closure_waiter (&waiter, &mu, &value)); + nsync_mu_lock (&mu); + nsync_mu_wait (&mu, &int_is_1, &value, NULL); + nsync_mu_unlock (&mu); + for (i = 0; i != n; i++) { + nsync_mu_lock (&mu); + nsync_mu_unlock_without_wakeup (&mu); + } + nsync_mu_lock (&mu); + value = 2; + nsync_mu_wait (&mu, &int_is_3, &value, NULL); + nsync_mu_unlock (&mu); +} + +/* Measure the performance of an uncontended + nsync_mu in read mode. */ +static void benchmark_rmu_uncontended (testing t) { + int i; + int n = testing_n (t); + nsync_mu mu; + nsync_mu_init (&mu); + for (i = 0; i != n; i++) { + nsync_mu_rlock (&mu); + nsync_mu_runlock (&mu); + } +} + +/* Measure the performance of an uncontended nsync_mu + in read mode with a blocked waiter. 
*/ +static void benchmark_rmu_uncontended_waiter (testing t) { + int i; + int n = testing_n (t); + nsync_mu mu; + int value = 0; + nsync_mu_init (&mu); + closure_fork (closure_waiter (&waiter, &mu, &value)); + nsync_mu_lock (&mu); + nsync_mu_wait (&mu, &int_is_1, &value, NULL); + nsync_mu_unlock (&mu); + for (i = 0; i != n; i++) { + nsync_mu_rlock (&mu); + nsync_mu_runlock (&mu); + } + nsync_mu_lock (&mu); + value = 2; + nsync_mu_wait (&mu, &int_is_3, &value, NULL); + nsync_mu_unlock (&mu); +} + +/* Measure the performance of an uncontended pthread_mutex_t. */ +static void benchmark_mutex_uncontended (testing t) { + int i; + int n = testing_n (t); + pthread_mutex_t mu; + pthread_mutex_init (&mu, NULL); + for (i = 0; i != n; i++) { + pthread_mutex_lock (&mu); + pthread_mutex_unlock (&mu); + } + pthread_mutex_destroy (&mu); +} + +/* Measure the performance of an uncontended recursive pthread_mutex_t. */ +static void benchmark_xmutex_uncontended (testing t) { + int i; + int n = testing_n (t); + pthread_mutex_t mu; + pthread_mutexattr_t attr; + pthread_mutexattr_init (&attr); + pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE); + pthread_mutex_init (&mu, &attr); + pthread_mutexattr_destroy (&attr); + for (i = 0; i != n; i++) { + pthread_mutex_lock (&mu); + pthread_mutex_unlock (&mu); + } + pthread_mutex_destroy (&mu); +} + +/* Measure the performance of an uncontended pthread_rwlock_t. */ +static void benchmark_wmutex_uncontended (testing t) { + int i; + int n = testing_n (t); + pthread_rwlock_t mu; + pthread_rwlock_init (&mu, NULL); + for (i = 0; i != n; i++) { + pthread_rwlock_wrlock (&mu); + pthread_rwlock_unlock (&mu); + } + pthread_rwlock_destroy (&mu); +} + +/* Measure the performance of an uncontended + pthread_rwlock_t in read mode. 
*/ +static void benchmark_rmutex_uncontended (testing t) { + int i; + int n = testing_n (t); + pthread_rwlock_t mu; + pthread_rwlock_init (&mu, NULL); + for (i = 0; i != n; i++) { + pthread_rwlock_rdlock (&mu); + pthread_rwlock_unlock (&mu); + } + pthread_rwlock_destroy (&mu); +} + +/* --------------------------------------- + Benchmarks for contended locks. */ + +/* It's hard to write these as benchmark functions, since we wish to measure + throughput over an extended period (a second or two), rather than get the + latency of a few iterations. */ + +/* A contended_state represents state shared between threads + in the contended benchmarks. */ +typedef struct contended_state_s { + testing t; + + /* locks to test */ + nsync_mu mu; + pthread_mutex_t mutex; + pthread_rwlock_t rwmutex; + int count; /* counter protected by a lock above */ + + nsync_mu start_done_mu; + int start; /* whether threads should start, under start_done_mu */ + int not_yet_done; /* threads not yet complete, under start_done_mu */ +} contended_state; + +static int contended_state_may_start (const void *v) { + return (((const contended_state *)v)->start); +} + +static int contended_state_all_done (const void *v) { + return (((const contended_state *)v)->not_yet_done == 0); +} + +/* Wait for cs.start to become non-zero, then loop, acquiring and + releasing mu on each iteration until cs.deadline is reached, then decrement + cs.not_yet_done. 
*/ +static void contended_state_contend_loop (contended_state *cs, + void *mu, void (*lock) (void *), + void (*unlock) (void *)) { + int n = testing_n (cs->t); + int j; + int i; + nsync_mu_rlock (&cs->start_done_mu); + nsync_mu_wait (&cs->start_done_mu, &contended_state_may_start, cs, NULL); + nsync_mu_runlock (&cs->start_done_mu); + + for (j = 0; j < n; j += 10000) { + for (i = 0; i != 10000; i++) { + (*lock) (mu); + cs->count++; + (*unlock) (mu); + } + } + + nsync_mu_lock (&cs->start_done_mu); + cs->not_yet_done--; + nsync_mu_unlock (&cs->start_done_mu); +} + +typedef void (*func_any) (void *); +CLOSURE_DECL_BODY4 (contended_state_contend_loop, contended_state *, void *, func_any, func_any) + +/* Start the threads in a contended test, wait for them to finish, + and print the number of iterations achieved. */ +static void contended_state_run_test (contended_state *cs, testing t, + void *mu, void (*lock) (void *), + void (*unlock) (void *)) { + int i; + cs->t = t; + cs->not_yet_done = 4; /* number of threads */ + cs->start = 0; + cs->count = 0; + for (i = 0; i != cs->not_yet_done; i++) { + closure_fork (closure_contended_state_contend_loop ( + &contended_state_contend_loop, cs, mu, lock, unlock)); + } + nsync_mu_lock (&cs->start_done_mu); + cs->start = 1; + nsync_mu_wait (&cs->start_done_mu, &contended_state_all_done, cs, NULL); + nsync_mu_unlock (&cs->start_done_mu); +} + +/* Measure the performance of highly contended + nsync_mu locks, with small critical sections. */ +static void benchmark_mu_contended (testing t) { + contended_state cs; + bzero ((void *) &cs, sizeof (cs)); + contended_state_run_test (&cs, t, &cs.mu, (void (*) (void*))&nsync_mu_lock, + (void (*) (void*))&nsync_mu_unlock); +} + +/* Measure the performance of highly contended + pthread_mutex_t locks, with small critical sections. 
*/ +static void benchmark_mutex_contended (testing t) { + contended_state cs; + bzero ((void *) &cs, sizeof (cs)); + pthread_mutex_init (&cs.mutex, NULL); + contended_state_run_test (&cs, t, &cs.mutex, &void_pthread_mutex_lock, + &void_pthread_mutex_unlock); + pthread_mutex_destroy (&cs.mutex); +} + +/* Measure the performance of highly contended recursive + pthread_mutex_t locks, with small critical sections. */ +static void benchmark_xmutex_contended (testing t) { + contended_state cs; + pthread_mutexattr_t attr; + bzero ((void *) &cs, sizeof (cs)); + pthread_mutexattr_init (&attr); + pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE); + pthread_mutex_init (&cs.mutex, &attr); + pthread_mutexattr_destroy (&attr); + contended_state_run_test (&cs, t, &cs.mutex, &void_pthread_mutex_lock, + &void_pthread_mutex_unlock); + pthread_mutex_destroy (&cs.mutex); +} + +/* Measure the performance of highly contended + pthread_rwlock_t locks, with small critical sections. */ +static void benchmark_wmutex_contended (testing t) { + contended_state cs; + bzero ((void *) &cs, sizeof (cs)); + pthread_rwlock_init (&cs.rwmutex, NULL); + contended_state_run_test (&cs, t, &cs.rwmutex, &void_pthread_rwlock_wrlock, + &void_pthread_rwlock_unlock); + pthread_rwlock_destroy (&cs.rwmutex); +} diff --git a/third_party/nsync/testing/mu_wait2_test.c b/third_party/nsync/testing/mu_wait2_test.c new file mode 100644 index 000000000..30f65a2b0 --- /dev/null +++ b/third_party/nsync/testing/mu_wait2_test.c @@ -0,0 +1,27 @@ +/*-*- mode:c;indent-tabs-mode:t;c-basic-offset:8;tab-width:8;coding:utf-8 -*-│ +│ vi: set noet ft=c ts=8 sw=8 fenc=utf-8 :vi │ +╞══════════════════════════════════════════════════════════════════════════════╡ +│ Copyright 2016 Google Inc. │ +│ │ +│ Licensed under the Apache License, Version 2.0 (the "License"); │ +│ you may not use this file except in compliance with the License. 
│ +│ You may obtain a copy of the License at │ +│ │ +│ http://www.apache.org/licenses/LICENSE-2.0 │ +│ │ +│ Unless required by applicable law or agreed to in writing, software │ +│ distributed under the License is distributed on an "AS IS" BASIS, │ +│ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. │ +│ See the License for the specific language governing permissions and │ +│ limitations under the License. │ +╚─────────────────────────────────────────────────────────────────────────────*/ +#include "third_party/nsync/testing/mu_wait_test.inc" + +int main (int argc, char *argv[]) { + testing_base tb = testing_new (argc, argv, 0); + TEST_RUN (tb, test_mu_producer_consumer0); + TEST_RUN (tb, test_mu_producer_consumer3); + TEST_RUN (tb, test_mu_producer_consumer4); + TEST_RUN (tb, test_mu_producer_consumer5); + return (testing_base_exit (tb)); +} diff --git a/third_party/nsync/testing/mu_wait3_test.c b/third_party/nsync/testing/mu_wait3_test.c new file mode 100644 index 000000000..37c9ee382 --- /dev/null +++ b/third_party/nsync/testing/mu_wait3_test.c @@ -0,0 +1,25 @@ +/*-*- mode:c;indent-tabs-mode:t;c-basic-offset:8;tab-width:8;coding:utf-8 -*-│ +│ vi: set noet ft=c ts=8 sw=8 fenc=utf-8 :vi │ +╞══════════════════════════════════════════════════════════════════════════════╡ +│ Copyright 2016 Google Inc. │ +│ │ +│ Licensed under the Apache License, Version 2.0 (the "License"); │ +│ you may not use this file except in compliance with the License. │ +│ You may obtain a copy of the License at │ +│ │ +│ http://www.apache.org/licenses/LICENSE-2.0 │ +│ │ +│ Unless required by applicable law or agreed to in writing, software │ +│ distributed under the License is distributed on an "AS IS" BASIS, │ +│ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. │ +│ See the License for the specific language governing permissions and │ +│ limitations under the License. 
│ +╚─────────────────────────────────────────────────────────────────────────────*/ +#include "third_party/nsync/testing/mu_wait_test.inc" + +int main (int argc, char *argv[]) { + testing_base tb = testing_new (argc, argv, 0); + TEST_RUN (tb, test_mu_producer_consumer6); + TEST_RUN (tb, test_mu_cancel); + return (testing_base_exit (tb)); +} diff --git a/third_party/nsync/testing/mu_wait_test.c b/third_party/nsync/testing/mu_wait_test.c index 93876f78c..955733856 100644 --- a/third_party/nsync/testing/mu_wait_test.c +++ b/third_party/nsync/testing/mu_wait_test.c @@ -15,333 +15,12 @@ │ See the License for the specific language governing permissions and │ │ limitations under the License. │ ╚─────────────────────────────────────────────────────────────────────────────*/ -#include "third_party/nsync/mu_wait.h" -#include "libc/errno.h" -#include "libc/str/str.h" -#include "third_party/nsync/time.h" -#include "third_party/nsync/mu.h" -#include "third_party/nsync/note.h" -#include "third_party/nsync/testing/closure.h" -#include "third_party/nsync/testing/smprintf.h" -#include "third_party/nsync/testing/testing.h" -#include "third_party/nsync/testing/time_extra.h" - -/* --------------------------- */ - -/* A FIFO queue with up to limit elements. - The storage for the queue expands as necessary up to limit. */ -typedef struct mu_queue_s { - int limit; /* max value of count---should not be changed after initialization */ - nsync_mu mu; /* protects fields below */ - int pos; /* index of first in-use element */ - int count; /* number of elements in use */ - void *data[1]; /* in use elements are data[pos, ..., (pos+count-1)%limit] */ -} mu_queue; - -/* Return a pointer to new mu_queue. 
*/ -static mu_queue *mu_queue_new (int limit) { - mu_queue *q; - int size = offsetof (struct mu_queue_s, data) + sizeof (q->data[0]) * limit; - q = (mu_queue *) malloc (size); - bzero ((void *) q, size); - q->limit = limit; - return (q); -} - -static int mu_queue_non_empty (const void *v) { - const mu_queue *q = (const mu_queue *) v; - return (q->count != 0); -} -static int mu_queue_non_full (const void *v) { - const mu_queue *q = (const mu_queue *) v; - return (q->count != q->limit); -} - -/* Add v to the end of the FIFO *q and return non-zero, or if the FIFO already - has limit elements and continues to do so until abs_deadline, do nothing and - return 0. */ -static int mu_queue_put (mu_queue *q, void *v, nsync_time abs_deadline) { - int added = 0; - nsync_mu_lock (&q->mu); - if (nsync_mu_wait_with_deadline (&q->mu, &mu_queue_non_full, - q, NULL, 0, abs_deadline, NULL) == 0) { - int i = q->pos + q->count; - if (q->count == q->limit) { - testing_panic ("q->count == q->limit"); - } - if (q->limit <= i) { - i -= q->limit; - } - q->data[i] = v; - q->count++; - added = 1; - } - nsync_mu_unlock (&q->mu); - return (added); -} - -/* Remove the first value from the front of the FIFO *q and return it, - or if the FIFO is empty and continues to be so until abs_deadline, - do nothing and return NULL. 
*/ -static void *mu_queue_get (mu_queue *q, nsync_time abs_deadline) { - void *v = NULL; - nsync_mu_lock (&q->mu); - if (nsync_mu_wait_with_deadline (&q->mu, &mu_queue_non_empty, - q, NULL, NSYNC_CLOCK, - abs_deadline, NULL) == 0) { - if (q->count == 0) { - testing_panic ("q->count == 0"); - } - v = q->data[q->pos]; - q->data[q->pos] = NULL; - q->pos++; - q->count--; - if (q->pos == q->limit) { - q->pos = 0; - } - } - nsync_mu_unlock (&q->mu); - return (v); -} - -/* --------------------------- */ - -static char ptr_to_int_c; -#define INT_TO_PTR(x) ((x) + &ptr_to_int_c) -#define PTR_TO_INT(p) (((char *) (p)) - &ptr_to_int_c) - -/* Put count integers on *q, in the sequence start*3, (start+1)*3, (start+2)*3, .... */ -static void producer_mu_n (testing t, mu_queue *q, int start, int count) { - int i; - for (i = 0; i != count; i++) { - if (!mu_queue_put (q, INT_TO_PTR ((start+i)*3), nsync_time_no_deadline)) { - TEST_FATAL (t, ("mu_queue_put() returned 0 with no deadline")); - } - } -} - -CLOSURE_DECL_BODY4 (producer_mu_n, testing , mu_queue *, int, int) - -/* Get count integers from *q, and check that they are in the - sequence start*3, (start+1)*3, (start+2)*3, .... */ -static void consumer_mu_n (testing t, mu_queue *q, int start, int count) { - int i; - for (i = 0; i != count; i++) { - void *v = mu_queue_get (q, nsync_time_no_deadline); - int x; - if (v == NULL) { - TEST_FATAL (t, ("mu_queue_get() returned 0 with no deadline")); - } - x = PTR_TO_INT (v); - if (x != (start+i)*3) { - TEST_FATAL (t, ("mu_queue_get() returned bad value; want %d, got %d", - (start+i)*3, x)); - } - } -} - -/* The number of elements passed from producer to consumer in the - test_mu_producer_consumer*() tests below. */ -#define MU_PRODUCER_CONSUMER_N (100000) - -/* Send a stream of integers from a producer thread to - a consumer thread via a queue with limit 10**0. 
*/ -static void test_mu_producer_consumer0 (testing t) { - mu_queue *q = mu_queue_new (1); - closure_fork (closure_producer_mu_n (&producer_mu_n, t, q, 0, MU_PRODUCER_CONSUMER_N)); - consumer_mu_n (t, q, 0, MU_PRODUCER_CONSUMER_N); - free (q); -} - -/* Send a stream of integers from a producer thread to - a consumer thread via a queue with limit 10**1. */ -static void test_mu_producer_consumer1 (testing t) { - mu_queue *q = mu_queue_new (10); - closure_fork (closure_producer_mu_n (&producer_mu_n, t, q, 0, MU_PRODUCER_CONSUMER_N)); - consumer_mu_n (t, q, 0, MU_PRODUCER_CONSUMER_N); - free (q); -} - -/* Send a stream of integers from a producer thread to - a consumer thread via a queue with limit 10**2. */ -static void test_mu_producer_consumer2 (testing t) { - mu_queue *q = mu_queue_new (100); - closure_fork (closure_producer_mu_n (&producer_mu_n, t, q, 0, MU_PRODUCER_CONSUMER_N)); - consumer_mu_n (t, q, 0, MU_PRODUCER_CONSUMER_N); - free (q); -} - -/* Send a stream of integers from a producer thread to - a consumer thread via a queue with limit 10**3. */ -static void test_mu_producer_consumer3 (testing t) { - mu_queue *q = mu_queue_new (1000); - closure_fork (closure_producer_mu_n (&producer_mu_n, t, q, 0, MU_PRODUCER_CONSUMER_N)); - consumer_mu_n (t, q, 0, MU_PRODUCER_CONSUMER_N); - free (q); -} - -/* Send a stream of integers from a producer thread to - a consumer thread via a queue with limit 10**4. */ -static void test_mu_producer_consumer4 (testing t) { - mu_queue *q = mu_queue_new (10000); - closure_fork (closure_producer_mu_n (&producer_mu_n, t, q, 0, MU_PRODUCER_CONSUMER_N)); - consumer_mu_n (t, q, 0, MU_PRODUCER_CONSUMER_N); - free (q); -} - -/* Send a stream of integers from a producer thread to - a consumer thread via a queue with limit 10**5. 
*/ -static void test_mu_producer_consumer5 (testing t) { - mu_queue *q = mu_queue_new (100000); - closure_fork (closure_producer_mu_n (&producer_mu_n, t, q, 0, MU_PRODUCER_CONSUMER_N)); - consumer_mu_n (t, q, 0, MU_PRODUCER_CONSUMER_N); - free (q); -} - -/* Send a stream of integers from a producer thread to - a consumer thread via a queue with limit 10**6. */ -static void test_mu_producer_consumer6 (testing t) { - mu_queue *q = mu_queue_new (1000000); - closure_fork (closure_producer_mu_n (&producer_mu_n, t, q, 0, MU_PRODUCER_CONSUMER_N)); - consumer_mu_n (t, q, 0, MU_PRODUCER_CONSUMER_N); - free (q); -} - -/* A perpetually false wait condition. */ -static int false_condition (const void *v) { - return (0); -} - -/* The following values control how aggressively we police the timeout. */ -#define TOO_EARLY_MS 1 -#define TOO_LATE_MS 100 /* longer, to accommodate scheduling delays */ -#define TOO_LATE_ALLOWED 25 /* number of iterations permitted to violate too_late */ - -/* Check timeouts on a mu wait_with_deadline(). 
*/ -static void test_mu_deadline (testing t) { - int i; - int too_late_violations; - nsync_mu mu; - nsync_time too_early; - nsync_time too_late; - - nsync_mu_init (&mu); - too_early = nsync_time_ms (TOO_EARLY_MS); - too_late = nsync_time_ms (TOO_LATE_MS); - too_late_violations = 0; - nsync_mu_lock (&mu); - for (i = 0; i != 50; i++) { - nsync_time end_time; - nsync_time start_time; - nsync_time expected_end_time; - start_time = nsync_time_now (NSYNC_CLOCK); - expected_end_time = nsync_time_add (start_time, nsync_time_ms (87)); - if (nsync_mu_wait_with_deadline (&mu, &false_condition, NULL, NULL, NSYNC_CLOCK, - expected_end_time, NULL) != ETIMEDOUT) { - TEST_FATAL (t, ("nsync_mu_wait() returned non-expired for a timeout")); - } - end_time = nsync_time_now (NSYNC_CLOCK); - if (nsync_time_cmp (end_time, nsync_time_sub (expected_end_time, too_early)) < 0) { - char *elapsed_str = nsync_time_str (nsync_time_sub (expected_end_time, end_time), 2); - TEST_ERROR (t, ("nsync_mu_wait() returned %s too early", elapsed_str)); - free (elapsed_str); - } - if (nsync_time_cmp (nsync_time_add (expected_end_time, too_late), end_time) < 0) { - too_late_violations++; - } - } - nsync_mu_unlock (&mu); - if (too_late_violations > TOO_LATE_ALLOWED) { - TEST_ERROR (t, ("nsync_mu_wait() returned too late %d (> %d) times", - too_late_violations, TOO_LATE_ALLOWED)); - } -} - -/* Check cancellations on a mu wait_with_deadline(). */ -static void test_mu_cancel (testing t) { - int i; - nsync_time future_time; - int too_late_violations; - nsync_mu mu; - nsync_time too_early; - nsync_time too_late; - - nsync_mu_init (&mu); - too_early = nsync_time_ms (TOO_EARLY_MS); - too_late = nsync_time_ms (TOO_LATE_MS); - - /* The loops below cancel after 87 milliseconds, like the timeout tests above. 
*/ - - future_time = nsync_time_add (nsync_time_now (NSYNC_CLOCK), nsync_time_ms (3600000)); /* test cancels with timeout */ - - too_late_violations = 0; - nsync_mu_lock (&mu); - for (i = 0; i != 50; i++) { - nsync_time end_time; - nsync_time start_time; - nsync_time expected_end_time; - int x; - nsync_note cancel; - - start_time = nsync_time_now (NSYNC_CLOCK); - expected_end_time = nsync_time_add (start_time, nsync_time_ms (87)); - cancel = nsync_note_new (NULL, NSYNC_CLOCK, expected_end_time); - - x = nsync_mu_wait_with_deadline (&mu, &false_condition, NULL, NULL, - NSYNC_CLOCK, future_time, cancel); - if (x != ECANCELED) { - TEST_FATAL (t, ("nsync_mu_wait() return non-cancelled (%d) for " - "a cancellation; expected %d", - x, ECANCELED)); - } - end_time = nsync_time_now (NSYNC_CLOCK); - if (nsync_time_cmp (end_time, nsync_time_sub (expected_end_time, too_early)) < 0) { - char *elapsed_str = nsync_time_str (nsync_time_sub (expected_end_time, end_time), 2); - TEST_ERROR (t, ("nsync_mu_wait() returned %s too early", elapsed_str)); - free (elapsed_str); - } - if (nsync_time_cmp (nsync_time_add (expected_end_time, too_late), end_time) < 0) { - too_late_violations++; - } - - /* Check that an already cancelled wait returns immediately. 
*/ - start_time = nsync_time_now (NSYNC_CLOCK); - x = nsync_mu_wait_with_deadline (&mu, &false_condition, NULL, NULL, - NSYNC_CLOCK, nsync_time_no_deadline, - cancel); - if (x != ECANCELED) { - TEST_FATAL (t, ("nsync_mu_wait() returned non-cancelled for a " - "cancellation; expected %d", - x, ECANCELED)); - } - end_time = nsync_time_now (NSYNC_CLOCK); - if (nsync_time_cmp (end_time, start_time) < 0) { - char *elapsed_str = nsync_time_str (nsync_time_sub (expected_end_time, end_time), 2); - TEST_ERROR (t, ("nsync_mu_wait() returned %s too early", elapsed_str)); - free (elapsed_str); - } - if (nsync_time_cmp (nsync_time_add (start_time, too_late), end_time) < 0) { - too_late_violations++; - } - nsync_note_free (cancel); - } - nsync_mu_unlock (&mu); - if (too_late_violations > TOO_LATE_ALLOWED) { - TEST_ERROR (t, ("nsync_mu_wait() returned too late %d (> %d) times", - too_late_violations, TOO_LATE_ALLOWED)); - } -} +#include "third_party/nsync/testing/mu_wait_test.inc" int main (int argc, char *argv[]) { testing_base tb = testing_new (argc, argv, 0); - TEST_RUN (tb, test_mu_producer_consumer0); TEST_RUN (tb, test_mu_producer_consumer1); TEST_RUN (tb, test_mu_producer_consumer2); - TEST_RUN (tb, test_mu_producer_consumer3); - TEST_RUN (tb, test_mu_producer_consumer4); - TEST_RUN (tb, test_mu_producer_consumer5); - TEST_RUN (tb, test_mu_producer_consumer6); TEST_RUN (tb, test_mu_deadline); - TEST_RUN (tb, test_mu_cancel); return (testing_base_exit (tb)); } diff --git a/third_party/nsync/testing/mu_wait_test.inc b/third_party/nsync/testing/mu_wait_test.inc new file mode 100644 index 000000000..5ab4dedcc --- /dev/null +++ b/third_party/nsync/testing/mu_wait_test.inc @@ -0,0 +1,333 @@ +/*-*- mode:c;indent-tabs-mode:t;c-basic-offset:8;tab-width:8;coding:utf-8 -*-│ +│ vi: set noet ft=c ts=8 sw=8 fenc=utf-8 :vi │ +╞══════════════════════════════════════════════════════════════════════════════╡ +│ Copyright 2016 Google Inc. 
│ +│ │ +│ Licensed under the Apache License, Version 2.0 (the "License"); │ +│ you may not use this file except in compliance with the License. │ +│ You may obtain a copy of the License at │ +│ │ +│ http://www.apache.org/licenses/LICENSE-2.0 │ +│ │ +│ Unless required by applicable law or agreed to in writing, software │ +│ distributed under the License is distributed on an "AS IS" BASIS, │ +│ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. │ +│ See the License for the specific language governing permissions and │ +│ limitations under the License. │ +╚─────────────────────────────────────────────────────────────────────────────*/ +#include "third_party/nsync/mu_wait.h" +#include "libc/errno.h" +#include "libc/str/str.h" +#include "third_party/nsync/time.h" +#include "third_party/nsync/mu.h" +#include "third_party/nsync/note.h" +#include "third_party/nsync/testing/closure.h" +#include "third_party/nsync/testing/smprintf.h" +#include "third_party/nsync/testing/testing.h" +#include "third_party/nsync/testing/time_extra.h" + +/* --------------------------- */ + +/* A FIFO queue with up to limit elements. + The storage for the queue expands as necessary up to limit. */ +typedef struct mu_queue_s { + int limit; /* max value of count---should not be changed after initialization */ + nsync_mu mu; /* protects fields below */ + int pos; /* index of first in-use element */ + int count; /* number of elements in use */ + void *data[1]; /* in use elements are data[pos, ..., (pos+count-1)%limit] */ +} mu_queue; + +/* Return a pointer to new mu_queue. 
*/ +static mu_queue *mu_queue_new (int limit) { + mu_queue *q; + int size = offsetof (struct mu_queue_s, data) + sizeof (q->data[0]) * limit; + q = (mu_queue *) malloc (size); + bzero ((void *) q, size); + q->limit = limit; + return (q); +} + +static int mu_queue_non_empty (const void *v) { + const mu_queue *q = (const mu_queue *) v; + return (q->count != 0); +} +static int mu_queue_non_full (const void *v) { + const mu_queue *q = (const mu_queue *) v; + return (q->count != q->limit); +} + +/* Add v to the end of the FIFO *q and return non-zero, or if the FIFO already + has limit elements and continues to do so until abs_deadline, do nothing and + return 0. */ +static int mu_queue_put (mu_queue *q, void *v, nsync_time abs_deadline) { + int added = 0; + nsync_mu_lock (&q->mu); + if (nsync_mu_wait_with_deadline (&q->mu, &mu_queue_non_full, + q, NULL, 0, abs_deadline, NULL) == 0) { + int i = q->pos + q->count; + if (q->count == q->limit) { + testing_panic ("q->count == q->limit"); + } + if (q->limit <= i) { + i -= q->limit; + } + q->data[i] = v; + q->count++; + added = 1; + } + nsync_mu_unlock (&q->mu); + return (added); +} + +/* Remove the first value from the front of the FIFO *q and return it, + or if the FIFO is empty and continues to be so until abs_deadline, + do nothing and return NULL. 
*/ +static void *mu_queue_get (mu_queue *q, nsync_time abs_deadline) { + void *v = NULL; + nsync_mu_lock (&q->mu); + if (nsync_mu_wait_with_deadline (&q->mu, &mu_queue_non_empty, + q, NULL, NSYNC_CLOCK, + abs_deadline, NULL) == 0) { + if (q->count == 0) { + testing_panic ("q->count == 0"); + } + v = q->data[q->pos]; + q->data[q->pos] = NULL; + q->pos++; + q->count--; + if (q->pos == q->limit) { + q->pos = 0; + } + } + nsync_mu_unlock (&q->mu); + return (v); +} + +/* --------------------------- */ + +static char ptr_to_int_c; +#define INT_TO_PTR(x) ((x) + &ptr_to_int_c) +#define PTR_TO_INT(p) (((char *) (p)) - &ptr_to_int_c) + +/* Put count integers on *q, in the sequence start*3, (start+1)*3, (start+2)*3, .... */ +static void producer_mu_n (testing t, mu_queue *q, int start, int count) { + int i; + for (i = 0; i != count; i++) { + if (!mu_queue_put (q, INT_TO_PTR ((start+i)*3), nsync_time_no_deadline)) { + TEST_FATAL (t, ("mu_queue_put() returned 0 with no deadline")); + } + } +} + +CLOSURE_DECL_BODY4 (producer_mu_n, testing , mu_queue *, int, int) + +/* Get count integers from *q, and check that they are in the + sequence start*3, (start+1)*3, (start+2)*3, .... */ +static void consumer_mu_n (testing t, mu_queue *q, int start, int count) { + int i; + for (i = 0; i != count; i++) { + void *v = mu_queue_get (q, nsync_time_no_deadline); + int x; + if (v == NULL) { + TEST_FATAL (t, ("mu_queue_get() returned 0 with no deadline")); + } + x = PTR_TO_INT (v); + if (x != (start+i)*3) { + TEST_FATAL (t, ("mu_queue_get() returned bad value; want %d, got %d", + (start+i)*3, x)); + } + } +} + +/* The number of elements passed from producer to consumer in the + test_mu_producer_consumer*() tests below. */ +#define MU_PRODUCER_CONSUMER_N (100000) + +/* Send a stream of integers from a producer thread to + a consumer thread via a queue with limit 10**0. 
*/ +static void test_mu_producer_consumer0 (testing t) { + mu_queue *q = mu_queue_new (1); + closure_fork (closure_producer_mu_n (&producer_mu_n, t, q, 0, MU_PRODUCER_CONSUMER_N)); + consumer_mu_n (t, q, 0, MU_PRODUCER_CONSUMER_N); + free (q); +} + +/* Send a stream of integers from a producer thread to + a consumer thread via a queue with limit 10**1. */ +static void test_mu_producer_consumer1 (testing t) { + mu_queue *q = mu_queue_new (10); + closure_fork (closure_producer_mu_n (&producer_mu_n, t, q, 0, MU_PRODUCER_CONSUMER_N)); + consumer_mu_n (t, q, 0, MU_PRODUCER_CONSUMER_N); + free (q); +} + +/* Send a stream of integers from a producer thread to + a consumer thread via a queue with limit 10**2. */ +static void test_mu_producer_consumer2 (testing t) { + mu_queue *q = mu_queue_new (100); + closure_fork (closure_producer_mu_n (&producer_mu_n, t, q, 0, MU_PRODUCER_CONSUMER_N)); + consumer_mu_n (t, q, 0, MU_PRODUCER_CONSUMER_N); + free (q); +} + +/* Send a stream of integers from a producer thread to + a consumer thread via a queue with limit 10**3. */ +static void test_mu_producer_consumer3 (testing t) { + mu_queue *q = mu_queue_new (1000); + closure_fork (closure_producer_mu_n (&producer_mu_n, t, q, 0, MU_PRODUCER_CONSUMER_N)); + consumer_mu_n (t, q, 0, MU_PRODUCER_CONSUMER_N); + free (q); +} + +/* Send a stream of integers from a producer thread to + a consumer thread via a queue with limit 10**4. */ +static void test_mu_producer_consumer4 (testing t) { + mu_queue *q = mu_queue_new (10000); + closure_fork (closure_producer_mu_n (&producer_mu_n, t, q, 0, MU_PRODUCER_CONSUMER_N)); + consumer_mu_n (t, q, 0, MU_PRODUCER_CONSUMER_N); + free (q); +} + +/* Send a stream of integers from a producer thread to + a consumer thread via a queue with limit 10**5. 
*/ +static void test_mu_producer_consumer5 (testing t) { + mu_queue *q = mu_queue_new (100000); + closure_fork (closure_producer_mu_n (&producer_mu_n, t, q, 0, MU_PRODUCER_CONSUMER_N)); + consumer_mu_n (t, q, 0, MU_PRODUCER_CONSUMER_N); + free (q); +} + +/* Send a stream of integers from a producer thread to + a consumer thread via a queue with limit 10**6. */ +static void test_mu_producer_consumer6 (testing t) { + mu_queue *q = mu_queue_new (1000000); + closure_fork (closure_producer_mu_n (&producer_mu_n, t, q, 0, MU_PRODUCER_CONSUMER_N)); + consumer_mu_n (t, q, 0, MU_PRODUCER_CONSUMER_N); + free (q); +} + +/* A perpetually false wait condition. */ +static int false_condition (const void *v) { + return (0); +} + +/* The following values control how aggressively we police the timeout. */ +#define TOO_EARLY_MS 1 +#define TOO_LATE_MS 100 /* longer, to accommodate scheduling delays */ +#define TOO_LATE_ALLOWED 25 /* number of iterations permitted to violate too_late */ + +/* Check timeouts on a mu wait_with_deadline(). 
*/ +static void test_mu_deadline (testing t) { + int i; + int too_late_violations; + nsync_mu mu; + nsync_time too_early; + nsync_time too_late; + + nsync_mu_init (&mu); + too_early = nsync_time_ms (TOO_EARLY_MS); + too_late = nsync_time_ms (TOO_LATE_MS); + too_late_violations = 0; + nsync_mu_lock (&mu); + for (i = 0; i != 50; i++) { + nsync_time end_time; + nsync_time start_time; + nsync_time expected_end_time; + start_time = nsync_time_now (NSYNC_CLOCK); + expected_end_time = nsync_time_add (start_time, nsync_time_ms (87)); + if (nsync_mu_wait_with_deadline (&mu, &false_condition, NULL, NULL, NSYNC_CLOCK, + expected_end_time, NULL) != ETIMEDOUT) { + TEST_FATAL (t, ("nsync_mu_wait() returned non-expired for a timeout")); + } + end_time = nsync_time_now (NSYNC_CLOCK); + if (nsync_time_cmp (end_time, nsync_time_sub (expected_end_time, too_early)) < 0) { + char *elapsed_str = nsync_time_str (nsync_time_sub (expected_end_time, end_time), 2); + TEST_ERROR (t, ("nsync_mu_wait() returned %s too early", elapsed_str)); + free (elapsed_str); + } + if (nsync_time_cmp (nsync_time_add (expected_end_time, too_late), end_time) < 0) { + too_late_violations++; + } + } + nsync_mu_unlock (&mu); + if (too_late_violations > TOO_LATE_ALLOWED) { + TEST_ERROR (t, ("nsync_mu_wait() returned too late %d (> %d) times", + too_late_violations, TOO_LATE_ALLOWED)); + } +} + +/* Check cancellations on a mu wait_with_deadline(). */ +static void test_mu_cancel (testing t) { + int i; + nsync_time future_time; + int too_late_violations; + nsync_mu mu; + nsync_time too_early; + nsync_time too_late; + + nsync_mu_init (&mu); + too_early = nsync_time_ms (TOO_EARLY_MS); + too_late = nsync_time_ms (TOO_LATE_MS); + + /* The loops below cancel after 87 milliseconds, like the timeout tests above. 
*/ + + future_time = nsync_time_add (nsync_time_now (NSYNC_CLOCK), nsync_time_ms (3600000)); /* test cancels with timeout */ + + too_late_violations = 0; + nsync_mu_lock (&mu); + for (i = 0; i != 50; i++) { + nsync_time end_time; + nsync_time start_time; + nsync_time expected_end_time; + int x; + nsync_note cancel; + + start_time = nsync_time_now (NSYNC_CLOCK); + expected_end_time = nsync_time_add (start_time, nsync_time_ms (87)); + cancel = nsync_note_new (NULL, NSYNC_CLOCK, expected_end_time); + + x = nsync_mu_wait_with_deadline (&mu, &false_condition, NULL, NULL, + NSYNC_CLOCK, future_time, cancel); + if (x != ECANCELED) { + TEST_FATAL (t, ("nsync_mu_wait() return non-cancelled (%d) for " + "a cancellation; expected %d", + x, ECANCELED)); + } + end_time = nsync_time_now (NSYNC_CLOCK); + if (nsync_time_cmp (end_time, nsync_time_sub (expected_end_time, too_early)) < 0) { + char *elapsed_str = nsync_time_str (nsync_time_sub (expected_end_time, end_time), 2); + TEST_ERROR (t, ("nsync_mu_wait() returned %s too early", elapsed_str)); + free (elapsed_str); + } + if (nsync_time_cmp (nsync_time_add (expected_end_time, too_late), end_time) < 0) { + too_late_violations++; + } + + /* Check that an already cancelled wait returns immediately. 
*/
+		start_time = nsync_time_now (NSYNC_CLOCK);
+		x = nsync_mu_wait_with_deadline (&mu, &false_condition, NULL, NULL,
+						 NSYNC_CLOCK, nsync_time_no_deadline,
+						 cancel);
+		if (x != ECANCELED) {
+			TEST_FATAL (t, ("nsync_mu_wait() returned non-cancelled (%d) for a "
+					"cancellation; expected %d",
+					x, ECANCELED));
+		}
+		end_time = nsync_time_now (NSYNC_CLOCK);
+		if (nsync_time_cmp (end_time, start_time) < 0) {
+			char *elapsed_str = nsync_time_str (nsync_time_sub (expected_end_time, end_time), 2);
+			TEST_ERROR (t, ("nsync_mu_wait() returned %s too early", elapsed_str));
+			free (elapsed_str);
+		}
+		if (nsync_time_cmp (nsync_time_add (start_time, too_late), end_time) < 0) {
+			too_late_violations++;
+		}
+		nsync_note_free (cancel);
+	}
+	nsync_mu_unlock (&mu);
+	if (too_late_violations > TOO_LATE_ALLOWED) {
+		TEST_ERROR (t, ("nsync_mu_wait() returned too late %d (> %d) times",
+				too_late_violations, TOO_LATE_ALLOWED));
+	}
+}
diff --git a/tool/viz/clock_nanosleep_accuracy.c b/tool/viz/clock_nanosleep_accuracy.c
index 358683e79..977fb6ac1 100644
--- a/tool/viz/clock_nanosleep_accuracy.c
+++ b/tool/viz/clock_nanosleep_accuracy.c
@@ -16,28 +16,36 @@
 │   TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR           │
 │   PERFORMANCE OF THIS SOFTWARE.
│
 ╚─────────────────────────────────────────────────────────────────────────────*/
-#include "libc/assert.h"
-#include "libc/calls/struct/timespec.h"
-#include "libc/intrin/describeflags.h"
-#include "libc/intrin/kprintf.h"
-#include "libc/runtime/runtime.h"
-#include "libc/stdio/stdio.h"
-#include "libc/sysv/consts/clock.h"
+#include <assert.h>
+#include <stdio.h>
+#include <time.h>
 
 #define MAXIMUM    1e9
 #define ITERATIONS 10
 
+const char *MyDescribeClockName(int clock) {
+  if (clock == CLOCK_REALTIME)
+    return "CLOCK_REALTIME";
+  if (clock == CLOCK_MONOTONIC)
+    return "CLOCK_MONOTONIC";
+  if (clock == CLOCK_REALTIME_COARSE)
+    return "CLOCK_REALTIME_COARSE";
+  if (clock == CLOCK_MONOTONIC_COARSE)
+    return "CLOCK_MONOTONIC_COARSE";
+  __builtin_trap();
+}
+
 void TestSleepRelative(int clock) {
   printf("\n");
   printf("testing: clock_nanosleep(%s) with relative timeout\n",
-         DescribeClockName(clock));
+         MyDescribeClockName(clock));
   for (long nanos = 1; nanos < (long)MAXIMUM; nanos *= 2) {
     struct timespec t1, t2, wf;
     wf = timespec_fromnanos(nanos);
     if (clock_gettime(clock, &t1)) return;
     for (int i = 0; i < ITERATIONS; ++i) {
-      npassert(!clock_nanosleep(clock, 0, &wf, 0));
+      assert(!clock_nanosleep(clock, 0, &wf, 0));
     }
     clock_gettime(clock, &t2);
     long took = timespec_tonanos(timespec_sub(t2, t1)) / ITERATIONS;