Exponential back-off

This commit is contained in:
Lemaitre 2021-10-10 12:06:18 +02:00
parent 67b5200a0b
commit c9f8973de7
3 changed files with 47 additions and 43 deletions

View file

@ -35,7 +35,7 @@ int main() {
cthread_t thread; cthread_t thread;
int rc = cthread_create(&thread, NULL, &worker, NULL); int rc = cthread_create(&thread, NULL, &worker, NULL);
if (rc == 0) { if (rc == 0) {
cthread_native_sem_wait(&semaphore, 0, 0, NULL); cthread_native_sem_wait(&semaphore, 0, NULL);
//printf("thread created: %p\n", thread); //printf("thread created: %p\n", thread);
sleep(1); sleep(1);
#if 1 #if 1
@ -45,7 +45,7 @@ int main() {
sleep(2); sleep(2);
#endif #endif
cthread_native_sem_signal(&semaphore); cthread_native_sem_signal(&semaphore);
cthread_native_sem_wait(&semaphore, 0, 0, NULL); cthread_native_sem_wait(&semaphore, 0, NULL);
//printf("thread joined: %p -> %d\n", thread, rc); //printf("thread joined: %p -> %d\n", thread, rc);
} else { } else {
printf("ERROR: thread could not be started: %d\n", rc); printf("ERROR: thread could not be started: %d\n", rc);

View file

@ -24,6 +24,16 @@
#define CTHREAD_THREAD_VAL_BITS 32 #define CTHREAD_THREAD_VAL_BITS 32
// Exponential back-off helper: for the first 16 attempts, busy-spin
// 2^attempt x86 PAUSE instructions; after that, yield the CPU instead.
// NOTE(review): the name shadows POSIX pause(2) — safe only while
// <unistd.h> is not included in this translation unit; verify.
static void pause(int attempt) {
  if (attempt >= 16) {
    cthread_yield();
    return;
  }
  int spins = 1 << attempt;
  while (spins-- > 0) {
    asm("pause");
  }
}
int cthread_native_sem_init(cthread_native_sem_t* sem, int count) { int cthread_native_sem_init(cthread_native_sem_t* sem, int count) {
sem->linux.count = count; sem->linux.count = count;
return 0; return 0;
@ -54,7 +64,7 @@ int cthread_native_sem_signal(cthread_native_sem_t* sem) {
return 0; return 0;
} }
int cthread_native_sem_wait_slow(cthread_native_sem_t* sem, int cthread_native_sem_wait_futex(cthread_native_sem_t* sem,
const struct timespec* timeout) { const struct timespec* timeout) {
uint64_t count; uint64_t count;
@ -66,11 +76,14 @@ int cthread_native_sem_wait_slow(cthread_native_sem_t* sem,
for (;;) { for (;;) {
// try to acquire the semaphore, as well as remove itself from waiters // try to acquire the semaphore, as well as remove itself from waiters
if ((uint32_t)count > 0 && while ((uint32_t)count > 0) {
atomic_compare_exchange_weak( // without spin, we could miss a futex wake
if (atomic_compare_exchange_weak(
&sem->linux.count, count, &sem->linux.count, count,
count - 1 - ((uint64_t)1 << CTHREAD_THREAD_VAL_BITS))) count - 1 - ((uint64_t)1 << CTHREAD_THREAD_VAL_BITS))) {
break; return 0;
}
}
int flags = FUTEX_WAIT; int flags = FUTEX_WAIT;
register struct timespec* timeout_ asm("r10") = timeout; register struct timespec* timeout_ asm("r10") = timeout;
@ -88,45 +101,36 @@ int cthread_native_sem_wait_slow(cthread_native_sem_t* sem,
return 0; return 0;
} }
int cthread_native_sem_wait_spin_yield(cthread_native_sem_t* sem,
uint64_t count, int yield,
const struct timespec* timeout) {
// spin on yield
while (yield-- > 0) {
if ((count >> CTHREAD_THREAD_VAL_BITS) != 0)
break; // a thread is already waiting in queue
if ((uint32_t)count > 0 &&
atomic_compare_exchange_weak(&sem->linux.count, count, count - 1))
return 0;
cthread_yield();
}
return cthread_native_sem_wait_slow(sem, timeout);
}
// Bounded spin phase: retry acquisition up to `spin` rounds with
// exponentially growing pauses between rounds, then hand off to the
// futex-based slow path. `count` is the caller's last-observed value of
// sem->linux.count (low 32 bits = token count, high bits = waiters).
int cthread_native_sem_wait_spin(cthread_native_sem_t* sem, uint64_t count,
                                 int spin, const struct timespec* timeout) {
  int attempt = 0;
  while (attempt < spin) {
    // Spinning is useful when several waiters can acquire the semaphore
    // at the same time.
    // NOTE(review): `count` is passed by value here, not `&count` as C11
    // atomic_compare_exchange_weak requires — presumably a project macro
    // that refreshes `count` on failure; confirm against the header.
    while ((uint32_t)count > 0) {
      if (atomic_compare_exchange_weak(&sem->linux.count, count, count - 1)) {
        return 0;  // token taken
      }
    }
    pause(attempt);
    ++attempt;
  }
  return cthread_native_sem_wait_futex(sem, timeout);
}
// Acquire one token from the semaphore. Fast path: CAS-decrement while the
// 32-bit token count is non-zero. Once the count is observed at zero, fall
// back to the spin (and ultimately futex) path with the last snapshot.
int cthread_native_sem_wait(cthread_native_sem_t* sem, int spin,
                            const struct timespec* timeout) {
  uint64_t snapshot = atomic_load(&sem->linux.count);
  // Looping is useful when several waiters can acquire the semaphore at
  // the same time.
  // NOTE(review): `snapshot` is passed by value to the CAS, not by address
  // as C11 atomic_compare_exchange_weak requires — presumably a project
  // macro that refreshes it on failure; confirm against the header.
  while ((uint32_t)snapshot > 0) {
    if (atomic_compare_exchange_weak(&sem->linux.count, snapshot,
                                     snapshot - 1)) {
      return 0;  // uncontended acquisition
    }
  }
  return cthread_native_sem_wait_spin(sem, snapshot, spin, timeout);
}

View file

@ -18,7 +18,7 @@ struct timespec;
int cthread_native_sem_init(cthread_native_sem_t*, int); int cthread_native_sem_init(cthread_native_sem_t*, int);
int cthread_native_sem_destroy(cthread_native_sem_t*); int cthread_native_sem_destroy(cthread_native_sem_t*);
int cthread_native_sem_wait(cthread_native_sem_t*, int, int, const struct timespec*); int cthread_native_sem_wait(cthread_native_sem_t*, int, const struct timespec*);
int cthread_native_sem_signal(cthread_native_sem_t*); int cthread_native_sem_signal(cthread_native_sem_t*);