mirror of
https://github.com/jart/cosmopolitan.git
synced 2025-07-27 04:50:28 +00:00
Fix fork waiter leak in nsync
This change fixes a bug where nsync waiter objects would leak. That meant long-running programs like runitd would run out of file descriptors on NetBSD, where waiter objects hold ksem file descriptors. On other OSes the bug is mostly harmless, since the worst that can happen with a futex is leaking a little bit of RAM. The bug was caused by tib_nsync sneaking back in after the finalization code had cleared it. This change refactors the thread-exit code to handle nsync teardown appropriately. In making this change, I found another issue: buggy user code that exits without joining joinable threads that haven't been detached would cause a deadlock. That doesn't sound so bad, except that the main thread is a joinable thread, so this deadlock could be triggered in ways that put libc at fault. We now auto-join such threads, and libc logs a warning to --strace whenever that happens for any thread.
This commit is contained in:
parent
fd7da586b5
commit
98c5847727
35 changed files with 299 additions and 173 deletions
|
@ -39,7 +39,7 @@
|
|||
int gettid(void) {
|
||||
int tid;
|
||||
if (VERY_LIKELY(__tls_enabled && !__vforked)) {
|
||||
tid = atomic_load_explicit(&__get_tls()->tib_tid, memory_order_relaxed);
|
||||
tid = atomic_load_explicit(&__get_tls()->tib_ptid, memory_order_relaxed);
|
||||
if (VERY_LIKELY(tid > 0))
|
||||
return tid;
|
||||
}
|
||||
|
|
|
@ -561,7 +561,7 @@ ABI static size_t kformat(char *b, size_t n, const char *fmt, va_list va) {
|
|||
tib = __tls_enabled ? __get_tls_privileged() : 0;
|
||||
if (!(tib && (tib->tib_flags & TIB_FLAG_VFORKED))) {
|
||||
if (tib) {
|
||||
x = atomic_load_explicit(&tib->tib_tid, memory_order_relaxed);
|
||||
x = atomic_load_explicit(&tib->tib_ptid, memory_order_relaxed);
|
||||
} else {
|
||||
x = __pid;
|
||||
}
|
||||
|
|
|
@ -129,7 +129,7 @@ bool __maps_held(void) {
|
|||
return __tls_enabled && !(__get_tls()->tib_flags & TIB_FLAG_VFORKED) &&
|
||||
MUTEX_OWNER(
|
||||
atomic_load_explicit(&__maps.lock.word, memory_order_relaxed)) ==
|
||||
atomic_load_explicit(&__get_tls()->tib_tid, memory_order_relaxed);
|
||||
atomic_load_explicit(&__get_tls()->tib_ptid, memory_order_relaxed);
|
||||
}
|
||||
|
||||
ABI void __maps_lock(void) {
|
||||
|
@ -142,7 +142,7 @@ ABI void __maps_lock(void) {
|
|||
return;
|
||||
if (tib->tib_flags & TIB_FLAG_VFORKED)
|
||||
return;
|
||||
me = atomic_load_explicit(&tib->tib_tid, memory_order_relaxed);
|
||||
me = atomic_load_explicit(&tib->tib_ptid, memory_order_relaxed);
|
||||
if (me <= 0)
|
||||
return;
|
||||
word = atomic_load_explicit(&__maps.lock.word, memory_order_relaxed);
|
||||
|
@ -192,7 +192,7 @@ ABI void __maps_unlock(void) {
|
|||
return;
|
||||
if (tib->tib_flags & TIB_FLAG_VFORKED)
|
||||
return;
|
||||
me = atomic_load_explicit(&tib->tib_tid, memory_order_relaxed);
|
||||
me = atomic_load_explicit(&tib->tib_ptid, memory_order_relaxed);
|
||||
if (me <= 0)
|
||||
return;
|
||||
word = atomic_load_explicit(&__maps.lock.word, memory_order_relaxed);
|
||||
|
|
|
@ -69,7 +69,7 @@ static errno_t pthread_mutex_lock_recursive(pthread_mutex_t *mutex,
|
|||
uint64_t word, bool is_trylock) {
|
||||
uint64_t lock;
|
||||
int backoff = 0;
|
||||
int me = atomic_load_explicit(&__get_tls()->tib_tid, memory_order_relaxed);
|
||||
int me = atomic_load_explicit(&__get_tls()->tib_ptid, memory_order_relaxed);
|
||||
bool once = false;
|
||||
for (;;) {
|
||||
if (MUTEX_OWNER(word) == me) {
|
||||
|
@ -119,7 +119,7 @@ static errno_t pthread_mutex_lock_recursive(pthread_mutex_t *mutex,
|
|||
static errno_t pthread_mutex_lock_recursive_nsync(pthread_mutex_t *mutex,
|
||||
uint64_t word,
|
||||
bool is_trylock) {
|
||||
int me = atomic_load_explicit(&__get_tls()->tib_tid, memory_order_relaxed);
|
||||
int me = atomic_load_explicit(&__get_tls()->tib_ptid, memory_order_relaxed);
|
||||
for (;;) {
|
||||
if (MUTEX_OWNER(word) == me) {
|
||||
if (MUTEX_DEPTH(word) < MUTEX_DEPTH_MAX) {
|
||||
|
|
|
@ -44,7 +44,7 @@ static void pthread_mutex_unlock_drepper(atomic_int *futex, char pshare) {
|
|||
|
||||
static errno_t pthread_mutex_unlock_recursive(pthread_mutex_t *mutex,
|
||||
uint64_t word) {
|
||||
int me = atomic_load_explicit(&__get_tls()->tib_tid, memory_order_relaxed);
|
||||
int me = atomic_load_explicit(&__get_tls()->tib_ptid, memory_order_relaxed);
|
||||
for (;;) {
|
||||
|
||||
// we allow unlocking an initialized lock that wasn't locked, but we
|
||||
|
@ -76,7 +76,7 @@ static errno_t pthread_mutex_unlock_recursive(pthread_mutex_t *mutex,
|
|||
#if PTHREAD_USE_NSYNC
|
||||
static errno_t pthread_mutex_unlock_recursive_nsync(pthread_mutex_t *mutex,
|
||||
uint64_t word) {
|
||||
int me = atomic_load_explicit(&__get_tls()->tib_tid, memory_order_relaxed);
|
||||
int me = atomic_load_explicit(&__get_tls()->tib_ptid, memory_order_relaxed);
|
||||
for (;;) {
|
||||
|
||||
// we allow unlocking an initialized lock that wasn't locked, but we
|
||||
|
|
|
@ -21,9 +21,25 @@
|
|||
#include "libc/thread/posixthread.internal.h"
|
||||
#include "libc/thread/thread.h"
|
||||
|
||||
//
|
||||
// - tib_ptid: always guaranteed to be non-zero in thread itself. on
|
||||
// some platforms (e.g. xnu) the parent thread and other
|
||||
// threads may need to wait for this value to be set. this
|
||||
// is generally the value you want to read to get the tid.
|
||||
//
|
||||
// - tib_ctid: starts off as -1. once thread starts, it's set to the
|
||||
// thread's tid before calling the thread callback. when
|
||||
// thread is done executing, this is set to zero, and then
|
||||
// this address is futex woken, in case the parent thread or
|
||||
// any other thread is waiting on its completion. when a
|
||||
// thread wants to read its own tid, it shouldn't use this,
|
||||
// because the thread might need to do things after clearing
|
||||
// its own tib_ctid (see pthread_exit() for static thread).
|
||||
//
|
||||
int _pthread_tid(struct PosixThread *pt) {
|
||||
int tid = 0;
|
||||
while (pt && !(tid = atomic_load_explicit(&pt->ptid, memory_order_acquire)))
|
||||
while (pt && !(tid = atomic_load_explicit(&pt->tib->tib_ptid,
|
||||
memory_order_acquire)))
|
||||
pthread_yield_np();
|
||||
return tid;
|
||||
}
|
||||
|
|
|
@ -16,6 +16,7 @@
|
|||
│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
|
||||
│ PERFORMANCE OF THIS SOFTWARE. │
|
||||
╚─────────────────────────────────────────────────────────────────────────────*/
|
||||
#include "libc/intrin/atomic.h"
|
||||
#include "libc/log/libfatal.internal.h"
|
||||
#include "libc/nt/thread.h"
|
||||
#include "libc/nt/thunk/msabi.h"
|
||||
|
@ -38,7 +39,9 @@ textwindows dontinstrument void __bootstrap_tls(struct CosmoTib *tib,
|
|||
tib->tib_ftrace = __ftrace;
|
||||
tib->tib_sigstack_size = 57344;
|
||||
tib->tib_sigstack_addr = bp - 57344;
|
||||
tib->tib_tid = __imp_GetCurrentThreadId();
|
||||
int tid = __imp_GetCurrentThreadId();
|
||||
atomic_init(&tib->tib_ptid, tid);
|
||||
atomic_init(&tib->tib_ctid, tid);
|
||||
__set_tls_win32(tib);
|
||||
}
|
||||
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue