Mirror of https://github.com/jart/cosmopolitan.git, synced 2025-06-27 14:58:30 +00:00
Perform some code maintenance
- Change IDT code so kprintf() isn't mandatory dependency
- Document current intentions around pthread_cancel()
- Make _npassert() an _unassert() in MODE=tiny
parent 4a6fd3d910
commit 9b7c8db846

7 changed files with 85 additions and 49 deletions
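For context on the third bullet, here is a minimal, self-contained sketch of what the two assertion flavors amount to after this change. It is not the cosmopolitan header verbatim: the macro is renamed NPASSERT, notpossible is modeled with __builtin_trap(), and _unassert() is assumed to do nothing more than let the optimizer treat the predicate as true.

    /* Hedged sketch, not the real libc/assert.h: NPASSERT stands in for
       _npassert(), __builtin_trap() stands in for notpossible, and the TINY
       branch assumes _unassert() only tells the compiler the predicate holds. */
    #include <stdio.h>

    #ifndef TINY
    #define NPASSERT(x)                        \
      do {                                     \
        if (__builtin_expect(!(x), 0)) {       \
          __builtin_trap(); /* crash loudly */ \
        }                                      \
      } while (0)
    #else
    #define NPASSERT(x)                    \
      do {                                 \
        if (!(x)) __builtin_unreachable(); \
      } while (0)
    #endif

    int main(void) {
      int fd = 3;
      NPASSERT(fd >= 0); /* traps in normal builds, vanishes with -DTINY */
      printf("fd=%d\n", fd);
      return 0;
    }

Compiled normally the check still traps; compiled with -DTINY it disappears entirely, which is the size-versus-diagnostics trade the commit message describes.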
@@ -2,7 +2,6 @@
 #define COSMOPOLITAN_LIBC_ASSERT_H_
 #if !(__ASSEMBLER__ + __LINKER__ + 0)
 COSMOPOLITAN_C_START_
-#include "libc/intrin/kprintf.h"

 extern bool __assert_disable;
 void __assert_fail(const char *, const char *, int) hidden relegated;

@@ -24,13 +23,16 @@ void __assert_fail(const char *, const char *, int) hidden relegated;
   } \
 } while (0)

+#ifndef TINY
 #define _npassert(x) \
   do { \
     if (__builtin_expect(!(x), 0)) { \
-      kprintf("%s:%d: oh no!\n", __FILE__, __LINE__); \
       notpossible; \
     } \
   } while (0)
+#else
+#define _npassert(x) _unassert(x)
+#endif

 COSMOPOLITAN_C_END_
 #endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */

@@ -34,14 +34,11 @@ static void _mapframe(void *p, int f) {
   struct DirectMap dm;
   prot = PROT_READ | PROT_WRITE;
   flags = f | MAP_ANONYMOUS | MAP_FIXED;
-  if ((dm = sys_mmap(p, G, prot, flags, -1, 0)).addr != p) {
-    notpossible;
-  }
+  _npassert((dm = sys_mmap(p, G, prot, flags, -1, 0)).addr == p);
   __mmi_lock();
-  if (TrackMemoryInterval(&_mmi, (uintptr_t)p >> 16, (uintptr_t)p >> 16,
-                          dm.maphandle, prot, flags, false, false, 0, G)) {
-    notpossible;
-  }
+  _npassert(!TrackMemoryInterval(&_mmi, (uintptr_t)p >> 16, (uintptr_t)p >> 16,
+                                 dm.maphandle, prot, flags, false, false, 0,
+                                 G));
   __mmi_unlock();
 }

@@ -65,13 +62,11 @@ static void _mapframe(void *p, int f) {
  */
 noasan void *_extend(void *p, size_t n, void *e, int f, intptr_t h) {
   char *q;
-#ifndef NDEBUG
-  if ((uintptr_t)SHADOW(p) & (G - 1)) notpossible;
-  if ((uintptr_t)p + (G << kAsanScale) > h) notpossible;
-#endif
+  _npassert(!((uintptr_t)SHADOW(p) & (G - 1)));
+  _npassert((uintptr_t)p + (G << kAsanScale) <= h);
   for (q = e; q < ((char *)p + n); q += 8) {
     if (!((uintptr_t)q & (G - 1))) {
-      if (q + G > (char *)h) notpossible;
+      _npassert(q + G <= (char *)h);
       _mapframe(q, f);
       if (IsAsan()) {
         if (!((uintptr_t)SHADOW(q) & (G - 1))) {

@@ -52,7 +52,7 @@
 .rept 30
 push %rsi # preserve rsi
 mov $i,%sil # rsi = exception number
-1: jmp 1f # 🦘
+1: jmp 1f # kangeroo
 i = i - 1
 .endr
 __excep1_isr:

@@ -71,7 +71,7 @@ __excep0_isr:
 push %rdx # use to call kprintf
 push %r8
 push %r9
-mov 48(%rsp),%rcx # edx:rcx = "caller" cs:rip
+mov 48(%rsp),%rcx # edx:rcx = 'caller' cs:rip
 mov 56(%rsp),%edx
 mov 40(%rsp),%r8 # r8 = error code
 mov %cr2,%r9 # r9 = cr2, in case it is useful

@@ -91,10 +91,14 @@ __excep0_isr:
 mov %eax,%ds
 mov %eax,%es
 ezlea .excep_msg,di # stack should be 16-byte aligned now
-xor %eax,%eax # kprintf is variadic; remember to
+xor %eax,%eax # kprintf is variadic, remember to
 # pass no. of vector regs. used (= 0)
-call kprintf # print error message
-cli
+.weak kprintf # weakly link kprintf() because this
+ezlea kprintf,bx # module is a mandatory dependency
+test %ebx,%ebx # and we want to keep life.com tiny
+jz 8f
+call *%rbx # print error message
+8: cli
 9: hlt
 jmp 9b
 /* TODO: link up with sigaction etc. */

@@ -105,7 +109,7 @@ __excep0_isr:
 isr_init:
 testb IsMetal()
 jz 9f
-ezlea _tss+0x24,di # fill up TSS; we already loaded
+ezlea _tss+0x24,di # fill up TSS, we already loaded
 # task register in ape/ape.S
 ezlea _isr_stk_1+ISR_STK_SZ,ax
 and $-ISR_STK_ALIGN,%al # be paranoid & enforce correct

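The .weak/ezlea/test/jz sequence above is the heart of the first commit-message bullet: the exception stub only calls kprintf() if some other object already linked it in. A rough C analogue of that pattern using GNU weak symbols follows; it is an illustration only, not code from the repository, and the kprintf prototype shown is an assumption.

    /* Illustration of weak linkage on ELF toolchains; the prototype below is
       an assumption, not copied from cosmopolitan. */
    __attribute__((weak)) void kprintf(const char *fmt, ...);

    int main(void) {
      /* Same idea as "ezlea kprintf,bx / test %ebx,%ebx / jz 8f / call *%rbx":
         if nothing else pulled kprintf() into the link, the weak reference
         resolves to null and printing is skipped. */
      if (kprintf) {
        kprintf("error: unexpected cpu exception\n");
      }
      return 0;
    }

If another translation unit does define kprintf(), the same code prints the message; either way the interrupt handler itself no longer forces the formatter into every binary.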
@@ -49,7 +49,7 @@
 static void *MoveMemoryIntervals(struct MemoryInterval *d,
                                  const struct MemoryInterval *s, int n) {
   int i;
-  if (n < 0) unreachable;
+  _unassert(n >= 0);
   if (d > s) {
     for (i = n; i--;) {
       d[i] = s[i];

@@ -63,8 +63,8 @@ static void *MoveMemoryIntervals(struct MemoryInterval *d,
 }

 static void RemoveMemoryIntervals(struct MemoryIntervals *mm, int i, int n) {
-  if (i < 0) unreachable;
-  if (i + n > mm->i) unreachable;
+  _unassert(i >= 0);
+  _unassert(i + n <= mm->i);
   MoveMemoryIntervals(mm->p + i, mm->p + i + n, mm->i - (i + n));
   mm->i -= n;
 }

@@ -108,9 +108,9 @@ static bool ExtendMemoryIntervals(struct MemoryIntervals *mm) {
 }

 int CreateMemoryInterval(struct MemoryIntervals *mm, int i) {
-  if (i < 0) unreachable;
-  if (i > mm->i) unreachable;
-  if (mm->n < 0) unreachable;
+  _unassert(i >= 0);
+  _unassert(i <= mm->i);
+  _unassert(mm->n >= 0);
   if (UNLIKELY(mm->i == mm->n) && !ExtendMemoryIntervals(mm)) return enomem();
   MoveMemoryIntervals(mm->p + i + 1, mm->p + i, mm->i++ - i);
   return 0;

@@ -129,7 +129,7 @@ int ReleaseMemoryIntervals(struct MemoryIntervals *mm, int x, int y,
                            void wf(struct MemoryIntervals *, int, int)) {
   unsigned l, r;
   ASSERT_MEMTRACK();
-  if (y < x) unreachable;
+  _unassert(y >= x);
   if (!mm->i) return 0;
   // binary search for the lefthand side
   l = FindMemoryInterval(mm, x);

@@ -139,8 +139,8 @@ int ReleaseMemoryIntervals(struct MemoryIntervals *mm, int x, int y,
   // binary search for the righthand side
   r = FindMemoryInterval(mm, y);
   if (r == mm->i || (r > l && y < mm->p[r].x)) --r;
-  if (r < l) unreachable;
-  if (x > mm->p[r].y) unreachable;
+  _unassert(r >= l);
+  _unassert(x <= mm->p[r].y);

   // remove the middle of an existing map
   //

@@ -161,11 +161,11 @@ int ReleaseMemoryIntervals(struct MemoryIntervals *mm, int x, int y,
   // ----|mmmm|----------------- after
   //
   if (x > mm->p[l].x && x <= mm->p[l].y) {
-    if (y < mm->p[l].y) unreachable;
+    _unassert(y >= mm->p[l].y);
     if (IsWindows()) return einval();
     mm->p[l].size -= (size_t)(mm->p[l].y - (x - 1)) * FRAMESIZE;
     mm->p[l].y = x - 1;
-    if (mm->p[l].x > mm->p[l].y) unreachable;
+    _unassert(mm->p[l].x <= mm->p[l].y);
     ++l;
   }

@@ -176,11 +176,11 @@ int ReleaseMemoryIntervals(struct MemoryIntervals *mm, int x, int y,
   // ---------------|mm|-------- after
   //
   if (y >= mm->p[r].x && y < mm->p[r].y) {
-    if (x > mm->p[r].x) unreachable;
+    _unassert(x <= mm->p[r].x);
     if (IsWindows()) return einval();
     mm->p[r].size -= (size_t)((y + 1) - mm->p[r].x) * FRAMESIZE;
     mm->p[r].x = y + 1;
-    if (mm->p[r].x > mm->p[r].y) unreachable;
+    _unassert(mm->p[r].x <= mm->p[r].y);
     --r;
   }

@@ -198,7 +198,7 @@ int TrackMemoryInterval(struct MemoryIntervals *mm, int x, int y, long h,
                         long offset, long size) {
   unsigned i;
   ASSERT_MEMTRACK();
-  if (y < x) unreachable;
+  _unassert(y >= x);
   i = FindMemoryInterval(mm, x);

   // try to extend the righthand side of the lefthand entry

@@ -16,9 +16,52 @@
 │ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │
 │ PERFORMANCE OF THIS SOFTWARE. │
 ╚─────────────────────────────────────────────────────────────────────────────*/
-#include "libc/errno.h"
+#include "libc/intrin/kprintf.h"
+#include "libc/runtime/runtime.h"
 #include "libc/thread/thread.h"

+/**
+ * Cancels thread.
+ *
+ * This function currently isn't supported. In order to support this
+ * function we'd need to redesign the system call interface, and add
+ * bloat and complexity to every function that can return EINTR. You
+ * might want to consider using `nsync_note` instead, which provides
+ * much better cancellations because posix cancellations is a broken
+ * design. If you need to cancel i/o operations, try doing this:
+ *
+ *     _Thread_local bool gotusr1;
+ *     void OnUsr1(int sig) { gotusr1 = true; }
+ *     struct sigaction sa = {.sa_handler = OnUsr1};
+ *     sigaction(SIGUSR1, &sa, 0);
+ *     tkill(pthread_getunique_np(thread), SIGUSR1);
+ *
+ * The above code should successfully cancel a thread's blocking io
+ * operations in most cases, e.g.
+ *
+ *     void *MyThread(void *arg) {
+ *       sigset_t ss;
+ *       sigfillset(&ss);
+ *       sigdelset(&ss, SIGUSR1);
+ *       sigprocmask(SIG_SETMASK, &ss, 0);
+ *       while (!gotusr1) {
+ *         char buf[512];
+ *         ssize_t rc = read(0, buf, sizeof(buf));
+ *         if (rc == -1 && errno == EINTR) continue;
+ *         write(1, buf, rc);
+ *       }
+ *       return 0;
+ *     }
+ *
+ * This has the same correctness issue as glibc, but it's usually
+ * "good enough" if you only need cancellations to perform things
+ * like server shutdown and socket options like `SO_RCVTIMEO` can
+ * ensure it's even safer, since it can't possibly block forever.
+ *
+ * @see https://sourceware.org/bugzilla/show_bug.cgi?id=12683
+ */
 int pthread_cancel(pthread_t thread) {
-  return ESRCH;
+  kprintf("error: pthread_cancel() is unsupported, please read the "
+          "cosmopolitan libc documentation for further details\n");
+  _Exit(1);
 }

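The docstring added above shows the SIGUSR1 workaround in two fragments. Below they are stitched into one hedged, self-contained sketch; pthread_kill() and pthread_sigmask() are used here as portable stand-ins for the tkill(pthread_getunique_np(...)) and sigprocmask() calls in the docstring.

    /* Hedged demo assembled from the docstring's two fragments; build with
       cc -pthread. */
    #include <errno.h>
    #include <pthread.h>
    #include <signal.h>
    #include <stdbool.h>
    #include <unistd.h>

    _Thread_local bool gotusr1;

    void OnUsr1(int sig) { gotusr1 = true; }

    void *MyThread(void *arg) {
      sigset_t ss;
      char buf[512];
      ssize_t rc;
      sigfillset(&ss);
      sigdelset(&ss, SIGUSR1);              /* only SIGUSR1 may interrupt reads */
      pthread_sigmask(SIG_SETMASK, &ss, 0);
      while (!gotusr1) {
        rc = read(0, buf, sizeof(buf));
        if (rc == -1 && errno == EINTR) continue; /* poked: re-check the flag */
        if (rc <= 0) break;
        write(1, buf, rc);
      }
      return 0;
    }

    int main(void) {
      pthread_t th;
      struct sigaction sa = {.sa_handler = OnUsr1};
      sigaction(SIGUSR1, &sa, 0);
      pthread_create(&th, 0, MyThread, 0);
      sleep(1);
      pthread_kill(th, SIGUSR1);            /* interrupt the blocked read */
      pthread_join(th, 0);
      return 0;
    }

As the docstring notes, this has the same races as glibc-style cancellation, but it is usually good enough for things like server shutdown, and SO_RCVTIMEO can bound how long any single read blocks.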
third_party/nsync/common.internal.h (vendored, 4 lines changed)

@@ -222,11 +222,7 @@ static const uint32_t NSYNC_WAITER_TAG = 0x726d2ba9;

 #define CONTAINER(t_, f_, p_) ((t_ *)(((char *)(p_)) - offsetof(t_, f_)))

-#ifdef TINY
-#define ASSERT(x) _unassert(x)
-#else
 #define ASSERT(x) _npassert(x)
-#endif

 /* Return a pointer to the nsync_waiter_s containing nsync_dll_element_ *e. */
 #define DLL_NSYNC_WAITER(e) \

third_party/nsync/mu_semaphore.c (vendored, 4 lines changed)

@@ -30,11 +30,7 @@ Copyright 2016 Google, Inc.\\n\
 https://github.com/google/nsync\"");
 // clang-format off

-#ifdef TINY
-#define ASSERT(x) _unassert(x)
-#else
 #define ASSERT(x) _npassert(x)
-#endif

 /* Check that atomic operations on nsync_atomic_uint32_ can be applied to int. */
 static const int assert_int_size = 1 /