Fix bugs and make code tinier

- Fixed bug where stdio EOF wasn't sticky (see the sketch after this list)
- Fixed bug where fseeko() wasn't clearing the EOF state
- Removed assert() usage from libc in favor of _unassert() / _npassert()
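
Not part of the diff itself, but a minimal user-space sketch of the standard
behavior the two stdio fixes target: the end-of-file indicator stays set
(sticky) across further reads until a repositioning call such as fseeko() or
clearerr() clears it. The temp file path and its contents are made up for
illustration.

    #include <assert.h>
    #include <stdio.h>

    int main(void) {
      FILE *f = fopen("/tmp/eof_demo.txt", "w+"); /* hypothetical scratch file */
      if (!f) return 1;
      fputs("hi", f);
      rewind(f);
      while (fgetc(f) != EOF) {}  /* read until end of file */
      assert(feof(f));            /* eof indicator is now set */
      fgetc(f);                   /* further reads keep reporting EOF... */
      assert(feof(f));            /* ...because the indicator is sticky */
      fseeko(f, 0, SEEK_SET);     /* repositioning clears the eof indicator */
      assert(!feof(f));
      fclose(f);
      return 0;
    }
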
Author: Justine Tunney, 2022-10-09 22:38:28 -07:00
parent 9b7c8db846
commit d5910e2673
GPG key ID: BE714B4575D6E328
115 changed files with 510 additions and 290 deletions


@@ -378,7 +378,7 @@ static textwindows int afd_poll(int64_t afd_device_handle,
struct NtIoStatusBlock *io_status_block) {
NtStatus status;
/* Blocking operation is not supported.*/
-assert(io_status_block);
+_npassert(io_status_block);
io_status_block->Status = kNtStatusPending;
status =
NtDeviceIoControlFile(afd_device_handle, 0, NULL, io_status_block,
@@ -425,11 +425,11 @@ static textwindows void queue__detach_node(struct QueueNode *node) {
node->next->prev = node->prev;
}
-static textwindows bool queue_is_enqueued(const struct QueueNode *node) {
+forceinline bool queue_is_enqueued(const struct QueueNode *node) {
return node->prev != node;
}
-static textwindows bool queue_is_empty(const struct Queue *queue) {
+forceinline bool queue_is_empty(const struct Queue *queue) {
return !queue_is_enqueued(&queue->head);
}
@@ -558,7 +558,7 @@ static textwindows int ts_tree_add(struct TsTree *ts_tree,
}
static textwindows void port__free(struct PortState *port) {
-assert(port != NULL);
+_npassert(port);
free(port);
}
@@ -586,7 +586,7 @@ err1:
}
static textwindows int sock__cancel_poll(struct SockState *sock_state) {
-assert(sock_state->poll_status == kPollPending);
+_npassert(sock_state->poll_status == kPollPending);
if (afd_cancel_poll(poll_group_get_afd_device_handle(sock_state->poll_group),
&sock_state->io_status_block) < 0) {
return -1;
@@ -702,13 +702,13 @@ static textwindows void reflock__await_event(void *address) {
static textwindows void reflock_ref(struct RefLock *reflock) {
long state = InterlockedAdd(&reflock->state, REFLOCK__REF);
/* Verify that the counter didn't overflow and the lock isn't destroyed.*/
-assert((state & REFLOCK__DESTROY_MASK) == 0);
+_npassert((state & REFLOCK__DESTROY_MASK) == 0);
}
static textwindows void reflock_unref(struct RefLock *reflock) {
long state = InterlockedAdd(&reflock->state, -REFLOCK__REF);
/* Verify that the lock was referenced and not already destroyed.*/
-assert((state & REFLOCK__DESTROY_MASK & ~REFLOCK__DESTROY) == 0);
+_npassert((state & REFLOCK__DESTROY_MASK & ~REFLOCK__DESTROY) == 0);
if (state == REFLOCK__DESTROY) reflock__signal_event(reflock);
}
@@ -744,10 +744,10 @@ static textwindows void reflock_unref_and_destroy(struct RefLock *reflock) {
state = InterlockedAdd(&reflock->state, REFLOCK__DESTROY - REFLOCK__REF);
ref_count = state & REFLOCK__REF_MASK;
/* Verify that the lock was referenced and not already destroyed. */
-assert((state & REFLOCK__DESTROY_MASK) == REFLOCK__DESTROY);
+_npassert((state & REFLOCK__DESTROY_MASK) == REFLOCK__DESTROY);
if (ref_count != 0) reflock__await_event(reflock);
state = InterlockedExchange(&reflock->state, REFLOCK__POISON);
-assert(state == REFLOCK__DESTROY);
+_npassert(state == REFLOCK__DESTROY);
}
static textwindows void ts_tree_node_unref_and_destroy(
@@ -775,13 +775,13 @@ static textwindows void poll_group_release(struct PollGroup *poll_group) {
struct PortState *port_state = poll_group->port_state;
struct Queue *poll_group_queue = port_get_poll_group_queue(port_state);
poll_group->group_size--;
-assert(poll_group->group_size < MAX_GROUP_SIZE);
+_npassert(poll_group->group_size < MAX_GROUP_SIZE);
queue_move_to_end(poll_group_queue, &poll_group->queue_node);
/* Poll groups are currently only freed when the epoll port is closed. */
}
static textwindows void sock__free(struct SockState *sock_state) {
-assert(sock_state != NULL);
+_npassert(sock_state != NULL);
free(sock_state);
}
@@ -827,7 +827,7 @@ static textwindows void sock_force_delete(struct PortState *port_state,
}
static textwindows void poll_group_delete(struct PollGroup *poll_group) {
-assert(poll_group->group_size == 0);
+_npassert(poll_group->group_size == 0);
CloseHandle(poll_group->afd_device_handle);
queue_remove(&poll_group->queue_node);
free(poll_group);
@@ -839,7 +839,7 @@ static textwindows int port_delete(struct PortState *port_state) {
struct SockState *sock_state;
struct PollGroup *poll_group;
/* At this point the IOCP port should have been closed.*/
-assert(!port_state->iocp_handle);
+_npassert(!port_state->iocp_handle);
while ((tree_node = tree_root(&port_state->sock_tree)) != NULL) {
sock_state = sock_state_from_tree_node(tree_node);
sock_force_delete(port_state, sock_state);
@@ -852,14 +852,14 @@ static textwindows int port_delete(struct PortState *port_state) {
poll_group = poll_group_from_queue_node(queue_node);
poll_group_delete(poll_group);
}
-assert(queue_is_empty(&port_state->sock_update_queue));
+_npassert(queue_is_empty(&port_state->sock_update_queue));
DeleteCriticalSection(&port_state->lock);
port__free(port_state);
return 0;
}
static textwindows int64_t port_get_iocp_handle(struct PortState *port_state) {
-assert(port_state->iocp_handle);
+_npassert(port_state->iocp_handle);
return port_state->iocp_handle;
}
@@ -933,7 +933,7 @@ static textwindows uint32_t sock__afd_events_to_epoll_events(uint32_t a) {
static textwindows int sock_update(struct PortState *port_state,
struct SockState *sock_state) {
-assert(!sock_state->delete_pending);
+_npassert(!sock_state->delete_pending);
if ((sock_state->poll_status == kPollPending) &&
!(sock_state->user_events & KNOWN_EVENTS & ~sock_state->pending_events)) {
/* All the events the user is interested in are already being