Make fixes and improvements

- Fix handling of precision in hex float formatting (see the printf sketch below)
- Enhance the cocmd interpreter for system() and popen() (usage sketch below)
- Manually ran the Lua unit tests, which are now passing
- Let stdio i/o operations happen when file is in error state (see the ferror()/clearerr() sketch below)
- We're now saving and restoring xmm in ftrace out of paranoia
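
The hex float fix concerns the precision field of the printf family's %a
conversion. A minimal sketch of the intended standard C behavior (not a test
from this commit; the outputs in comments are typical renderings and may vary
slightly by implementation):

    #include <stdio.h>

    int main(void) {
      // The precision of %a limits how many hex digits follow the
      // point, rounding the mantissa as needed.
      printf("%a\n", 0.1);    // full precision, e.g. 0x1.999999999999ap-4
      printf("%.3a\n", 0.1);  // three hex digits, e.g. 0x1.99ap-4
      printf("%.0a\n", 0.1);  // no fractional digits, e.g. 0x2p-4
      return 0;
    }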
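
cocmd is the command interpreter that backs system() and popen(). The sketch
below only shows the standard calling pattern those functions serve; the
command strings are illustrative, not taken from this commit:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void) {
      // system() hands the whole command string to the interpreter.
      if (system("echo hello && echo world") == -1) return 1;

      // popen() runs a command the same way, but with its stdout
      // connected to a stream we can read.
      FILE *f = popen("ls -l", "r");
      if (!f) return 1;
      char line[512];
      while (fgets(line, sizeof(line), f)) {
        fputs(line, stdout);
      }
      int status = pclose(f);
      return status == -1;
    }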
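
The stdio change concerns streams whose error indicator is set. A brief sketch
of the portable interface around that flag (ferror()/clearerr()); the
tmpfile() stream here is only for illustration:

    #include <stdio.h>

    int main(void) {
      FILE *f = tmpfile();  // scratch stream, illustrative only
      if (!f) return 1;
      fputs("hello\n", f);
      rewind(f);
      int c;
      while ((c = fgetc(f)) != EOF) putchar(c);
      // Hitting EOF does not set the error flag; ferror() reports whether
      // an operation actually failed on this stream, and clearerr() is the
      // portable way to reset that state before retrying i/o.
      if (ferror(f)) clearerr(f);
      fclose(f);
      return 0;
    }
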
Justine Tunney 2023-07-09 05:11:25 -07:00
parent 95fbdb4f76
commit 41396ff48a
43 changed files with 495 additions and 261 deletions


@@ -36,20 +36,18 @@ __nt2sysv:
 	mov	%rsp,%rbp
 //	TODO(jart): We should probably find some way to use our own
 //	stack when Windows delivers signals ;_;
-	sub	$0x100,%rsp
+	sub	$256,%rsp
 	push	%rbx
 	push	%rdi
 	push	%rsi
-	pushf	# TODO(jart): Do we need it?
-	lea	-0x80(%rbp),%rdi
-	call	_savexmm
+	pushf	// TODO(jart): Do we need it?
+	call	__xmm_save
 	mov	%rcx,%rdi
 	mov	%rdx,%rsi
 	mov	%r8,%rdx
 	mov	%r9,%rcx
 	call	*%rax
-	lea	-0x80(%rbp),%rdi
-	call	_loadxmm
+	call	__xmm_load
 	popf
 	pop	%rsi
 	pop	%rdi


@@ -1,44 +0,0 @@
-/*-*- mode:unix-assembly; indent-tabs-mode:t; tab-width:8; coding:utf-8 -*-│
-vi: set et ft=asm ts=8 tw=8 fenc=utf-8 :vi
-Copyright 2020 Justine Alexandra Roberts Tunney
-Permission to use, copy, modify, and/or distribute this software for
-any purpose with or without fee is hereby granted, provided that the
-above copyright notice and this permission notice appear in all copies.
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
-WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
-AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
-DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
-PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
-TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-PERFORMANCE OF THIS SOFTWARE.
-*/
-#include "libc/macros.internal.h"
-//	Stores XMM registers to buffer.
-//
-//	@param	%rdi points to &(forcealign(16) uint8_t[256])[128]
-//	@note	modern cpus have out-of-order execution engines
-_savexmm:
-	.leafprologue
-	movaps	%xmm0,-0x80(%rdi)
-	movaps	%xmm1,-0x70(%rdi)
-	movaps	%xmm2,-0x60(%rdi)
-	movaps	%xmm3,-0x50(%rdi)
-	movaps	%xmm4,-0x40(%rdi)
-	movaps	%xmm5,-0x30(%rdi)
-	movaps	%xmm6,-0x20(%rdi)
-	movaps	%xmm7,-0x10(%rdi)
-	movaps	%xmm8,0x00(%rdi)
-	movaps	%xmm9,0x10(%rdi)
-	movaps	%xmm10,0x20(%rdi)
-	movaps	%xmm11,0x30(%rdi)
-	movaps	%xmm12,0x40(%rdi)
-	movaps	%xmm13,0x50(%rdi)
-	movaps	%xmm14,0x60(%rdi)
-	movaps	%xmm15,0x70(%rdi)
-	.leafepilogue
-	.endfn	_savexmm,globl,hidden


@@ -17,28 +17,48 @@
 	PERFORMANCE OF THIS SOFTWARE.
 */
 #include "libc/macros.internal.h"
 	.privileged
-//	Loads XMM registers from buffer.
-//
-//	@param	%rdi points to &(forcealign(16) uint8_t[256])[128]
-//	@note	modern cpus have out-of-order execution engines
-_loadxmm:
+__xmm_save:
+	lea	-128(%rbp),%rdi
 	.leafprologue
-	movaps	-0x80(%rdi),%xmm0
-	movaps	-0x70(%rdi),%xmm1
-	movaps	-0x60(%rdi),%xmm2
-	movaps	-0x50(%rdi),%xmm3
-	movaps	-0x40(%rdi),%xmm4
-	movaps	-0x30(%rdi),%xmm5
-	movaps	-0x20(%rdi),%xmm6
-	movaps	-0x10(%rdi),%xmm7
-	movaps	0x00(%rdi),%xmm8
-	movaps	0x10(%rdi),%xmm9
-	movaps	0x20(%rdi),%xmm10
-	movaps	0x30(%rdi),%xmm11
-	movaps	0x40(%rdi),%xmm12
-	movaps	0x50(%rdi),%xmm13
-	movaps	0x60(%rdi),%xmm14
-	movaps	0x70(%rdi),%xmm15
+	movdqu	%xmm0,-0x80(%rdi)
+	movdqu	%xmm1,-0x70(%rdi)
+	movdqu	%xmm2,-0x60(%rdi)
+	movdqu	%xmm3,-0x50(%rdi)
+	movdqu	%xmm4,-0x40(%rdi)
+	movdqu	%xmm5,-0x30(%rdi)
+	movdqu	%xmm6,-0x20(%rdi)
+	movdqu	%xmm7,-0x10(%rdi)
+	movdqu	%xmm8,0x00(%rdi)
+	movdqu	%xmm9,0x10(%rdi)
+	movdqu	%xmm10,0x20(%rdi)
+	movdqu	%xmm11,0x30(%rdi)
+	movdqu	%xmm12,0x40(%rdi)
+	movdqu	%xmm13,0x50(%rdi)
+	movdqu	%xmm14,0x60(%rdi)
+	movdqu	%xmm15,0x70(%rdi)
 	.leafepilogue
-	.endfn	_loadxmm,globl,hidden
+	.endfn	__xmm_save,globl,hidden
+__xmm_load:
+	lea	-128(%rbp),%rdi
+	.leafprologue
+	movdqu	-0x80(%rdi),%xmm0
+	movdqu	-0x70(%rdi),%xmm1
+	movdqu	-0x60(%rdi),%xmm2
+	movdqu	-0x50(%rdi),%xmm3
+	movdqu	-0x40(%rdi),%xmm4
+	movdqu	-0x30(%rdi),%xmm5
+	movdqu	-0x20(%rdi),%xmm6
+	movdqu	-0x10(%rdi),%xmm7
+	movdqu	0x00(%rdi),%xmm8
+	movdqu	0x10(%rdi),%xmm9
+	movdqu	0x20(%rdi),%xmm10
+	movdqu	0x30(%rdi),%xmm11
+	movdqu	0x40(%rdi),%xmm12
+	movdqu	0x50(%rdi),%xmm13
+	movdqu	0x60(%rdi),%xmm14
+	movdqu	0x70(%rdi),%xmm15
+	.leafepilogue
+	.endfn	__xmm_load,globl,hidden