Make considerably more progress on AARCH64

- Utilities like pledge.com now build
- kprintf() will no longer balk at 48-bit addresses
- There's a new aarch64-dbg build mode that should work
- gc() and defer() are mostly pacified; avoid using them on aarch64
- THIRD_PARTY_STB now has Arm Neon intrinsics for fast image handling
Justine Tunney 2023-05-12 22:42:57 -07:00
parent 1bfb3aab1b
commit fd34ef732d
91 changed files with 1288 additions and 1192 deletions
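
The diff below rewrites the inline aarch64 syscall stubs so that the
syscall number is bound to a register variable pinned to x8 and passed
as an ordinary "r" input, instead of being baked into the asm template
with a mov instruction, an "i" immediate constraint, and an x8 clobber.
A minimal standalone sketch of the new idiom, assuming a Linux aarch64
target; the function name is hypothetical, and the number 64 is taken
from the old "i"(64) constraint in the write hunk:

#include <stddef.h>

/* Hypothetical wrapper showing the register-variable syscall idiom
   this commit adopts: arguments pinned to x0..x2, the syscall number
   pinned to x8, and the result read back from x0 after svc. */
static long sysv_write_sketch(long fd, const void *data, size_t size) {
  register long r0 asm("x0") = fd;
  register long r1 asm("x1") = (long)data;
  register long r2 asm("x2") = (long)size;
  register long r8 asm("x8") = 64; /* __NR_write on Linux aarch64 */
  register long res_x0 asm("x0");
  asm volatile("svc\t0"
               : "=r"(res_x0)
               : "r"(r0), "r"(r1), "r"(r2), "r"(r8)
               : "memory");
  return res_x0; /* byte count on success, -errno on failure */
}

Listing x8 as a tracked input rather than a clobber lets the compiler
materialize (and potentially hoist) the constant itself, which is why
the explicit mov and the x8 clobber disappear in every hunk below.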


@@ -19,12 +19,9 @@ forceinline long __sysv_exit(long rc) {
                : "memory", "cc");
 #elif defined(__aarch64__)
   register long r0 asm("x0") = rc;
+  register long r8 asm("x8") = __NR_exit_group;
   register long res_x0 asm("x0");
-  asm volatile("mov\tx8,%1\n\t"
-               "svc\t0"
-               : "=r"(res_x0)
-               : "i"(94), "r"(r0)
-               : "x8", "memory");
+  asm volatile("svc\t0" : "=r"(res_x0) : "r"(r0), "r"(r8) : "memory");
   ax = res_x0;
 #else
   ax = syscall(__NR_exit_group, rc);
@@ -41,12 +38,13 @@ forceinline int __sysv_close(long fd) {
                : "rdx", "memory", "cc");
 #elif defined(__aarch64__)
   register long r0 asm("x0") = fd;
+  register long r8 asm("x8") = __NR_close;
   register long res_x0 asm("x0");
-  asm volatile("mov\tx8,%1\n\t"
-               "svc\t0"
+  asm volatile("svc\t0"
                : "=r"(res_x0)
-               : "i"(57), "r"(r0)
-               : "x8", "memory");
+               : "r"(r0), "r"(r8)
+               : "memory");
   ax = res_x0;
 #else
   ax = syscall(__NR_close, fd);
@@ -66,12 +64,12 @@ forceinline int __sysv_open(const char *path, long flags, long mode) {
   register long r1 asm("x1") = (long)path;
   register long r2 asm("x2") = (long)flags;
   register long r3 asm("x3") = (long)mode;
+  register long r8 asm("x8") = (long)__NR_open;
   register long res_x0 asm("x0");
-  asm volatile("mov\tx8,%1\n\t"
-               "svc\t0"
+  asm volatile("svc\t0"
                : "=r"(res_x0)
-               : "i"(56), "r"(r0), "r"(r1), "r"(r2), "r"(r3)
-               : "x8", "memory");
+               : "r"(r0), "r"(r1), "r"(r2), "r"(r3), "r"(r8)
+               : "memory");
   ax = res_x0;
 #else
   ax = syscall(__NR_open, path, flags, mode);
@@ -90,12 +88,12 @@ forceinline long __sysv_read(long fd, void *data, unsigned long size) {
   register long r0 asm("x0") = (long)fd;
   register long r1 asm("x1") = (long)data;
   register long r2 asm("x2") = (long)size;
+  register long r8 asm("x8") = (long)__NR_read;
   register long res_x0 asm("x0");
-  asm volatile("mov\tx8,%1\n\t"
-               "svc\t0"
+  asm volatile("svc\t0"
                : "=r"(res_x0)
-               : "i"(63), "r"(r0), "r"(r1), "r"(r2)
-               : "x8", "memory");
+               : "r"(r0), "r"(r1), "r"(r2), "r"(r8)
+               : "memory");
   ax = res_x0;
 #else
   ax = syscall(__NR_read, fd, data, size);
@@ -114,12 +112,12 @@ forceinline long __sysv_write(long fd, const void *data, unsigned long size) {
   register long r0 asm("x0") = (long)fd;
   register long r1 asm("x1") = (long)data;
   register long r2 asm("x2") = (long)size;
+  register long r8 asm("x8") = (long)__NR_write;
   register long res_x0 asm("x0");
-  asm volatile("mov\tx8,%1\n\t"
-               "svc\t0"
+  asm volatile("svc\t0"
                : "=r"(res_x0)
-               : "i"(64), "r"(r0), "r"(r1), "r"(r2)
-               : "x8", "memory");
+               : "r"(r0), "r"(r1), "r"(r2), "r"(r8)
+               : "memory");
   ax = res_x0;
 #else
   ax = syscall(__NR_write, fd, data, size);
@@ -138,12 +136,12 @@ forceinline long __sysv_mprotect(void *addr, size_t size, long prot) {
   register long r0 asm("x0") = (long)addr;
   register long r1 asm("x1") = (long)size;
   register long r2 asm("x2") = (long)prot;
+  register long r8 asm("x8") = (long)__NR_mprotect;
   register long res_x0 asm("x0");
-  asm volatile("mov\tx8,%1\n\t"
-               "svc\t0"
+  asm volatile("svc\t0"
                : "=r"(res_x0)
-               : "i"(226), "r"(r0), "r"(r1), "r"(r2)
-               : "x8", "memory");
+               : "r"(r0), "r"(r1), "r"(r2), "r"(r8)
+               : "memory");
   ax = res_x0;
 #else
   ax = syscall(__NR_mprotect, addr, size, prot);
@@ -159,12 +157,9 @@ forceinline int __sysv_getpid(void) {
                : "0"(__NR_getpid)
                : "rdx", "memory", "cc");
 #elif defined(__aarch64__)
+  register long r8 asm("x8") = (long)__NR_getpid;
   register long res_x0 asm("x0");
-  asm volatile("mov\tx8,%1\n\t"
-               "svc\t0"
-               : "=r"(res_x0)
-               : "i"(172)
-               : "x8", "memory");
+  asm volatile("svc\t0" : "=r"(res_x0) : "r"(r8) : "memory");
   ax = res_x0;
 #else
   ax = syscall(__NR_getpid);
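
For the zero-argument case in the last hunk, only the x8 binding is
needed. A matching hedged sketch (hypothetical name; the number 172 is
taken from the old "i"(172) constraint above):

/* Hypothetical zero-argument variant: nothing to bind but the syscall
   number in x8; the result still comes back in x0. */
static int sysv_getpid_sketch(void) {
  register long r8 asm("x8") = 172; /* __NR_getpid on Linux aarch64 */
  register long res_x0 asm("x0");
  asm volatile("svc\t0" : "=r"(res_x0) : "r"(r8) : "memory");
  return res_x0;
}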