Add x86_64-linux-gnu emulator

I wanted a tiny, scriptable, meltdown-proof way to run userspace
programs and visualize how program execution impacts memory. It helps
to explain how things like Actually Portable Executable work. It can
show you how GCC-generated code goes about manipulating matrices and
more. I didn't feel fully comfortable with Qemu and Bochs because I'm
not smart enough to understand them. I wanted something like gVisor,
but with much stronger levels of assurance. I wanted a single binary
that'll run on all major operating systems, with an embedded
GPL-barrier ZIP filesystem that is tiny enough to transpile to
JavaScript and run in browsers too.

https://justine.storage.googleapis.com/emulator625.mp4
Justine Tunney 2020-08-25 04:23:25 -07:00
parent 467504308a
commit f4f4caab0e
1052 changed files with 65667 additions and 7825 deletions

libc/intrin/palignr.h

@@ -1,55 +1,46 @@
#ifndef COSMOPOLITAN_LIBC_INTRIN_PALIGNR_H_
#define COSMOPOLITAN_LIBC_INTRIN_PALIGNR_H_
#include "libc/assert.h"
#include "libc/intrin/macros.h"
#include "libc/macros.h"
#include "libc/str/str.h"
#if !(__ASSEMBLER__ + __LINKER__ + 0)
COSMOPOLITAN_C_START_
void pvalignr(void *, const void *, const void *, size_t);
/**
* Overlaps vectors.
*
* 𝑖= 0 means 𝑐←𝑎
* 0<𝑖<16 means 𝑐←𝑎𝑏
* 𝑖=16 means 𝑐←𝑏
* 16<𝑖<32 means 𝑐←𝑏0
* 𝑖≥32 means 𝑐←0
*
* @param 𝑖 needs to be a literal, constexpr, or embedding
* @see pvalignr()
* @mayalias
*/
static void palignr(void *c, const void *b, const void *a, size_t i) {
char t[48];
memcpy(t, a, 16);
memcpy(t + 16, b, 16);
memset(t + 32, 0, 16);
memcpy(c, t + MIN(32, i), 16);
}
void palignr(void *, const void *, const void *, unsigned long);
#ifndef __STRICT_ANSI__
#define palignr(C, B, A, I) \
do { \
if (!IsModeDbg() && X86_NEED(SSE) && X86_HAVE(SSSE3)) { \
__intrin_xmm_t *Xmm0 = (void *)(C); \
const __intrin_xmm_t *Xmm1 = (const __intrin_xmm_t *)(B); \
const __intrin_xmm_t *Xmm2 = (const __intrin_xmm_t *)(A); \
if (!X86_NEED(AVX)) { \
asm("palignr\t%2,%1,%0" \
: "=x"(*Xmm0) \
: "x"(*Xmm2), "i"(I), "0"(*Xmm1)); \
} else { \
asm("vpalignr\t%3,%2,%1,%0" \
: "=x"(*Xmm0) \
: "x"(*Xmm1), "x"(*Xmm2), "i"(I)); \
} \
} else { \
palignr(C, B, A, I); \
} \
__intrin_xmm_t __palignrs(__intrin_xmm_t, __intrin_xmm_t);
#define palignr(C, B, A, I) \
do { \
if (likely(!IsModeDbg() && X86_NEED(SSE) && X86_HAVE(SSSE3))) { \
__intrin_xmm_t *Xmm0 = (void *)(C); \
const __intrin_xmm_t *Xmm1 = (const __intrin_xmm_t *)(B); \
const __intrin_xmm_t *Xmm2 = (const __intrin_xmm_t *)(A); \
if (isconstant(I)) { \
if (!X86_NEED(AVX)) { \
asm("palignr\t%2,%1,%0" \
: "=x"(*Xmm0) \
: "x"(*Xmm2), "i"(I), "0"(*Xmm1)); \
} else { \
asm("vpalignr\t%3,%2,%1,%0" \
: "=x"(*Xmm0) \
: "x"(*Xmm1), "x"(*Xmm2), "i"(I)); \
} \
} else { \
unsigned long Vimm = (I); \
typeof(__palignrs) *Fn; \
if (likely(Vimm < 32)) { \
Fn = (typeof(__palignrs) *)((uintptr_t)&__palignrs + Vimm * 8); \
*Xmm0 = Fn(*Xmm1, *Xmm2); \
} else { \
memset(Xmm0, 0, 16); \
} \
} \
} else { \
palignr(C, B, A, I); \
} \
} while (0)
#endif
COSMOPOLITAN_C_END_
#endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */
#endif /* COSMOPOLITAN_LIBC_INTRIN_PALIGNR_H_ */
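
For readers studying the header, here is a minimal standalone sketch of the
overlap semantics documented in the comment above, built from the same
portable memcpy fallback the header previously inlined. The palignr_ref name,
the test vectors, and main() are illustrative only and not part of this commit.

#include <stdio.h>
#include <string.h>

#define MIN(X, Y) ((Y) < (X) ? (Y) : (X))

/* Portable reference: c receives the 16-byte window of the concatenation
   a,b,zeros starting at byte offset i, matching the 𝑖 table above. */
static void palignr_ref(void *c, const void *b, const void *a, size_t i) {
  char t[48];
  memcpy(t, a, 16);              /* bytes  0..15: a     */
  memcpy(t + 16, b, 16);         /* bytes 16..31: b     */
  memset(t + 32, 0, 16);         /* bytes 32..47: zeros */
  memcpy(c, t + MIN(32, i), 16); /* slide the window right by i */
}

int main(void) {
  char a[16], b[16], c[16];
  memset(a, 'a', 16);
  memset(b, 'b', 16);
  palignr_ref(c, b, a, 8);  /* 0<i<16: tail of a, then head of b */
  printf("%.16s\n", c);     /* aaaaaaaabbbbbbbb */
  palignr_ref(c, b, a, 16); /* i=16: c gets b */
  printf("%.16s\n", c);     /* bbbbbbbbbbbbbbbb */
  return 0;
}

The revised macro keeps these semantics but handles a non-constant shift count
out of line: it jumps into __palignrs at an 8-byte stride (Vimm * 8), which
suggests a table of fixed-immediate stubs, and zeroes the destination when the
count is 32 or more.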