Apply touchups to last PR

Compilers like GCC require that trailing tokens on `#endif` lines be
comments, so `#endif rdmsr` must be spelled `#endif /* rdmsr */`. Since
the rdmsr macro was only being used in arch_prctl(), I've localized the
macro there, and I'm considering deleting arch_prctl() too, since there
sadly isn't any portable way to have memory segments across operating
systems ;_;
The remaining changed lines are due to clang-format, which runs
automatically.
Justine Tunney 2021-02-04 16:37:22 -08:00
parent 888cfa74d7
commit 3384a5a48c
6 changed files with 159 additions and 135 deletions
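
For context, here's a minimal sketch of what localizing the macro means:
the rdmsr definition leaves the shared bits header and moves into the one
translation unit that calls it. Treating arch_prctl.c as its new home is
an assumption for illustration; the macro body mirrors the one the diff
below removes.

    /* Hypothetical placement: top of arch_prctl.c rather than the shared
       header; only the macro body is taken from the diff below. */
    #include <stdint.h>

    #define rdmsr(msr)                                         \
      ({                                                       \
        uint32_t lo, hi;                                       \
        asm volatile("rdmsr" : "=a"(lo), "=d"(hi) : "c"(msr)); \
        (uint64_t)hi << 32 | lo;                               \
      })

With the macro private to a single file, the include guard whose
`#endif rdmsr` line tripped GCC disappears from the header entirely;
where a guard is still wanted, the trailing token has to be a comment,
i.e. `#endif /* rdmsr */`.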


@@ -24,6 +24,8 @@ unsigned long hamming(unsigned long, unsigned long) pureconst;
intptr_t lockxchg(void *, void *, size_t);
bool cmpxchg(void *, intptr_t, intptr_t, size_t);
bool lockcmpxchg(void *, intptr_t, intptr_t, size_t);
intptr_t atomic_load(void *, size_t);
intptr_t atomic_store(void *, intptr_t, size_t);
/*───────────────────────────────────────────────────────────────────────────│─╗
cosmopolitan § bits » no assembly required
@@ -174,7 +176,6 @@ bool lockcmpxchg(void *, intptr_t, intptr_t, size_t);
* @see Intel's Six-Thousand Page Manual V.3A §8.2.3.1
* @see atomic_store()
*/
#ifndef atomic_load
#define atomic_load(MEM) \
({ \
autotype(MEM) Mem = (MEM); \
@@ -182,7 +183,6 @@ bool lockcmpxchg(void *, intptr_t, intptr_t, size_t);
asm("mov\t%1,%0" : "=r"(Reg) : "m"(*Mem)); \
Reg; \
})
#endif /* atomic_load */
/**
* Saves scalar to memory w/ one operation.
@@ -200,7 +200,6 @@ bool lockcmpxchg(void *, intptr_t, intptr_t, size_t);
* @see Intel's Six-Thousand Page Manual V.3A §8.2.3.1
* @see atomic_load()
*/
#ifndef atomic_store
#define atomic_store(MEM, VAL) \
({ \
autotype(VAL) Val = (VAL); \
@@ -208,7 +207,6 @@ bool lockcmpxchg(void *, intptr_t, intptr_t, size_t);
asm("mov%z1\t%1,%0" : "=m"(*Mem) : "r"(Val)); \
Val; \
})
#endif /* atomic_store */
#define bts(MEM, BIT) __BitOp("bts", BIT, MEM) /** bit test and set */
#define btr(MEM, BIT) __BitOp("btr", BIT, MEM) /** bit test and reset */
@@ -236,7 +234,6 @@ bool lockcmpxchg(void *, intptr_t, intptr_t, size_t);
* @return LOCALVAR[0]
* @see xchg()
*/
#ifndef lockxchg
#define lockxchg(MEMORY, LOCALVAR) \
({ \
_Static_assert( \
@@ -244,7 +241,6 @@ bool lockcmpxchg(void *, intptr_t, intptr_t, size_t);
asm("xchg\t%0,%1" : "+%m"(*(MEMORY)), "+r"(*(LOCALVAR))); \
*(LOCALVAR); \
})
#endif /* lockxchg */
/**
* Compares and exchanges.
@@ -253,7 +249,6 @@ bool lockcmpxchg(void *, intptr_t, intptr_t, size_t);
* @return true if value was exchanged, otherwise false
* @see lockcmpxchg()
*/
#ifndef cmpxchg
#define cmpxchg(IFTHING, ISEQUALTOME, REPLACEITWITHME) \
({ \
bool DidIt; \
@@ -266,7 +261,6 @@ bool lockcmpxchg(void *, intptr_t, intptr_t, size_t);
: "cc"); \
DidIt; \
})
#endif /* cmpxchg */
/**
* Compares and exchanges w/ one operation.
@@ -275,7 +269,6 @@ bool lockcmpxchg(void *, intptr_t, intptr_t, size_t);
* @return true if value was exchanged, otherwise false
* @see lockcmpxchg()
*/
#ifndef lockcmpxchg
#define lockcmpxchg(IFTHING, ISEQUALTOME, REPLACEITWITHME) \
({ \
bool DidIt; \
@@ -288,52 +281,6 @@ bool lockcmpxchg(void *, intptr_t, intptr_t, size_t);
: "cc"); \
DidIt; \
})
#endif /* lockcmpxchg */
/**
* Gets value of extended control register.
*/
#ifndef xgetbv
#define xgetbv(xcr_register_num) \
({ \
unsigned hi, lo; \
asm("xgetbv" : "=d"(hi), "=a"(lo) : "c"(cr_register_num)); \
(uint64_t) hi << 32 | lo; \
})
#endif /* xgetbv */
/**
* Reads model-specific register.
* @note programs running as guests won't have authorization
*/
#ifndef rdmsr
#define rdmsr(msr) \
({ \
uint32_t lo, hi; \
asm volatile("rdmsr" : "=a"(lo), "=d"(hi) : "c"(msr)); \
(uint64_t) hi << 32 | lo; \
})
#endif rdmsr
/**
* Writes model-specific register.
* @note programs running as guests won't have authorization
*/
#define wrmsr(msr, val) \
do { \
uint64_t val_ = (val); \
asm volatile("wrmsr" \
: /* no outputs */ \
: "c"(msr), "a"((uint32_t)val_), \
"d"((uint32_t)(val_ >> 32))); \
} while (0)
/**
* Tells CPU page tables changed for virtual address.
* @note programs running as guests won't have authorization
*/
#define invlpg(MEM) \
asm volatile("invlpg\t(%0)" : /* no outputs */ : "r"(MEM) : "memory")
#define IsAddressCanonicalForm(P) \
({ \
@@ -392,6 +339,9 @@ bool lockcmpxchg(void *, intptr_t, intptr_t, size_t);
lockcmpxchg(MEM, (intptr_t)(CMP), (intptr_t)(VAL), sizeof(*(MEM)))
#define lockxchg(MEM, VAR) \
lockxchg(MEM, VAR, sizeof(*(MEM)) / (sizeof(*(MEM)) == sizeof(*(VAR))))
#define atomic_store(MEM, VAL) \
atomic_store(MEM, VAL, sizeof(*(MEM)) / (sizeof(*(MEM)) == sizeof(*(VAL))))
#define atomic_load(MEM) atomic_load(MEM, sizeof(*(MEM)))
#endif /* __GNUC__ && !__STRICT_ANSI__ */
COSMOPOLITAN_C_END_
#endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */
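
One further note on the wrapper macros near the bottom of the header: the
third argument sizeof(*(MEM)) / (sizeof(*(MEM)) == sizeof(*(VAL)))
supplies the operand width and doubles as a compile-time type check,
since mismatched widths make the divisor zero, and constant division by
zero draws a compiler diagnostic. A standalone sketch of the same trick,
where SIZECHECK is a made-up name for illustration:

    /* Standalone illustration of the width-check trick used by the
       lockxchg/atomic_store wrappers; SIZECHECK is a hypothetical name. */
    #include <stdio.h>

    #define SIZECHECK(A, B) (sizeof(A) / (sizeof(A) == sizeof(B)))

    int main(void) {
      int x = 0, y = 0;
      printf("%zu\n", SIZECHECK(x, y)); /* widths match: divisor is 1 */
      /* With long z, SIZECHECK(x, z) has divisor 0 on x86-64, and the */
      /* constant division by zero gets flagged at compile time.       */
      return 0;
    }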