Make AARCH64 harder, better, faster, stronger

- Perform some housekeeping on scalar math function code
- Import ARM's Optimized Routines for SIMD string processing (see the sketch after this list)
- Upgrade to latest Chromium zlib and enable more SIMD optimizations
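
ARM's Optimized Routines scan strings a vector at a time. Since NEON has no direct pmovmskb equivalent, a common aarch64 idiom is to narrow a byte-compare result into a 64-bit nibble mask with shrn. Below is a minimal sketch of that technique, not code from this commit; the helper name first_nul16 is illustrative only.

#include <arm_neon.h>
#include <stdint.h>

/* returns index of first NUL byte in a 16-byte block, or 16 if none */
static int first_nul16(const unsigned char *p) {
  uint8x16_t v = vld1q_u8((const uint8_t *)p);
  uint8x16_t eq = vceqq_u8(v, vdupq_n_u8(0));  /* 0xFF where byte == 0 */
  /* shift each 16-bit lane right by 4 and narrow: 4 mask bits per byte */
  uint8x8_t nib = vshrn_n_u16(vreinterpretq_u16_u8(eq), 4);
  uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(nib), 0);
  if (!mask) return 16;
  return __builtin_ctzll(mask) >> 2;  /* 4 bits per byte -> byte index */
}

Each input byte maps to four bits of the mask, so a trailing-zero count divided by four recovers the byte index, playing the role pmovmskb plays on x86.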
Justine Tunney 2023-05-15 01:51:29 -07:00
parent 550b52abf6
commit cc1732bc42
GPG key ID: BE714B4575D6E328
143 changed files with 15661 additions and 1329 deletions


@@ -20,6 +20,7 @@
 #include "libc/intrin/likely.h"
 #include "libc/nexgen32e/x86feature.h"
 #include "libc/str/str.h"
+#ifndef __aarch64__
 #define PMOVMSKB(x) __builtin_ia32_pmovmskb128(x)
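
For context, pmovmskb packs the most significant bit of each byte of a 128-bit vector into a 16-bit integer, which is what lets the x86 kernels below locate a mismatching byte with a single bit scan. A minimal sketch of the idea using the equivalent <emmintrin.h> intrinsics; the helper first_diff16 is hypothetical, not part of this file.

#include <emmintrin.h>  /* SSE2 */

/* index of first differing byte in two 16-byte blocks, or 16 if equal */
static int first_diff16(const void *a, const void *b) {
  __m128i x = _mm_loadu_si128((const __m128i *)a);
  __m128i y = _mm_loadu_si128((const __m128i *)b);
  /* 0xFF in each equal byte lane; top bits packed into a 16-bit mask */
  unsigned eq = (unsigned)_mm_movemask_epi8(_mm_cmpeq_epi8(x, y));
  unsigned ne = ~eq & 0xffff;  /* set bits mark mismatching bytes */
  if (!ne) return 16;
  return __builtin_ctz(ne);   /* bit index equals byte index */
}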
@@ -129,7 +130,9 @@ microarchitecture("avx") static int memcmp_avx(const unsigned char *p,
  * memcmp n=32768            29 ps/byte         32,851 mb/s
  * memcmp n=131072           33 ps/byte         28,983 mb/s
  *
- * @return unsigned char subtraction at stop index
+ * @return an integer that's (1) equal to zero if `a` is equal to `b`,
+ *     (2) less than zero if `a` is less than `b`, or (3) greater than
+ *     zero if `a` is greater than `b`
  * @asyncsignalsafe
  */
 int memcmp(const void *a, const void *b, size_t n) {
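
The rewritten @return contract guarantees only the sign of the result, matching ISO C, where the old wording promised a specific byte difference. A quick illustration of what callers may and may not rely on:

#include <assert.h>
#include <string.h>

int main(void) {
  assert(memcmp("abc", "abc", 3) == 0);  /* equal prefixes */
  assert(memcmp("abc", "abd", 3) < 0);   /* 'c' < 'd' as unsigned char */
  assert(memcmp("abd", "abc", 3) > 0);
  /* only the sign is guaranteed; callers must not depend on the exact
     magnitude the old "unsigned char subtraction" wording implied */
  return 0;
}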
@@ -200,3 +203,5 @@ int memcmp(const void *a, const void *b, size_t n) {
   }
   return 0;
 }
+
+#endif /* __aarch64__ */
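
Wrapping the whole translation unit in #ifndef __aarch64__ lets this file keep using x86-only builtins while aarch64 builds compile an alternate implementation from separate sources. A minimal sketch of the pattern, with a hypothetical my_func standing in for memcmp:

#include <stdio.h>

#ifndef __aarch64__
/* x86 translation unit: free to use SSE/AVX builtins */
static const char *my_func(void) { return "x86 SIMD path"; }
#else
/* aarch64 builds get a different body (in the real tree, a separate
   NEON source file is linked instead) */
static const char *my_func(void) { return "aarch64 path"; }
#endif

int main(void) {
  puts(my_func());
  return 0;
}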