Introduce support for GGJT v3 file format
llama.com can now load weights that use the new file format, which was introduced a few weeks ago. Note that, unlike llama.cpp, we will keep support for old file formats in our tool, so you don't need to convert your weights when the upstream project makes breaking changes. Using GGJT v3 does make AVX2 inference go about 5% faster for me.
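The dispatch that keeps the old formats loadable isn't visible in the hunks below, but the idea is straightforward: sniff the magic and version words at the front of the file, then pick the matching loader. A rough sketch, with a hypothetical SniffFileVersion helper (not code from this commit); the magic values are the ones llama.cpp defines ('ggml' unversioned, 'ggmf' v1, 'ggjt' v1-v3):

    // Hypothetical sketch of format sniffing for backward compatibility.
    #include <stdint.h>
    #include <stdio.h>

    enum FileVer { GGML, GGMF_V1, GGJT_V1, GGJT_V2, GGJT_V3, UNKNOWN };

    static enum FileVer SniffFileVersion(FILE *f) {
        uint32_t magic, version;
        if (fread(&magic, sizeof(magic), 1, f) != 1) return UNKNOWN;
        if (magic == 0x67676d6c) return GGML;  // 'ggml': oldest, unversioned
        if (fread(&version, sizeof(version), 1, f) != 1) return UNKNOWN;
        if (magic == 0x67676d66 && version == 1) return GGMF_V1;  // 'ggmf'
        if (magic == 0x67676a74) {                                // 'ggjt'
            if (version == 1) return GGJT_V1;
            if (version == 2) return GGJT_V2;
            if (version == 3) return GGJT_V3;
        }
        return UNKNOWN;
    }

Each enum value can then map to the loader that understands that layout, so old weights keep working.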
parent 6ae18a10ba
commit 8fdb31681a
33 changed files with 3829 additions and 371 deletions
third_party/ggml/main.cc (vendored, 12 changes)
@@ -210,17 +210,11 @@ static int on_missing_feature(const char *name) {
     return 1;
 }
 
 void MakeProcessNice(void) {
     setpriority(PRIO_PROCESS, 0, 10);
     ioprio_set(IOPRIO_WHO_PROCESS, 0, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
     struct sched_param param = {sched_get_priority_min(SCHED_IDLE)};
     sched_setscheduler(0, SCHED_IDLE, &param);
 }
 
 int main(int argc, char ** argv) {
 
     MakeProcessNice();
     ShowCrashReports();
 
     setvbuf(stdin, NULL, _IONBF, 0);
     setvbuf(stdout, NULL, _IONBF, 0);
     setvbuf(stderr, NULL, _IONBF, 0);
 
@@ -232,9 +226,7 @@ int main(int argc, char ** argv) {
     if (!X86_HAVE(AVX)) return on_missing_feature("avx");
     if (!X86_HAVE(FMA)) return on_missing_feature("fma");
     if (!X86_HAVE(SSE3)) return on_missing_feature("sse3");
-    if (!X86_HAVE(F16C)) {
-        fprintf(stderr, "%s: warning: cpuid f16c not detected; inference might crash\n", __func__);
-    }
+    if (!X86_HAVE(F16C)) return on_missing_feature("f16c");
 #endif /* __x86_64__ */
 
     if (gpt_params_parse(argc, argv, params) == false) {
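For context on the first hunk: MakeProcessNice() drops the process to low CPU priority, idle I/O priority, and the SCHED_IDLE scheduling class so inference doesn't starve the rest of the machine. Cosmopolitan provides ioprio_set() directly; a minimal sketch of the same pattern on plain Linux/glibc, where ioprio_set has no libc wrapper and the IOPRIO_* constants must be supplied by hand, might look like this:

    // Sketch only: same "be nice" pattern on Linux/glibc, where
    // ioprio_set(2) is reached via syscall(2).
    #define _GNU_SOURCE
    #include <sched.h>
    #include <sys/resource.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #define IOPRIO_WHO_PROCESS 1
    #define IOPRIO_CLASS_IDLE 3
    #define IOPRIO_PRIO_VALUE(cls, data) (((cls) << 13) | (data))

    static void MakeProcessNice(void) {
        setpriority(PRIO_PROCESS, 0, 10);               // lower CPU priority
        syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0,  // idle I/O class
                IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
        struct sched_param param = {sched_get_priority_min(SCHED_IDLE)};
        sched_setscheduler(0, SCHED_IDLE, &param);      // idle scheduling class
    }

The second hunk turns the old F16C warning into a hard requirement: without F16C the weights' half-precision floats can't be converted, so failing fast beats crashing mid-inference.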