reduce batch sizes and skip all intrinsic flags except AVX when building in compatibility mode.

Concedo 2023-04-13 11:32:05 +08:00
parent f4257a8eef
commit 5c22f7e4c4
3 changed files with 9 additions and 4 deletions

@@ -72,8 +72,13 @@ endif
 # feel free to update the Makefile for your architecture and send a pull request or issue
 ifeq ($(UNAME_M),$(filter $(UNAME_M),x86_64 i686))
     # Use all CPU extensions that are available:
-    CFLAGS += -mf16c -mavx -msse3
-    BONUSCFLAGS += -mfma -mavx2
+    CFLAGS += -mavx
+ifeq ($(OS),Windows_NT)
+    BONUSCFLAGS += -mfma -mavx2 -mf16c -msse3
+else
+    # if not on windows, they are clearly building it themselves, so lets just use whatever is supported
+    CFLAGS += -march=native -mtune=native
+endif
 endif
 ifneq ($(filter ppc64%,$(UNAME_M)),)
     POWER9_M := $(shell grep "POWER9" /proc/cpuinfo)
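Some context on why the flags are split this way: each -m option both permits the compiler to emit those instructions and defines a feature macro (GCC and Clang define __AVX__, __AVX2__, __FMA__ and __F16C__ for the flags above). A compatibility build compiled with only the baseline -mavx therefore drops every code path guarded by the other macros, while -march=native -mtune=native on non-Windows builds simply enables whatever the build machine supports. A minimal illustrative sketch of that kind of gating (hypothetical helper names, not code from this repository):

#include <immintrin.h>

// Computes a*b + c on 8 packed floats. The fused path is only compiled in
// when the build was given -mfma (which defines __FMA__); otherwise it
// falls back to a plain AVX multiply followed by an add.
static inline __m256 madd_f32(__m256 a, __m256 b, __m256 c)
{
#if defined(__FMA__)
    return _mm256_fmadd_ps(a, b, c);
#else
    return _mm256_add_ps(_mm256_mul_ps(a, b), c);
#endif
}

#if defined(__F16C__)
// Half-precision loads exist only in builds compiled with -mf16c.
static inline __m256 load8_f16(const unsigned short *p)
{
    return _mm256_cvtph_ps(_mm_loadu_si128((const __m128i *)p));
}
#endif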

@@ -224,7 +224,7 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o
     int original_threads = params.n_threads;
     if (blasmode)
     {
-        params.n_batch = 1024;
+        params.n_batch = 512; //received reports of 1024 and above crashing on some models
         params.n_threads = 1;
     }

@@ -160,7 +160,7 @@ generation_outputs llama_generate(const generation_inputs inputs, generation_out
     int original_threads = params.n_threads;
     if (blasmode)
     {
-        params.n_batch = 1024;
+        params.n_batch = 512; //received reports of 1024 and above crashing on some models
         params.n_threads = 1;
     }
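Both generate functions receive the identical change: when BLAS batching is active, the batch size drops from 1024 to 512 (the larger value was reported to crash on some models) and processing is pinned to a single thread. A small sketch of the same guard, written as a clamp so an even smaller user-requested batch is left alone; the params struct below is an illustrative stand-in rather than the repository's type, and the committed code simply assigns 512:

#include <algorithm>

// Stand-in for the adapter's params object (illustrative only).
struct batch_params {
    int n_batch   = 1024;
    int n_threads = 4;
};

static void apply_blas_batch_limit(batch_params &params, bool blasmode)
{
    if (blasmode)
    {
        // 1024 and above were reported to crash on some models, so cap at 512.
        params.n_batch   = std::min(params.n_batch, 512);
        // Single-threaded evaluation while BLAS mode is on, matching the diff.
        params.n_threads = 1;
    }
}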