wip cleanup before big merge

This commit is contained in:
Concedo 2023-05-20 12:48:28 +08:00
parent 010b2753d9
commit 4e86a07e57
5 changed files with 2976 additions and 2989 deletions

2978
ggml.c

File diff suppressed because it is too large Load diff

2984
ggml_v2.c

File diff suppressed because it is too large Load diff

View file

@@ -1080,7 +1080,6 @@ static void llama_model_load_internal(
const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));
if(GetQuantsUnshuffled())
{
SetGPULayers(n_gpu);
fprintf(stderr, "%s: [opencl] offloading %d layers to GPU\n", __func__, n_gpu);

View file

@@ -333,7 +333,6 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
// const int n_gpu = std::min(gpulayers, int(hparams.n_layer));
// if(GetQuantsUnshuffled())
// {
// SetGPULayers(n_gpu);
// fprintf(stderr, "%s: [opencl] offloading %d layers to GPU\n", __func__, n_gpu);

View file

@@ -336,7 +336,6 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
// const int n_gpu = std::min(gpulayers, int(hparams.n_layer));
// if(GetQuantsUnshuffled())
// {
// SetGPULayers(n_gpu);
// fprintf(stderr, "%s: [opencl] offloading %d layers to GPU\n", __func__, n_gpu);