Add '-ngl' support to finetune.cpp

Andrew Godfrey 2023-10-21 22:41:24 -07:00
parent 2b4ea35e56
commit facb1a3e0f
3 changed files with 16 additions and 0 deletions

@@ -1080,6 +1080,8 @@ struct train_params_common get_default_train_params_common() {
     params.adam_beta2 = 0.999f;
     params.adam_gclip = 1.0f;
     params.adam_eps_f = 0.0f;
+    params.n_gpu_layers = 0;
     return params;
 }

@@ -80,6 +80,8 @@ struct train_params_common {
     float adam_beta2;
     float adam_gclip;
     float adam_eps_f;
+    int32_t n_gpu_layers;
 };
 typedef void (*save_train_files_callback)(void * data, struct train_state * train);

@@ -1459,6 +1459,17 @@ static bool train_params_parse(int argc, char ** argv, struct train_params * par
             }
             params->n_rank_w3 = std::stoi(argv[i]);
             params->custom_n_rank_w3 = true;
+        } else if (arg == "--gpu-layers" || arg == "-ngl" || arg == "--n-gpu-layers") {
+            if (++i >= argc) {
+                invalid_param = true;
+                break;
+            }
+#ifdef LLAMA_SUPPORTS_GPU_OFFLOAD
+            params->common.n_gpu_layers = std::stoi(argv[i]);
+#else
+            fprintf(stderr, "warning: not compiled with GPU offload support, --n-gpu-layers option will be ignored\n");
+            fprintf(stderr, "warning: see main README.md for information on enabling GPU BLAS support\n");
+#endif
         } else {
             fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
             train_print_usage(argc, argv, &default_params);
@@ -1545,6 +1556,7 @@ int main(int argc, char ** argv) {
     srand(params.common.seed);
     struct llama_model_params llama_mparams = llama_model_default_params();
+    llama_mparams.n_gpu_layers = params.common.n_gpu_layers;
     llama_mparams.vocab_only = false;
     printf("%s: model base = '%s'\n", __func__, params.fn_model_base);