diff --git a/examples/common.cpp b/examples/common.cpp
index 43a105cdd..86c1eef41 100644
--- a/examples/common.cpp
+++ b/examples/common.cpp
@@ -277,12 +277,12 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
             params.use_color = true;
         } else if (arg == "--mlock") {
             params.use_mlock = true;
-        } else if (arg == "--gpu-layers") {
+        } else if (arg == "--gpu-layers" || arg == "-ngl" || arg == "--n-gpu-layers") {
             if (++i >= argc) {
                 invalid_param = true;
                 break;
             }
-            params.gpu_layers = std::stoi(argv[i]);
+            params.n_gpu_layers = std::stoi(argv[i]);
         } else if (arg == "--no-mmap") {
             params.use_mmap = false;
         } else if (arg == "--mtest") {
@@ -427,7 +427,8 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     if (llama_mmap_supported()) {
         fprintf(stderr, "  --no-mmap             do not memory-map model (slower load but may reduce pageouts if not using mlock)\n");
     }
-    fprintf(stderr, "  --gpu-layers          number of layers to store in VRAM\n");
+    fprintf(stderr, "  -ngl N, --n-gpu-layers N\n");
+    fprintf(stderr, "                        number of layers to store in VRAM\n");
     fprintf(stderr, "  --mtest               compute maximum memory usage\n");
     fprintf(stderr, "  --verbose-prompt      print prompt before generation\n");
     fprintf(stderr, "  --lora FNAME          apply LoRA adapter (implies --no-mmap)\n");
@@ -470,15 +471,15 @@ std::vector<llama_token> llama_tokenize(struct llama_context * ctx, const std::s
 struct llama_context * llama_init_from_gpt_params(const gpt_params & params) {
     auto lparams = llama_context_default_params();
 
-    lparams.n_ctx      = params.n_ctx;
-    lparams.n_parts    = params.n_parts;
-    lparams.seed       = params.seed;
-    lparams.f16_kv     = params.memory_f16;
-    lparams.use_mmap   = params.use_mmap;
-    lparams.use_mlock  = params.use_mlock;
-    lparams.gpu_layers = params.gpu_layers;
-    lparams.logits_all = params.perplexity;
-    lparams.embedding  = params.embedding;
+    lparams.n_ctx        = params.n_ctx;
+    lparams.n_parts      = params.n_parts;
+    lparams.n_gpu_layers = params.n_gpu_layers;
+    lparams.seed         = params.seed;
+    lparams.f16_kv       = params.memory_f16;
+    lparams.use_mmap     = params.use_mmap;
+    lparams.use_mlock    = params.use_mlock;
+    lparams.logits_all   = params.perplexity;
+    lparams.embedding    = params.embedding;
 
     llama_context * lctx = llama_init_from_file(params.model.c_str(), lparams);
 
diff --git a/examples/common.h b/examples/common.h
index 636dc3594..717838f06 100644
--- a/examples/common.h
+++ b/examples/common.h
@@ -21,13 +21,14 @@
 int32_t get_num_physical_cores();
 
 struct gpt_params {
-    int32_t seed          = -1;   // RNG seed
+    int32_t seed          = -1;  // RNG seed
     int32_t n_threads     = get_num_physical_cores();
     int32_t n_predict     = -1;  // new tokens to predict
-    int32_t n_parts       = -1;   // amount of model parts (-1 = determine from model dimensions)
-    int32_t n_ctx         = 512;  // context size
-    int32_t n_batch       = 512;  // batch size for prompt processing (must be >=32 to use BLAS)
-    int32_t n_keep        = 0;    // number of tokens to keep from initial prompt
+    int32_t n_parts       = -1;  // amount of model parts (-1 = determine from model dimensions)
+    int32_t n_ctx         = 512; // context size
+    int32_t n_batch       = 512; // batch size for prompt processing (must be >=32 to use BLAS)
+    int32_t n_keep        = 0;   // number of tokens to keep from initial prompt
+    int32_t n_gpu_layers  = 0;   // number of layers to store in VRAM
 
     // sampling parameters
     std::unordered_map<llama_token, float> logit_bias; // logit bias for specific tokens
@@ -69,7 +70,6 @@ struct gpt_params {
     bool perplexity        = false; // compute perplexity over the prompt
     bool use_mmap          = true;  // use mmap for faster loads
     bool use_mlock         = false; // use mlock to keep model in memory
-    int  gpu_layers        = 0;     // number of layers to store in VRAM
     bool mem_test          = false; // compute maximum memory usage
     bool verbose_prompt    = false; // print prompt tokens before generation
 };
diff --git a/ggml-cuda.cu b/ggml-cuda.cu
index 812e0d402..b6a7754d5 100644
--- a/ggml-cuda.cu
+++ b/ggml-cuda.cu
@@ -729,7 +729,7 @@ static void ggml_cuda_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor
     const size_t q_sz = ggml_type_size(type) * x_ne / ggml_blck_size(type);
 
     size_t x_size, y_size, d_size, q_size;
-    float * d_X;
+    float * d_X = nullptr;
     if (!mul_mat_vec) {
         d_X = (float *) ggml_cuda_pool_malloc(n_mm * sizeof(float) * x_ne, &x_size);
     }
diff --git a/llama.cpp b/llama.cpp
index fd890ed31..436d0c678 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -813,13 +813,13 @@ struct llama_context_params llama_context_default_params() {
     struct llama_context_params result = {
         /*.n_ctx                        =*/ 512,
         /*.n_parts                      =*/ -1,
+        /*.n_gpu_layers                 =*/ 0,
         /*.seed                         =*/ -1,
         /*.f16_kv                       =*/ false,
         /*.logits_all                   =*/ false,
         /*.vocab_only                   =*/ false,
         /*.use_mmap                     =*/ true,
         /*.use_mlock                    =*/ false,
-        /*.gpu_layers                   =*/ 0,
         /*.embedding                    =*/ false,
         /*.progress_callback            =*/ nullptr,
         /*.progress_callback_user_data  =*/ nullptr,
@@ -880,10 +880,10 @@ static void llama_model_load_internal(
         const std::string & fname,
         llama_context & lctx,
         int n_ctx,
+        int n_gpu_layers,
         ggml_type memory_type,
         bool use_mmap,
         bool use_mlock,
-        int gpu_layers,
         bool vocab_only,
         llama_progress_callback progress_callback,
         void * progress_callback_user_data) {
@@ -1027,15 +1027,30 @@ static void llama_model_load_internal(
     model.mapping = std::move(ml->mapping);
 
 #ifdef GGML_USE_CUBLAS
-    for (int i = 0; i < std::min(gpu_layers, int(hparams.n_layer)); ++i) {
-        auto & layer = model.layers[i];
-        ggml_cuda_transform_tensor(layer.wq);
-        ggml_cuda_transform_tensor(layer.wk);
-        ggml_cuda_transform_tensor(layer.wv);
-        ggml_cuda_transform_tensor(layer.wo);
-        ggml_cuda_transform_tensor(layer.w1);
-        ggml_cuda_transform_tensor(layer.w2);
-        ggml_cuda_transform_tensor(layer.w3);
+    {
+        const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));
+
+        fprintf(stderr, "%s: [cublas] offloading %d layers to GPU\n", __func__, n_gpu);
+
+        size_t vram_total = 0;
+
+        for (int i = 0; i < n_gpu; ++i) {
+            const auto & layer = model.layers[i];
+
+            ggml_cuda_transform_tensor(layer.wq); vram_total += ggml_nbytes(layer.wq);
+            ggml_cuda_transform_tensor(layer.wk); vram_total += ggml_nbytes(layer.wk);
+            ggml_cuda_transform_tensor(layer.wv); vram_total += ggml_nbytes(layer.wv);
+            ggml_cuda_transform_tensor(layer.wo); vram_total += ggml_nbytes(layer.wo);
+            ggml_cuda_transform_tensor(layer.w1); vram_total += ggml_nbytes(layer.w1);
+            ggml_cuda_transform_tensor(layer.w2); vram_total += ggml_nbytes(layer.w2);
+            ggml_cuda_transform_tensor(layer.w3); vram_total += ggml_nbytes(layer.w3);
+        }
+        if (n_gpu_layers > (int) hparams.n_layer) {
+            fprintf(stderr, "%s: [cublas] offloading output layer to GPU\n", __func__);
+            ggml_cuda_transform_tensor(model.output); vram_total += ggml_nbytes(model.output);
+        }
+
+        fprintf(stderr, "%s: [cublas] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
     }
 #endif
 
@@ -1048,15 +1063,15 @@ static bool llama_model_load(
         const std::string & fname,
         llama_context & lctx,
         int n_ctx,
+        int n_gpu_layers,
         ggml_type memory_type,
         bool use_mmap,
         bool use_mlock,
-        int gpu_layers,
         bool vocab_only,
         llama_progress_callback progress_callback,
         void *progress_callback_user_data) {
     try {
-        llama_model_load_internal(fname, lctx, n_ctx, memory_type, use_mmap, use_mlock, gpu_layers,
+        llama_model_load_internal(fname, lctx, n_ctx, n_gpu_layers, memory_type, use_mmap, use_mlock,
                                   vocab_only, progress_callback, progress_callback_user_data);
         return true;
     } catch (const std::string & err) {
@@ -2114,8 +2129,8 @@ struct llama_context * llama_init_from_file(
 
     ggml_type memory_type = params.f16_kv ? GGML_TYPE_F16 : GGML_TYPE_F32;
 
-    if (!llama_model_load(path_model, *ctx, params.n_ctx, memory_type,
-                          params.use_mmap, params.use_mlock, params.gpu_layers, params.vocab_only,
+    if (!llama_model_load(path_model, *ctx, params.n_ctx, params.n_gpu_layers, memory_type,
+                          params.use_mmap, params.use_mlock, params.vocab_only,
                           params.progress_callback, params.progress_callback_user_data)) {
         fprintf(stderr, "%s: failed to load model\n", __func__);
         llama_free(ctx);
diff --git a/llama.h b/llama.h
index 07e686208..78017fc86 100644
--- a/llama.h
+++ b/llama.h
@@ -54,16 +54,16 @@ extern "C" {
     typedef void (*llama_progress_callback)(float progress, void *ctx);
 
     struct llama_context_params {
-        int n_ctx;   // text context
-        int n_parts; // -1 for default
-        int seed;    // RNG seed, -1 for random
+        int n_ctx;        // text context
+        int n_parts;      // -1 for default
+        int n_gpu_layers; // number of layers to store in VRAM
+        int seed;         // RNG seed, -1 for random
 
         bool f16_kv;     // use fp16 for KV cache
         bool logits_all; // the llama_eval() call computes all logits, not just the last one
         bool vocab_only; // only load the vocabulary, no weights
         bool use_mmap;   // use mmap if possible
         bool use_mlock;  // force system to keep model in RAM
-        int  gpu_layers; // number of layers to store in VRAM
        bool embedding;  // embedding mode only
 
         // called with a progress value between 0 and 1, pass NULL to disable
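For reference, a minimal sketch (not part of the patch) of how a caller could drive the renamed field through the public API touched above. It only uses symbols visible in this diff: `llama_context_default_params()`, the new `n_gpu_layers` field, `llama_init_from_file()` and `llama_free()`; the model path and layer count are placeholders.

```cpp
// Usage sketch (assumption: not part of this patch) for the n_gpu_layers API change.
#include "llama.h"

#include <cstdio>

int main() {
    llama_context_params lparams = llama_context_default_params();
    lparams.n_gpu_layers = 32; // placeholder: number of layers to offload to VRAM

    // placeholder model path
    llama_context * ctx = llama_init_from_file("models/7B/ggml-model-q4_0.bin", lparams);
    if (ctx == nullptr) {
        fprintf(stderr, "failed to load model\n");
        return 1;
    }

    // ... evaluate tokens here ...

    llama_free(ctx);
    return 0;
}
```

On the command line, the same setting is exposed as `-ngl N` / `--n-gpu-layers N`, with the old `--gpu-layers` spelling still accepted by `gpt_params_parse()`.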