llama : offload "output" tensor to GPU too + coding style fixes
This commit is contained in:
parent bb0993ed48
commit ad8a9e6971

5 changed files with 54 additions and 38 deletions
examples/common.cpp

@@ -277,12 +277,12 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
             params.use_color = true;
         } else if (arg == "--mlock") {
             params.use_mlock = true;
-        } else if (arg == "--gpu-layers") {
+        } else if (arg == "--gpu-layers" || arg == "-ngl" || arg == "--n-gpu-layers") {
             if (++i >= argc) {
                 invalid_param = true;
                 break;
             }
-            params.gpu_layers = std::stoi(argv[i]);
+            params.n_gpu_layers = std::stoi(argv[i]);
         } else if (arg == "--no-mmap") {
             params.use_mmap = false;
         } else if (arg == "--mtest") {
@@ -427,7 +427,8 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     if (llama_mmap_supported()) {
         fprintf(stderr, "  --no-mmap             do not memory-map model (slower load but may reduce pageouts if not using mlock)\n");
     }
-    fprintf(stderr, "  --gpu-layers          number of layers to store in VRAM\n");
+    fprintf(stderr, "  -ngl N, --n-gpu-layers N\n");
+    fprintf(stderr, "                        number of layers to store in VRAM\n");
     fprintf(stderr, "  --mtest               compute maximum memory usage\n");
     fprintf(stderr, "  --verbose-prompt      print prompt before generation\n");
     fprintf(stderr, "  --lora FNAME          apply LoRA adapter (implies --no-mmap)\n");
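Net effect of the two hunks above: the option now accepts the -ngl and --n-gpu-layers spellings (the old --gpu-layers still parses), and the help text is split over two lines. For example (binary name per the default Makefile build, model path and prompt illustrative):

./main -m models/7B/ggml-model-q4_0.bin -p "Hello" -ngl 32

asks for 32 layers to be kept in VRAM.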
@@ -470,15 +471,15 @@ std::vector<llama_token> llama_tokenize(struct llama_context * ctx, const std::s
 struct llama_context * llama_init_from_gpt_params(const gpt_params & params) {
     auto lparams = llama_context_default_params();

     lparams.n_ctx      = params.n_ctx;
     lparams.n_parts    = params.n_parts;
-    lparams.seed       = params.seed;
-    lparams.f16_kv     = params.memory_f16;
-    lparams.use_mmap   = params.use_mmap;
-    lparams.use_mlock  = params.use_mlock;
-    lparams.gpu_layers = params.gpu_layers;
+    lparams.n_gpu_layers = params.n_gpu_layers;
+    lparams.seed         = params.seed;
+    lparams.f16_kv       = params.memory_f16;
+    lparams.use_mmap     = params.use_mmap;
+    lparams.use_mlock    = params.use_mlock;
     lparams.logits_all = params.perplexity;
     lparams.embedding  = params.embedding;

     llama_context * lctx = llama_init_from_file(params.model.c_str(), lparams);
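The same setting can also be driven from code through the helper shown above. A minimal sketch, assuming the examples helper sources are linked in; the model path and layer count are illustrative:

// sketch only: path and n_gpu_layers value are placeholders
#include "common.h"
#include "llama.h"

int main() {
    gpt_params params;
    params.model        = "models/7B/ggml-model-q4_0.bin"; // illustrative path
    params.n_gpu_layers = 20;                              // same effect as -ngl 20

    llama_context * ctx = llama_init_from_gpt_params(params);
    if (ctx == NULL) {
        return 1;
    }

    // ... tokenize and evaluate as usual ...

    llama_free(ctx);
    return 0;
}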
examples/common.h

@@ -21,13 +21,14 @@
 int32_t get_num_physical_cores();

 struct gpt_params {
     int32_t seed          = -1;  // RNG seed
     int32_t n_threads     = get_num_physical_cores();
     int32_t n_predict     = -1;  // new tokens to predict
     int32_t n_parts       = -1;  // amount of model parts (-1 = determine from model dimensions)
     int32_t n_ctx         = 512; // context size
     int32_t n_batch       = 512; // batch size for prompt processing (must be >=32 to use BLAS)
     int32_t n_keep        = 0;   // number of tokens to keep from initial prompt
+    int32_t n_gpu_layers  = 0;   // number of layers to store in VRAM

     // sampling parameters
     std::unordered_map<llama_token, float> logit_bias; // logit bias for specific tokens
@@ -69,7 +70,6 @@ struct gpt_params {
     bool perplexity     = false; // compute perplexity over the prompt
     bool use_mmap       = true;  // use mmap for faster loads
     bool use_mlock      = false; // use mlock to keep model in memory
-    int  gpu_layers     = 0;     // number of layers to store in VRAM
     bool mem_test       = false; // compute maximum memory usage
     bool verbose_prompt = false; // print prompt tokens before generation
 };
ggml-cuda.cu

@@ -729,7 +729,7 @@ static void ggml_cuda_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor
     const size_t q_sz = ggml_type_size(type) * x_ne / ggml_blck_size(type);

     size_t x_size, y_size, d_size, q_size;
-    float * d_X;
+    float * d_X = nullptr;
     if (!mul_mat_vec) {
         d_X = (float *) ggml_cuda_pool_malloc(n_mm * sizeof(float) * x_ne, &x_size);
     }

llama.cpp (45 changed lines)
@@ -813,13 +813,13 @@ struct llama_context_params llama_context_default_params() {
     struct llama_context_params result = {
         /*.n_ctx                        =*/ 512,
         /*.n_parts                      =*/ -1,
+        /*.gpu_layers                   =*/ 0,
         /*.seed                         =*/ -1,
         /*.f16_kv                       =*/ false,
         /*.logits_all                   =*/ false,
         /*.vocab_only                   =*/ false,
         /*.use_mmap                     =*/ true,
         /*.use_mlock                    =*/ false,
-        /*.gpu_layers                   =*/ 0,
         /*.embedding                    =*/ false,
         /*.progress_callback            =*/ nullptr,
         /*.progress_callback_user_data  =*/ nullptr,
@@ -880,10 +880,10 @@ static void llama_model_load_internal(
         const std::string & fname,
         llama_context & lctx,
         int n_ctx,
+        int n_gpu_layers,
         ggml_type memory_type,
         bool use_mmap,
         bool use_mlock,
-        int gpu_layers,
         bool vocab_only,
         llama_progress_callback progress_callback,
         void * progress_callback_user_data) {
@@ -1027,15 +1027,30 @@ static void llama_model_load_internal(

     model.mapping = std::move(ml->mapping);
 #ifdef GGML_USE_CUBLAS
-    for (int i = 0; i < std::min(gpu_layers, int(hparams.n_layer)); ++i) {
-        auto & layer = model.layers[i];
-        ggml_cuda_transform_tensor(layer.wq);
-        ggml_cuda_transform_tensor(layer.wk);
-        ggml_cuda_transform_tensor(layer.wv);
-        ggml_cuda_transform_tensor(layer.wo);
-        ggml_cuda_transform_tensor(layer.w1);
-        ggml_cuda_transform_tensor(layer.w2);
-        ggml_cuda_transform_tensor(layer.w3);
+    {
+        const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));
+
+        fprintf(stderr, "%s: [cublas] offloading %d layers to GPU\n", __func__, n_gpu);
+
+        size_t vram_total = 0;
+
+        for (int i = 0; i < n_gpu; ++i) {
+            const auto & layer = model.layers[i];
+
+            ggml_cuda_transform_tensor(layer.wq); vram_total += ggml_nbytes(layer.wq);
+            ggml_cuda_transform_tensor(layer.wk); vram_total += ggml_nbytes(layer.wk);
+            ggml_cuda_transform_tensor(layer.wv); vram_total += ggml_nbytes(layer.wv);
+            ggml_cuda_transform_tensor(layer.wo); vram_total += ggml_nbytes(layer.wo);
+            ggml_cuda_transform_tensor(layer.w1); vram_total += ggml_nbytes(layer.w1);
+            ggml_cuda_transform_tensor(layer.w2); vram_total += ggml_nbytes(layer.w2);
+            ggml_cuda_transform_tensor(layer.w3); vram_total += ggml_nbytes(layer.w3);
+        }
+        if (n_gpu_layers > (int) hparams.n_layer) {
+            fprintf(stderr, "%s: [cublas] offloading output layer to GPU\n", __func__);
+            ggml_cuda_transform_tensor(model.output); vram_total += ggml_nbytes(model.output);
+        }
+
+        fprintf(stderr, "%s: [cublas] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
     }
 #endif

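The rewritten block clamps the request to the model's layer count for the repeating layers, treats a request that exceeds it as "also offload the output tensor", and tallies VRAM usage along the way. A standalone sketch of just that decision, using hypothetical numbers (32-layer model, -ngl 33); it is an illustration, not part of the patch:

#include <algorithm>
#include <cstdio>

int main() {
    const int n_layer      = 32; // hypothetical hparams.n_layer
    const int n_gpu_layers = 33; // e.g. from -ngl 33

    const int n_gpu = std::min(n_gpu_layers, n_layer);
    std::printf("offloading %d layers to GPU\n", n_gpu);  // the 32 repeating layers

    if (n_gpu_layers > n_layer) {
        std::printf("offloading output layer to GPU\n");  // the one extra requested layer
    }
    return 0;
}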
@@ -1048,15 +1063,15 @@ static bool llama_model_load(
         const std::string & fname,
         llama_context & lctx,
         int n_ctx,
+        int n_gpu_layers,
         ggml_type memory_type,
         bool use_mmap,
         bool use_mlock,
-        int gpu_layers,
         bool vocab_only,
         llama_progress_callback progress_callback,
         void *progress_callback_user_data) {
     try {
-        llama_model_load_internal(fname, lctx, n_ctx, memory_type, use_mmap, use_mlock, gpu_layers,
+        llama_model_load_internal(fname, lctx, n_ctx, n_gpu_layers, memory_type, use_mmap, use_mlock,
                                   vocab_only, progress_callback, progress_callback_user_data);
         return true;
     } catch (const std::string & err) {
@@ -2114,8 +2129,8 @@ struct llama_context * llama_init_from_file(

     ggml_type memory_type = params.f16_kv ? GGML_TYPE_F16 : GGML_TYPE_F32;

-    if (!llama_model_load(path_model, *ctx, params.n_ctx, memory_type,
-                          params.use_mmap, params.use_mlock, params.gpu_layers, params.vocab_only,
+    if (!llama_model_load(path_model, *ctx, params.n_ctx, params.n_gpu_layers, memory_type,
+                          params.use_mmap, params.use_mlock, params.vocab_only,
                           params.progress_callback, params.progress_callback_user_data)) {
         fprintf(stderr, "%s: failed to load model\n", __func__);
         llama_free(ctx);

llama.h (8 changed lines)
@@ -54,16 +54,16 @@ extern "C" {
     typedef void (*llama_progress_callback)(float progress, void *ctx);

     struct llama_context_params {
         int n_ctx;        // text context
         int n_parts;      // -1 for default
-        int seed;         // RNG seed, -1 for random
+        int n_gpu_layers; // number of layers to store in VRAM
+        int seed;         // RNG seed, -1 for random

         bool f16_kv;     // use fp16 for KV cache
         bool logits_all; // the llama_eval() call computes all logits, not just the last one
         bool vocab_only; // only load the vocabulary, no weights
         bool use_mmap;   // use mmap if possible
         bool use_mlock;  // force system to keep model in RAM
-        int  gpu_layers; // number of layers to store in VRAM
         bool embedding;  // embedding mode only

         // called with a progress value between 0 and 1, pass NULL to disable
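Callers of the C API set the renamed field directly on llama_context_params. A minimal sketch (model path illustrative; assumes the library was built with cuBLAS support so the offload path above is compiled in):

// sketch only: path and layer count are placeholders
#include "llama.h"
#include <cstdio>

int main() {
    llama_context_params cparams = llama_context_default_params();
    cparams.n_gpu_layers = 32; // number of layers to keep in VRAM

    llama_context * ctx = llama_init_from_file("models/7B/ggml-model-q4_0.bin", cparams);
    if (ctx == NULL) {
        std::fprintf(stderr, "failed to load model\n");
        return 1;
    }

    llama_free(ctx);
    return 0;
}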