Fix for cuBLAS

This commit is contained in:
Concedo 2023-05-21 21:02:21 +08:00
parent 24127ebf98
commit 994be9a4db

View file

@ -1060,33 +1060,7 @@ static void llama_v2_model_load_internal(
ml->load_all_data(progress_callback, progress_callback_user_data, use_mlock ? &lctx.model.mlock_mmap : NULL);
model.mapping = std::move(ml->mapping);
#if defined(GGML_USE_CUBLAS)
{
const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));
fprintf(stderr, "%s: [cublas] offloading %d layers to GPU\n", __func__, n_gpu);
size_t vram_total = 0;
for (int i = 0; i < n_gpu; ++i) {
const auto & layer = model.layers[i];
ggml_cuda_transform_tensor(layer.wq); vram_total += ggml_v2_nbytes(layer.wq);
ggml_cuda_transform_tensor(layer.wk); vram_total += ggml_v2_nbytes(layer.wk);
ggml_cuda_transform_tensor(layer.wv); vram_total += ggml_v2_nbytes(layer.wv);
ggml_cuda_transform_tensor(layer.wo); vram_total += ggml_v2_nbytes(layer.wo);
ggml_cuda_transform_tensor(layer.w1); vram_total += ggml_v2_nbytes(layer.w1);
ggml_cuda_transform_tensor(layer.w2); vram_total += ggml_v2_nbytes(layer.w2);
ggml_cuda_transform_tensor(layer.w3); vram_total += ggml_v2_nbytes(layer.w3);
}
if (n_gpu_layers > (int) hparams.n_layer) {
fprintf(stderr, "%s: [cublas] offloading output layer to GPU\n", __func__);
ggml_cuda_transform_tensor(model.output); vram_total += ggml_v2_nbytes(model.output);
}
fprintf(stderr, "%s: [cublas] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
}
#elif defined(GGML_USE_CLBLAST)
#if defined(GGML_USE_CLBLAST)
{
const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));
if(GetQuantsUnshuffled())