added the llama_v2 cuda back (+2 squashed commits)

Squashed commit:
[1c97fd4] Revert "fix for cublas". This reverts commit 994be9a4db.
[fce03c3] Revert "fix for cublas". This reverts commit 33528f5b1d.
This commit is contained in:
parent fb67506c1b
commit c44b9c3ecf

1 changed file with 31 additions and 3 deletions
@@ -9,7 +9,9 @@
 #include "llama_v2.h"

 #include "ggml_v2.h"
-#if defined(GGML_USE_CLBLAST)
+#ifdef GGML_USE_CUBLAS
+#include "ggml_v2-cuda.h"
+#elif defined(GGML_USE_CLBLAST)
 #include "ggml_v2-opencl.h"
 #endif

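Note on the hunk above: the include guard previously tested only GGML_USE_CLBLAST; the change restores the cuBLAS path by checking GGML_USE_CUBLAS first and falling back to CLBlast, so exactly one GPU backend header is compiled in. The snippet below is a minimal standalone sketch of that same build-time selection pattern, not part of the patch; backend_name() is a made-up helper used only for illustration.

#include <cstdio>

static const char * backend_name() {
#ifdef GGML_USE_CUBLAS
    return "cuBLAS (CUDA)";        // compiled when -DGGML_USE_CUBLAS is set
#elif defined(GGML_USE_CLBLAST)
    return "CLBlast (OpenCL)";     // compiled when -DGGML_USE_CLBLAST is set
#else
    return "CPU only";             // neither GPU backend enabled
#endif
}

int main() {
    std::fprintf(stderr, "selected backend: %s\n", backend_name());
    return 0;
}

Because the check is an #ifdef/#elif chain, the CUDA branch wins if both macros happen to be defined at build time.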
@@ -1058,7 +1060,33 @@ static void llama_v2_model_load_internal(
     ml->load_all_data(progress_callback, progress_callback_user_data, use_mlock ? &lctx.model.mlock_mmap : NULL);

     model.mapping = std::move(ml->mapping);
-#if defined(GGML_USE_CLBLAST)
+#if defined(GGML_USE_CUBLAS)
+    {
+        const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));
+
+        fprintf(stderr, "%s: [old cublas] offloading %d layers to GPU\n", __func__, n_gpu);
+
+        size_t vram_total = 0;
+
+        for (int i = 0; i < n_gpu; ++i) {
+            const auto & layer = model.layers[i];
+
+            ggml_v2_cuda_transform_tensor(layer.wq); vram_total += ggml_v2_nbytes(layer.wq);
+            ggml_v2_cuda_transform_tensor(layer.wk); vram_total += ggml_v2_nbytes(layer.wk);
+            ggml_v2_cuda_transform_tensor(layer.wv); vram_total += ggml_v2_nbytes(layer.wv);
+            ggml_v2_cuda_transform_tensor(layer.wo); vram_total += ggml_v2_nbytes(layer.wo);
+            ggml_v2_cuda_transform_tensor(layer.w1); vram_total += ggml_v2_nbytes(layer.w1);
+            ggml_v2_cuda_transform_tensor(layer.w2); vram_total += ggml_v2_nbytes(layer.w2);
+            ggml_v2_cuda_transform_tensor(layer.w3); vram_total += ggml_v2_nbytes(layer.w3);
+        }
+        if (n_gpu_layers > (int) hparams.n_layer) {
+            fprintf(stderr, "%s: [old cublas] offloading output layer to GPU\n", __func__);
+            ggml_v2_cuda_transform_tensor(model.output); vram_total += ggml_v2_nbytes(model.output);
+        }
+
+        fprintf(stderr, "%s: [old cublas] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
+    }
+#elif defined(GGML_USE_CLBLAST)
     {
         const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));
         if(GetQuantsUnshuffled())
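Note on the hunk above: under GGML_USE_CUBLAS the loader now offloads the attention (wq, wk, wv, wo) and feed-forward (w1, w2, w3) weight tensors of the first n_gpu layers with ggml_v2_cuda_transform_tensor, accumulates the byte count in vram_total, additionally offloads model.output when n_gpu_layers exceeds hparams.n_layer, and logs the total in MB (vram_total / 1024 / 1024). The sketch below illustrates only that offload-and-account pattern with stand-in types; Tensor, Layer, and offload_to_gpu() are hypothetical and are not the ggml_v2 API.

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <initializer_list>
#include <vector>

// Hypothetical stand-ins; the real patch uses ggml_v2 tensors together with
// ggml_v2_cuda_transform_tensor() and ggml_v2_nbytes().
struct Tensor { size_t nbytes; };
struct Layer  { Tensor wq, wk, wv, wo, w1, w2, w3; };

static size_t offload_to_gpu(const Tensor & t) {
    return t.nbytes;   // pretend the tensor was copied to VRAM; report its size
}

int main() {
    std::vector<Layer> layers(32, Layer{{64}, {64}, {64}, {64}, {128}, {128}, {128}});
    int n_gpu_layers = 20;                                    // user-requested layer count
    const int n_gpu  = std::min(n_gpu_layers, (int) layers.size());

    size_t vram_total = 0;
    for (int i = 0; i < n_gpu; ++i) {
        const Layer & l = layers[i];
        for (const Tensor * t : { &l.wq, &l.wk, &l.wv, &l.wo, &l.w1, &l.w2, &l.w3 }) {
            vram_total += offload_to_gpu(*t);                 // same accounting as vram_total in the hunk
        }
    }
    std::fprintf(stderr, "offloaded %d layers, total VRAM used: %zu MB\n",
                 n_gpu, vram_total / 1024 / 1024);
    return 0;
}

The real loop also offloads the output projection when the requested layer count exceeds hparams.n_layer, which is why the hunk compares n_gpu_layers against (int) hparams.n_layer in a separate check after the per-layer loop.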