Update llama_v3.cpp (#393)
Fixing C2065 compiler error. Missed the '3' suffix on 3 identifier occurrences (kB → kB3 once, MB → MB3 twice)
This commit is contained in:
parent
bfdc596d58
commit
8263fd7bdb
1 changed file with 2 additions and 2 deletions
|
@ -1315,8 +1315,8 @@ static void llama_v3_model_load_internal(
|
||||||
ggml_cuda_set_scratch_size(vram_scratch);
|
ggml_cuda_set_scratch_size(vram_scratch);
|
||||||
if (n_gpu_layers > 0) {
|
if (n_gpu_layers > 0) {
|
||||||
LLAMA_V3_LOG_INFO("%s: allocating batch_size x (%zd kB + n_ctx x %zd B) = %zd MB VRAM for the scratch buffer\n",
|
LLAMA_V3_LOG_INFO("%s: allocating batch_size x (%zd kB + n_ctx x %zd B) = %zd MB VRAM for the scratch buffer\n",
|
||||||
__func__, vram_scratch_base / kB, vram_scratch_per_context,
|
__func__, vram_scratch_base / kB3, vram_scratch_per_context,
|
||||||
(vram_scratch + MB - 1) / MB); // round up
|
(vram_scratch + MB3 - 1) / MB3); // round up
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
#endif // GGML_USE_CUBLAS
|
#endif // GGML_USE_CUBLAS
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue