cuda : rename build flag to LLAMA_CUDA (#6299)

This commit is contained in:
slaren 2024-03-26 01:16:01 +01:00 committed by GitHub
parent b06c16ef9f
commit 280345968d
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
28 changed files with 129 additions and 115 deletions

View file

@@ -124,7 +124,7 @@ llama_print_timings: total time = 34570.79 ms
## Orin compile and run
### compile
```sh
-make LLAMA_CUBLAS=1 CUDA_DOCKER_ARCH=sm_87 LLAMA_CUDA_F16=1 -j 32
+make LLAMA_CUDA=1 CUDA_DOCKER_ARCH=sm_87 LLAMA_CUDA_F16=1 -j 32
```
### run on Orin

View file

@@ -7,7 +7,7 @@
#include "ggml-alloc.h"
#include "ggml-backend.h"
-#ifdef GGML_USE_CUBLAS
+#ifdef GGML_USE_CUDA
#include "ggml-cuda.h"
#endif
@@ -968,7 +968,7 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
}
}
-#ifdef GGML_USE_CUBLAS
+#ifdef GGML_USE_CUDA
new_clip->backend = ggml_backend_cuda_init(0);
printf("%s: CLIP using CUDA backend\n", __func__);
#endif