Merge remote-tracking branch 'occam/opencl-dev' into concedo_experimental
commit 8d0c81e7cc
3 changed files with 13 additions and 16 deletions
@@ -993,11 +993,11 @@ static void ggml_cl_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor *
     cl_kernel* dmmv = ggml_get_dequantize_mul_mat_vec_cl(type);
     GGML_ASSERT(to_fp32_cl != nullptr);
 
-    for (int64_t i03 = 0; i03 < ne03; i03++) {
-        for (int64_t i02 = 0; i02 < ne02; i02++) {
     size_t ev_idx = 0;
     std::vector<cl_event> events;
 
+    for (int64_t i03 = 0; i03 < ne03; i03++) {
+        for (int64_t i02 = 0; i02 < ne02; i02++) {
             // copy src0 to device if necessary
             if (src0->backend == GGML_BACKEND_CPU) {
                 events.emplace_back();
@@ -1061,6 +1061,9 @@ static void ggml_cl_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor *
             for (auto *event : events) {
                 clReleaseEvent(event);
             }
+
+            ev_idx = 0;
+            events.clear();
         }
     }
 
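The two hunks above hoist ev_idx and the cl_event vector out of the i03/i02 loops and instead reset them at the end of every iteration, right after the clReleaseEvent loop, so no events leak and the vector's storage is reused. A minimal, self-contained sketch of why that helps (illustration only, not code from this diff; std::vector<int> stands in for std::vector<cl_event>): clear() drops the elements but keeps the allocated capacity, so one long-lived vector avoids a heap allocation per (i03, i02) pair.

#include <cstdio>
#include <vector>

int main() {
    std::vector<int> events;              // stands in for std::vector<cl_event>
    for (int iter = 0; iter < 3; iter++) {
        for (int i = 0; i < 100; i++) {
            events.push_back(i);          // stands in for events.emplace_back()
        }
        std::printf("iter %d: size=%zu capacity=%zu\n",
                    iter, events.size(), events.capacity());
        events.clear();                   // size drops to 0, capacity is kept
    }
    return 0;
}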
@@ -17,7 +17,7 @@ void * ggml_cl_host_malloc(size_t size);
 void ggml_cl_host_free(void * ptr);
 
 void ggml_cl_transform_tensor(struct ggml_tensor * tensor);
-void ggml_cl_load_data(const char * fname, struct ggml_tensor * tensor, const size_t offset);
+void ggml_cl_load_data(const char * fname, struct ggml_tensor * tensor, size_t offset);
 
 #ifdef __cplusplus
 }
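The only change in this header hunk is dropping const from the by-value offset parameter of ggml_cl_load_data. A small stand-alone example (not from this repository) showing why that is cosmetic for callers: top-level const on a value parameter does not participate in the function's type, so the declaration and definition may disagree about it and call sites compile unchanged either way.

#include <cstddef>
#include <type_traits>

void f(std::size_t offset);           // header-style declaration, no const
void f(const std::size_t offset) {    // same function; const only binds inside the body
    // offset = 0;                     // would not compile: offset is const here
    (void) offset;
}

// top-level const is dropped from the function type, so both spellings agree
static_assert(std::is_same<void(std::size_t), void(const std::size_t)>::value,
              "identical function types");

int main() {
    f(42);                             // callers are unaffected either way
    return 0;
}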
llama.cpp (18 changed lines)
@@ -1012,8 +1012,10 @@ static void llama_model_load_internal(
 
 #if defined(GGML_USE_CUBLAS)
 #define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_CUDA
+    fprintf(stderr, "%s: using CUDA for GPU acceleration\n", __func__);
 #elif defined(GGML_USE_CLBLAST)
 #define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_CL
+    fprintf(stderr, "%s: using OpenCL for GPU acceleration\n", __func__);
 #else
 #define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_CPU
 #endif
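This hunk only adds a load-time message next to each LLAMA_BACKEND_OFFLOAD definition, so the compile-time backend choice shows up in the log. A hedged sketch of the pattern; the enum and BACKEND_NAME below are simplified stand-ins, not GGML's definitions, and the real file prints from llama_model_load_internal rather than main.

#include <cstdio>

// simplified stand-ins for the GGML backend enum values
enum backend_kind { BACKEND_CPU, BACKEND_CUDA, BACKEND_CL };

#if defined(GGML_USE_CUBLAS)
#define LLAMA_BACKEND_OFFLOAD BACKEND_CUDA
#define BACKEND_NAME "CUDA"
#elif defined(GGML_USE_CLBLAST)
#define LLAMA_BACKEND_OFFLOAD BACKEND_CL
#define BACKEND_NAME "OpenCL"
#else
#define LLAMA_BACKEND_OFFLOAD BACKEND_CPU
#endif

int main() {
#ifdef BACKEND_NAME
    // mirrors the added fprintf lines: announce the backend once at load time
    std::fprintf(stderr, "%s: using %s for GPU acceleration\n", __func__, BACKEND_NAME);
#endif
    // later code can branch on LLAMA_BACKEND_OFFLOAD when assigning tensor backends
    backend_kind offload = LLAMA_BACKEND_OFFLOAD;
    (void) offload;
    return 0;
}

Building with -DGGML_USE_CUBLAS or -DGGML_USE_CLBLAST selects the corresponding branch, exactly as the preprocessor conditions in the hunk do.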
@@ -1095,22 +1097,14 @@ static void llama_model_load_internal(
     fprintf(stderr, "%s: mem required = %7.2f MB (+ %7.2f MB per state)\n", __func__,
             mem_required / 1024.0 / 1024.0, mem_required_state / 1024.0 / 1024.0);
 
-#if defined(GGML_USE_CUBLAS)
+#if defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST)
     const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));
 
-    fprintf(stderr, "%s: [cublas] offloading %d layers to GPU\n", __func__, n_gpu);
+    fprintf(stderr, "%s: offloading %d layers to GPU\n", __func__, n_gpu);
     if (n_gpu_layers > (int) hparams.n_layer) {
-        fprintf(stderr, "%s: [cublas] offloading output layer to GPU\n", __func__);
+        fprintf(stderr, "%s: offloading output layer to GPU\n", __func__);
     }
-    fprintf(stderr, "%s: [cublas] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
-#elif defined(GGML_USE_CLBLAST)
-    const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));
-
-    fprintf(stderr, "%s: [opencl] offloading %d layers to GPU\n", __func__, n_gpu);
-    if (n_gpu_layers > (int) hparams.n_layer) {
-        fprintf(stderr, "%s: [opencl] offloading output layer to GPU\n", __func__);
-    }
-    fprintf(stderr, "%s: [opencl] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
+    fprintf(stderr, "%s: total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
 #else
     (void) n_gpu_layers;
 #endif
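This hunk merges the separate [cublas] and [opencl] reporting blocks into a single #if defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST) block and drops the backend tag from the messages; the "using CUDA/OpenCL for GPU acceleration" line added earlier already identifies the backend. A quick stand-alone check of the accounting the block keeps (same arithmetic as the hunk, illustrative values only):

#include <algorithm>
#include <cstdio>

int main() {
    const int n_layer      = 32;   // illustrative model layer count
    const int n_gpu_layers = 35;   // request more layers than the model has

    // same clamp as the hunk: never report more layers than exist
    const int n_gpu = std::min(n_gpu_layers, n_layer);

    std::printf("offloading %d layers to GPU\n", n_gpu);          // prints 32
    if (n_gpu_layers > n_layer) {
        // a surplus request is what flags the output layer for offload
        std::printf("offloading output layer to GPU\n");
    }
    return 0;
}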