Fix build and remove warning

This commit is contained in:
Howard Su 2023-06-16 21:09:45 +08:00
parent 2d5e8b2ca0
commit 7b737917d1
2 changed files with 8 additions and 5 deletions

View file

@ -1195,6 +1195,8 @@ void ggml_cl_transform_tensor(void * data, ggml_tensor * tensor) {
} }
// Copy `size` bytes, starting at byte `offset`, from a tensor that resides in
// an OpenCL device buffer into host memory at `dst`.
//
// tensor->data stores the device handle as a void *, so it must be cast back
// to cl_mem before being passed to the OpenCL API; passing the raw void * is
// what produced the compiler warning this change removes.
// The third argument `true` makes the read blocking, and the clFinish() that
// follows drains the queue, so `dst` is fully populated on return.
// NOTE(review): assumes CL_CHECK aborts or reports on OpenCL failure — verify
// its definition; this function itself always returns true.
bool ggml_cl_get_data(struct ggml_tensor * tensor, size_t offset, size_t size, void * dst) {
    CL_CHECK(clEnqueueReadBuffer(queue, (cl_mem) tensor->data, true, offset, size, dst, 0, NULL, NULL));
    CL_CHECK(clFinish(queue));
    return true;
}

View file

@ -1742,9 +1742,6 @@ static bool llama_eval_internal(
embedding_out.resize(n_embd); embedding_out.resize(n_embd);
switch(embeddings->backend) switch(embeddings->backend)
{ {
case GGML_BACKEND_CPU:
memcpy(embedding_out.data(), (float *) ggml_get_data(embeddings) + (n_embd*(N - 1)), sizeof(float)*n_embd);
break;
#if defined(GGML_USE_CUBLAS) #if defined(GGML_USE_CUBLAS)
case GGML_BACKEND_GPU: case GGML_BACKEND_GPU:
case GGML_BACKEND_GPU_SPLIT: case GGML_BACKEND_GPU_SPLIT:
@ -1753,9 +1750,13 @@ static bool llama_eval_internal(
#elif defined(GGML_USE_CLBLAST) #elif defined(GGML_USE_CLBLAST)
case GGML_BACKEND_GPU: case GGML_BACKEND_GPU:
case GGML_BACKEND_GPU_SPLIT: case GGML_BACKEND_GPU_SPLIT:
ggml_cuda_get_data(embeddings, (n_embd*(N - 1)) * sizeof(float), n_embd * sizeof(float), embedding_out.data()); ggml_cl_get_data(embeddings, (n_embd*(N - 1)) * sizeof(float), n_embd * sizeof(float), embedding_out.data());
break; break;
#endif #endif
case GGML_BACKEND_CPU:
default:
memcpy(embedding_out.data(), (float *) ggml_get_data(embeddings) + (n_embd*(N - 1)), sizeof(float)*n_embd);
break;
} }