cuda : fix LLAMA_CUDA_F16 build (#6197)
parent cfd3be76e3
commit 03a8f8fafe
1 changed file with 1 addition and 1 deletion
@@ -9453,7 +9453,7 @@ static void ggml_cuda_op_dequantize_mul_mat_vec(
     // on some GPUs it is faster to convert src1 to half and to use half precision intrinsics
 #ifdef GGML_CUDA_F16
-    cuda_pool_alloc<half> src1_dfloat_a;
+    ggml_cuda_pool_alloc<half> src1_dfloat_a(ctx.pool());
     half * src1_dfloat = nullptr; // dfloat == half
     bool src1_convert_f16 =
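The GGML_CUDA_F16 path still declared the old default-constructible cuda_pool_alloc, which no longer compiles; the fix switches it to ggml_cuda_pool_alloc constructed from the backend context's pool, matching the rest of the file. As a rough illustration of that pattern, here is a minimal, self-contained CUDA C++ sketch of a pool-backed RAII allocator bound to a pool at construction. It is not the ggml API: simple_pool, pool_alloc, and their members are hypothetical stand-ins, and the trivial "pool" just forwards to cudaMalloc/cudaFree.

// Hypothetical sketch of the pattern the fix restores: an RAII allocator that is
// constructed from a pool and releases its buffer on scope exit. Not the ggml API.
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <cstdio>

struct simple_pool {
    // Trivial stand-in "pool" that just forwards to cudaMalloc/cudaFree.
    void * alloc(size_t size) { void * ptr = nullptr; cudaMalloc(&ptr, size); return ptr; }
    void   release(void * ptr) { cudaFree(ptr); }
};

template <typename T>
struct pool_alloc {
    simple_pool & pool;   // bound at construction, analogous to ctx.pool() in the fix
    T * ptr = nullptr;

    explicit pool_alloc(simple_pool & p) : pool(p) {}
    ~pool_alloc() { if (ptr != nullptr) pool.release(ptr); }

    // Allocate n elements from the pool; the buffer is freed by the destructor.
    T * alloc(size_t n) {
        ptr = static_cast<T *>(pool.alloc(n * sizeof(T)));
        return ptr;
    }
};

int main() {
    simple_pool pool;

    // Mirrors the fixed line: the allocator is tied to a pool up front and only
    // allocates when the half-precision path is actually taken.
    pool_alloc<__half> src1_dfloat_a(pool);

    bool src1_convert_f16 = true;   // stand-in for the quant-type check in the real code
    __half * src1_dfloat = nullptr; // dfloat == half
    if (src1_convert_f16) {
        src1_dfloat = src1_dfloat_a.alloc(1024);
    }

    printf("src1_dfloat buffer: %p\n", (void *) src1_dfloat);
    return 0;  // src1_dfloat_a's destructor returns the buffer here
}

The point of the design is that the allocator cannot exist without a pool, so a default-constructed instance (the old broken line) is rejected at compile time rather than failing at runtime.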