fixing more compile issues

This commit is contained in:
Concedo 2023-05-15 20:10:54 +08:00
parent 6504150fac
commit 72836d4eac
6 changed files with 10 additions and 6 deletions

View file

@@ -304,7 +304,6 @@ if (GGML_CUDA_SOURCES)
     message(STATUS "GGML CUDA sources found, configuring CUDA architecture")
     set_property(TARGET ggml PROPERTY CUDA_ARCHITECTURES OFF)
     set_property(TARGET ggml PROPERTY CUDA_SELECT_NVCC_ARCH_FLAGS "Auto")
-    set_property(TARGET llama PROPERTY CUDA_ARCHITECTURES OFF)
 endif()
 set(TARGET koboldcpp)

View file

@@ -357,7 +357,8 @@ bool legacy_gpt2_eval(
     struct ggml_v1_context * ctx0 = ggml_v1_init(params);
-    struct ggml_v1_cgraph gf = { .n_threads = n_threads };
+    struct ggml_v1_cgraph gf = {};
+    gf.n_threads = n_threads;
     struct ggml_v1_tensor * embd = ggml_v1_new_tensor_1d(ctx0, GGML_V1_TYPE_I32, N);
     memcpy(embd->data, embd_inp.data(), N*ggml_v1_element_size(embd));

View file

@@ -376,7 +376,8 @@ bool gpt2_eval(
     struct ggml_context * ctx0 = ggml_init(params);
-    struct ggml_cgraph gf = { .n_threads = n_threads };
+    struct ggml_cgraph gf = {};
+    gf.n_threads = n_threads;
     struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
     memcpy(embd->data, embd_inp.data(), N*ggml_element_size(embd));

View file

@@ -407,7 +407,8 @@ bool legacy_gptj_eval(
     struct ggml_v1_context * ctx0 = ggml_v1_init(params);
-    struct ggml_v1_cgraph gf = { .n_threads = n_threads };
+    struct ggml_v1_cgraph gf = {};
+    gf.n_threads = n_threads;
     struct ggml_v1_tensor * embd = ggml_v1_new_tensor_1d(ctx0, GGML_V1_TYPE_I32, N);
     memcpy(embd->data, embd_inp.data(), N*ggml_v1_element_size(embd));

View file

@@ -388,7 +388,8 @@ bool gptj_eval(
     struct ggml_context * ctx0 = ggml_init(params);
-    struct ggml_cgraph gf = { .n_threads = n_threads };
+    struct ggml_cgraph gf = {};
+    gf.n_threads = n_threads;
     struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
     memcpy(embd->data, embd_inp.data(), N*ggml_element_size(embd));

View file

@@ -382,7 +382,8 @@ bool stablelm_eval(
     struct ggml_context * ctx0 = ggml_init(params);
-    struct ggml_cgraph gf = { .n_threads = n_threads };
+    struct ggml_cgraph gf = {};
+    gf.n_threads = n_threads;
     struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
     memcpy(embd->data, embd_inp.data(), N*ggml_element_size(embd));