make github CI happy

zhou.weiguo 2024-04-22 20:13:45 +08:00
parent 9cba545fbf
commit 0054f3681b
No known key found for this signature in database
GPG key ID: 952EA81D18BB2FA8
15 changed files with 34 additions and 6 deletions
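
Every hunk below makes the same mechanical change: each `struct ggml_init_params` initializer gains the `use_hwaccel` member introduced on this branch, set to `false`/`0` so existing behavior is unchanged (the one positional initializer, in `llama_backend_init`, gets an extra `false` for the same reason). As a minimal sketch of an updated call site, assuming this fork's `ggml_init_params` carries `use_hwaccel` as a fourth member after `no_alloc` (upstream ggml has only the first three fields):

    #include "ggml.h"

    // Sketch only: use_hwaccel is this branch's addition; false keeps the
    // existing CPU-only behavior. The first three members match upstream ggml.
    static struct ggml_context * init_ctx(size_t mem_size) {
        struct ggml_init_params params = {
            /*.mem_size   =*/ mem_size,   // size of the metadata/compute arena
            /*.mem_buffer =*/ NULL,       // let ggml_init allocate the buffer
            /*.no_alloc   =*/ false,      // allocate tensor data inside the arena
            /*.use_hwaccel=*/ false,      // new field: hardware acceleration off
        };
        return ggml_init(params);
    }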

View file

@@ -2810,6 +2810,7 @@ static llama_control_vector_data llama_control_vector_load_one(const llama_contr
/* .mem_size = */ ggml_tensor_overhead() * 128 + ggml_graph_overhead(),
/* .mem_buffer = */ nullptr,
/* .no_alloc = */ true,
+ /* .use_hwaccel= */ false
};
ggml_context * meta_ctx = ggml_init(meta_params);
struct gguf_init_params meta_gguf_params = {
@@ -2880,6 +2881,7 @@ static llama_control_vector_data llama_control_vector_load_one(const llama_contr
/* .mem_size = */ ggml_tensor_overhead() * n_tensors + n_bytes,
/* .mem_buffer = */ nullptr,
/* .no_alloc = */ false,
+ /* .use_hwaccel= */ false
};
struct ggml_context * ctx = ggml_init(ggml_params);

View file

@@ -1522,6 +1522,7 @@ int main(int argc, char ** argv) {
/*.mem_size =*/ compute_size,
/*.mem_buffer =*/ compute_addr,
/*.no_alloc =*/ false,
+ /*.use_hwaccel=*/ false
};
struct ggml_context * ctx0 = ggml_init(params);
@@ -1598,6 +1599,7 @@ int main(int argc, char ** argv) {
/*.mem_size =*/ compute_size,
/*.mem_buffer =*/ compute_addr,
/*.no_alloc =*/ false,
+ /*.use_hwaccel=*/ false
};
struct ggml_context * ctx0 = ggml_init(params);

View file

@@ -143,7 +143,8 @@ int main(int argc, char ** argv) {
struct ggml_init_params params = {
/*.mem_size =*/ ctx_size,
/*.mem_buffer =*/ NULL,
- /* no_alloc =*/ 0
+ /* no_alloc =*/ 0,
+ /* use_hwaccel=*/ 0
};
ctx = ggml_init(params);

View file

@@ -1634,6 +1634,7 @@ int main(int argc, char ** argv) {
ggml_tensor_overhead() * 2, // mem_size
NULL, // mem_buffer
true, // no_alloc
+ false // use_hwaccel
};
struct ggml_context * ctx_input = ggml_init(ctx_input_params);
@@ -1656,6 +1657,7 @@ int main(int argc, char ** argv) {
estimated_compute_size_wo_data, // mem_size
NULL, // mem_buffer
true, // no_alloc
+ false // use_hwaccel
};
struct ggml_context * ctx_compute = NULL;
@@ -1825,6 +1827,7 @@ int main(int argc, char ** argv) {
max_work_size, // mem_size
NULL, // mem_buffer
false, // no_alloc
+ false // use_hwaccel
};
struct ggml_context * ctx_work = ggml_init(ctx_work_params);

View file

@@ -43,6 +43,7 @@ static bool gguf_ex_write(const std::string & fname) {
/*.mem_size =*/ 128ull*1024ull*1024ull,
/*.mem_buffer =*/ NULL,
/*.no_alloc =*/ false,
+ /*.use_hwaccel=*/ false
};
struct ggml_context * ctx_data = ggml_init(params);

View file

@@ -543,6 +543,7 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
/*.mem_size =*/ ctx->buf_compute_meta.size(),
/*.mem_buffer =*/ ctx->buf_compute_meta.data(),
/*.no_alloc =*/ true,
+ /*.use_hwaccel=*/ false
};
struct ggml_context * ctx0 = ggml_init(params);
@@ -1020,9 +1021,10 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
{
std::vector<uint8_t> read_buf;
struct ggml_init_params params = {
- /*.mem_size =*/ (n_tensors + 1) * ggml_tensor_overhead(),
+ /*.mem_size =*/ (n_tensors + 1) * ggml_tensor_overhead(),
/*.mem_buffer =*/ NULL,
- /*.no_alloc =*/ true,
+ /*.no_alloc =*/ true,
+ /*.use_hwaccel=*/ false
};
new_clip->ctx_data = ggml_init(params);

View file

@@ -114,6 +114,7 @@ static bool clip_llava_handle_patches(clip_ctx * ctx_clip, std::vector<float *>
/*.mem_size =*/ ctx_size,
/*.mem_buffer =*/ NULL,
/*.no_alloc =*/ false, // NOTE: this should be false when using the legacy API
+ /*.use_hwaccel=*/ false
};
// Python reference code for full unpad:

View file

@@ -1044,6 +1044,7 @@ int main(int argc, char ** argv) {
ggml_tensor_overhead() * 2, // mem_size
NULL, // mem_buffer
true, // no_alloc
+ false // use_hwaccel
};
struct ggml_context * ctx_input = ggml_init(ctx_input_params);
@@ -1066,6 +1067,7 @@ int main(int argc, char ** argv) {
estimated_compute_size_wo_data, // mem_size
NULL, // mem_buffer
true, // no_alloc
+ false // use_hwaccel
};
struct ggml_context * ctx_compute = NULL;
@@ -1218,6 +1220,7 @@ int main(int argc, char ** argv) {
max_work_size, // mem_size
NULL, // mem_buffer
false, // no_alloc
+ false // use_hwaccel
};
struct ggml_context * ctx_work = ggml_init(ctx_work_params);

View file

@@ -1238,7 +1238,8 @@ static void ggml_backend_sched_split_graph(ggml_backend_sched_t sched, struct gg
struct ggml_init_params params = {
/* .mem_size = */ sizeof(sched->context_buffer),
/* .mem_buffer = */ sched->context_buffer,
- /* .no_alloc = */ true
+ /* .no_alloc = */ true,
+ /* .use_hwaccel =*/ false
};
ggml_free(sched->ctx);
@@ -1980,7 +1981,8 @@ struct ggml_backend_graph_copy ggml_backend_graph_copy(ggml_backend_t backend, s
struct ggml_init_params params = {
/* .mem_size = */ ggml_tensor_overhead()*hash_set.size + ggml_graph_overhead_custom(graph->size, false),
/* .mem_buffer = */ NULL,
- /* .no_alloc = */ true
+ /* .no_alloc = */ true,
+ /* .use_hwaccel =*/ false
};
struct ggml_context * ctx_allocated = ggml_init(params);

View file

@@ -2378,6 +2378,7 @@ static bool llama_kv_cache_init(
/*.mem_size =*/ 2u*n_layers*ggml_tensor_overhead(),
/*.mem_buffer =*/ NULL,
/*.no_alloc =*/ true,
+ /*.use_hwaccel=*/ false
};
ggml_context * ctx = ggml_init(params);
if (!ctx) {
@@ -4664,6 +4665,7 @@ static bool llm_load_tensors(
/*.mem_size =*/ ctx_size,
/*.mem_buffer =*/ NULL,
/*.no_alloc =*/ true,
+ /*.use_hwaccel=*/ false
};
ggml_context * ctx = ggml_init(params);
if (!ctx) {
@@ -6535,6 +6537,7 @@ struct llm_build_context {
/*.mem_size =*/ buf_compute_meta.size(),
/*.mem_buffer =*/ buf_compute_meta.data(),
/*.no_alloc =*/ true,
+ /*.use_hwaccel=*/ false
};
ctx0 = ggml_init(params);
@@ -14679,6 +14682,7 @@ static int llama_apply_lora_from_file_internal(
/* .mem_size */ ggml_tensor_overhead()*128 + ggml_graph_overhead(),
/* .mem_buffer */ nullptr,
/* .no_alloc */ true,
+ /* .use_hwaccel*/ false
};
ggml_context * lora_ctx = ggml_init(lora_init_params);
if (lora_ctx == nullptr) {
@@ -14929,7 +14933,7 @@ void llama_backend_init(void) {
// needed to initialize f16 tables
{
- struct ggml_init_params params = { 0, NULL, false };
+ struct ggml_init_params params = { 0, NULL, false, false };
struct ggml_context * ctx = ggml_init(params);
ggml_free(ctx);
}
@@ -15540,6 +15544,7 @@ static bool llama_control_vector_init(struct llama_control_vector & cvec, const
/*.mem_size =*/ n_layers * ggml_tensor_overhead(),
/*.mem_buffer =*/ NULL,
/*.no_alloc =*/ true,
+ /*.use_hwaccel=*/ false
};
ggml_context * ctx = ggml_init(params);
if (!ctx) {
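
One hunk above is worth a note: `llama_backend_init` uses a positional initializer rather than designated ones. In C, an aggregate initializer with fewer elements than the struct has members zero-initializes the remainder, so the old `{ 0, NULL, false }` would already leave `use_hwaccel` as 0; spelling out the fourth `false` keeps the initializer exhaustive, presumably to stay warning-clean if the CI builds with flags such as `-Wextra`/`-Wmissing-field-initializers` (an assumption about the CI configuration, not stated in the commit). Illustratively:

    // Both forms zero use_hwaccel; the second spells it out explicitly.
    struct ggml_init_params a = { 0, NULL, false };        // use_hwaccel implicitly 0
    struct ggml_init_params b = { 0, NULL, false, false }; // use_hwaccel explicitly false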

View file

@@ -359,6 +359,7 @@ struct test_case {
/* .mem_size = */ ggml_tensor_overhead()*128 + ggml_graph_overhead(),
/* .mem_base = */ NULL,
/* .no_alloc = */ true,
+ /* .use_hwaccel=*/false
};
ggml_context * ctx = ggml_init(params);
@@ -520,6 +521,7 @@ struct test_case {
/* .mem_size = */ ggml_tensor_overhead()*128 + ggml_graph_overhead_custom(graph_nodes, false),
/* .mem_base = */ NULL,
/* .no_alloc = */ true,
+ /* .use_hwaccel=*/false
};
ggml_context * ctx = ggml_init(params);

View file

@@ -359,6 +359,7 @@ int main(int argc, const char ** argv) {
/* .mem_size = */ 256*1024*1024,
/* .mem_buffer = */ NULL,
/* .no_alloc = */ false,
+ /* .use_hwaccel= */ false
};
int64_t ne[4];

View file

@@ -121,6 +121,7 @@ int main(int argc, char * argv[]) {
/* .mem_size = */ 1*1024,
/* .mem_buffer = */ NULL,
/* .no_alloc = */ true,
+ /* .use_hwaccel= */ false
};
struct ggml_context * ctx = ggml_init(ggml_params);

View file

@@ -265,6 +265,7 @@ int main(int argc, char * argv[]) {
/* .mem_size = */ 1*1024,
/* .mem_buffer = */ NULL,
/* .no_alloc = */ true,
+ /* .use_hwaccel= */ false
};
struct ggml_context * ctx = ggml_init(ggml_params);

View file

@@ -128,6 +128,7 @@ int main(int /*argc*/, const char ** /*argv*/) {
/* .mem_size = */ 128*1024*1024,
/* .mem_buffer = */ NULL,
/* .no_alloc = */ false,
+ /* .use_hwaccel= */ false
};
std::vector<uint8_t> work_buffer;