diff --git a/llama.cpp b/llama.cpp
index 07482eaf2..fc114b164 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -1638,7 +1638,7 @@ struct llama_context * llama_init_from_file(
     ggml_type memory_type = params.f16_kv ? GGML_TYPE_F16 : GGML_TYPE_F32;

     if (!llama_model_load(path_model, *ctx, params.n_ctx, params.n_parts, memory_type,
-                          params.vocab_only, , params.progress_callback,
+                          params.vocab_only, params.progress_callback,
                           params.progress_callback_user_data)) {
         fprintf(stderr, "%s: failed to load model\n", __func__);
         llama_free(ctx);