diff --git a/src/llama.cpp b/src/llama.cpp
index bc4e408e0..113749bf8 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -2247,6 +2247,8 @@ static ggml_backend_buffer_type_t llama_default_buffer_type_cpu(bool host_buffer
     GGML_UNUSED(host_buffer);
 }
 
+namespace {
+
 //
 // globals
 //
@@ -2267,7 +2269,7 @@ struct llama_state {
     void * log_callback_user_data = nullptr;
 };
 
-static llama_state g_state;
+llama_state g_state;
 
 // available llama models
 enum e_model {
@@ -2333,9 +2335,9 @@ enum e_model {
     MODEL_27B,
 };
 
-static const size_t kiB = 1024;
-static const size_t MiB = 1024*kiB;
-static const size_t GiB = 1024*MiB;
+const size_t kiB = 1024;
+const size_t MiB = 1024*kiB;
+const size_t GiB = 1024*MiB;
 
 struct llama_hparams {
     bool vocab_only;
@@ -2839,6 +2841,8 @@ struct llama_control_vector {
     }
 };
 
+} // namespace
+
 struct llama_model {
     e_model type = MODEL_UNKNOWN;
     llm_arch arch = LLM_ARCH_UNKNOWN;