examples: cache hf model when --model not provided (#7353)

* examples: cache hf model when --model not provided
Amir 2024-05-21 17:13:12 +03:00 committed by GitHub
parent d8ee902227
commit 11474e756d
3 changed files with 34 additions and 1 deletion
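
Per the commit title, when --model is not given but a Hugging Face model file is requested, the examples fall back to storing the download under a per-user cache directory rather than the working directory. A minimal sketch of that fallback is below; the struct and field names (example_params, model, hf_file) are assumptions for illustration, while get_cache_directory() is the helper declared in the header diff that follows. The actual wiring in this commit may differ.

// Sketch only: pick a default model path from the cache directory
// when --model was not supplied. Field names are assumptions.
#include <string>

std::string get_cache_directory();   // declared in the header diff below

struct example_params {
    std::string model;    // value of --model (may be empty)
    std::string hf_file;  // model file to fetch from Hugging Face
};

static void apply_model_default(example_params & params) {
    if (params.model.empty() && !params.hf_file.empty()) {
        // e.g. ~/.cache/llama.cpp/<hf_file> instead of ./<hf_file>
        params.model = get_cache_directory() + params.hf_file;
    }
}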


@@ -281,6 +281,7 @@ bool llama_should_add_bos_token(const llama_model * model);
 //
 bool create_directory_with_parents(const std::string & path);
+std::string get_cache_directory();
 void dump_vector_float_yaml(FILE * stream, const char * prop_name, const std::vector<float> & data);
 void dump_vector_int_yaml(FILE * stream, const char * prop_name, const std::vector<int> & data);
 void dump_string_yaml_multiline(FILE * stream, const char * prop_name, const char * data);
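
For reference, a minimal sketch of what get_cache_directory() could return, assuming a LLAMA_CACHE environment override plus the XDG convention on Linux; this is an illustration of the idea, not the exact body added to the implementation file.

// Sketch only: resolve a per-user cache directory with a trailing separator.
#include <cstdlib>
#include <string>

std::string get_cache_directory() {
    std::string cache_directory;
    if (const char * env = std::getenv("LLAMA_CACHE")) {
        cache_directory = env;                                   // explicit override
    } else if (const char * xdg = std::getenv("XDG_CACHE_HOME")) {
        cache_directory = std::string(xdg) + "/llama.cpp";       // XDG convention
    } else if (const char * home = std::getenv("HOME")) {
        cache_directory = std::string(home) + "/.cache/llama.cpp";
    } else {
        cache_directory = ".cache/llama.cpp";                    // last-resort fallback
    }
    if (!cache_directory.empty() && cache_directory.back() != '/') {
        cache_directory += "/";                                  // ensure trailing separator
    }
    return cache_directory;
}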