examples: cache hf model when --model not provided

author Amir, 2024-05-20 15:05:32 +00:00
parent 5372f9bdb0
commit 2e70b6e374
2 changed files with 0 additions and 17 deletions

common/common.cpp

@@ -2081,17 +2081,6 @@ static bool llama_download_file(const std::string & url, const std::string & pat
         }
     }
-
-    // Create parent directories if not exist
-    const std::vector<std::string> path_parts = string_split(path_temporary, DIRECTORY_SEPARATOR);
-    std::string parent_dir = "";
-    struct stat st;
-    for (unsigned i = 0; i < path_parts.size() - 1; i++) {
-        parent_dir += path_parts[i] + DIRECTORY_SEPARATOR;
-        if (stat(parent_dir.c_str(), &st) != 0) {
-            mkdir(parent_dir.c_str(), S_IRWXU);
-        }
-    }
 
     // Set the output file
     std::unique_ptr<FILE, decltype(&fclose)> outfile(fopen(path_temporary.c_str(), "wb"), fclose);
     if (!outfile) {

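For context, the deleted block walks the target path one component at a time, stat()s each prefix, and mkdir()s it when missing, so the download destination's parent directories always exist. Below is a minimal, self-contained sketch of that technique; string_split and DIRECTORY_SEPARATOR are re-implemented here as stand-ins for the helpers the original code takes from common, and create_parent_dirs is a hypothetical wrapper name, not part of the diff.

#include <cstddef>
#include <string>
#include <vector>
#include <sys/stat.h>
#include <sys/types.h>

#ifdef _WIN32
#include <direct.h>
#define mkdir(path, mode) _mkdir(path) // _mkdir takes no mode on Windows
#define DIRECTORY_SEPARATOR '\\'
#else
#define DIRECTORY_SEPARATOR '/'
#endif

// Stand-in for the string_split helper the diff assumes:
// splits "a/b/c" into {"a", "b", "c"} on the given separator.
static std::vector<std::string> string_split(const std::string & s, char sep) {
    std::vector<std::string> parts;
    std::string part;
    for (const char c : s) {
        if (c == sep) {
            parts.push_back(part);
            part.clear();
        } else {
            part += c;
        }
    }
    parts.push_back(part);
    return parts;
}

// Hypothetical wrapper around the removed loop: create every missing
// parent directory of `path`; the last component (the file name) is skipped.
static void create_parent_dirs(const std::string & path) {
    const std::vector<std::string> path_parts = string_split(path, DIRECTORY_SEPARATOR);
    std::string parent_dir;
    struct stat st;
    for (size_t i = 0; i + 1 < path_parts.size(); i++) {
        parent_dir += path_parts[i] + DIRECTORY_SEPARATOR;
        if (stat(parent_dir.c_str(), &st) != 0) {
            mkdir(parent_dir.c_str(), S_IRWXU);
        }
    }
}

int main() {
    // e.g. creates .cache/ and .cache/models/ if they are missing
    create_parent_dirs(".cache/models/ggml-model.gguf.tmp");
    return 0;
}

Note that the original loop ignores mkdir's return value too; a hardened version would check errno for failures other than EEXIST.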
common/common.h

@@ -32,12 +32,6 @@
 } while(0)
 
 #define DEFAULT_MODEL_PATH "models/7B/ggml-model-f16.gguf"
-#define DEFAULT_LLAMA_CACHE ".cache/"
-
-#ifdef _WIN32
-#include <direct.h>
-#define mkdir(path, mode) _mkdir(path) // On Windows, _mkdir does not take mode
-#endif
 
 // build info
 extern int LLAMA_BUILD_NUMBER;
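The removed #ifdef block is a standard portability shim: POSIX mkdir() takes a mode argument, while the Windows CRT's _mkdir() takes only the path, so the macro discards the mode and lets shared code keep the POSIX call shape on both platforms. A minimal sketch of the shim in isolation (the ".cache" directory name is just an example):

#include <sys/stat.h>
#include <sys/types.h>

#ifdef _WIN32
#include <direct.h>
// _mkdir has no mode parameter; the macro drops the second argument
// so cross-platform callers can keep writing mkdir(path, mode).
#define mkdir(path, mode) _mkdir(path)
#endif

int main() {
    // On POSIX this passes the mode through; on Windows it expands
    // to _mkdir(".cache") and the mode argument is discarded.
    mkdir(".cache", S_IRWXU);
    return 0;
}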