llama_model_loader: if the declared n_tensors does not equal the number of tensors loaded from the splits, throw an exception instead of asserting

This commit is contained in:
Pierrick HYMBERT 2024-03-22 06:48:15 +01:00
parent 1a179bfc4e
commit 7cbe1eac78

View file

@ -2919,7 +2919,10 @@ struct llama_model_loader {
gguf_free(ctx_gguf);
}
get_key(llm_kv(LLM_KV_SPLIT_TENSORS_COUNT), n_tensors);
GGML_ASSERT(n_tensors == (int) weights.size());
int n_tensors_loaded = (int) weights.size();
if (n_tensors != n_tensors_loaded) {
throw std::runtime_error(format("corrupted model: %d tensors expected but %d found", n_tensors, n_tensors_loaded));
}
LLAMA_LOG_INFO("%s: additional %d GGUFs metadata loaded.\n", __func__, n_split);
}