llama_model_loader: if the declared n_tensors does not equal the number of tensors loaded from the split, throw an exception instead of asserting
This commit is contained in:
parent
1a179bfc4e
commit
7cbe1eac78
1 changed file with 4 additions and 1 deletion
|
@ -2919,7 +2919,10 @@ struct llama_model_loader {
|
|||
gguf_free(ctx_gguf);
|
||||
}
|
||||
get_key(llm_kv(LLM_KV_SPLIT_TENSORS_COUNT), n_tensors);
|
||||
GGML_ASSERT(n_tensors == (int) weights.size());
|
||||
int n_tensors_loaded = (int) weights.size();
|
||||
if (n_tensors != n_tensors_loaded) {
|
||||
throw std::runtime_error(format("corrupted model: %d tensors expected but %d found", n_tensors, n_tensors_loaded));
|
||||
}
|
||||
|
||||
LLAMA_LOG_INFO("%s: additional %d GGUFs metadata loaded.\n", __func__, n_split);
|
||||
}
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue