llama : make all LLM maps const

This also requires using `std::map::at` instead of `operator[]`,
which has no const overload and therefore cannot be used on const maps.
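
For illustration only (not part of this change), here is a minimal standalone sketch of the issue: `operator[]` may insert a default-constructed value for a missing key, so it is a non-const member and does not compile on a const map, while `at` has a const overload. The map and its contents below are toy stand-ins, not the actual llama.cpp tables.

```cpp
#include <cstdio>
#include <map>
#include <string>

// Toy map standing in for the LLM_*_NAMES tables; names are illustrative.
static const std::map<int, std::string> NAMES = {
    { 0, "llama"  },
    { 1, "falcon" },
};

int main() {
    // NAMES[0];                              // error: operator[] has no const overload
    std::printf("%s\n", NAMES.at(0).c_str()); // ok: at() is usable on a const map
    return 0;
}
```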

* llama : name LLM_ARCH_UNKNOWN to "(unknown)"

This avoids errors from `std::map::at` when getting the general name
of the model architecture. "(unknown)" is used instead of an empty string,
as suggested in
https://github.com/ggerganov/llama.cpp/pull/5820#issuecomment-1973735284
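
As an illustrative aside (toy names below, not the actual llama.cpp symbols): the entry matters because `std::map::at` throws `std::out_of_range` for absent keys, so LLM_ARCH_UNKNOWN needs a real entry for the name lookup to succeed.

```cpp
#include <cstdio>
#include <map>
#include <stdexcept>

// Illustrative stand-ins for llm_arch / LLM_ARCH_NAMES.
enum toy_arch { TOY_ARCH_LLAMA, TOY_ARCH_UNKNOWN };

static const std::map<toy_arch, const char *> TOY_ARCH_NAMES = {
    { TOY_ARCH_LLAMA,   "llama"     },
    { TOY_ARCH_UNKNOWN, "(unknown)" }, // without this entry, at() below would throw
};

int main() {
    try {
        std::printf("%s\n", TOY_ARCH_NAMES.at(TOY_ARCH_UNKNOWN)); // prints "(unknown)"
    } catch (const std::out_of_range &) {
        std::printf("architecture name not found\n");
    }
    return 0;
}
```
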
Author: Francis Couture-Harpin
Date:   2024-03-01 14:26:20 -05:00
Parent: 3b257f8867
Commit: 6cf481b3ac


@@ -215,7 +215,7 @@ enum llm_arch {
     LLM_ARCH_UNKNOWN,
 };
 
-static std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
+static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
     { LLM_ARCH_LLAMA, "llama" },
     { LLM_ARCH_FALCON, "falcon" },
     { LLM_ARCH_GPT2, "gpt2" },
@@ -240,6 +240,7 @@ static std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
     { LLM_ARCH_MINICPM, "minicpm" },
     { LLM_ARCH_GEMMA, "gemma" },
     { LLM_ARCH_STARCODER2, "starcoder2" },
+    { LLM_ARCH_UNKNOWN, "(unknown)" },
 };
 
 enum llm_kv {
@@ -300,7 +301,7 @@ enum llm_kv {
     LLM_KV_TOKENIZER_RWKV,
 };
 
-static std::map<llm_kv, const char *> LLM_KV_NAMES = {
+static const std::map<llm_kv, const char *> LLM_KV_NAMES = {
     { LLM_KV_GENERAL_ARCHITECTURE, "general.architecture" },
     { LLM_KV_GENERAL_QUANTIZATION_VERSION, "general.quantization_version" },
     { LLM_KV_GENERAL_ALIGNMENT, "general.alignment" },
@@ -364,7 +365,7 @@ struct LLM_KV {
     llm_arch arch;
 
     std::string operator()(llm_kv kv) const {
-        return ::format(LLM_KV_NAMES[kv], LLM_ARCH_NAMES[arch]);
+        return ::format(LLM_KV_NAMES.at(kv), LLM_ARCH_NAMES.at(arch));
     }
 };
@@ -399,7 +400,7 @@ enum llm_tensor {
     LLM_TENSOR_LAYER_OUT_NORM,
 };
 
-static std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NAMES = {
+static const std::map<llm_arch, const std::map<llm_tensor, std::string>> LLM_TENSOR_NAMES = {
     {
         LLM_ARCH_LLAMA,
         {
@@ -810,13 +811,6 @@ static std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NAMES =
 static llm_arch llm_arch_from_string(const std::string & name) {
     for (const auto & kv : LLM_ARCH_NAMES) { // NOLINT
         if (kv.second == nullptr) {
-            // LLM_ARCH_UNKNOWN does not have a name,
-            // but is somehow still in the LLM_ARCH_NAMES map.
-            if (kv.first == LLM_ARCH_UNKNOWN) {
-                // skip string comparison
-                continue;
-            }
-            // known architectures should always have a name
             GGML_ASSERT(false && "missing architecture in LLM_ARCH_NAMES");
         }
         if (kv.second == name) {
@@ -842,38 +836,38 @@ struct LLM_TN {
     llm_arch arch;
 
     std::string operator()(llm_tensor tensor) const {
-        if (LLM_TENSOR_NAMES[arch].find(tensor) == LLM_TENSOR_NAMES[arch].end()) {
+        if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
             return "__missing__";
         }
-        return LLM_TENSOR_NAMES[arch].at(tensor);
+        return LLM_TENSOR_NAMES.at(arch).at(tensor);
     }
 
     std::string operator()(llm_tensor tensor, const std::string & suffix) const {
-        if (LLM_TENSOR_NAMES[arch].find(tensor) == LLM_TENSOR_NAMES[arch].end()) {
+        if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
             return "__missing__";
         }
-        return LLM_TENSOR_NAMES[arch].at(tensor) + "." + suffix;
+        return LLM_TENSOR_NAMES.at(arch).at(tensor) + "." + suffix;
     }
 
     std::string operator()(llm_tensor tensor, int bid) const {
-        if (LLM_TENSOR_NAMES[arch].find(tensor) == LLM_TENSOR_NAMES[arch].end()) {
+        if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
             return "__missing__";
         }
-        return ::format(LLM_TENSOR_NAMES[arch].at(tensor).c_str(), bid);
+        return ::format(LLM_TENSOR_NAMES.at(arch).at(tensor).c_str(), bid);
     }
 
     std::string operator()(llm_tensor tensor, const std::string & suffix, int bid) const {
-        if (LLM_TENSOR_NAMES[arch].find(tensor) == LLM_TENSOR_NAMES[arch].end()) {
+        if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
             return "__missing__";
         }
-        return ::format(LLM_TENSOR_NAMES[arch].at(tensor).c_str(), bid) + "." + suffix;
+        return ::format(LLM_TENSOR_NAMES.at(arch).at(tensor).c_str(), bid) + "." + suffix;
     }
 
     std::string operator()(llm_tensor tensor, const std::string & suffix, int bid, int xid) const {
-        if (LLM_TENSOR_NAMES[arch].find(tensor) == LLM_TENSOR_NAMES[arch].end()) {
+        if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
             return "__missing__";
         }
-        return ::format(LLM_TENSOR_NAMES[arch].at(tensor).c_str(), bid, xid) + "." + suffix;
+        return ::format(LLM_TENSOR_NAMES.at(arch).at(tensor).c_str(), bid, xid) + "." + suffix;
     }
 };
@@ -881,7 +875,7 @@ struct LLM_TN {
 // gguf helpers
 //
 
-static std::map<int32_t, const char *> LLAMA_ROPE_SCALING_TYPES = {
+static const std::map<int32_t, const char *> LLAMA_ROPE_SCALING_TYPES = {
     { LLAMA_ROPE_SCALING_TYPE_NONE, "none" },
     { LLAMA_ROPE_SCALING_TYPE_LINEAR, "linear" },
    { LLAMA_ROPE_SCALING_TYPE_YARN, "yarn" },