From 57230b51963cfd58b1426206fda184deac03fc26 Mon Sep 17 00:00:00 2001
From: Concedo <39025047+LostRuins@users.noreply.github.com>
Date: Wed, 17 May 2023 16:28:20 +0800
Subject: [PATCH] upgrade all other formats

---
 llama.cpp             |  9 ++++++++-
 otherarch/gpt2_v2.cpp | 12 +++++++-----
 otherarch/gptj_v2.cpp | 13 ++++++-------
 3 files changed, 21 insertions(+), 13 deletions(-)

diff --git a/llama.cpp b/llama.cpp
index fddb51d82..532d6b5cd 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -56,6 +56,7 @@ static const size_t MB = 1024*1024;
 static const std::map<e_model, size_t> & MEM_REQ_SCRATCH0()
 {
     static std::map<e_model, size_t> k_sizes = {
+        { MODEL_UNKNOWN, 512ull * MB },
         { MODEL_7B,      512ull * MB },
         { MODEL_13B,     512ull * MB },
         { MODEL_30B,     512ull * MB },
@@ -67,6 +68,7 @@
 static const std::map<e_model, size_t> & MEM_REQ_SCRATCH1()
 {
     static std::map<e_model, size_t> k_sizes = {
+        { MODEL_UNKNOWN, 512ull * MB },
         { MODEL_7B,      512ull * MB },
         { MODEL_13B,     512ull * MB },
         { MODEL_30B,     512ull * MB },
@@ -79,6 +81,7 @@
 static const std::map<e_model, size_t> & MEM_REQ_KV_SELF()
 {
     static std::map<e_model, size_t> k_sizes = {
+        { MODEL_UNKNOWN, 1026ull * MB },
         { MODEL_7B,      1026ull * MB },
         { MODEL_13B,     1608ull * MB },
         { MODEL_30B,     3124ull * MB },
@@ -92,6 +95,7 @@
 static const std::map<e_model, size_t> & MEM_REQ_EVAL()
 {
     static std::map<e_model, size_t> k_sizes = {
+        { MODEL_UNKNOWN,  800ull * MB },
         { MODEL_7B,       800ull * MB },
         { MODEL_13B,     1024ull * MB },
         { MODEL_30B,     1280ull * MB },
@@ -887,7 +891,9 @@ static const char *llama_model_type_name(e_model type) {
         case MODEL_13B: return "13B";
         case MODEL_30B: return "30B";
         case MODEL_65B: return "65B";
-        default: LLAMA_ASSERT(false);
+        default:
+            printf("\nWARNING: NON-STANDARD LLAMA FILE DETECTED. DEFAULT TO 7B SIZE.\n");
+            return "UNKNOWN";
     }
 }
 
@@ -920,6 +926,7 @@ static void llama_model_load_internal(
             case 40: model.type = e_model::MODEL_13B; break;
             case 60: model.type = e_model::MODEL_30B; break;
             case 80: model.type = e_model::MODEL_65B; break;
+            default: model.type = e_model::MODEL_UNKNOWN; break;
         }
 
         hparams.n_ctx = n_ctx;
diff --git a/otherarch/gpt2_v2.cpp b/otherarch/gpt2_v2.cpp
index 96f4bf215..945722b48 100644
--- a/otherarch/gpt2_v2.cpp
+++ b/otherarch/gpt2_v2.cpp
@@ -49,7 +49,6 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
         fin.read((char *) &hparams.ftype,   sizeof(hparams.ftype));
 
         const int32_t qntvr = hparams.ftype / GGML_QNT_VERSION_FACTOR;
-        hparams.ftype %= GGML_QNT_VERSION_FACTOR;
 
         printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab);
         printf("%s: n_ctx   = %d\n", __func__, hparams.n_ctx);
@@ -57,6 +56,9 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
         printf("%s: n_head  = %d\n", __func__, hparams.n_head);
         printf("%s: n_layer = %d\n", __func__, hparams.n_layer);
         printf("%s: ftype   = %d\n", __func__, hparams.ftype);
+        printf("%s: qntvr   = %d\n", __func__, qntvr);
+
+        hparams.ftype %= GGML_QNT_VERSION_FACTOR;
     }
 
     // load vocab
@@ -134,8 +136,9 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
         ctx_size += 1.5*(n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F32)); // memory_k
         ctx_size += 1.5*(n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F32)); // memory_v
 
-        ctx_size += (6 + 12*n_layer)*256; // object overhead
+        ctx_size += (6 + 12*n_layer)*512; // object overhead
 
+        printf("%s: ggml tensor size = %d bytes\n", __func__, (int) sizeof(ggml_tensor));
         printf("%s: ggml ctx size = %6.2f MB\n", __func__, ctx_size/(1024.0*1024.0));
     }
 
@@ -431,11 +434,10 @@ bool gpt2_eval(
         {
             struct ggml_tensor * Qcur = ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 0*sizeof(float)*n_embd);
             struct ggml_tensor * Kcur = ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 1*sizeof(float)*n_embd);
-
+            struct ggml_tensor * Vcur = ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 2*sizeof(float)*n_embd);
+
             // store key and value to memory
             if (N >= 1) {
-                struct ggml_tensor * Vcur = ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 2*sizeof(float)*n_embd);
-
                 struct ggml_tensor * k = ggml_view_1d(ctx0, model.memory_k, N*n_embd, (ggml_element_size(model.memory_k)*n_embd)*(il*n_ctx + n_past));
                 struct ggml_tensor * v = ggml_view_1d(ctx0, model.memory_v, N*n_embd, (ggml_element_size(model.memory_v)*n_embd)*(il*n_ctx + n_past));
 
diff --git a/otherarch/gptj_v2.cpp b/otherarch/gptj_v2.cpp
index ccfd85897..5e1c22567 100644
--- a/otherarch/gptj_v2.cpp
+++ b/otherarch/gptj_v2.cpp
@@ -47,10 +47,9 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
         fin.read((char *) &hparams.n_head,  sizeof(hparams.n_head));
         fin.read((char *) &hparams.n_layer, sizeof(hparams.n_layer));
         fin.read((char *) &hparams.n_rot,   sizeof(hparams.n_rot));
-        fin.read((char *) &hparams.ftype,   sizeof(hparams.ftype));
+        fin.read((char *) &hparams.ftype,   sizeof(hparams.ftype));
 
         const int32_t qntvr = hparams.ftype / GGML_QNT_VERSION_FACTOR;
-        hparams.ftype %= GGML_QNT_VERSION_FACTOR;
 
         printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab);
         printf("%s: n_ctx   = %d\n", __func__, hparams.n_ctx);
@@ -59,6 +58,9 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
         printf("%s: n_layer = %d\n", __func__, hparams.n_layer);
         printf("%s: n_rot   = %d\n", __func__, hparams.n_rot);
         printf("%s: ftype   = %d\n", __func__, hparams.ftype);
+        printf("%s: qntvr   = %d\n", __func__, qntvr);
+
+        hparams.ftype %= GGML_QNT_VERSION_FACTOR;
     }
 
     // load vocab
@@ -134,7 +136,7 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
         ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(memory_type); // memory_k
         ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(memory_type); // memory_v
 
-        ctx_size += (5 + 10*n_layer)*256; // object overhead
+        ctx_size += (5 + 10*n_layer)*512; // object overhead
 
         printf("%s: ggml ctx size = %6.2f MB\n", __func__, ctx_size/(1024.0*1024.0));
     }
@@ -160,7 +162,6 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
 
         const int n_embd  = hparams.n_embd;
         const int n_layer = hparams.n_layer;
-        const int n_ctx   = hparams.n_ctx;
         const int n_vocab = hparams.n_vocab;
 
         model.layers.resize(n_layer);
@@ -358,8 +359,6 @@ bool gptj_eval(
     const int n_vocab = hparams.n_vocab;
     const int n_rot   = hparams.n_rot;
 
-    const int d_key = n_embd/n_head;
-
     static size_t buf_size = 256u*1024*1024;
     static void * buf = malloc(buf_size);
 
@@ -551,7 +550,7 @@ bool gptj_eval(
 
     //if (n_past%100 == 0) {
     //    ggml_graph_print   (&gf);
-    //    ggml_graph_dump_dot(&gf, NULL, "gpt-2.dot");
+    //    ggml_graph_dump_dot(&gf, NULL, "gpt-j.dot");
     //}
 
     //embd_w.resize(n_vocab*N);