handle ctx manip
commit d5e4cf7ffe
parent df9135e3a9
4 changed files with 22 additions and 12 deletions
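
Summary: the GPT-2, GPT-J, and GPT-NeoX loaders now capture hparams.n_ctx into a new origmaxctx variable before the file's hparams are read, and size the KV cache (memory_k / memory_v) with std::max(origmaxctx, n_ctx), so the cache is always allocated for the larger of the preset and on-disk context lengths. Below is a minimal standalone sketch of that sizing pattern; every concrete value in it is a hypothetical placeholder, not taken from this commit.

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
        // Hypothetical values: origmaxctx stands for n_ctx as preset before
        // loading, n_ctx for the value read from the model file afterwards.
        int32_t origmaxctx = 2048;
        int32_t n_ctx      = 1024;
        int32_t n_layer    = 32;
        int32_t n_embd     = 4096;

        // memory_k and memory_v each hold one F16 (2-byte) element per
        // position, layer, and embedding dimension; taking std::max keeps
        // the allocation valid for either context length.
        size_t per_tensor = (size_t)std::max(origmaxctx, n_ctx) * n_layer * n_embd * 2;
        printf("memory_k = memory_v = %zu bytes\n", per_tensor);
        return 0;
    }
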
@@ -12,6 +12,7 @@
 #include <string>
 #include <vector>
 #include <iostream>
+#include <algorithm>
 
 #include "model_adapter.h"
 
@@ -39,6 +40,8 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
         }
     }
 
+    int32_t origmaxctx = model.hparams.n_ctx;
+
     // load hparams
     {
         auto & hparams = model.hparams;
@@ -53,7 +56,7 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
         const int32_t qntvr = hparams.ftype / GGML_QNT_VERSION_FACTOR;
 
         printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab);
-        printf("%s: n_ctx = %d\n", __func__, hparams.n_ctx);
+        printf("%s: n_ctx = %d (%d)\n", __func__, hparams.n_ctx,origmaxctx);
         printf("%s: n_embd = %d\n", __func__, hparams.n_embd);
         printf("%s: n_head = %d\n", __func__, hparams.n_head);
         printf("%s: n_layer = %d\n", __func__, hparams.n_layer);
@@ -154,8 +157,8 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
         ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_sizef(wtype)); // c_mlp_proj_w
         ctx_size += n_layer*( n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_mlp_proj_b
 
-        ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F16); // memory_k
-        ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F16); // memory_v
+        ctx_size += std::max(origmaxctx,n_ctx)*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F16); // memory_k
+        ctx_size += std::max(origmaxctx,n_ctx)*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F16); // memory_v
 
         ctx_size += (6 + 12*n_layer)*1024; // object overhead
 
@@ -256,7 +259,7 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
         const int n_layer = hparams.n_layer;
         const int n_ctx = hparams.n_ctx;
 
-        const int n_mem = n_layer*n_ctx;
+        const int n_mem = n_layer*std::max(origmaxctx,n_ctx);
         const int n_elements = n_embd*n_mem;
 
         model.memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);
@@ -12,6 +12,7 @@
 #include <string>
 #include <vector>
 #include <iostream>
+#include <algorithm>
 
 #include "model_adapter.h"
 
@@ -39,6 +40,8 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
         }
     }
 
+    int32_t origmaxctx = model.hparams.n_ctx;
+
     // load hparams
     {
         auto & hparams = model.hparams;
@@ -54,7 +57,7 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
         const int32_t qntvr = hparams.ftype / GGML_QNT_VERSION_FACTOR;
 
         printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab);
-        printf("%s: n_ctx = %d\n", __func__, hparams.n_ctx);
+        printf("%s: n_ctx = %d (%d)\n", __func__, hparams.n_ctx,origmaxctx);
         printf("%s: n_embd = %d\n", __func__, hparams.n_embd);
         printf("%s: n_head = %d\n", __func__, hparams.n_head);
         printf("%s: n_layer = %d\n", __func__, hparams.n_layer);
@@ -138,8 +141,8 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
         ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_sizef(wtype)); // c_mlp_proj_w
         ctx_size += n_layer*( n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_mlp_proj_b
 
-        ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(memory_type); // memory_k
-        ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(memory_type); // memory_v
+        ctx_size += std::max(origmaxctx,n_ctx)*n_layer*n_embd*ggml_type_sizef(memory_type); // memory_k
+        ctx_size += std::max(origmaxctx,n_ctx)*n_layer*n_embd*ggml_type_sizef(memory_type); // memory_v
 
         ctx_size += (5 + 10*n_layer)*512; // object overhead
 
@@ -232,7 +235,7 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
         const int n_layer = hparams.n_layer;
         const int n_ctx = hparams.n_ctx;
 
-        const int n_mem = n_layer*n_ctx;
+        const int n_mem = n_layer*std::max(origmaxctx,n_ctx);
         const int n_elements = n_embd*n_mem;
 
         model.memory_k = ggml_new_tensor_1d(ctx, memory_type, n_elements);
@@ -12,6 +12,7 @@
 #include <string>
 #include <vector>
 #include <iostream>
+#include <algorithm>
 
 #include "model_adapter.h"
 
@@ -12,6 +12,7 @@
 #include <string>
 #include <vector>
 #include <iostream>
+#include <algorithm>
 
 #if defined(GGML_USE_CLBLAST)
 #include "ggml-opencl.h"
@@ -37,6 +38,8 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
         }
     }
 
+    int32_t origmaxctx = model.hparams.n_ctx;
+
     // load hparams
     {
         auto & hparams = model.hparams;
@@ -53,7 +56,7 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
         const int32_t qntvr = hparams.ftype / GGML_QNT_VERSION_FACTOR;
 
         printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab);
-        printf("%s: n_ctx = %d\n", __func__, hparams.n_ctx);
+        printf("%s: n_ctx = %d (%d)\n", __func__, hparams.n_ctx,origmaxctx);
         printf("%s: n_embd = %d\n", __func__, hparams.n_embd);
         printf("%s: n_head = %d\n", __func__, hparams.n_head);
         printf("%s: n_layer = %d\n", __func__, hparams.n_layer);
@@ -133,8 +136,8 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
         ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_sizef(wtype)); // c_mlp_proj_w
         ctx_size += n_layer*( n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_mlp_proj_b
 
-        ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F16); // memory_k
-        ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F16); // memory_v
+        ctx_size += std::max((size_t)origmaxctx,n_ctx)*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F16); // memory_k
+        ctx_size += std::max((size_t)origmaxctx,n_ctx)*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F16); // memory_v
 
         ctx_size += (6 + 16*n_layer)*1024; // object overhead
 
@@ -232,7 +235,7 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
         const int n_layer = hparams.n_layer;
         const int n_ctx = hparams.n_ctx;
 
-        const int64_t n_mem = n_layer*n_ctx;
+        const int64_t n_mem = n_layer*std::max(origmaxctx,n_ctx);
         const int64_t n_elements = n_embd*n_mem;
 
         model.memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);