Apply suggestions from code review
parent 9eb4598fa3
commit f056beb384
3 changed files with 7 additions and 4 deletions
main.cpp (6 changes)
@@ -218,7 +218,7 @@ bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab
         ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F32); // memory_k
         ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F32); // memory_v
 
-        ctx_size += (5 + 10*n_layer)*hparams.n_ctx; // object overhead
+        ctx_size += (5 + 10*n_layer)*256; // object overhead
 
         fprintf(stderr, "%s: ggml ctx size = %6.2f MB\n", __func__, ctx_size/(1024.0*1024.0));
     }
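For orientation, the line changed above only affects the size estimate for the ggml context: ggml's per-object bookkeeping is a fixed cost per object rather than per token, so multiplying it by hparams.n_ctx overstated the allocation, and the review suggestion replaces it with a flat per-object constant (256). A minimal sketch of the KV-cache-plus-overhead part of that estimate, using hypothetical 7B-style hyperparameters and 4 bytes standing in for ggml_type_sizef(GGML_TYPE_F32):

#include <cstdio>

int main() {
    // Hypothetical hyperparameters, roughly 7B-sized; illustrative only.
    const int n_ctx   = 512;
    const int n_layer = 32;
    const int n_embd  = 4096;

    // Stand-in for ggml_type_sizef(GGML_TYPE_F32): 4 bytes per element.
    const double f32_size = 4.0;

    double ctx_size = 0.0;
    ctx_size += n_ctx*n_layer*n_embd*f32_size; // memory_k
    ctx_size += n_ctx*n_layer*n_embd*f32_size; // memory_v

    // Per-object bookkeeping is a fixed cost per tensor/object, so it is
    // estimated with a constant instead of being scaled by n_ctx.
    ctx_size += (5 + 10*n_layer)*256.0; // object overhead

    printf("kv cache + overhead estimate = %6.2f MB\n", ctx_size/(1024.0*1024.0));
    return 0;
}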
@@ -547,7 +547,9 @@ bool llama_eval(
 
     const int d_key = n_embd/n_head;
 
-    static size_t buf_size = hparams.n_ctx*1024*1024;
+    // TODO: check if this size scales with n_ctx linearly and remove constant. somehow I feel it wasn't the case
+    // static size_t buf_size = hparams.n_ctx*1024*1024;
+    static size_t buf_size = 512u*1024*1024;
     static void * buf = malloc(buf_size);
 
     if (mem_per_token > 0 && mem_per_token*N > buf_size) {
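The second hunk swaps a buffer sized as hparams.n_ctx megabytes for a fixed 512 MB scratch allocation, with a TODO to check whether the requirement really scales linearly with n_ctx; the if on the last context line is the guard that grows the buffer when the measured per-token memory no longer fits. A self-contained sketch of that grow-on-demand pattern (mem_per_token, N, and the 10% headroom here are illustrative, not copied from the file):

#include <cstdio>
#include <cstdlib>

// Scratch buffer reused across eval calls; starts at a fixed 512 MB.
static size_t buf_size = 512u*1024*1024;
static void * buf      = malloc(buf_size);

// Grow the scratch buffer if the measured per-token cost times the batch size
// no longer fits. mem_per_token is assumed to have been measured on a previous
// call; N is the number of tokens in the current batch.
static bool ensure_scratch(size_t mem_per_token, int N) {
    if (mem_per_token > 0 && mem_per_token*N > buf_size) {
        // add ~10% headroom so small batch-size changes don't reallocate every call
        const size_t buf_size_new = (size_t)(1.1*(mem_per_token*N));
        void * buf_new = realloc(buf, buf_size_new);
        if (buf_new == nullptr) {
            fprintf(stderr, "failed to grow scratch buffer to %zu bytes\n", buf_size_new);
            return false;
        }
        buf      = buf_new;
        buf_size = buf_size_new;
    }
    return true;
}

int main() {
    // e.g. 2 MB measured per token on a warm-up call, 512-token batch
    if (ensure_scratch(2u*1024*1024, 512)) {
        printf("scratch buffer is %zu MB\n", buf_size/(1024*1024));
    }
    free(buf);
    return 0;
}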
utils.h (1 change)
@@ -18,6 +18,7 @@ struct gpt_params {
     int32_t n_predict = 128; // new tokens to predict
     int32_t repeat_last_n = 64; // last n tokens to penalize
+    int32_t n_ctx = 512; //context size
 
     // sampling parameters
     int32_t top_k = 40;
     float   top_p = 0.95f;
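The utils.h hunk adds the context size to the runtime parameter struct, so it is no longer an implicit constant in the loader and eval paths. A sketch of how a caller might fill the new field from the command line; the --ctx_size flag and the parse_ctx_size helper are hypothetical illustrations, not part of this diff:

#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>

struct gpt_params {
    int32_t n_predict     = 128; // new tokens to predict
    int32_t repeat_last_n = 64;  // last n tokens to penalize
    int32_t n_ctx         = 512; // context size

    // sampling parameters
    int32_t top_k = 40;
    float   top_p = 0.95f;
};

// Hypothetical argument hook: "--ctx_size N" overrides the default n_ctx.
static void parse_ctx_size(int argc, char ** argv, gpt_params & params) {
    for (int i = 1; i + 1 < argc; ++i) {
        if (std::strcmp(argv[i], "--ctx_size") == 0) {
            params.n_ctx = std::atoi(argv[i + 1]);
        }
    }
}

int main(int argc, char ** argv) {
    gpt_params params;
    parse_ctx_size(argc, argv, params);
    printf("using a %d-token context\n", params.n_ctx);
    return 0;
}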