added ctx_size parameter (#148)

* added ctx_size parameter

* added it in more places

* Apply suggestions from code review

---------

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
Justin Suess 2023-03-15 15:42:40 -04:00 committed by GitHub
parent 16b2c61a22
commit 2d64715ad4
3 changed files with 8 additions and 3 deletions


@@ -547,6 +547,8 @@ bool llama_eval(
     const int d_key = n_embd/n_head;
+    // TODO: check if this size scales with n_ctx linearly and remove constant. somehow I feel it wasn't the case
+    // static size_t buf_size = hparams.n_ctx*1024*1024;
     static size_t buf_size = 512u*1024*1024;
     static void * buf = malloc(buf_size);
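The TODO kept in this hunk points at replacing the hardcoded 512 MiB scratch buffer with one sized from the context length. A minimal sketch of that idea, assuming the roughly linear scaling the commented-out line implies; eval_buf_size is a hypothetical helper, and the 1 MiB-per-context-slot factor is an assumption, not a measured value:

#include <cstddef>

// Sketch only: derive the eval scratch buffer size from n_ctx instead of a
// fixed 512 MiB, per the TODO above. Keeps the current constant as a floor
// so small contexts do not under-allocate.
static size_t eval_buf_size(int n_ctx) {
    const size_t per_slot = 1024u * 1024u;          // assumed ~1 MiB per context slot
    const size_t fallback = 512u * 1024u * 1024u;   // the constant this commit hardcodes
    const size_t scaled   = (size_t) n_ctx * per_slot;
    return scaled > fallback ? scaled : fallback;
}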
@@ -819,8 +821,7 @@ int main(int argc, char ** argv) {
     // load the model
     {
         const int64_t t_start_us = ggml_time_us();
-
-        if (!llama_model_load(params.model, model, vocab, 512)) { // TODO: set context from user input ??
+        if (!llama_model_load(params.model, model, vocab, params.n_ctx)) {
             fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str());
             return 1;
         }
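With params.n_ctx wired through to llama_model_load, the context length can be chosen at launch instead of the old hardcoded 512. A usage sketch: the PR title names the parameter ctx_size, but the exact flag spelling lives in the utils.cpp change, which these hunks do not show, so the option below is an assumption:

# Assumed flag spelling; check the utils.cpp change in this commit for the exact option.
./main -m ./models/7B/ggml-model-q4_0.bin --ctx_size 2048 -p "Hello"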