From 18bb0ab1279033647a2a521a7d5d963ca967bc9a Mon Sep 17 00:00:00 2001 From: Concedo <39025047+LostRuins@users.noreply.github.com> Date: Fri, 4 Aug 2023 21:47:17 +0800 Subject: [PATCH] up ver, support 16k ctx --- expose.h | 2 +- gpttype_adapter.cpp | 3 ++- koboldcpp.py | 8 ++++---- otherarch/gpt2_v3.cpp | 3 ++- otherarch/gptj_v3.cpp | 2 +- otherarch/mpt_v3.cpp | 2 +- otherarch/neox_v3.cpp | 2 +- 7 files changed, 12 insertions(+), 10 deletions(-) diff --git a/expose.h b/expose.h index 2a8b674fa..0905bc3ec 100644 --- a/expose.h +++ b/expose.h @@ -75,7 +75,7 @@ struct generation_inputs struct generation_outputs { int status = -1; - char text[16384]; //16kb should be enough for any response + char text[24576]; //24kb should be enough for any response }; extern std::string executable_path; diff --git a/gpttype_adapter.cpp b/gpttype_adapter.cpp index daf56d983..e3941453f 100644 --- a/gpttype_adapter.cpp +++ b/gpttype_adapter.cpp @@ -374,7 +374,8 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in else { //approximate NTK aware ctx - rope_freq_base = (params.n_ctx <= 3072 ? 26000.0f : (params.n_ctx <= 4096 ? 32000.0f : (params.n_ctx <= 6144 ? 54000.0f : 82684.0f))); + rope_freq_base = (params.n_ctx <= 3072 ? 26000.0f : (params.n_ctx <= 4096 ? 32000.0f : (params.n_ctx <= 6144 ? 54000.0f : (params.n_ctx <= 8192 ? 82684.0f : (params.n_ctx <= 12288 ? 
140000.0f : 200000.0f))))); + } printf("Using automatic RoPE scaling (scale:%.3f, base:%.1f)\n",rope_freq_scale,rope_freq_base); diff --git a/koboldcpp.py b/koboldcpp.py index 59b6d86e3..1e140b9bb 100755 --- a/koboldcpp.py +++ b/koboldcpp.py @@ -66,7 +66,7 @@ class generation_inputs(ctypes.Structure): class generation_outputs(ctypes.Structure): _fields_ = [("status", ctypes.c_int), - ("text", ctypes.c_char * 16384)] + ("text", ctypes.c_char * 24576)] handle = None @@ -303,7 +303,7 @@ maxhordectx = 1024 maxhordelen = 256 modelbusy = threading.Lock() defaultport = 5001 -KcppVersion = "1.38" +KcppVersion = "1.39" showdebug = True showsamplerwarning = True showmaxctxwarning = True @@ -711,7 +711,7 @@ def show_new_gui(): # slider data blasbatchsize_values = ["-1", "32", "64", "128", "256", "512", "1024"] blasbatchsize_text = ["Don't Batch BLAS","32","64","128","256","512","1024"] - contextsize_text = ["512", "1024", "2048", "3072", "4096", "6144", "8192"] + contextsize_text = ["512", "1024", "2048", "3072", "4096", "6144", "8192", "12288", "16384"] runopts = [opt for lib, opt in lib_option_pairs if file_exists(lib) or os.name == 'nt' and file_exists(opt + ".dll")] antirunopts = [opt.replace("Use ", "") for lib, opt in lib_option_pairs if not file_exists(lib) or os.name == 'nt' and not file_exists(opt + ".dll")] if not any(runopts): @@ -1713,7 +1713,7 @@ if __name__ == '__main__': parser.add_argument("--blasthreads", help="Use a different number of threads during BLAS if specified. Otherwise, has the same value as --threads",metavar=('[threads]'), type=int, default=0) parser.add_argument("--psutil_set_threads", help="Experimental flag. If set, uses psutils to determine thread count based on physical cores.", action='store_true') parser.add_argument("--highpriority", help="Experimental flag. If set, increases the process CPU priority, potentially speeding up generation. 
Use caution.", action='store_true') - parser.add_argument("--contextsize", help="Controls the memory allocated for maximum context size, only change if you need more RAM for big contexts. (default 2048)", type=int,choices=[512,1024,2048,3072,4096,6144,8192], default=2048) + parser.add_argument("--contextsize", help="Controls the memory allocated for maximum context size, only change if you need more RAM for big contexts. (default 2048)", type=int,choices=[512,1024,2048,3072,4096,6144,8192,12288,16384], default=2048) parser.add_argument("--blasbatchsize", help="Sets the batch size used in BLAS processing (default 512). Setting it to -1 disables BLAS mode, but keeps other benefits like GPU offload.", type=int,choices=[-1,32,64,128,256,512,1024], default=512) parser.add_argument("--ropeconfig", help="If set, uses customized RoPE scaling from configured frequency scale and frequency base (e.g. --ropeconfig 0.25 10000). Otherwise, uses NTK-Aware scaling set automatically based on context size. For linear rope, simply set the freq-scale and ignore the freq-base",metavar=('[rope-freq-scale]', '[rope-freq-base]'), default=[0.0, 10000.0], type=float, nargs='+') parser.add_argument("--stream", help="Uses streaming when generating tokens. 
Only for the Kobold Lite UI.", action='store_true') diff --git a/otherarch/gpt2_v3.cpp b/otherarch/gpt2_v3.cpp index 608a61ac2..dd178ca02 100644 --- a/otherarch/gpt2_v3.cpp +++ b/otherarch/gpt2_v3.cpp @@ -416,9 +416,10 @@ bool gpt2_eval( // use 2 scratch buffers // TODO: very hacky solution - reimplement in a more elegant way - static size_t scr0_size = (n_embd>2400?512u:256u)*1024*1024; + static size_t scr0_size = (n_embd>2400?512u:256u)*1024*1024*(hparams.n_ctx>8192?2:1); static size_t scr1_size = (n_embd>2400?512u:256u)*1024*1024; + static void * scr0 = malloc(scr0_size); static void * scr1 = malloc(scr1_size); diff --git a/otherarch/gptj_v3.cpp b/otherarch/gptj_v3.cpp index 91eb355bb..1cc566208 100644 --- a/otherarch/gptj_v3.cpp +++ b/otherarch/gptj_v3.cpp @@ -417,7 +417,7 @@ bool gptj_eval( // use 2 scratch buffers // TODO: very hacky solution - reimplement in a more elegant way - static size_t scr0_size = 512u*1024*1024; + static size_t scr0_size = 512u*1024*1024*(hparams.n_ctx>8192?2:1); static size_t scr1_size = 512u*1024*1024; static void * scr0 = malloc(scr0_size); diff --git a/otherarch/mpt_v3.cpp b/otherarch/mpt_v3.cpp index 211464f89..6d2b46568 100644 --- a/otherarch/mpt_v3.cpp +++ b/otherarch/mpt_v3.cpp @@ -354,7 +354,7 @@ bool mpt_eval(const mpt_model & model, const int n_threads, const int n_past, // use 2 scratch buffers // TODO: very hacky solution - reimplement in a more elegant way //MPT 30B needs more scratch memory - static size_t scr0_size = (n_embd>=7168?2048u:1024u)*1024*1024; + static size_t scr0_size = (n_embd>=7168?2048ull:1024ull)*1024*1024*(hparams.n_ctx>8192?2:1); static size_t scr1_size = (n_embd>=7168?2048u:1024u)*1024*1024; static void * scr0 = malloc(scr0_size); diff --git a/otherarch/neox_v3.cpp b/otherarch/neox_v3.cpp index fdcaed9bb..61bc9230e 100644 --- a/otherarch/neox_v3.cpp +++ b/otherarch/neox_v3.cpp @@ -433,7 +433,7 @@ bool gpt_neox_eval( // use 2 scratch buffers // TODO: very hacky solution - reimplement in a more elegant
way - static size_t scr0_size = (n_embd>2400?512u:256u)*1024*1024; + static size_t scr0_size = (n_embd>2400?512u:256u)*1024*1024*(hparams.n_ctx>8192?2:1); static size_t scr1_size = (n_embd>2400?512u:256u)*1024*1024; static void * scr0 = malloc(scr0_size);