up ver, support 16k ctx

This commit is contained in:
Concedo 2023-08-04 21:47:17 +08:00
parent f0764c6cfb
commit 18bb0ab127
7 changed files with 12 additions and 10 deletions

View file

@@ -75,7 +75,7 @@ struct generation_inputs
struct generation_outputs
{
int status = -1;
-char text[16384]; //16kb should be enough for any response
+char text[24576]; //24kb should be enough for any response
};
extern std::string executable_path;

View file

@@ -374,7 +374,8 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
else
{
//approximate NTK aware ctx
-rope_freq_base = (params.n_ctx <= 3072 ? 26000.0f : (params.n_ctx <= 4096 ? 32000.0f : (params.n_ctx <= 6144 ? 54000.0f : 82684.0f)));
+rope_freq_base = (params.n_ctx <= 3072 ? 26000.0f : (params.n_ctx <= 4096 ? 32000.0f : (params.n_ctx <= 6144 ? 54000.0f : (params.n_ctx <= 8192 ? 82684.0f : (params.n_ctx <= 12288 ? 140000.0f : 200000.0f)))));
}
printf("Using automatic RoPE scaling (scale:%.3f, base:%.1f)\n",rope_freq_scale,rope_freq_base);

View file

@@ -66,7 +66,7 @@ class generation_inputs(ctypes.Structure):
class generation_outputs(ctypes.Structure):
_fields_ = [("status", ctypes.c_int),
-("text", ctypes.c_char * 16384)]
+("text", ctypes.c_char * 24576)]
handle = None
@@ -303,7 +303,7 @@ maxhordectx = 1024
maxhordelen = 256
modelbusy = threading.Lock()
defaultport = 5001
-KcppVersion = "1.38"
+KcppVersion = "1.39"
showdebug = True
showsamplerwarning = True
showmaxctxwarning = True
@@ -711,7 +711,7 @@ def show_new_gui():
# slider data
blasbatchsize_values = ["-1", "32", "64", "128", "256", "512", "1024"]
blasbatchsize_text = ["Don't Batch BLAS","32","64","128","256","512","1024"]
-contextsize_text = ["512", "1024", "2048", "3072", "4096", "6144", "8192"]
+contextsize_text = ["512", "1024", "2048", "3072", "4096", "6144", "8192", "12288", "16384"]
runopts = [opt for lib, opt in lib_option_pairs if file_exists(lib) or os.name == 'nt' and file_exists(opt + ".dll")]
antirunopts = [opt.replace("Use ", "") for lib, opt in lib_option_pairs if not file_exists(lib) or os.name == 'nt' and not file_exists(opt + ".dll")]
if not any(runopts):
@@ -1713,7 +1713,7 @@ if __name__ == '__main__':
parser.add_argument("--blasthreads", help="Use a different number of threads during BLAS if specified. Otherwise, has the same value as --threads",metavar=('[threads]'), type=int, default=0)
parser.add_argument("--psutil_set_threads", help="Experimental flag. If set, uses psutils to determine thread count based on physical cores.", action='store_true')
parser.add_argument("--highpriority", help="Experimental flag. If set, increases the process CPU priority, potentially speeding up generation. Use caution.", action='store_true')
-parser.add_argument("--contextsize", help="Controls the memory allocated for maximum context size, only change if you need more RAM for big contexts. (default 2048)", type=int,choices=[512,1024,2048,3072,4096,6144,8192], default=2048)
+parser.add_argument("--contextsize", help="Controls the memory allocated for maximum context size, only change if you need more RAM for big contexts. (default 2048)", type=int,choices=[512,1024,2048,3072,4096,6144,8192,12288,16384], default=2048)
parser.add_argument("--blasbatchsize", help="Sets the batch size used in BLAS processing (default 512). Setting it to -1 disables BLAS mode, but keeps other benefits like GPU offload.", type=int,choices=[-1,32,64,128,256,512,1024], default=512)
parser.add_argument("--ropeconfig", help="If set, uses customized RoPE scaling from configured frequency scale and frequency base (e.g. --ropeconfig 0.25 10000). Otherwise, uses NTK-Aware scaling set automatically based on context size. For linear rope, simply set the freq-scale and ignore the freq-base",metavar=('[rope-freq-scale]', '[rope-freq-base]'), default=[0.0, 10000.0], type=float, nargs='+')
parser.add_argument("--stream", help="Uses streaming when generating tokens. Only for the Kobold Lite UI.", action='store_true')

View file

@@ -416,9 +416,10 @@ bool gpt2_eval(
// use 2 scratch buffers
// TODO: very hacky solution - reimplement in a more elegant way
-static size_t scr0_size = (n_embd>2400?512u:256u)*1024*1024;
+static size_t scr0_size = (n_embd>2400?512u:256u)*1024*1024*(hparams.n_ctx>8192?2:1);
static size_t scr1_size = (n_embd>2400?512u:256u)*1024*1024;
static void * scr0 = malloc(scr0_size);
static void * scr1 = malloc(scr1_size);

View file

@@ -417,7 +417,7 @@ bool gptj_eval(
// use 2 scratch buffers
// TODO: very hacky solution - reimplement in a more elegant way
-static size_t scr0_size = 512u*1024*1024;
+static size_t scr0_size = 512u*1024*1024*(hparams.n_ctx>8192?2:1);
static size_t scr1_size = 512u*1024*1024;
static void * scr0 = malloc(scr0_size);

View file

@@ -354,7 +354,7 @@ bool mpt_eval(const mpt_model & model, const int n_threads, const int n_past,
// use 2 scratch buffers
// TODO: very hacky solution - reimplement in a more elegant way
//MPT 30B needs more scratch memory
-static size_t scr0_size = (n_embd>=7168?2048u:1024u)*1024*1024;
+static size_t scr0_size = (n_embd>=7168?2048u:1024u)*1024*1024*(hparams.n_ctx>8192?2:1);
static size_t scr1_size = (n_embd>=7168?2048u:1024u)*1024*1024;
static void * scr0 = malloc(scr0_size);

View file

@@ -433,7 +433,7 @@ bool gpt_neox_eval(
// use 2 scratch buffers
// TODO: very hacky solution - reimplement in a more elegant way
-static size_t scr0_size = (n_embd>2400?512u:256u)*1024*1024;
+static size_t scr0_size = (n_embd>2400?512u:256u)*1024*1024*(hparams.n_ctx>8192?2:1);
static size_t scr1_size = (n_embd>2400?512u:256u)*1024*1024;
static void * scr0 = malloc(scr0_size);