fixed bbs -1 and allow bbs = 2048

Concedo 2023-08-06 17:47:05 +08:00
parent 18bb0ab127
commit bcfdd0e662
3 changed files with 11 additions and 5 deletions

@@ -347,6 +347,10 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
     debugmode = inputs.debugmode;
     unbanTokens = inputs.unban_tokens;
     blasbatchsize = inputs.blasbatchsize;
+    if(blasbatchsize<=0)
+    {
+        blasbatchsize = 8;
+    }
     params.memory_f16 = inputs.f16_kv;
     params.n_ctx = inputs.max_context_length;

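The clamp above is what fixes the bbs -1 case: the GUI and CLI pass -1 through to mean "Don't Batch BLAS", but the loader still needs a positive batch size for its internal size bookkeeping, so any non-positive value is bumped to 8. A minimal Python sketch of the same normalization (the helper name is illustrative, not from the patch):

    # Sketch of the normalization gpttype_load_model now applies
    # (hypothetical helper, not part of the patch):
    def normalize_blas_batch_size(requested: int) -> int:
        # -1 (or any non-positive value) selects "Don't Batch BLAS"; the
        # loader still needs a small positive batch for its size math, so
        # it falls back to 8 instead of propagating -1.
        return 8 if requested <= 0 else requested

    assert normalize_blas_batch_size(-1) == 8
    assert normalize_blas_batch_size(512) == 512
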

@@ -709,8 +709,8 @@ def show_new_gui():
         (lib_failsafe, "Failsafe Mode (Old CPU)")]
     openblas_option, clblast_option, cublas_option, default_option, noavx2_option, failsafe_option = (opt if file_exists(lib) or (os.name == 'nt' and file_exists(opt + ".dll")) else None for lib, opt in lib_option_pairs)
     # slider data
-    blasbatchsize_values = ["-1", "32", "64", "128", "256", "512", "1024"]
-    blasbatchsize_text = ["Don't Batch BLAS","32","64","128","256","512","1024"]
+    blasbatchsize_values = ["-1", "32", "64", "128", "256", "512", "1024", "2048"]
+    blasbatchsize_text = ["Don't Batch BLAS","32","64","128","256","512","1024","2048"]
     contextsize_text = ["512", "1024", "2048", "3072", "4096", "6144", "8192", "12288", "16384"]
     runopts = [opt for lib, opt in lib_option_pairs if file_exists(lib) or os.name == 'nt' and file_exists(opt + ".dll")]
     antirunopts = [opt.replace("Use ", "") for lib, opt in lib_option_pairs if not file_exists(lib) or os.name == 'nt' and not file_exists(opt + ".dll")]
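The new GUI keeps two parallel lists here, raw slider values and display labels, indexed by the same slider position, so both must grow in lockstep. A quick illustrative check:

    blasbatchsize_values = ["-1", "32", "64", "128", "256", "512", "1024", "2048"]
    blasbatchsize_text = ["Don't Batch BLAS","32","64","128","256","512","1024","2048"]
    # The slider index is shared between the two lists; a length mismatch
    # would map a label onto the wrong value.
    assert len(blasbatchsize_values) == len(blasbatchsize_text)
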
@@ -1306,7 +1306,7 @@ def show_old_gui():
     tk.Label(root, text = "(Note: KoboldCpp only works with GGML model formats!)",
             font = ("Arial", 9)).grid(row=1,column=0)
-    blasbatchopts = ["Don't Batch BLAS","BLAS = 32","BLAS = 64","BLAS = 128","BLAS = 256","BLAS = 512","BLAS = 1024"]
+    blasbatchopts = ["Don't Batch BLAS","BLAS = 32","BLAS = 64","BLAS = 128","BLAS = 256","BLAS = 512","BLAS = 1024","BLAS = 2048"]
     blaschoice = tk.StringVar()
     blaschoice.set("BLAS = 512")
@@ -1416,6 +1416,8 @@ def show_old_gui():
         args.blasbatchsize = 512
     if selblaschoice==blasbatchopts[6]:
         args.blasbatchsize = 1024
+    if selblaschoice==blasbatchopts[7]:
+        args.blasbatchsize = 2048

     root = tk.Tk()
     root.attributes("-alpha", 0)
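The old GUI resolves the selected dropdown string with one if-statement per option, so every new batch size needs a matching branch; index 7 above is the new "BLAS = 2048" entry. An equivalent table-driven lookup, shown only as a sketch (the standalone variable names are illustrative):

    blasbatchopts = ["Don't Batch BLAS","BLAS = 32","BLAS = 64","BLAS = 128",
                     "BLAS = 256","BLAS = 512","BLAS = 1024","BLAS = 2048"]
    sizes = [-1, 32, 64, 128, 256, 512, 1024, 2048]
    # Derive the batch size from the option's position instead of one
    # if-statement per choice.
    selblaschoice = "BLAS = 2048"
    blasbatchsize = sizes[blasbatchopts.index(selblaschoice)]
    assert blasbatchsize == 2048
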
@@ -1714,7 +1716,7 @@ if __name__ == '__main__':
     parser.add_argument("--psutil_set_threads", help="Experimental flag. If set, uses psutils to determine thread count based on physical cores.", action='store_true')
     parser.add_argument("--highpriority", help="Experimental flag. If set, increases the process CPU priority, potentially speeding up generation. Use caution.", action='store_true')
     parser.add_argument("--contextsize", help="Controls the memory allocated for maximum context size, only change if you need more RAM for big contexts. (default 2048)", type=int,choices=[512,1024,2048,3072,4096,6144,8192,12288,16384], default=2048)
-    parser.add_argument("--blasbatchsize", help="Sets the batch size used in BLAS processing (default 512). Setting it to -1 disables BLAS mode, but keeps other benefits like GPU offload.", type=int,choices=[-1,32,64,128,256,512,1024], default=512)
+    parser.add_argument("--blasbatchsize", help="Sets the batch size used in BLAS processing (default 512). Setting it to -1 disables BLAS mode, but keeps other benefits like GPU offload.", type=int,choices=[-1,32,64,128,256,512,1024,2048], default=512)
     parser.add_argument("--ropeconfig", help="If set, uses customized RoPE scaling from configured frequency scale and frequency base (e.g. --ropeconfig 0.25 10000). Otherwise, uses NTK-Aware scaling set automatically based on context size. For linear rope, simply set the freq-scale and ignore the freq-base",metavar=('[rope-freq-scale]', '[rope-freq-base]'), default=[0.0, 10000.0], type=float, nargs='+')
     parser.add_argument("--stream", help="Uses streaming when generating tokens. Only for the Kobold Lite UI.", action='store_true')
     parser.add_argument("--smartcontext", help="Reserving a portion of context to try processing less frequently.", action='store_true')

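Because the flag is declared with type=int and an explicit choices list, argparse itself rejects any batch size outside the supported set, so extending choices is all that is needed to accept 2048. A self-contained sketch of that behavior:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--blasbatchsize", type=int,
                        choices=[-1,32,64,128,256,512,1024,2048], default=512)

    print(parser.parse_args(["--blasbatchsize", "2048"]).blasbatchsize)  # 2048
    # parser.parse_args(["--blasbatchsize", "4096"])  # error: invalid choice
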

@@ -1042,7 +1042,7 @@ static void llama_model_load_internal(
         void * progress_callback_user_data) {
     model.t_start_us = ggml_time_us();
-    size_t blasbatchmul = (n_batch>512?2:1);
+    size_t blasbatchmul = (n_batch>512?(n_batch>1024?4:2):1);
     std::unique_ptr<llama_model_loader> ml(new llama_model_loader(fname, use_mmap));
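blasbatchmul scales the loader's buffer-size estimates for large BLAS batches: previously anything above a 512 batch doubled them, and the new nested ternary quadruples them once the batch exceeds 1024, so the new 2048 setting gets proportionally larger buffers. The same logic in Python for readability (a sketch mirroring the C++ ternary):

    def blas_batch_mul(n_batch: int) -> int:
        # 1x up to a 512 batch, 2x up to 1024, 4x beyond that (i.e. 2048).
        return (4 if n_batch > 1024 else 2) if n_batch > 512 else 1

    assert [blas_batch_mul(n) for n in (256, 512, 1024, 2048)] == [1, 1, 2, 4]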