Expose low_vram for CUDA

Enabling --lowvram instructs the program not to allocate a VRAM scratch buffer for holding temporary results. This reduces VRAM usage at the cost of performance, particularly prompt processing speed. Requires CUDA.
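
As a usage sketch (the model filename below is a placeholder, not part of this commit), the flag is simply added when launching:

    python koboldcpp.py model.bin --lowvram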
YellowRoseCx 2023-06-26 16:47:22 -05:00
parent d2034ced7b
commit 8afa800fb6
3 changed files with 6 additions and 1 deletion

@@ -8,6 +8,7 @@ struct load_model_inputs
 const int max_context_length;
 const int batch_size;
 const bool f16_kv;
+const bool low_vram;
 const char * executable_path;
 const char * model_filename;
 const char * lora_filename;

@@ -377,6 +377,7 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
 //llama_ctx_paran_parts = -1;
 llama_ctx_params.seed = -1;
 llama_ctx_params.f16_kv = inputs.f16_kv;
+llama_ctx_params.low_vram = inputs.low_vram;
 llama_ctx_params.logits_all = false;
 llama_ctx_params.use_mmap = inputs.use_mmap;
 llama_ctx_params.use_mlock = inputs.use_mlock;

@@ -16,6 +16,7 @@ class load_model_inputs(ctypes.Structure):
 ("max_context_length", ctypes.c_int),
 ("batch_size", ctypes.c_int),
 ("f16_kv", ctypes.c_bool),
+("low_vram", ctypes.c_bool),
 ("executable_path", ctypes.c_char_p),
 ("model_filename", ctypes.c_char_p),
 ("lora_filename", ctypes.c_char_p),
@@ -150,6 +151,7 @@ def load_model(model_filename):
 inputs.batch_size = 8
 inputs.max_context_length = maxctx #initial value to use for ctx, can be overwritten
 inputs.threads = args.threads
+inputs.low_vram = args.lowvram
 inputs.blasthreads = args.blasthreads
 inputs.f16_kv = True
 inputs.use_mmap = (not args.nommap)
@@ -646,7 +648,7 @@ def show_gui():
 #load all the vars
 args.threads = int(threads_var.get())
 args.gpulayers = int(gpu_layers_var.get())
 args.stream = (stream.get()==1)
 args.smartcontext = (smartcontext.get()==1)
 args.launch = (launchbrowser.get()==1)
@@ -861,6 +863,7 @@ if __name__ == '__main__':
 parser.add_argument("--hordeconfig", help="Sets the display model name to something else, for easy use on AI Horde. Optional additional parameters set the horde max genlength and max ctxlen.",metavar=('[hordename]', '[hordelength] [hordectx]'), nargs='+')
 compatgroup = parser.add_mutually_exclusive_group()
 compatgroup.add_argument("--noblas", help="Do not use OpenBLAS for accelerated prompt ingestion", action='store_true')
+parser.add_argument("--lowvram", help="Do not allocate a VRAM scratch buffer for holding temporary results. Reduces VRAM usage at the cost of performance, particularly prompt processing speed. Requires CUDA.", action='store_true')
 compatgroup.add_argument("--useclblast", help="Use CLBlast instead of OpenBLAS for prompt ingestion. Must specify exactly 2 arguments, platform ID and device ID (e.g. --useclblast 1 0).", type=int, choices=range(0,9), nargs=2)
 parser.add_argument("--gpulayers", help="Set number of layers to offload to GPU when using CLBlast. Requires CLBlast.",metavar=('[GPU layers]'), type=int, default=0)
 args = parser.parse_args()
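
Since --lowvram is declared with action='store_true', it defaults to False and flips to True only when the flag is present. A minimal standalone sketch of that behavior (parser construction here is illustrative, not the full koboldcpp parser):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--lowvram", action='store_true')

    assert parser.parse_args([]).lowvram is False             # flag absent
    assert parser.parse_args(["--lowvram"]).lowvram is True   # flag present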