From 7555dae4ccbeffaecae9a094214772b5d89d3043 Mon Sep 17 00:00:00 2001
From: Concedo <39025047+LostRuins@users.noreply.github.com>
Date: Mon, 24 Jul 2023 22:40:36 +0800
Subject: [PATCH] ditch advanced subparsers

---
 koboldcpp.py | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/koboldcpp.py b/koboldcpp.py
index 80c3d79be..22375c165 100755
--- a/koboldcpp.py
+++ b/koboldcpp.py
@@ -212,7 +212,7 @@ def load_model(model_filename):
         os.environ["CUDA_VISIBLE_DEVICES"] = "2"
 
     for n in range(tensor_split_max):
-        if args.has_advanced=='advanced' and args.tensor_split and n < len(args.tensor_split):
+        if args.tensor_split and n < len(args.tensor_split):
             inputs.tensor_split[n] = float(args.tensor_split[n])
         else:
             inputs.tensor_split[n] = 0
@@ -1648,11 +1648,8 @@ if __name__ == '__main__':
     compatgroup.add_argument("--useclblast", help="Use CLBlast for GPU Acceleration. Must specify exactly 2 arguments, platform ID and device ID (e.g. --useclblast 1 0).", type=int, choices=range(0,9), nargs=2)
     compatgroup.add_argument("--usecublas", help="Use CuBLAS for GPU Acceleration. Requires CUDA. Select lowvram to not allocate VRAM scratch buffer. Enter a number afterwards to select and use 1 GPU. Leaving no number will use all GPUs.", nargs='*',metavar=('[lowvram|normal] [main GPU ID]'), choices=['normal', 'lowvram', '0', '1', '2'])
     parser.add_argument("--gpulayers", help="Set number of layers to offload to GPU when using GPU. Requires GPU.",metavar=('[GPU layers]'), type=int, default=0)
-
-    # for the seldom used esoteric commands
-    subparsers = parser.add_subparsers(title="Advanced Configs (For Experts)", dest="has_advanced")
-    advanced_subparser = subparsers.add_parser("advanced", help="Additional settings for experts. Run 'koboldcpp.py advanced --help' for more info")
-    advanced_subparser.add_argument("--tensor_split", help="CUDA with ALL set only. How to split tensors across multiple GPUs, space-separated list of proportions, e.g. 3 1", type=float, nargs='+')
+    parser.add_argument("--tensor_split", help="For CUDA with ALL GPU set only, ratio to split tensors across multiple GPUs, space-separated list of proportions, e.g. 7 3", metavar=('[Ratios]'), type=float, nargs='+')
 
     args = parser.parse_args()
+
     main(args)
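
For context (not part of the patch): a minimal standalone argparse sketch, using the names from the diff, of the behaviour being ditched. With the subparser, --tensor_split was only reachable through the "advanced" subcommand (e.g. "koboldcpp.py advanced --tensor_split 7 3"), whereas after this change it is an ordinary top-level flag.

# Illustrative sketch only; this file is assumed, not part of koboldcpp.py.
import argparse

# Old style: the flag lives on an "advanced" subcommand, so plain
# "--tensor_split 7 3" without the subcommand would be rejected.
old = argparse.ArgumentParser()
sub = old.add_subparsers(title="Advanced Configs (For Experts)", dest="has_advanced")
adv = sub.add_parser("advanced")
adv.add_argument("--tensor_split", type=float, nargs='+')
a = old.parse_args(["advanced", "--tensor_split", "7", "3"])
assert a.has_advanced == "advanced" and a.tensor_split == [7.0, 3.0]

# New style, as in this patch: a plain top-level flag, no subcommand needed,
# which is why load_model no longer checks args.has_advanced.
new = argparse.ArgumentParser()
new.add_argument("--tensor_split", type=float, nargs='+')
assert new.parse_args(["--tensor_split", "7", "3"]).tensor_split == [7.0, 3.0]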