ditch advanced subparsers

Concedo 2023-07-24 22:40:36 +08:00
parent 8a9b40840b
commit 7555dae4cc


@@ -212,7 +212,7 @@ def load_model(model_filename):
             os.environ["CUDA_VISIBLE_DEVICES"] = "2"
     for n in range(tensor_split_max):
-        if args.has_advanced=='advanced' and args.tensor_split and n < len(args.tensor_split):
+        if args.tensor_split and n < len(args.tensor_split):
             inputs.tensor_split[n] = float(args.tensor_split[n])
         else:
             inputs.tensor_split[n] = 0
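
For readers skimming the hunk above, a minimal standalone sketch of what that loop does; tensor_split_max and the inputs struct here are hypothetical stand-ins, not the real definitions from koboldcpp.py:

# Hypothetical stand-ins for the real tensor_split_max and ctypes inputs struct.
tensor_split_max = 16                  # assumed size of the fixed-length ratio array
args_tensor_split = [7.0, 3.0]         # roughly what "--tensor_split 7 3" parses to
tensor_split = [0.0] * tensor_split_max

for n in range(tensor_split_max):
    # Copy the user-supplied ratios, then pad the remaining slots with zeros.
    if args_tensor_split and n < len(args_tensor_split):
        tensor_split[n] = float(args_tensor_split[n])
    else:
        tensor_split[n] = 0

print(tensor_split[:4])   # [7.0, 3.0, 0, 0]
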
@@ -1648,11 +1648,8 @@ if __name__ == '__main__':
     compatgroup.add_argument("--useclblast", help="Use CLBlast for GPU Acceleration. Must specify exactly 2 arguments, platform ID and device ID (e.g. --useclblast 1 0).", type=int, choices=range(0,9), nargs=2)
     compatgroup.add_argument("--usecublas", help="Use CuBLAS for GPU Acceleration. Requires CUDA. Select lowvram to not allocate VRAM scratch buffer. Enter a number afterwards to select and use 1 GPU. Leaving no number will use all GPUs.", nargs='*',metavar=('[lowvram|normal] [main GPU ID]'), choices=['normal', 'lowvram', '0', '1', '2'])
     parser.add_argument("--gpulayers", help="Set number of layers to offload to GPU when using GPU. Requires GPU.",metavar=('[GPU layers]'), type=int, default=0)
-    # for the seldom used esoteric commands
-    subparsers = parser.add_subparsers(title="Advanced Configs (For Experts)", dest="has_advanced")
-    advanced_subparser = subparsers.add_parser("advanced", help="Additional settings for experts. Run 'koboldcpp.py advanced --help' for more info")
-    advanced_subparser.add_argument("--tensor_split", help="CUDA with ALL set only. How to split tensors across multiple GPUs, space-separated list of proportions, e.g. 3 1", type=float, nargs='+')
+    parser.add_argument("--tensor_split", help="For CUDA with ALL GPU set only, ratio to split tensors across multiple GPUs, space-separated list of proportions, e.g. 7 3", metavar=('[Ratios]'), type=float, nargs='+')
     args = parser.parse_args()
     main(args)
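
The practical effect on the command line: --tensor_split is now a plain top-level flag instead of living behind the "advanced" subcommand. A minimal sketch of the new-style invocation, using a hypothetical subset of the arguments rather than the full koboldcpp.py parser:

import argparse

parser = argparse.ArgumentParser(prog="koboldcpp.py")
parser.add_argument("--gpulayers", metavar='[GPU layers]', type=int, default=0)
parser.add_argument("--tensor_split", metavar='[Ratios]', type=float, nargs='+')

# New style: the ratios go straight on the main command line.
args = parser.parse_args(["--gpulayers", "43", "--tensor_split", "7", "3"])
print(args.tensor_split)   # [7.0, 3.0]

# The old style required "koboldcpp.py advanced --tensor_split 7 3" and a check on
# args.has_advanced == 'advanced' before args.tensor_split could be read.
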