diff --git a/koboldcpp.py b/koboldcpp.py
index ab3ce85ec..a426efea3 100755
--- a/koboldcpp.py
+++ b/koboldcpp.py
@@ -1564,6 +1564,11 @@ def run_horde_worker(args, api_key, worker_name):
 
 def main(args):
     embedded_kailite = None
+    if args.config:
+        with open(args.config, 'r') as f:
+            config = json.load(f)
+        for key, value in config.items():
+            setattr(args, key, value)
     if not args.model_param:
         args.model_param = args.model
     if not args.model_param:
@@ -1708,6 +1713,7 @@ if __name__ == '__main__':
     parser.add_argument("--host", help="Host IP to listen on. If empty, all routable interfaces are accepted.", default="")
     parser.add_argument("--launch", help="Launches a web browser when load is completed.", action='store_true')
     parser.add_argument("--lora", help="LLAMA models only, applies a lora file on top of model. Experimental.", metavar=('[lora_filename]', '[lora_base]'), nargs='+')
+    parser.add_argument("--config", help="Load settings from a .kcpps file. Other arguments will be ignored", nargs="?")
     physical_core_limit = 1
     if os.cpu_count()!=None and os.cpu_count()>1:
         physical_core_limit = int(os.cpu_count()/2)