From b78ceb1a2ea7c2e76e6a9b87317e438b2bfd4182 Mon Sep 17 00:00:00 2001
From: FNsi <125447286+FNsi@users.noreply.github.com>
Date: Mon, 22 May 2023 19:31:17 +0800
Subject: [PATCH] merge-hf-and-lora-to-hf.py

---
 merge-hf-and-lora-to-hf.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/merge-hf-and-lora-to-hf.py b/merge-hf-and-lora-to-hf.py
index f8452ffd8..235704070 100644
--- a/merge-hf-and-lora-to-hf.py
+++ b/merge-hf-and-lora-to-hf.py
@@ -40,7 +40,7 @@ tokenizer.save_pretrained(args.out)
 
 # load model.
 model = LlamaForCausalLM.from_pretrained(
-    args.model_path,
+    args.model,
     load_in_8bit=False,
     torch_dtype=torch.float16,
     device_map={"": "cpu"},
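
For context, a minimal sketch of the kind of merge script this patch touches. Only the from_pretrained call switching from args.model_path to args.model is taken from the patch itself; the argparse flag names (--model, --lora, --out), the tokenizer handling, and the PEFT merge step are assumptions about the surrounding script, not confirmed by the diff.

# Hypothetical sketch, not the actual merge-hf-and-lora-to-hf.py.
# Assumes argparse defines "model" (hence the fix to args.model),
# plus --lora and --out, and that PEFT is used for the merge.
import argparse

import torch
from peft import PeftModel
from transformers import LlamaForCausalLM, LlamaTokenizer

parser = argparse.ArgumentParser()
parser.add_argument("--model", required=True, help="path to the base HF model")
parser.add_argument("--lora", required=True, help="path to the LoRA adapter")
parser.add_argument("--out", required=True, help="output directory")
args = parser.parse_args()

# Save the base model's tokenizer unchanged.
tokenizer = LlamaTokenizer.from_pretrained(args.model)
tokenizer.save_pretrained(args.out)

# Load the base model on CPU in fp16; this is the call the patch fixes
# (args.model, not the undefined args.model_path).
model = LlamaForCausalLM.from_pretrained(
    args.model,
    load_in_8bit=False,
    torch_dtype=torch.float16,
    device_map={"": "cpu"},
)

# Apply the LoRA adapter, merge its weights into the base model,
# and write a plain HF checkpoint.
model = PeftModel.from_pretrained(model, args.lora)
model = model.merge_and_unload()
model.save_pretrained(args.out)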