Update convert-image-encoder-to-gguf.py

Author: John, 2023-12-03 20:23:28 +01:00 (committed by GitHub)
parent fbbc42827b
commit 2f55ff6d97

convert-image-encoder-to-gguf.py

@@ -80,6 +80,8 @@ ap.add_argument("--vision-only", action="store_true", required=False,
     help="Save a vision-only model. It can't be used to encode texts")
 ap.add_argument("--clip_model_is_vision", action="store_true", required=False,
     help="The clip model is a pure vision model (ShareGPT4V vision extract for example)")
+ap.add_argument("--clip_model_is_openclip", action="store_true", required=False,
+    help="The clip model is from openclip (for ViT-SO400M type))")
 ap.add_argument("--llava-projector", help="Path to llava.projector file. If specified, save an image encoder for LLaVA models.")
 ap.add_argument("--image-mean", nargs=3, type=float, required=False, help="Override image mean values")
 ap.add_argument("--image-std", nargs=3, type=float, required=False, help="Override image std values")
@@ -104,7 +106,7 @@ if args.use_f32:
 # output in the same directory as the model if output_dir is None
 dir_model = args.model_dir
 
-if args.clip_model_is_vision:
+if args.clip_model_is_vision or not os.path.exists(dir_model + "/vocab.json") or args.clip_model_is_openclip:
     vocab = None
     tokens = None
 else:
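
The widened condition skips text-vocabulary loading in three cases: the vision-only flag is set, the model directory has no vocab.json, or the checkpoint is from OpenCLIP (which ships no vocab.json either). A sketch of that guard, with SimpleNamespace and the directory name standing in for the script's real argument object and model path, and the else branch only approximating what the script does with vocab.json:

    import json
    import os
    from types import SimpleNamespace

    # hypothetical stand-ins for the script's parsed args and model directory
    args = SimpleNamespace(clip_model_is_vision=False, clip_model_is_openclip=True)
    dir_model = "ViT-SO400M"

    if args.clip_model_is_vision or not os.path.exists(dir_model + "/vocab.json") or args.clip_model_is_openclip:
        # vision-only and OpenCLIP checkpoints carry no text vocabulary
        vocab = None
        tokens = None
    else:
        # text branch: read the vocabulary from vocab.json
        with open(dir_model + "/vocab.json", "r", encoding="utf-8") as f:
            vocab = json.load(f)
        tokens = [key for key in vocab]

    print(vocab, tokens)  # None None for this OpenCLIP example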
@@ -132,7 +134,7 @@ ftype = 1
 if args.use_f32:
     ftype = 0
 
-if args.clip_model_is_vision:
+if args.clip_model_is_vision or args.clip_model_is_openclip:
     model = CLIPVisionModel.from_pretrained(dir_model)
     processor = None
 else:
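
With either vision flag set, the converter loads the checkpoint through transformers' CLIPVisionModel, which instantiates only the vision tower and therefore tolerates checkpoints that have no text weights; the processor is skipped for the same reason. A minimal sketch under that assumption (the checkpoint path is illustrative):

    from transformers import CLIPModel, CLIPVisionModel

    dir_model = "openai/clip-vit-base-patch32"  # hypothetical checkpoint for illustration
    clip_model_is_vision = True  # as if --clip_model_is_vision or --clip_model_is_openclip were set

    if clip_model_is_vision:
        # load just the vision tower; vision-only checkpoints lack text weights
        model = CLIPVisionModel.from_pretrained(dir_model)
        processor = None
    else:
        # a full CLIP checkpoint can load the combined text+vision model
        model = CLIPModel.from_pretrained(dir_model)

    print(type(model).__name__)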