Bugfix: wrong signature for quantize function

This commit is contained in:
Andrei Betlen 2023-04-04 22:36:59 -04:00 committed by Don Mahurin
parent ef5a9a6160
commit bd1c657f80

View file

@ -92,12 +92,12 @@ _lib.llama_free.restype = None
# TODO: not great API - very likely to change
# Returns 0 on success
def llama_model_quantize(
    fname_inp: bytes, fname_out: bytes, itype: c_int
) -> c_int:
    """Quantize the model file at *fname_inp* and write the result to *fname_out*.

    Thin ctypes wrapper around the C library's ``llama_model_quantize``.
    The previous binding passed an extra ``qk`` argument that the C API
    does not take; the signature here matches the 3-argument C function.

    Args:
        fname_inp: Path to the input model file, as bytes.
        fname_out: Path the quantized model is written to, as bytes.
        itype: Quantization type selector, passed through to the C library
            unchanged (semantics defined by llama.cpp's ``llama.h``).

    Returns:
        0 on success (per the C API), nonzero on failure.
    """
    return _lib.llama_model_quantize(fname_inp, fname_out, itype)


# Declare the C-side prototype so ctypes marshals arguments correctly:
# int llama_model_quantize(const char *fname_inp, const char *fname_out, int itype)
_lib.llama_model_quantize.argtypes = [c_char_p, c_char_p, c_int]
_lib.llama_model_quantize.restype = c_int