Bugfix: wrong signature for quantize function
This commit is contained in:
parent
ef5a9a6160
commit
bd1c657f80
1 changed file with 3 additions and 3 deletions
|
@ -92,12 +92,12 @@ _lib.llama_free.restype = None
|
|||
# TODO: not great API - very likely to change
# Returns 0 on success
def llama_model_quantize(
    fname_inp: bytes, fname_out: bytes, itype: c_int
) -> c_int:
    """Quantize the model file at *fname_inp*, writing the result to *fname_out*.

    Thin ctypes pass-through to the C library's ``llama_model_quantize``.

    Args:
        fname_inp: Path to the input model file, as ``bytes``.
        fname_out: Path for the quantized output model file, as ``bytes``.
        itype: Quantization type code, forwarded unchanged to the C API.

    Returns:
        0 on success, non-zero on failure (per the underlying C function).
    """
    # NOTE: the C signature dropped the former `qk` argument; this wrapper
    # must stay in sync with the argtypes declared below.
    return _lib.llama_model_quantize(fname_inp, fname_out, itype)
|
||||
|
||||
|
||||
# ctypes metadata for llama_model_quantize:
# (input path, output path, quantization type) -> int status code.
# The former fourth `qk` argument was removed from the C API.
_lib.llama_model_quantize.argtypes = [c_char_p, c_char_p, c_int]
_lib.llama_model_quantize.restype = c_int
|
||||
|
||||
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue