Change llama_tokenize return meaning
parent a9f900b645
commit c3d13eaa4d

3 changed files with 3 additions and 2 deletions

llama.cpp (2 changes)

@@ -1455,7 +1455,7 @@ int llama_tokenize(
 
     if (n_max_tokens < (int) res.size()) {
         fprintf(stderr, "%s: too many tokens\n", __func__);
-        return 1;
+        return -((int) res.size());
     }
 
     for (size_t i = 0; i < res.size(); i++) {

llama.h (2 changes)

@@ -89,7 +89,7 @@ extern "C" {
     // Convert the provided text into tokens.
     // The tokens pointer must be large enough to hold the resulting tokens.
     // Returns the number of tokens on success, no more than n_max_tokens
-    // Returns -1 on failure
+    // Returns a negative number on failure - the number of tokens that would have been returned
     // TODO: not sure if correct
     LLAMA_API int llama_tokenize(
            struct llama_context * ctx,
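
With the new meaning, a caller that does not know the token count in advance can tokenize in two passes: call llama_tokenize with whatever buffer it has, and if the result is negative, its absolute value is the number of tokens that would have been produced, so the buffer can be resized and the call repeated. Below is a minimal sketch of that pattern, assuming the rest of the llama_tokenize signature from this version of llama.h (text, tokens, n_max_tokens, add_bos); the helper name tokenize_two_pass is illustrative and not part of the API.

#include <cstdio>
#include <vector>

#include "llama.h"

// Sketch: tokenize `text` without knowing the token count up front.
// Relies on the changed return meaning: a negative result is the negated
// number of tokens that would have been returned.
static std::vector<llama_token> tokenize_two_pass(llama_context * ctx, const char * text, bool add_bos) {
    std::vector<llama_token> tokens(32);   // deliberately small first guess
    int n = llama_tokenize(ctx, text, tokens.data(), (int) tokens.size(), add_bos);
    if (n < 0) {
        tokens.resize(-n);                 // -n tokens are required
        n = llama_tokenize(ctx, text, tokens.data(), (int) tokens.size(), add_bos);
    }
    if (n < 0) {
        fprintf(stderr, "tokenization failed\n");
        return {};
    }
    tokens.resize(n);                      // n tokens were actually written
    return tokens;
}

Compared with the old "return 1" / "Returns -1 on failure" convention, the caller no longer has to guess a larger size blindly; the failure value itself carries the required capacity.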

main.cpp (1 change)

@@ -155,6 +155,7 @@ void sigint_handler(int signo) {
 #endif
 
 int main(int argc, char ** argv) {
+    // has to be called once at the start of the program to init ggml stuff
     ggml_time_init();
 
     gpt_params params;
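
The comment added in main.cpp records a ggml convention rather than anything specific to tokenization: ggml_time_init() sets up the state used by ggml's timing helpers and is meant to be called once, early in the program. A small standalone sketch of that usage, assuming ggml.h's ggml_time_init() and ggml_time_us(); the timed loop is a placeholder, not code from this repository.

#include <cstdint>
#include <cstdio>

#include "ggml.h"

int main() {
    // has to be called once at the start of the program before using ggml timing
    ggml_time_init();

    const int64_t t_start_us = ggml_time_us();

    // placeholder work to time
    volatile double acc = 0.0;
    for (int i = 0; i < 1000000; i++) {
        acc += i * 0.5;
    }

    const int64_t t_end_us = ggml_time_us();
    fprintf(stdout, "elapsed: %.3f ms\n", (t_end_us - t_start_us) / 1000.0);
    return 0;
}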