infill : add new example + extend server API (#3296)

* vvhg-code-infill (#1)

* infill in separate example (#2)

* reverted changes to main and added infill example

* cleanup

* naming improvement

* make : add missing blank line

* fix missing semicolon

* brought infill up to current main code

* cleanup

---------

Co-authored-by: Cebtenzzre <cebtenzzre@gmail.com>
vvhg1 authored on 2023-10-02 09:42:02 +02:00, committed by GitHub
commit c97f01c362
parent f5ef5cfb18
11 changed files with 1067 additions and 1 deletions

llama.cpp

@@ -1076,6 +1076,10 @@ struct llama_vocab {
     id special_pad_id = -1;
 
     id linefeed_id = 13;
+    id special_prefix_id = 32007;
+    id special_middle_id = 32009;
+    id special_suffix_id = 32008;
+    id special_eot_id = 32010;
 
     int find_bpe_rank(std::string token_left, std::string token_right) const {
         replace_all(token_left, " ", "\u0120");
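
Note (not part of the commit): these hard-coded defaults appear to match CodeLlama's fill-in-the-middle special tokens, where 32007 is ▁<PRE> (prefix), 32008 is ▁<SUF> (suffix), 32009 is ▁<MID> (middle), and 32010 is <EOT>. That is why the middle ID sits numerically between the suffix and EOT IDs: the vocabulary orders them PRE, SUF, MID, EOT, not in prompt order.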
@@ -7489,6 +7493,22 @@ llama_token llama_token_eos(const struct llama_context * ctx) {
 llama_token llama_token_nl(const struct llama_context * ctx) {
     return ctx->model.vocab.linefeed_id;
 }
+
+llama_token llama_token_prefix(const struct llama_context * ctx) {
+    return ctx->model.vocab.special_prefix_id;
+}
+
+llama_token llama_token_middle(const struct llama_context * ctx) {
+    return ctx->model.vocab.special_middle_id;
+}
+
+llama_token llama_token_suffix(const struct llama_context * ctx) {
+    return ctx->model.vocab.special_suffix_id;
+}
+
+llama_token llama_token_eot(const struct llama_context * ctx) {
+    return ctx->model.vocab.special_eot_id;
+}
 
 int llama_tokenize(
         const struct llama_model * model,
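
Below is a minimal sketch of how a caller might combine the new accessors into a fill-in-the-middle prompt. It is not part of the commit: the helper name build_infill_prompt is hypothetical, and the six-argument llama_tokenize signature (text, text length, output buffer, capacity, add_bos flag) is assumed from the API of this era; the infill example added by this commit is the authoritative usage.

#include "llama.h"

#include <string.h>

// Hypothetical helper: lay tokens out as <PRE> {prefix} <SUF> {suffix} <MID>,
// after which the model generates the missing middle and signals completion
// by emitting the EOT token. Returns the token count, or -1 on overflow.
static int build_infill_prompt(
              struct llama_context * ctx,
        const struct llama_model  * model,
        const char  * prefix,
        const char  * suffix,
        llama_token * out,
        int           out_cap) {
    int n = 0;
    if (out_cap < 3) return -1;

    out[n++] = llama_token_prefix(ctx);                    // <PRE>

    // add_bos = false: the prompt already starts with the prefix token;
    // reserve 2 slots for the <SUF> and <MID> tokens still to come
    int n_pre = llama_tokenize(model, prefix, (int) strlen(prefix),
                               out + n, out_cap - n - 2, false);
    if (n_pre < 0) return -1; // buffer too small
    n += n_pre;

    out[n++] = llama_token_suffix(ctx);                    // <SUF>

    int n_suf = llama_tokenize(model, suffix, (int) strlen(suffix),
                               out + n, out_cap - n - 1, false);
    if (n_suf < 0) return -1;
    n += n_suf;

    out[n++] = llama_token_middle(ctx);                    // <MID>

    return n;
}

During decoding, the sampling loop would then treat llama_token_eot(ctx) the way it treats EOS: once the model emits it, the infill is complete.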