Merge branch 'master' into server-probs

commit 3fc1127e2f

7 changed files with 212 additions and 22 deletions

README.md (12 changes)
@@ -39,6 +39,7 @@ Last revision compatible with the old format: [dadbed9](https://github.com/ggerg
 <li><a href="#memorydisk-requirements">Memory/Disk Requirements</a></li>
 <li><a href="#quantization">Quantization</a></li>
 <li><a href="#interactive-mode">Interactive mode</a></li>
+<li><a href="#constrained-output-with-grammars">Constrained output with grammars</a></li>
 <li><a href="#instruction-mode-with-alpaca">Instruction mode with Alpaca</a></li>
 <li><a href="#using-openllama">Using OpenLLaMA</a></li>
 <li><a href="#using-gpt4all">Using GPT4All</a></li>
@@ -604,6 +605,16 @@ PROMPT_TEMPLATE=./prompts/chat-with-bob.txt PROMPT_CACHE_FILE=bob.prompt.bin \
 CHAT_SAVE_DIR=./chat/bob ./examples/chat-persistent.sh
 ```
 
+### Constrained output with grammars
+
+`llama.cpp` supports grammars to constrain model output. For example, you can force the model to output JSON only:
+
+```bash
+./main -m ./models/13B/ggml-model-q4_0.gguf -n 256 --grammar-file grammars/json.gbnf -p 'Request: schedule a call at 8pm; Command:'
+```
+
+The `grammars/` folder contains a handful of sample grammars. To write your own, check out the [GBNF Guide](./grammars/README.md).
+
 ### Instruction mode with Alpaca
 
 1. First, download the `ggml` Alpaca model into the `./models` folder
@@ -885,3 +896,4 @@ docker run --gpus all -v /path/to/models:/models local/llama.cpp:light-cuda -m /
 - [BLIS](./docs/BLIS.md)
 - [Performance troubleshooting](./docs/token_generation_performance_tips.md)
 - [GGML tips & tricks](https://github.com/ggerganov/llama.cpp/wiki/GGML-Tips-&-Tricks)
+- [GBNF grammars](./grammars/README.md)
@@ -1,10 +1,12 @@
-import sys, struct, math, argparse
+import sys, struct, math, argparse, warnings
 from pathlib import Path
 
 import numpy as np
 
 import gguf
 
+warnings.filterwarnings('error')
+
 # Note: Does not support GGML_QKK_64
 QK_K = 256
 # Items here are (block size, type size)
@@ -215,15 +217,10 @@ class GGMLToGGUF:
         if self.vocab_override is not None:
             vo = self.vocab_override
             print('* Adding vocab item(s)')
-            for (idx, vitem) in enumerate(vo.all_tokens()):
-                if len(vitem) == 3:
-                    tokens.append(vitem[0])
-                    scores.append(vitem[1])
-                    toktypes.append(vitem[2])
-                else:
-                    # Maybe try to guess the token type here?
-                    tokens.append(vitem[0])
-                    scores.append(vitem[1])
+            for (idx, (vbytes, score, ttype)) in enumerate(vo.all_tokens()):
+                tokens.append(vbytes)
+                scores.append(score)
+                toktypes.append(ttype)
             assert len(tokens) == hp.n_vocab, f'Override vocab has a different number of items than hyperparameters - override = {len(tokens)} but n_vocab={hp.n_vocab}'
             gguf_writer.add_token_list(tokens)
             gguf_writer.add_token_scores(scores)
@@ -231,9 +228,21 @@ class GGMLToGGUF:
             gguf_writer.add_token_types(toktypes)
             return
         print(f'* Adding {hp.n_vocab} vocab item(s)')
+        assert len(self.model.vocab.items) >= 3, 'Cannot handle unexpectedly short model vocab'
         for (tokid, (vbytes, vscore)) in enumerate(self.model.vocab.items):
             tt = 1 # Normal
-            if len(vbytes) == 0:
+            # Special handling for UNK, BOS, EOS tokens.
+            if tokid <= 2:
+                if tokid == 0:
+                    vbytes = b'<unk>'
+                    tt = 2
+                elif tokid == 1:
+                    vbytes = b'<s>'
+                    tt = 3
+                else:
+                    vbytes = b'</s>'
+                    tt = 3
+            elif len(vbytes) == 0:
                 tt = 3 # Control
             elif tokid >= 3 and tokid <= 258 and len(vbytes) == 1:
                 vbytes = bytes(f'<0x{vbytes[0]:02X}>', encoding = 'UTF-8')
@@ -246,6 +255,9 @@ class GGMLToGGUF:
         gguf_writer.add_token_list(tokens)
         gguf_writer.add_token_scores(scores)
         gguf_writer.add_token_types(toktypes)
+        gguf_writer.add_unk_token_id(0)
+        gguf_writer.add_bos_token_id(1)
+        gguf_writer.add_eos_token_id(2)
 
     def add_tensors(self, gguf_writer):
         nm = self.name_map
@@ -315,7 +327,11 @@ def main():
     data = np.memmap(cfg.input, mode = 'r')
     model = GGMLV3Model()
     print('* Scanning GGML input file')
-    offset = model.load(data, 0)
+    try:
+        offset = model.load(data, 0)
+    except OverflowError:
+        print(f'!!! Caught overflow loading tensors. The most likely issue is running on Windows but not in WSL. Try running in WSL if possible.', file = sys.stderr)
+        raise
     print(f'* GGML model hyperparameters: {model.hyperparameters}')
     vocab_override = None
     params_override = None
@@ -330,4 +346,5 @@ def main():
     converter.save()
     print(f'* Successful completion. Output saved to: {cfg.output}')
 
-main()
+if __name__ == '__main__':
+    main()
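The Python hunks above modify the GGML-to-GGUF conversion script (the `GGMLToGGUF` class and its `main()` entry point). As a usage sketch only: the script name and the `--input`/`--output` flag spellings are assumptions inferred from the `cfg.input`/`cfg.output` fields, and the WSL remark simply mirrors the OverflowError hint added above.

```bash
# Hypothetical invocation; the script name and flag spellings are assumptions.
# On Windows, the conversion is best run inside WSL, as the new OverflowError message suggests.
python convert-llama-ggmlv3-to-gguf.py \
  --input  ./models/13B/ggml-model-q4_0.bin \
  --output ./models/13B/ggml-model-q4_0.gguf
```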
@@ -288,6 +288,10 @@ These options help improve the performance and memory usage of the LLaMA models.
 
 - `--prompt-cache FNAME`: Specify a file to cache the model state after the initial prompt. This can significantly speed up the startup time when you're using longer prompts. The file is created during the first run and is reused and updated in subsequent runs. **Note**: Restoring a cached prompt does not imply restoring the exact state of the session at the point it was saved. So even when specifying a specific seed, you are not guaranteed to get the same sequence of tokens as the original generation.
 
+### Grammars
+
+- `--grammar GRAMMAR`, `--grammar-file FILE`: Specify a grammar (defined inline or in a file) to constrain model output to a specific format. For example, you could force the model to output JSON or to speak only in emojis. See the [GBNF guide](../../grammars/README.md) for details on the syntax.
+
 ### Quantization
 
 For information about 4-bit quantization, which can significantly improve performance and reduce memory usage, please refer to llama.cpp's primary [README](../../README.md#prepare-data--run).
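As an illustrative sketch of how the options in this hunk combine on the command line (model path, prompt text, and cache file name are placeholders):

```bash
# Cache the evaluated prompt state and constrain output with a grammar file
./main -m ./models/13B/ggml-model-q4_0.gguf \
  --prompt-cache bob.prompt.bin \
  --grammar-file grammars/json.gbnf \
  -p 'Request: schedule a call at 8pm; Command:'

# The grammar can also be given inline; here the model may only answer "yes" or "no"
./main -m ./models/13B/ggml-model-q4_0.gguf \
  --grammar 'root ::= "yes" | "no"' \
  -p 'Is a tomato a fruit? Answer:'
```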
@@ -126,7 +126,7 @@ node .
 
 `stream`: It allows receiving each predicted token in real-time instead of waiting for the completion to finish. To enable this, set it to `true`.
 
-`prompt`: Provide a prompt. Internally, the prompt is compared, and it detects if a part has already been evaluated, and only the remaining part is evaluated. A space is inserted in the front like main.cpp does.
+`prompt`: Provide a prompt as a string, or as an array of strings and numbers representing tokens. Internally, the prompt is compared, and it detects if a part has already been evaluated, and only the remaining part is evaluated. If the prompt is a string, or an array with the first element given as a string, a space is inserted in the front like main.cpp does.
 
 `stop`: Specify a JSON array of stopping strings.
 These words will not be included in the completion, so make sure to add them to the prompt for the next iteration (default: []).
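To illustrate the new `prompt` behaviour, here is a request sketch that mixes strings and raw token ids. It assumes the example server is listening on the default `http://localhost:8080` and that the request goes to the `/completion` endpoint; the token ids and parameter values are illustrative only.

```bash
# The prompt array mixes plain strings with raw token ids (the numbers are made up).
# Only the first string element receives the leading-space handling described above.
curl --request POST \
  --url http://localhost:8080/completion \
  --header "Content-Type: application/json" \
  --data '{
    "prompt": ["Building a website can be done in", 263, 4866, " simple steps:"],
    "n_predict": 64,
    "stream": false,
    "stop": ["</s>"]
  }'
```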
@@ -191,6 +191,7 @@ struct llama_server_context
     size_t n_past = 0;
     size_t n_remain = 0;
 
+    json prompt;
     std::vector<llama_token> embd;
     std::vector<llama_token> last_n_tokens;
 
@@ -268,6 +269,53 @@ struct llama_server_context
         return true;
     }
 
+    std::vector<llama_token> tokenize(json json_prompt, bool add_bos)
+    {
+        // If `add_bos` is true, we only add BOS, when json_prompt is a string,
+        // or the first element of the json_prompt array is a string.
+        std::vector<llama_token> prompt_tokens;
+
+        if (json_prompt.is_array())
+        {
+            bool first = true;
+            for (const auto& p : json_prompt)
+            {
+                if (p.is_string())
+                {
+                    auto s = p.template get<std::string>();
+                    std::vector<llama_token> p;
+                    if (first)
+                    {
+                        s.insert(0, 1, ' '); // add a space if it's the first
+                        p = ::llama_tokenize(ctx, s, add_bos);
+                        first = false;
+                    }
+                    else
+                    {
+                        p = ::llama_tokenize(ctx, s, false);
+                    }
+                    prompt_tokens.insert(prompt_tokens.end(), p.begin(), p.end());
+                }
+                else
+                {
+                    if (first)
+                    {
+                        first = false;
+                    }
+                    prompt_tokens.push_back(p.template get<llama_token>());
+                }
+            }
+        }
+        else
+        {
+            auto s = json_prompt.template get<std::string>();
+            s.insert(0, 1, ' '); // always add a first space
+            prompt_tokens = ::llama_tokenize(ctx, s, add_bos);
+        }
+
+        return prompt_tokens;
+    }
+
     bool loadGrammar()
     {
         if (!params.grammar.empty()) {
@@ -295,8 +343,8 @@ struct llama_server_context
 
     void loadPrompt()
     {
-        params.prompt.insert(0, 1, ' '); // always add a first space
-        std::vector<llama_token> prompt_tokens = ::llama_tokenize(ctx, params.prompt, true);
+        auto prompt_tokens = tokenize(prompt, true); // always add BOS
+
         num_prompt_tokens = prompt_tokens.size();
 
         if (params.n_keep < 0)
@@ -1017,7 +1065,7 @@ static json format_final_response(llama_server_context &llama, const std::string
         {"tokens_predicted", llama.num_tokens_predicted},
         {"tokens_evaluated", llama.num_prompt_tokens},
         {"generation_settings", format_generation_settings(llama)},
-        {"prompt", llama.params.prompt},
+        {"prompt", llama.prompt},
         {"truncated", llama.truncated},
         {"stopped_eos", llama.stopped_eos},
         {"stopped_word", llama.stopped_word},
@@ -1086,10 +1134,18 @@ static void parse_options_completion(const json &body, llama_server_context &lla
     llama.params.penalize_nl = json_value(body, "penalize_nl", default_params.penalize_nl);
     llama.params.n_keep = json_value(body, "n_keep", default_params.n_keep);
     llama.params.seed = json_value(body, "seed", default_params.seed);
-    llama.params.prompt = json_value(body, "prompt", default_params.prompt);
     llama.params.grammar = json_value(body, "grammar", default_params.grammar);
     llama.params.n_probs = json_value(body, "n_probs", default_params.n_probs);
 
+    if (body.count("prompt") != 0)
+    {
+        llama.prompt = body["prompt"];
+    }
+    else
+    {
+        llama.prompt = "";
+    }
+
     llama.params.logit_bias.clear();
     if (json_value(body, "ignore_eos", false))
     {
@@ -1370,8 +1426,11 @@ int main(int argc, char **argv)
             auto lock = llama.lock();
 
             const json body = json::parse(req.body);
-            const std::string content = json_value<std::string>(body, "content", "");
-            const std::vector<llama_token> tokens = llama_tokenize(llama.ctx, content, false);
+            std::vector<llama_token> tokens;
+            if (body.count("content") != 0)
+            {
+                tokens = llama.tokenize(body["content"], false);
+            }
             const json data = format_tokenizer_response(tokens);
             return res.set_content(data.dump(), "application/json"); });
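A usage sketch for the handler above, assuming it backs the example server's `/tokenize` route and that the server is listening on the default `http://localhost:8080`. With `content` now optional, omitting the field simply yields an empty token list.

```bash
# Tokenize a string without adding BOS (the handler calls llama.tokenize(..., false))
curl --request POST \
  --url http://localhost:8080/tokenize \
  --header "Content-Type: application/json" \
  --data '{"content": "Hello, world!"}'
```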
@@ -1383,7 +1442,14 @@ int main(int argc, char **argv)
 
             llama.rewind();
             llama_reset_timings(llama.ctx);
-            llama.params.prompt = json_value<std::string>(body, "content", "");
+            if (body.count("content") != 0)
+            {
+                llama.prompt = body["content"];
+            }
+            else
+            {
+                llama.prompt = "";
+            }
             llama.params.n_predict = 0;
             llama.loadPrompt();
             llama.beginCompletion();
grammars/README.md (new file, 91 lines)

@@ -0,0 +1,91 @@
# GBNF Guide

GBNF (GGML BNF) is a format for defining [formal grammars](https://en.wikipedia.org/wiki/Formal_grammar) to constrain model outputs in `llama.cpp`. For example, you can use it to force the model to generate valid JSON, or speak only in emojis. GBNF grammars are supported in various ways in `examples/main` and `examples/server`.

## Background

[Backus-Naur Form (BNF)](https://en.wikipedia.org/wiki/Backus%E2%80%93Naur_form) is a notation for describing the syntax of formal languages like programming languages, file formats, and protocols. GBNF is an extension of BNF that primarily adds a few modern regex-like features.

## Basics

In GBNF, we define *production rules* that specify how a *non-terminal* (rule name) can be replaced with sequences of *terminals* (characters, specifically Unicode [code points](https://en.wikipedia.org/wiki/Code_point)) and other non-terminals. The basic format of a production rule is `nonterminal ::= sequence...`.

## Example

Before going deeper, let's look at some of the features demonstrated in `grammars/chess.gbnf`, a small chess notation grammar:
```
# `root` specifies the pattern for the overall output
root ::= (
    # it must start with the characters "1. " followed by a sequence
    # of characters that match the `move` rule, followed by a space, followed
    # by another move, and then a newline
    "1. " move " " move "\n"

    # it's followed by one or more subsequent moves, numbered with one or two digits
    ([1-9] [0-9]? ". " move " " move "\n")+
)

# `move` is an abstract representation, which can be a pawn, nonpawn, or castle.
# The `[+#]?` denotes the possibility of checking or mate signs after moves
move ::= (pawn | nonpawn | castle) [+#]?

pawn ::= ...
nonpawn ::= ...
castle ::= ...
```

## Non-Terminals and Terminals

Non-terminal symbols (rule names) stand for a pattern of terminals and other non-terminals. They are required to be a dashed lowercase word, like `move`, `castle`, or `check-mate`.

Terminals are actual characters ([code points](https://en.wikipedia.org/wiki/Code_point)). They can be specified as a sequence like `"1"` or `"O-O"` or as ranges like `[1-9]` or `[NBKQR]`.

## Characters and character ranges

Terminals support the full range of Unicode. Unicode characters can be specified directly in the grammar, for example `hiragana ::= [ぁ-ゟ]`, or with escapes: 8-bit (`\xXX`), 16-bit (`\uXXXX`) or 32-bit (`\UXXXXXXXX`).

Character ranges can be negated with `^`:
```
single-line ::= [^\n]+ "\n"
```

## Sequences and Alternatives

The order of symbols in a sequence matters. For example, in `"1. " move " " move "\n"`, the `"1. "` must come before the first `move`, etc.

Alternatives, denoted by `|`, give different sequences that are acceptable. For example, in `move ::= pawn | nonpawn | castle`, `move` can be a `pawn` move, a `nonpawn` move, or a `castle`.

Parentheses `()` can be used to group sequences, which allows for embedding alternatives in a larger rule or applying repetition and optional symbols (below) to a sequence.

## Repetition and Optional Symbols

- `*` after a symbol or sequence means that it can be repeated zero or more times.
- `+` denotes that the symbol or sequence should appear one or more times.
- `?` makes the preceding symbol or sequence optional.

## Comments and newlines

Comments can be specified with `#`:
```
# defines optional whitespace
ws ::= [ \t\n]+
```

Newlines are allowed between rules and between symbols or sequences nested inside parentheses. Additionally, a newline after an alternate marker `|` will continue the current rule, even outside of parentheses.

## The root rule

In a full grammar, the `root` rule always defines the starting point of the grammar. In other words, it specifies what the entire output must match.

```
# a grammar for lists
root ::= ("- " item)+
item ::= [^\n]+ "\n"
```

## Next steps

This guide provides a brief overview. Check out the GBNF files in this directory (`grammars/`) for examples of full grammars. You can try them out with:
```
./main -m <model> --grammar-file grammars/some-grammar.gbnf -p 'Some prompt'
```
@@ -703,7 +703,7 @@ struct llama_vocab {
     // default LLaMA special tokens
     id special_bos_id = 1;
     id special_eos_id = 2;
-    id special_unk_id = -1;
+    id special_unk_id = 0;
     id special_sep_id = -1;
     id special_pad_id = -1;
 