Using const auto references in range-based loops (C++17)
This commit is contained in:
parent
8bc1943efe
commit
574a6581ba
2 changed files with 4 additions and 5 deletions
|
@ -399,7 +399,7 @@ namespace grammar_parser {
|
||||||
void print_grammar(FILE * file, const parse_state & state) {
|
void print_grammar(FILE * file, const parse_state & state) {
|
||||||
try {
|
try {
|
||||||
std::map<uint32_t, std::string> symbol_id_names;
|
std::map<uint32_t, std::string> symbol_id_names;
|
||||||
for (auto kv : state.symbol_ids) {
|
for (const auto & kv : state.symbol_ids) {
|
||||||
symbol_id_names[kv.second] = kv.first;
|
symbol_id_names[kv.second] = kv.first;
|
||||||
}
|
}
|
||||||
for (size_t i = 0, end = state.rules.size(); i < end; i++) {
|
for (size_t i = 0, end = state.rules.size(); i < end; i++) {
|
||||||
|
|
|
@ -5429,7 +5429,6 @@ struct llm_tokenizer_bpe {
|
||||||
llm_symbol sym;
|
llm_symbol sym;
|
||||||
size_t char_len = std::min(word.size() - offset, (size_t) ::utf8_len(word[offset]));
|
size_t char_len = std::min(word.size() - offset, (size_t) ::utf8_len(word[offset]));
|
||||||
sym.text = word.c_str() + offset;
|
sym.text = word.c_str() + offset;
|
||||||
sym.n = 1;
|
|
||||||
sym.n = char_len;
|
sym.n = char_len;
|
||||||
offset += sym.n;
|
offset += sym.n;
|
||||||
sym.prev = index - 1;
|
sym.prev = index - 1;
|
||||||
|
@ -5992,7 +5991,7 @@ static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates_for_
|
||||||
std::vector<llama_grammar_candidate> rejects;
|
std::vector<llama_grammar_candidate> rejects;
|
||||||
|
|
||||||
if (stack.empty()) {
|
if (stack.empty()) {
|
||||||
for (auto tok : candidates) {
|
for (const auto & tok : candidates) {
|
||||||
if (*tok.code_points != 0 || tok.partial_utf8.n_remain != 0) {
|
if (*tok.code_points != 0 || tok.partial_utf8.n_remain != 0) {
|
||||||
rejects.push_back(tok);
|
rejects.push_back(tok);
|
||||||
}
|
}
|
||||||
|
@ -6003,7 +6002,7 @@ static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates_for_
|
||||||
const llama_grammar_element * stack_pos = stack.back();
|
const llama_grammar_element * stack_pos = stack.back();
|
||||||
|
|
||||||
std::vector<llama_grammar_candidate> next_candidates;
|
std::vector<llama_grammar_candidate> next_candidates;
|
||||||
for (auto tok : candidates) {
|
for (const auto & tok : candidates) {
|
||||||
if (*tok.code_points == 0) {
|
if (*tok.code_points == 0) {
|
||||||
// reached end of full codepoints in token, reject iff it ended in a partial sequence
|
// reached end of full codepoints in token, reject iff it ended in a partial sequence
|
||||||
// that cannot satisfy this position in grammar
|
// that cannot satisfy this position in grammar
|
||||||
|
@ -6029,7 +6028,7 @@ static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates_for_
|
||||||
llama_grammar_advance_stack(rules, stack_after, next_stacks);
|
llama_grammar_advance_stack(rules, stack_after, next_stacks);
|
||||||
|
|
||||||
auto next_rejects = llama_grammar_reject_candidates(rules, next_stacks, next_candidates);
|
auto next_rejects = llama_grammar_reject_candidates(rules, next_stacks, next_candidates);
|
||||||
for (auto tok : next_rejects) {
|
for (const auto & tok : next_rejects) {
|
||||||
rejects.push_back({ tok.index, tok.code_points - 1, tok.partial_utf8 });
|
rejects.push_back({ tok.index, tok.code_points - 1, tok.partial_utf8 });
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue