Merge unordered_map/vector changes with trunk updates
commit ef792ae8bd

8 changed files with 199 additions and 54 deletions

@@ -6,7 +6,7 @@ RUN apt-get update && \
     apt-get install -y build-essential python3 python3-pip
 
 RUN pip install --upgrade pip setuptools wheel \
-    && pip install torch torchvision torchaudio sentencepiece numpy
+    && pip install numpy requests sentencepiece torch tqdm
 
 WORKDIR /app

Makefile (2 changes)
@@ -31,7 +31,7 @@ endif
 #
 
 CFLAGS   = -I.              -O3 -DNDEBUG -std=c11   -fPIC
-CXXFLAGS = -I. -I./examples -O3 -DNDEBUG -std=c++11 -fPIC
+CXXFLAGS = -I. -I./examples -O3 -DNDEBUG -std=c++17 -fPIC
 LDFLAGS  =
 
 # OS specific
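The bump from -std=c++11 to -std=c++17 is needed because the reworked tokenizer in utils.cpp further down leans on std::string_view, a C++17 library type, to walk the input without copying. A minimal sketch of the pattern it relies on (this snippet is illustrative only and is not part of the commit):

    // Sketch: consuming text through std::string_view, as the new tokenizer does (requires C++17).
    #include <cstdio>
    #include <string_view>

    int main() {
        std::string_view text = "hello";
        while (!text.empty()) {
            std::string_view piece(text.data(), 1); // take one byte
            text.remove_prefix(1);                  // advance without copying
            printf("piece: %.*s\n", (int) piece.size(), piece.data());
        }
        return 0;
    }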

@@ -11,6 +11,9 @@ Inference of [LLaMA](https://arxiv.org/abs/2302.13971) model in pure C/C++
 - Cache input prompts for faster initialization: https://github.com/ggerganov/llama.cpp/issues/64
 - Create a `llama.cpp` logo: https://github.com/ggerganov/llama.cpp/issues/105
 
+**TEMPORARY NOTICE:**
+If you're updating to the latest master, you will need to regenerate your model files as the format has changed.
+
 ## Description
 
 The main goal is to run the model using 4-bit quantization on a MacBook

@@ -60,7 +60,8 @@ def write_header(fout, hparams, ftype):
 
     keys = ["vocab_size", "dim", "multiple_of", "n_heads", "n_layers"]
     values = [
-        0x67676d6c, # magic: ggml in hex
+        0x67676d66, # magic: ggml in hex
+        1, # file version
         *[hparams[key] for key in keys],
         hparams["dim"] // hparams["n_heads"], # rot (obsolete)
        ftype
@@ -85,6 +86,7 @@ def write_tokens(fout, tokenizer):
        text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode("utf-8")
        fout.write(struct.pack("i", len(text)))
        fout.write(text)
+        fout.write(struct.pack("f", tokenizer.get_score(i)))
 
 def process_and_write_variables(fout, model, ftype):
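For orientation, the converter now writes: a 4-byte magic (0x67676d66), a 4-byte file version (1), the integer hyperparameters, and then one record per vocab token consisting of a length-prefixed string followed by a float score. Below is a minimal, hedged C++ sketch of a reader for just that prefix, assuming the same 32-bit little-endian integers the script packs; the function name read_prefix is illustrative, and the loaders in main.cpp and quantize.cpp further down are the authoritative versions:

    // Sketch: read the new model-file prefix written by the conversion script.
    #include <cstdint>
    #include <cstdio>
    #include <fstream>
    #include <string>
    #include <vector>

    bool read_prefix(const std::string & fname) {
        std::ifstream fin(fname, std::ios::binary);
        if (!fin) return false;

        uint32_t magic = 0, version = 0;
        fin.read((char *) &magic,   sizeof(magic));
        fin.read((char *) &version, sizeof(version));
        if (magic != 0x67676d66 || version != 1) return false; // new magic + file version

        int32_t hparams[7]; // vocab_size, dim, multiple_of, n_heads, n_layers, rot, ftype
        fin.read((char *) hparams, sizeof(hparams));

        // each vocab entry is: int32 length, raw bytes, float score
        std::vector<char> buf;
        for (int32_t i = 0; i < hparams[0]; ++i) {
            int32_t len = 0;
            fin.read((char *) &len, sizeof(len));
            buf.resize(len);
            fin.read(buf.data(), len);
            float score = 0.0f;
            fin.read((char *) &score, sizeof(score));
        }
        return bool(fin);
    }

    int main(int argc, char ** argv) {
        if (argc > 1 && read_prefix(argv[1])) printf("header and vocab prefix read OK\n");
        return 0;
    }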

main.cpp (33 changes)
@@ -3,6 +3,7 @@
 #include "utils.h"
 
 #include <cassert>
+#include <cinttypes>
 #include <cmath>
 #include <cstdio>
 #include <cstring>
@@ -104,10 +105,24 @@ bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab
     {
         uint32_t magic;
         fin.read((char *) &magic, sizeof(magic));
-        if (magic != 0x67676d6c) {
+        if (magic == 0x67676d6c) {
+            fprintf(stderr, "%s: invalid model file '%s' (too old, regenerate your model files!)\n",
+                    __func__, fname.c_str());
+            return false;
+        }
+        if (magic != 0x67676d66) {
             fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname.c_str());
             return false;
         }
+
+        uint32_t format_version;
+        fin.read((char *) &format_version, sizeof(format_version));
+
+        if (format_version != 1) {
+            fprintf(stderr, "%s: invalid model file '%s' (unsupported format version %" PRIu32 ")\n",
+                    __func__, fname.c_str(), format_version);
+            return false;
+        }
     }
 
     int n_ff = 0;
@@ -154,8 +169,14 @@ bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab
             word.resize(len);
             fin.read((char *) word.data(), len);
 
+            float score;
+            fin.read((char *) &score, sizeof(score));
+
             vocab.token_to_id[word] = i;
-            vocab.id_to_token[i] = word;
+
+            auto &tok_score = vocab.id_to_token[i];
+            tok_score.token = word;
+            tok_score.score = score;
 
             //if (i < 30000) {
             //    fprintf(stderr, "%s: vocab[%d] = '%s'\n", __func__, i, word.c_str());
@@ -867,7 +888,7 @@ int main(int argc, char ** argv) {
     }
 
     // enable interactive mode if reverse prompt is specified
-    if (!antipromptv_inp.size()) {
+    if (antipromptv_inp.size() != 0) {
         params.interactive = true;
     }
 
@@ -875,7 +896,7 @@ int main(int argc, char ** argv) {
     fprintf(stderr, "%s: prompt: '%s'\n", __func__, params.prompt.c_str());
     fprintf(stderr, "%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
     for (int i = 0; i < (int) embd_inp.size(); i++) {
-        fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], vocab.id_to_token.at(embd_inp[i]).c_str());
+        fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], vocab.id_to_token.at(embd_inp[i]).token.c_str());
     }
     fprintf(stderr, "\n");
     if (params.interactive) {
@@ -897,7 +918,7 @@ int main(int argc, char ** argv) {
         fprintf(stderr, "%s: reverse prompt: '%s'\n", __func__, params.antiprompt.at(apindex).c_str());
         fprintf(stderr, "%s: number of tokens in reverse prompt = %zu\n", __func__, antiprompt_inp.size());
         for (int i = 0; i < (int) antiprompt_inp.size(); i++) {
-            fprintf(stderr, "%6d -> '%s'\n", antiprompt_inp[i], vocab.id_to_token.at(antiprompt_inp[i]).c_str());
+            fprintf(stderr, "%6d -> '%s'\n", antiprompt_inp[i], vocab.id_to_token.at(antiprompt_inp[i]).token.c_str());
         }
         fprintf(stderr, "\n");
     }
@@ -1003,7 +1024,7 @@ int main(int argc, char ** argv) {
         // display text
         if (!input_noecho) {
             for (auto id : embd) {
-                printf("%s", vocab.id_to_token[id].c_str());
+                printf("%s", vocab.id_to_token[id].token.c_str());
             }
             fflush(stdout);
         }

quantize.cpp (28 changes)
@@ -3,6 +3,7 @@
 #include "utils.h"
 
 #include <cassert>
+#include <cinttypes>
 #include <cmath>
 #include <cstdio>
 #include <cstring>
@@ -62,12 +63,28 @@ bool llama_model_quantize(const std::string & fname_inp, const std::string & fna
     {
         uint32_t magic;
         finp.read((char *) &magic, sizeof(magic));
-        if (magic != 0x67676d6c) {
+        if (magic == 0x67676d6c) {
+            fprintf(stderr, "%s: invalid model file '%s' (too old, regenerate your model files!)\n",
+                    __func__, fname_inp.c_str());
+            return false;
+        }
+        if (magic != 0x67676d66) {
             fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname_inp.c_str());
             return false;
         }
 
         fout.write((char *) &magic, sizeof(magic));
+
+        uint32_t format_version;
+        finp.read((char *) &format_version, sizeof(format_version));
+
+        if (format_version != 1) {
+            fprintf(stderr, "%s: invalid model file '%s' (unsupported format version %" PRIu32 ")\n",
+                    __func__, fname_inp.c_str(), format_version);
+            return false;
+        }
+
+        fout.write((char *) &format_version, sizeof(format_version));
     }
 
     llama_hparams hparams;
@@ -122,8 +139,15 @@ bool llama_model_quantize(const std::string & fname_inp, const std::string & fna
         finp.read ((char *) word.data(), len);
         fout.write((char *) word.data(), len);
 
+        float score;
+        finp.read ((char *) &score, sizeof(score));
+        fout.write((char *) &score, sizeof(score));
+
         vocab.token_to_id[word] = i;
-        vocab.id_to_token[i] = word;
+
+        auto &tok_score = vocab.id_to_token[i];
+        tok_score.token = word;
+        tok_score.score = score;
     }
 }
 

utils.cpp (169 changes)
@@ -6,6 +6,7 @@
 #include <regex>
 #include <iostream>
 #include <iterator>
+#include <queue>
 #include <string>
 #include <math.h>
@@ -294,58 +295,146 @@ std::vector<gpt_vocab::id> gpt_tokenize(const gpt_vocab & vocab, const std::stri
     return tokens;
 }
 
-// TODO: Calculate this constant from the vocabulary
-#define MAX_TOKEN_LEN 18
-// SentencePiece implementation after https://guillaume-be.github.io/2020-05-30/sentence_piece
-std::vector<gpt_vocab::id> llama_tokenize(const gpt_vocab & vocab, const std::string & text, bool bos) {
-    std::vector<gpt_vocab::id> res;
-    std::vector<int> score;
-    std::vector<gpt_vocab::id> prev;
-    int len = text.length();
+static size_t utf8_len(char src) {
+    const size_t lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 };
+    uint8_t highbits = static_cast<uint8_t>(src) >> 4;
+    return lookup[highbits];
+}
 
-    score.resize(len + 1);
-    prev.resize(len + 1);
+struct llama_sp_symbol {
+    using index = int;
+    index prev;
+    index next;
+    std::string_view text;
+};
 
-    // Forward pass
-    for (int i = 0; i < len; i++) {
-        int max_len = std::min(len - i, MAX_TOKEN_LEN);
-        for (int sub_len = 1; sub_len <= max_len; sub_len++) {
-            auto sub = text.substr(i, sub_len);
-            auto token = vocab.token_to_id.find(sub);
-            if (token != vocab.token_to_id.end()) {
-                int token_score = sub.length() * sub.length();
-                int local_score = score[i] + token_score;
-                int next = i + sub_len;
-                if (score[next] < local_score) {
-                    score[next] = local_score;
-                    prev[next] = (*token).second;
+struct llama_sp_bigram {
+    struct comparator {
+        bool operator()(llama_sp_bigram & l, llama_sp_bigram & r) {
+            return (l.score < r.score) || (l.score == r.score && l.left > r.left);
         }
+    };
+    using queue_storage = std::vector<llama_sp_bigram>;
+    using queue = std::priority_queue<llama_sp_bigram, queue_storage, comparator>;
+    llama_sp_symbol::index left;
+    llama_sp_symbol::index right;
+    float score;
+    size_t size;
+};
+
+struct llama_tokenizer {
+    llama_tokenizer(const gpt_vocab & vocab): vocab_(vocab) {}
+
+    void tokenize(std::string_view text, std::vector<gpt_vocab::id> & output) {
+        // split string into utf8 chars
+        int index = 0;
+        while (!text.empty()) {
+            llama_sp_symbol sym;
+            size_t char_len = std::min(text.size(), utf8_len(text.data()[0]));
+            sym.text = std::string_view(text.data(), char_len);
+            sym.prev = index - 1;
+            text.remove_prefix(char_len);
+            sym.next = text.empty() ? -1 : index + 1;
+            index++;
+            symbols_.emplace_back(std::move(sym));
+        }
+
+        // seed the work queue with all possible 2-character tokens.
+        for (size_t i = 1; i < symbols_.size(); ++i) {
+            try_add_bigram(i - 1, i);
+        }
+
+        // keep substituting the highest frequency pairs for as long as we can.
+        while (!work_queue_.empty()) {
+            auto bigram = work_queue_.top();
+            work_queue_.pop();
+
+            auto & left_sym = symbols_[bigram.left];
+            auto & right_sym = symbols_[bigram.right];
+
+            // if one of the symbols already got merged, skip it.
+            if (left_sym.text.empty() || right_sym.text.empty() ||
+                left_sym.text.size() + right_sym.text.size() != bigram.size) {
+                continue;
+            }
+
+            // merge the right sym into the left one
+            left_sym.text = std::string_view(left_sym.text.data(), left_sym.text.size() + right_sym.text.size());
+            right_sym.text = std::string_view("");
+
+            // remove the right sym from the chain
+            left_sym.next = right_sym.next;
+            if (right_sym.next >= 0) {
+                symbols_[right_sym.next].prev = bigram.left;
+            }
+
+            // find more substitutions
+            try_add_bigram(left_sym.prev, bigram.left);
+            try_add_bigram(bigram.left, left_sym.next);
+        }
+
+        for (int i = 0; i != -1; i = symbols_[i].next) {
+            auto& symbol = symbols_[i];
+            auto token = vocab_.token_to_id.find(std::string(symbol.text));
+
+            if (token == vocab_.token_to_id.end()) {
+                // output any symbols that did not form tokens as bytes.
+                for (int j = 0; j < symbol.text.size(); ++j) {
+                    gpt_vocab::id token_id = static_cast<uint8_t>(symbol.text[j]) + 3;
+                    output.push_back(token_id);
                 }
+            } else {
+                output.push_back((*token).second);
             }
         }
     }
 
-    // Backward pass
-    int i = len;
-    while (i > 0) {
-        gpt_vocab::id token_id = prev[i];
-        if (token_id == 0) {
-            // TODO: Return error or something more meaningful
-            printf("failed to tokenize string!\n");
-            break;
+private:
+    void try_add_bigram(int left, int right) {
+        if (left == -1 || right == -1) {
+            return;
         }
-        res.push_back(token_id);
-        const auto &token = vocab.id_to_token.at(token_id);
-        i -= token.length();
+
+        std::string_view text(symbols_[left].text.data(), symbols_[left].text.size() + symbols_[right].text.size());
+        auto token = vocab_.token_to_id.find(std::string(text));
+
+        if (token == vocab_.token_to_id.end()) {
+            return;
        }
+
+        if (static_cast<size_t>((*token).second) >= vocab_.id_to_token.size()) {
+            return;
+        }
+
+        const auto &tok_score = vocab_.id_to_token[(*token).second];
+
+        llama_sp_bigram bigram;
+        bigram.left = left;
+        bigram.right = right;
+        bigram.score = tok_score.score;
+        bigram.size = text.size();
+        work_queue_.push(bigram);
+    }
+
+    const gpt_vocab & vocab_;
+    std::vector<llama_sp_symbol> symbols_;
+    llama_sp_bigram::queue work_queue_;
+};
+
+std::vector<gpt_vocab::id> llama_tokenize(const gpt_vocab & vocab, std::string_view text, bool bos) {
+    llama_tokenizer tokenizer(vocab);
+    std::vector<gpt_vocab::id> output;
+
+    if (text.size() == 0) {
+        return output;
+    }
 
     if (bos) {
-        res.push_back(1); // TODO: replace with vocab.bos
+        output.push_back(1);
     }
 
-    // Pieces are in reverse order so correct that
-    std::reverse(res.begin(), res.end());
-
-    return res;
+    tokenizer.tokenize(text, output);
+    return output;
 }
 
 bool gpt_vocab_init(const std::string & fname, gpt_vocab & vocab) {
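The new tokenizer replaces the fixed-length dynamic-programming pass with a SentencePiece-style bigram merge driven by a priority queue: every adjacent pair of symbols that forms a known token is a merge candidate, the highest-scoring pair is merged first, and characters that never merge into a known token fall back to byte-level ids (offset by 3). A hedged usage sketch against the new llama_tokenize(const gpt_vocab &, std::string_view, bool) signature follows; the toy token ids and scores are invented for illustration, and the snippet assumes it is compiled together with utils.cpp:

    // Sketch: exercising the new tokenizer with a tiny hand-built vocab.
    #include <cstdio>
    #include <string>
    #include <utility>
    #include <vector>
    #include "utils.h"

    int main() {
        gpt_vocab vocab;
        // Illustrative entries only; a real vocab comes from the model file.
        const std::vector<std::pair<std::string, float>> toy = {
            {"<unk>", 0.0f}, {"<s>", 0.0f}, {"</s>", 0.0f},
            {" h", 0.1f}, {" he", 0.2f}, {" hel", 0.3f}, {" hell", 0.4f}, {" hello", 0.5f},
        };
        vocab.id_to_token.resize(toy.size());
        for (size_t id = 0; id < toy.size(); ++id) {
            vocab.token_to_id[toy[id].first] = (gpt_vocab::id) id;
            vocab.id_to_token[id].token = toy[id].first;
            vocab.id_to_token[id].score = toy[id].second;
        }

        // bos=true prepends token id 1, matching how main.cpp calls it.
        const auto ids = llama_tokenize(vocab, " hello", true);
        for (const auto id : ids) {
            printf("%d -> '%s'\n", id, vocab.id_to_token[id].token.c_str());
        }
        return 0;
    }

With this toy vocabulary the single characters of " hello" merge step by step (" h" -> " he" -> ... -> " hello"), so the output is the BOS id followed by the id of " hello".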
@@ -355,7 +444,7 @@ bool gpt_vocab_init(const std::string & fname, gpt_vocab & vocab) {
 
     vocab.id_to_token.resize(vocab.token_to_id.size());
     for (const auto & kv : vocab.token_to_id) {
-        vocab.id_to_token[kv.second] = kv.first;
+        vocab.id_to_token[kv.second].token = kv.first;
     }
 
     printf("%s: vocab size = %d\n", __func__, (int) vocab.token_to_id.size());

utils.h (10 changes)
@@ -52,12 +52,18 @@ std::string gpt_random_prompt(std::mt19937 & rng);
 // Vocab utils
 //
 
+struct token_score {
+    using token_t = std::string;
+    token_t token;
+    float score;
+};
+
 struct gpt_vocab {
     using id = int32_t;
     using token = std::string;
 
     std::unordered_map<token, id> token_to_id;
-    std::vector<token> id_to_token;
+    std::vector<token_score> id_to_token;
 };
 
 void replace(std::string & str, const std::string & needle, const std::string & replacement);
@@ -79,7 +85,7 @@ std::vector<gpt_vocab::id> gpt_tokenize(const gpt_vocab & vocab, const std::stri
 
 // TODO: this is probably wrong, but I cannot figure out how this tokenizer works ..
 // ref: https://github.com/google/sentencepiece
-std::vector<gpt_vocab::id> llama_tokenize(const gpt_vocab & vocab, const std::string & text, bool bos);
+std::vector<gpt_vocab::id> llama_tokenize(const gpt_vocab & vocab, std::string_view text, bool bos);
 
 // load the tokens from encoder.json
 bool gpt_vocab_init(const std::string & fname, gpt_vocab & vocab);
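Since id_to_token is now a std::vector<token_score> rather than a std::vector<std::string>, call sites have to go through the .token member (and can also read .score), which is exactly what the main.cpp and quantize.cpp hunks above do. A small hedged sketch of the new access pattern; the helper name print_token is illustrative, not part of the commit:

    // Sketch: how a vocab lookup changes with the new layout.
    #include <cstdio>
    #include "utils.h"

    void print_token(const gpt_vocab & vocab, gpt_vocab::id id) {
        // old: printf("%s", vocab.id_to_token[id].c_str());
        const token_score & ts = vocab.id_to_token.at(id); // bounds-checked lookup
        printf("%s (score %.3f)\n", ts.token.c_str(), ts.score);
    }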