diff --git a/otherarch/gpt2_v1.cpp b/otherarch/gpt2_v1.cpp
index 9414725e4..98377ffcb 100644
--- a/otherarch/gpt2_v1.cpp
+++ b/otherarch/gpt2_v1.cpp
@@ -624,148 +624,3 @@ bool legacy_gpt2_eval(
 
     return true;
 }
-
-// int main(int argc, char ** argv) {
-//     ggml_v1_time_init();
-//     const int64_t t_main_start_us = ggml_v1_time_us();
-
-//     gpt_params params;
-//     params.model = "models/gpt-2-117M/ggml-model.bin";
-
-//     if (utils_gpt_params_parse(argc, argv, params) == false) {
-//         return 1;
-//     }
-
-//     if (params.seed < 0) {
-//         params.seed = time(NULL);
-//     }
-
-//     printf("%s: seed = %d\n", __func__, params.seed);
-
-//     std::mt19937 rng(params.seed);
-//     if (params.prompt.empty()) {
-//         if( !isatty(STDIN_FILENO) ){
-//             std::string line;
-//             while( std::getline(std::cin, line) ){
-//                 params.prompt = params.prompt + "\n" + line;
-//             }
-//         } else {
-//             params.prompt = utils_gpt_random_prompt(rng);
-//         }
-//     }
-
-//     int64_t t_load_us = 0;
-
-//     gpt_vocab vocab;
-//     gpt2_v1_model model;
-
-//     // load the model
-//     {
-//         const int64_t t_start_us = ggml_v1_time_us();
-
-//         if (!legacy_gpt2_model_load(params.model, model, vocab, FileFormat::GPT2_1)) {
-//             fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str());
-//             return 1;
-//         }
-
-//         t_load_us = ggml_v1_time_us() - t_start_us;
-//     }
-
-//     int n_past = 0;
-
-//     int64_t t_sample_us  = 0;
-//     int64_t t_predict_us = 0;
-
-//     std::vector<float> logits;
-
-//     // tokenize the prompt
-//     std::vector<gpt_vocab::id> embd_inp = ::gpt_tokenize(vocab, params.prompt);
-
-//     params.n_predict = std::min(params.n_predict, model.hparams.n_ctx - (int) embd_inp.size());
-
-//     printf("%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
-//     printf("\n");
-
-//     // submit the input prompt token-by-token
-//     // this reduces the memory usage during inference, at the cost of a bit of speed at the beginning
-//     std::vector<gpt_vocab::id> embd;
-
-//     // determine the required inference memory per token:
-//     size_t mem_per_token = 0;
-//     legacy_gpt2_eval(model, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, FileFormat::GPT2_1);
-
-//     for (int i = embd.size(); i < embd_inp.size() + params.n_predict; i++) {
-//         // predict
-//         if (embd.size() > 0) {
-//             const int64_t t_start_us = ggml_v1_time_us();
-
-//             if (!legacy_gpt2_eval(model, params.n_threads, n_past, embd, logits, mem_per_token, FileFormat::GPT2_1)) {
-//                 printf("Failed to predict\n");
-//                 return 1;
-//             }
-
-//             t_predict_us += ggml_v1_time_us() - t_start_us;
-//         }
-
-//         n_past += embd.size();
-//         embd.clear();
-
-//         if (i >= embd_inp.size()) {
-//             // sample next token
-//             const int   top_k = params.top_k;
-//             const float top_p = params.top_p;
-//             const float temp  = params.temp;
-
-//             const int n_vocab = model.hparams.n_vocab;
-
-//             gpt_vocab::id id = 0;
-
-//             {
-//                 const int64_t t_start_sample_us = ggml_v1_time_us();
-
-//                 id = gpt_sample_top_k_top_p(vocab, logits.data() + (logits.size() - n_vocab), top_k, top_p, temp, rng);
-
-//                 t_sample_us += ggml_v1_time_us() - t_start_sample_us;
-//             }
-
-//             // add it to the context
-//             embd.push_back(id);
-//         } else {
-//             // if here, it means we are still processing the input prompt
-//             for (int k = i; k < embd_inp.size(); k++) {
-//                 embd.push_back(embd_inp[k]);
-//                 if (embd.size() >= params.n_batch) {
-//                     break;
-//                 }
-//             }
-//             i += embd.size() - 1;
-//         }
-
-//         // display text
-//         for (auto id : embd) {
-//             printf("%s", vocab.id_to_token[id].c_str());
-//         }
-//         fflush(stdout);
-
-//         // end of text token
-//         if (embd.back() == 50256) {
-//             break;
-//         }
-//     }
-
-//     // report timing
-//     {
-//         const int64_t t_main_end_us = ggml_v1_time_us();
-
-//         printf("\n\n");
-//         printf("%s: mem per token = %8zu bytes\n", __func__, mem_per_token);
-//         printf("%s:     load time = %8.2f ms\n", __func__, t_load_us/1000.0f);
-//         printf("%s:   sample time = %8.2f ms\n", __func__, t_sample_us/1000.0f);
-//         printf("%s:  predict time = %8.2f ms / %.2f ms per token\n", __func__, t_predict_us/1000.0f, t_predict_us/1000.0f/n_past);
-//         printf("%s:    total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0f);
-//     }
-
-//     ggml_v1_free(model.ctx);
-
-//     return 0;
-// }
\ No newline at end of file
diff --git a/otherarch/gpt2_v2.cpp b/otherarch/gpt2_v2.cpp
index 030b9d450..0458948d2 100644
--- a/otherarch/gpt2_v2.cpp
+++ b/otherarch/gpt2_v2.cpp
@@ -72,12 +72,15 @@ ModelLoadResult gpt2_v2_model_load(const std::string & fname, gpt2_v2_model & mo
         }
 
         std::string word;
+        std::vector<char> buf(128);
+
         for (int i = 0; i < n_vocab; i++) {
             uint32_t len;
             fin.read((char *) &len, sizeof(len));
 
-            word.resize(len);
-            fin.read((char *) word.data(), len);
+            buf.resize(len);
+            fin.read((char *) buf.data(), len);
+            word.assign(buf.data(), len);
 
             vocab.token_to_id[word] = i;
             vocab.id_to_token[i] = word;
diff --git a/otherarch/gpt2_v3.cpp b/otherarch/gpt2_v3.cpp
index 7e957e9e2..eeb4eeab8 100644
--- a/otherarch/gpt2_v3.cpp
+++ b/otherarch/gpt2_v3.cpp
@@ -75,12 +75,15 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
         }
 
         std::string word;
+        std::vector<char> buf(128);
+
         for (int i = 0; i < n_vocab; i++) {
             uint32_t len;
             fin.read((char *) &len, sizeof(len));
 
-            word.resize(len);
-            fin.read((char *) word.data(), len);
+            buf.resize(len);
+            fin.read((char *) buf.data(), len);
+            word.assign(buf.data(), len);
 
             vocab.token_to_id[word] = i;
             vocab.id_to_token[i] = word;
diff --git a/otherarch/gptj_v2.cpp b/otherarch/gptj_v2.cpp
index e84142798..cfb48a8c6 100644
--- a/otherarch/gptj_v2.cpp
+++ b/otherarch/gptj_v2.cpp
@@ -75,12 +75,15 @@ ModelLoadResult gptj_v2_model_load(const std::string & fname, gptj_v2_model & mo
         }
 
         std::string word;
+        std::vector<char> buf(128);
+
         for (int i = 0; i < n_vocab; i++) {
             uint32_t len;
             fin.read((char *) &len, sizeof(len));
 
-            word.resize(len);
-            fin.read((char *) word.data(), len);
+            buf.resize(len);
+            fin.read((char *) buf.data(), len);
+            word.assign(buf.data(), len);
 
             vocab.token_to_id[word] = i;
             vocab.id_to_token[i] = word;
diff --git a/otherarch/gptj_v3.cpp b/otherarch/gptj_v3.cpp
index 4b207e0bd..893e6ebd6 100644
--- a/otherarch/gptj_v3.cpp
+++ b/otherarch/gptj_v3.cpp
@@ -75,12 +75,15 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
         }
 
         std::string word;
+        std::vector<char> buf(128);
+
         for (int i = 0; i < n_vocab; i++) {
             uint32_t len;
             fin.read((char *) &len, sizeof(len));
 
-            word.resize(len);
-            fin.read((char *) word.data(), len);
+            buf.resize(len);
+            fin.read((char *) buf.data(), len);
+            word.assign(buf.data(), len);
 
             vocab.token_to_id[word] = i;
             vocab.id_to_token[i] = word;
diff --git a/otherarch/neox_v2.cpp b/otherarch/neox_v2.cpp
index 5748bc0dc..8cfd821ad 100644
--- a/otherarch/neox_v2.cpp
+++ b/otherarch/neox_v2.cpp
@@ -75,12 +75,15 @@ ModelLoadResult gpt_neox_v2_model_load(const std::string & fname, gpt_neox_v2_mo
         const int32_t n_vocab = model.hparams.n_vocab;
 
         std::string word;
+        std::vector<char> buf(128);
+
         for (int i = 0; i < n_vocab; i++) {
             uint32_t len;
             fin.read((char *) &len, sizeof(len));
 
-            word.resize(len);
-            fin.read((char *) word.data(), len);
+            buf.resize(len);
+            fin.read((char *) buf.data(), len);
+            word.assign(buf.data(), len);
 
             vocab.token_to_id[word] = i;
             vocab.id_to_token[i] = word;
diff --git a/otherarch/neox_v3.cpp b/otherarch/neox_v3.cpp
index 32f13399f..65458f609 100644
--- a/otherarch/neox_v3.cpp
+++ b/otherarch/neox_v3.cpp
@@ -75,18 +75,22 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
         const int32_t n_vocab = model.hparams.n_vocab;
 
         std::string word;
+        std::vector<char> buf(128);
+
         for (int i = 0; i < n_vocab; i++) {
             uint32_t len;
             fin.read((char *) &len, sizeof(len));
 
-            word.resize(len);
-            fin.read((char *) word.data(), len);
+            buf.resize(len);
+            fin.read((char *) buf.data(), len);
+            word.assign(buf.data(), len);
 
             vocab.token_to_id[word] = i;
             vocab.id_to_token[i] = word;
         }
     }
 
+
     // for the big tensors, we have the option to store the data in 16-bit floats or quantized
     // in order to save memory and also to speed up the computation
     ggml_type wtype = ggml_ftype_to_ggml_type((ggml_ftype) (model.hparams.ftype));
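All six model loaders above switch from reading the token bytes straight into the `std::string` (via a cast-away-const `word.data()` pointer) to reading into a reusable scratch buffer and then building the string with an explicit length. A minimal sketch of the new pattern, with a hypothetical `read_vocab` helper and made-up map types for illustration only:

```cpp
#include <cstdint>
#include <fstream>
#include <map>
#include <string>
#include <vector>

// Sketch of the vocab-reading pattern used in the hunks above: read the
// length prefix, pull the raw bytes into a scratch buffer, then assign
// them to the string with an explicit length so embedded bytes survive.
static bool read_vocab(std::ifstream & fin, int n_vocab,
                       std::map<std::string, int> & token_to_id,
                       std::map<int, std::string> & id_to_token) {
    std::string word;
    std::vector<char> buf(128);           // reused for every token, grows on demand

    for (int i = 0; i < n_vocab; i++) {
        uint32_t len;
        fin.read((char *) &len, sizeof(len));

        buf.resize(len);                  // scratch buffer holds the raw bytes
        fin.read(buf.data(), len);
        word.assign(buf.data(), len);     // length-bounded copy into the string

        if (!fin) {
            return false;                 // short read or stream error
        }

        token_to_id[word] = i;
        id_to_token[i]    = word;
    }
    return true;
}
```

The buffer-plus-`assign` form keeps the read length explicit and avoids writing through the `(char *) word.data()` cast that the removed lines relied on.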
diff --git a/otherarch/utils.cpp b/otherarch/utils.cpp
index e762614e3..5c0baad88 100644
--- a/otherarch/utils.cpp
+++ b/otherarch/utils.cpp
@@ -3,78 +3,7 @@
 #include <fstream>
 #include <regex>
 
-bool utils_gpt_params_parse(int argc, char ** argv, gpt_params & params) {
-    for (int i = 1; i < argc; i++) {
-        std::string arg = argv[i];
-        if (arg == "-s" || arg == "--seed") {
-            params.seed = std::stoi(argv[++i]);
-        } else if (arg == "-t" || arg == "--threads") {
-            params.n_threads = std::stoi(argv[++i]);
-        } else if (arg == "-p" || arg == "--prompt") {
-            params.prompt = argv[++i];
-        } else if (arg == "-n" || arg == "--n_predict") {
-            params.n_predict = std::stoi(argv[++i]);
-        } else if (arg == "--top_k") {
-            params.top_k = std::stoi(argv[++i]);
-        } else if (arg == "--top_p") {
-            params.top_p = std::stof(argv[++i]);
-        } else if (arg == "--temp") {
-            params.temp = std::stof(argv[++i]);
-        } else if (arg == "-b" || arg == "--batch_size") {
-            params.n_batch = std::stoi(argv[++i]);
-        } else if (arg == "-m" || arg == "--model") {
-            params.model = argv[++i];
-        } else if (arg == "-h" || arg == "--help") {
-            utils_gpt_print_usage(argc, argv, params);
-            exit(0);
-        } else {
-            fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
-            utils_gpt_print_usage(argc, argv, params);
-            exit(0);
-        }
-    }
-
-    return true;
-}
-
-void utils_gpt_print_usage(int argc, char ** argv, const gpt_params & params) {
-    fprintf(stderr, "usage: %s [options]\n", argv[0]);
-    fprintf(stderr, "\n");
-    fprintf(stderr, "options:\n");
-    fprintf(stderr, "  -h, --help            show this help message and exit\n");
-    fprintf(stderr, "  -s SEED, --seed SEED  RNG seed (default: -1)\n");
-    fprintf(stderr, "  -t N, --threads N     number of threads to use during computation (default: %d)\n", params.n_threads);
-    fprintf(stderr, "  -p PROMPT, --prompt PROMPT\n");
-    fprintf(stderr, "                        prompt to start generation with (default: random)\n");
-    fprintf(stderr, "  -n N, --n_predict N   number of tokens to predict (default: %d)\n", params.n_predict);
-    fprintf(stderr, "  --top_k N             top-k sampling (default: %d)\n", params.top_k);
-    fprintf(stderr, "  --top_p N             top-p sampling (default: %.1f)\n", params.top_p);
-    fprintf(stderr, "  --temp N              temperature (default: %.1f)\n", params.temp);
-    fprintf(stderr, "  -b N, --batch_size N  batch size for prompt processing (default: %d)\n", params.n_batch);
-    fprintf(stderr, "  -m FNAME, --model FNAME\n");
-    fprintf(stderr, "                        model path (default: %s)\n", params.model.c_str());
-    fprintf(stderr, "\n");
-}
-
-std::string utils_gpt_random_prompt(std::mt19937 & rng) {
-    const int r = rng() % 10;
-    switch (r) {
-        case 0: return "So";
-        case 1: return "Once upon a time";
-        case 2: return "When";
-        case 3: return "The";
-        case 4: return "After";
-        case 5: return "If";
-        case 6: return "import";
-        case 7: return "He";
-        case 8: return "She";
-        case 9: return "They";
-        default: return "To";
-    }
-
-    return "The";
-}
-
 
 void utreplace(std::string & str, const std::string & needle, const std::string & replacement) {
     size_t pos = 0;
@@ -175,6 +104,31 @@ std::map<std::string, int32_t> json_parse(const std::string & fname) {
     return result;
 }
 
+
+void gpt_vocab::add_special_token(const std::string & token) {
+    special_tokens.push_back(token);
+}
+
+static void append_utf8(char32_t ch, std::string & out) {
+    if (ch <= 0x7F) {
+        out.push_back(static_cast<char>(ch));
+    } else if (ch <= 0x7FF) {
+        out.push_back(static_cast<char>(0xC0 | ((ch >> 6) & 0x1F)));
+        out.push_back(static_cast<char>(0x80 | (ch & 0x3F)));
+    } else if (ch <= 0xFFFF) {
+        out.push_back(static_cast<char>(0xE0 | ((ch >> 12) & 0x0F)));
+        out.push_back(static_cast<char>(0x80 | ((ch >> 6) & 0x3F)));
+        out.push_back(static_cast<char>(0x80 | (ch & 0x3F)));
+    } else if (ch <= 0x10FFFF) {
+        out.push_back(static_cast<char>(0xF0 | ((ch >> 18) & 0x07)));
+        out.push_back(static_cast<char>(0x80 | ((ch >> 12) & 0x3F)));
+        out.push_back(static_cast<char>(0x80 | ((ch >> 6) & 0x3F)));
+        out.push_back(static_cast<char>(0x80 | (ch & 0x3F)));
+    } else {
+        printf("Invalid Unicode code point\n");
+    }
+}
+
 std::vector<gpt_vocab::id> gpt_tokenize(const gpt_vocab & vocab, const std::string & text) {
     std::vector<std::string> words;
 
@@ -208,7 +162,8 @@ std::vector<gpt_vocab::id> gpt_tokenize(const gpt_vocab & vocab, const std::stri
                 if (it != vocab.token_to_id.end()) {
                     tokens.push_back(it->second);
                     i = j;
-                    break;
+                    j = n;
+                    continue;
                 }
                 --j;
             }
@@ -230,202 +185,6 @@ std::vector<gpt_vocab::id> gpt_tokenize(const gpt_vocab & vocab, const std::stri
     return tokens;
 }
 
-bool gpt_vocab_init(const std::string & fname, gpt_vocab & vocab) {
-    printf("%s: loading vocab from '%s'\n", __func__, fname.c_str());
-
-    vocab.token_to_id = ::json_parse(fname);
-
-    for (const auto & kv : vocab.token_to_id) {
-        vocab.id_to_token[kv.second] = kv.first;
-    }
-
-    printf("%s: vocab size = %d\n", __func__, (int) vocab.token_to_id.size());
-
-    // print the vocabulary
-    //for (auto kv : vocab.token_to_id) {
-    //    printf("'%s' -> %d\n", kv.first.data(), kv.second);
-    //}
-
-    return true;
-}
-
-void gptj_sample_top_k(std::vector<std::pair<double, gpt_vocab::id>> & logits_id, int top_k) {
-    // find the top K tokens
-    std::partial_sort(
-            logits_id.begin(),
-            logits_id.begin() + top_k, logits_id.end(),
-            [](const std::pair<double, gpt_vocab::id> & a, const std::pair<double, gpt_vocab::id> & b) {
-        return a.first > b.first;
-    });
-
-    logits_id.resize(top_k);
-}
-
-gpt_vocab::id gptj_sample_top_p_top_k(
-        const gpt_vocab & vocab,
-        const float * logits,
-        std::vector<gpt_vocab::id> & last_n_tokens,
-        double repeat_penalty,
-        int top_k,
-        double top_p,
-        double temp,
-        std::mt19937 & rng) {
-    int n_logits = vocab.id_to_token.size();
-
-    std::vector<std::pair<double, gpt_vocab::id>> logits_id;
-    logits_id.reserve(n_logits);
-
-    {
-        const double scale = 1.0/temp;
-        for (int i = 0; i < n_logits; ++i) {
-            // repetition penalty from CTRL paper (https://arxiv.org/abs/1909.05858)
-            // credit https://github.com/facebookresearch/llama/compare/main...shawwn:llama:main
-            if (std::find(last_n_tokens.begin(), last_n_tokens.end(), i) != last_n_tokens.end()) {
-                // if score < 0 then repetition penalty has to multiplied to reduce the previous token probability
-                if (logits[i] < 0.0) {
-                    logits_id.push_back(std::make_pair(logits[i]*scale*repeat_penalty, i));
-                } else {
-                    logits_id.push_back(std::make_pair(logits[i]*scale/repeat_penalty, i));
-                }
-            } else {
-                logits_id.push_back(std::make_pair(logits[i]*scale, i));
-            }
-        }
-    }
-
-    gptj_sample_top_k(logits_id, top_k > 0 ? std::min(top_k, n_logits) : n_logits);
-
-    double maxl = -INFINITY;
-    for (const auto & kv : logits_id) {
-        maxl = std::max(maxl, kv.first);
-    }
-
-    // compute probs for the top K tokens
-    std::vector<double> probs;
-    probs.reserve(logits_id.size());
-
-    double sum = 0.0;
-    for (const auto & kv : logits_id) {
-        double p = exp(kv.first - maxl);
-        probs.push_back(p);
-        sum += p;
-    }
-
-    // normalize the probs
-    for (auto & p : probs) {
-        p /= sum;
-    }
-
-    if (top_p < 1.0f) {
-        double cumsum = 0.0f;
-        for (int i = 0; i < (int) probs.size(); i++) {
-            cumsum += probs[i];
-            if (cumsum >= top_p) {
-                probs.resize(i + 1);
-                logits_id.resize(i + 1);
-                break;
-            }
-        }
-
-        cumsum = 1.0/cumsum;
-        for (int i = 0; i < (int) probs.size(); i++) {
-            probs[i] *= cumsum;
-        }
-    }
-
-    //printf("\n");
-    //for (int i = 0; i < (int) 10; i++) {
-    //    printf("%d: '%s' %f\n", i, vocab.id_to_token.at(logits_id[i].second).c_str(), probs[i]);
-    //}
-    //printf("\n\n");
-    //exit(0);
-
-    std::discrete_distribution<> dist(probs.begin(), probs.end());
-    int idx = dist(rng);
-
-    return logits_id[idx].second;
-}
-
-gpt_vocab::id gpt_sample_top_k_top_p(
-        const gpt_vocab & vocab,
-        const float * logits,
-        int top_k,
-        double top_p,
-        double temp,
-        std::mt19937 & rng) {
-    int n_logits = vocab.id_to_token.size();
-
-    std::vector<std::pair<double, gpt_vocab::id>> logits_id;
-    logits_id.reserve(n_logits);
-
-    {
-        const double scale = 1.0/temp;
-        for (int i = 0; i < n_logits; ++i) {
-            logits_id.push_back(std::make_pair(logits[i]*scale, i));
-        }
-    }
-
-    // find the top K tokens
-    std::partial_sort(
-            logits_id.begin(),
-            logits_id.begin() + top_k, logits_id.end(),
-            [](const std::pair<double, gpt_vocab::id> & a, const std::pair<double, gpt_vocab::id> & b) {
-        return a.first > b.first;
-    });
-
-    logits_id.resize(top_k);
-
-    double maxl = -INFINITY;
-    for (const auto & kv : logits_id) {
-        maxl = std::max(maxl, kv.first);
-    }
-
-    // compute probs for the top K tokens
-    std::vector<double> probs;
-    probs.reserve(logits_id.size());
-
-    double sum = 0.0;
-    for (const auto & kv : logits_id) {
-        double p = exp(kv.first - maxl);
-        probs.push_back(p);
-        sum += p;
-    }
-
-    // normalize the probs
-    for (auto & p : probs) {
-        p /= sum;
-    }
-
-    if (top_p < 1.0f) {
-        double cumsum = 0.0f;
-        for (int i = 0; i < top_k; i++) {
-            cumsum += probs[i];
-            if (cumsum >= top_p) {
-                top_k = i + 1;
-                probs.resize(top_k);
-                logits_id.resize(top_k);
-                break;
-            }
-        }
-
-        cumsum = 1.0/cumsum;
-        for (int i = 0; i < (int) probs.size(); i++) {
-            probs[i] *= cumsum;
-        }
-    }
-
-    //printf("\n");
-    //for (int i = 0; i < (int) probs.size(); i++) {
-    //    printf("%d: '%s' %f\n", i, vocab.id_to_token.at(logits_id[i].second).c_str(), probs[i]);
-    //}
-    //exit(0);
-
-    std::discrete_distribution<> dist(probs.begin(), probs.end());
-    int idx = dist(rng);
-
-    return logits_id[idx].second;
-}
-
 
 bool should_transpose_layer(std::string name)
 {
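The `gpt_tokenize` hunk above replaces the `break` after a successful vocab lookup with `j = n; continue;`, so the greedy longest-match scan keeps consuming the rest of the word inside the same inner loop instead of dropping back to the outer one. A standalone sketch of that control flow (the toy vocab, helper name, and `main` are hypothetical; the real function also falls back to single-character handling for unmatched spans, which is omitted here):

```cpp
#include <cstdio>
#include <map>
#include <string>
#include <vector>

// Greedy longest-match tokenization over a single word, mirroring the
// changed control flow: after a match, restart the search with the full
// remaining suffix (j = n) rather than breaking out of the loop.
static std::vector<int> tokenize_word(const std::map<std::string, int> & token_to_id,
                                      const std::string & word) {
    std::vector<int> tokens;
    int i = 0;
    int n = (int) word.size();
    int j = n;
    while (j > i) {
        auto it = token_to_id.find(word.substr(i, j - i));
        if (it != token_to_id.end()) {
            tokens.push_back(it->second);
            i = j;      // consume the matched piece
            j = n;      // restart with the longest remaining span
            continue;
        }
        --j;            // no match: try a shorter piece
    }
    return tokens;
}

int main() {
    // Hypothetical toy vocab, for illustration only.
    std::map<std::string, int> vocab = { {"un", 0}, {"believ", 1}, {"able", 2} };
    for (int id : tokenize_word(vocab, "unbelievable")) {
        printf("%d ", id);   // prints: 0 1 2
    }
    printf("\n");
    return 0;
}
```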
diff --git a/otherarch/utils.h b/otherarch/utils.h
index 84ba5da08..bb57a8242 100644
--- a/otherarch/utils.h
+++ b/otherarch/utils.h
@@ -24,6 +24,9 @@ struct gpt_vocab {
     std::map<token, id> token_to_id;
     std::map<id, token> id_to_token;
 
+    std::vector<std::string> special_tokens;
+
+    void add_special_token(const std::string & token);
 };
 
 void utreplace(std::string & str, const std::string & needle, const std::string & replacement);
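The `gpt_vocab` struct gains a `special_tokens` list plus an `add_special_token` helper, presumably so callers can register markers that should be treated as whole tokens. A minimal usage sketch; the registration function and the token strings below are hypothetical and not taken from the diff:

```cpp
#include <cstdint>
#include <map>
#include <string>
#include <vector>

// Trimmed-down mirror of the struct change in otherarch/utils.h.
struct gpt_vocab {
    using id    = int32_t;
    using token = std::string;

    std::map<token, id> token_to_id;
    std::map<id, token> id_to_token;

    std::vector<std::string> special_tokens;

    void add_special_token(const std::string & token) {
        special_tokens.push_back(token);
    }
};

// Hypothetical example: register markers right after the vocab is loaded.
static void register_special_tokens(gpt_vocab & vocab) {
    vocab.add_special_token("<|endoftext|>");
    vocab.add_special_token("<|user|>");
}
```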
@@ -43,37 +46,6 @@ std::map<std::string, int32_t> json_parse(const std::string & fname);
 //
 std::vector<gpt_vocab::id> gpt_tokenize(const gpt_vocab & vocab, const std::string & text);
 
-// load the tokens from encoder.json
-bool gpt_vocab_init(const std::string & fname, gpt_vocab & vocab);
-// sample next token given probabilities for each embedding
-//
-//   - consider only the top K tokens
-//   - from them, consider only the top tokens with cumulative probability > P
-//
-// TODO: not sure if this implementation is correct
-// TODO: temperature is not implemented
-//
-gpt_vocab::id gpt_sample_top_k_top_p(
-        const gpt_vocab & vocab,
-        const float * logits,
-        int top_k,
-        double top_p,
-        double temp,
-        std::mt19937 & rng);
-
-gpt_vocab::id gptj_sample_top_p_top_k(
-        const gpt_vocab & vocab,
-        const float * logits,
-        std::vector<gpt_vocab::id> & last_n_tokens,
-        double repeat_penalty,
-        int top_k,
-        double top_p,
-        double temp,
-        std::mt19937 & rng);
-
-bool utils_gpt_params_parse(int argc, char ** argv, gpt_params & params);
-void utils_gpt_print_usage(int argc, char ** argv, const gpt_params & params);
-std::string utils_gpt_random_prompt(std::mt19937 & rng);
 
 bool should_transpose_layer(std::string name);
\ No newline at end of file
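For reference, the new file-static `append_utf8` helper added in otherarch/utils.cpp above encodes a single code point into one to four UTF-8 bytes. A small self-contained check of that encoding logic; the function body is reproduced here only because it is static in utils.cpp, and the test code points are illustrative:

```cpp
#include <cstdio>
#include <string>

// Same encoding scheme as the append_utf8 added above: 1-4 bytes per code point.
static void append_utf8(char32_t ch, std::string & out) {
    if (ch <= 0x7F) {
        out.push_back(static_cast<char>(ch));
    } else if (ch <= 0x7FF) {
        out.push_back(static_cast<char>(0xC0 | ((ch >> 6) & 0x1F)));
        out.push_back(static_cast<char>(0x80 | (ch & 0x3F)));
    } else if (ch <= 0xFFFF) {
        out.push_back(static_cast<char>(0xE0 | ((ch >> 12) & 0x0F)));
        out.push_back(static_cast<char>(0x80 | ((ch >> 6) & 0x3F)));
        out.push_back(static_cast<char>(0x80 | (ch & 0x3F)));
    } else if (ch <= 0x10FFFF) {
        out.push_back(static_cast<char>(0xF0 | ((ch >> 18) & 0x07)));
        out.push_back(static_cast<char>(0x80 | ((ch >> 12) & 0x3F)));
        out.push_back(static_cast<char>(0x80 | ((ch >> 6) & 0x3F)));
        out.push_back(static_cast<char>(0x80 | (ch & 0x3F)));
    } else {
        printf("Invalid Unicode code point\n");
    }
}

int main() {
    std::string s;
    append_utf8(U'A', s);       // 1 byte:  41
    append_utf8(0x00E9, s);     // 2 bytes: C3 A9       (e-acute)
    append_utf8(0x20AC, s);     // 3 bytes: E2 82 AC    (euro sign)
    append_utf8(0x1F600, s);    // 4 bytes: F0 9F 98 80 (emoji)
    for (unsigned char c : s) {
        printf("%02X ", c);
    }
    printf("\n");
    return 0;
}
```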