common : use common_ prefix for common library functions
parent 3dc48fe75a
commit 4f7e4b5e19
33 changed files with 326 additions and 326 deletions
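For callers, this is a mechanical rename of the common-library helpers and structs from the llama_ prefix to the common_ prefix (for example llama_tokenize -> common_tokenize, llama_batch_add -> common_batch_add, llama_chat_msg -> common_chat_msg); signatures and behavior are unchanged. Below is a minimal sketch of how calling code looks after the rename, assuming an already-initialized llama_context * ctx (model and context setup omitted); the dump_prompt_tokens helper is purely illustrative and not part of this commit.

#include "common.h"   // common_tokenize, common_token_to_piece, common_detokenize
#include "llama.h"

#include <cstdio>
#include <string>
#include <vector>

// Illustrative helper only - not part of this commit.
static void dump_prompt_tokens(llama_context * ctx, const std::string & prompt) {
    // was: ::llama_tokenize(ctx, prompt, /*add_special=*/true)
    std::vector<llama_token> tokens = common_tokenize(ctx, prompt, /*add_special=*/true);

    for (const llama_token id : tokens) {
        // was: llama_token_to_piece(ctx, id)
        printf("%6d -> '%s'\n", id, common_token_to_piece(ctx, id).c_str());
    }

    // was: llama_detokenize(ctx, tokens)
    printf("round-trip: '%s'\n", common_detokenize(ctx, tokens).c_str());
}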
@@ -493,7 +493,7 @@ std::string string_from(const struct llama_context * ctx, const std::vector<llam
 first = false;
 }

-auto detokenized = llama_token_to_piece(ctx, token);
+auto detokenized = common_token_to_piece(ctx, token);

 detokenized.erase(
 std::remove_if(
@@ -524,7 +524,7 @@ std::string string_from(const struct llama_context * ctx, const struct llama_bat
 first = false;
 }

-auto detokenized = llama_token_to_piece(ctx, batch.token[i]);
+auto detokenized = common_token_to_piece(ctx, batch.token[i]);

 detokenized.erase(
 std::remove_if(
@@ -819,16 +819,16 @@ std::string fs_get_cache_file(const std::string & filename) {
 //
 // Model utils
 //
-struct llama_init_result llama_init_from_gpt_params(gpt_params & params) {
-llama_init_result iparams;
-auto mparams = llama_model_params_from_gpt_params(params);
+struct common_init_result llama_init_from_gpt_params(gpt_params & params) {
+common_init_result iparams;
+auto mparams = common_model_params_from_gpt_params(params);

 llama_model * model = nullptr;

 if (!params.hf_repo.empty() && !params.hf_file.empty()) {
-model = llama_load_model_from_hf(params.hf_repo.c_str(), params.hf_file.c_str(), params.model.c_str(), params.hf_token.c_str(), mparams);
+model = common_load_model_from_hf(params.hf_repo.c_str(), params.hf_file.c_str(), params.model.c_str(), params.hf_token.c_str(), mparams);
 } else if (!params.model_url.empty()) {
-model = llama_load_model_from_url(params.model_url.c_str(), params.model.c_str(), params.hf_token.c_str(), mparams);
+model = common_load_model_from_url(params.model_url.c_str(), params.model.c_str(), params.hf_token.c_str(), mparams);
 } else {
 model = llama_load_model_from_file(params.model.c_str(), mparams);
 }
@@ -863,7 +863,7 @@ struct llama_init_result llama_init_from_gpt_params(gpt_params & params) {
 }
 }

-auto cparams = llama_context_params_from_gpt_params(params);
+auto cparams = common_context_params_from_gpt_params(params);

 llama_context * lctx = llama_new_context_with_model(model, cparams);
 if (lctx == NULL) {
@@ -900,7 +900,7 @@ struct llama_init_result llama_init_from_gpt_params(gpt_params & params) {

 // load and optionally apply lora adapters
 for (auto & la : params.lora_adapters) {
-llama_lora_adapter_container loaded_la;
+common_lora_adapter_container loaded_la;
 loaded_la.path = la.path;
 loaded_la.scale = la.scale;
 loaded_la.adapter = llama_lora_adapter_init(model, la.path.c_str());
@@ -913,7 +913,7 @@ struct llama_init_result llama_init_from_gpt_params(gpt_params & params) {
 iparams.lora_adapters.push_back(loaded_la); // copy to list of loaded adapters
 }
 if (!params.lora_init_without_apply) {
-llama_lora_adapters_apply(lctx, iparams.lora_adapters);
+common_lora_adapters_apply(lctx, iparams.lora_adapters);
 }

 if (params.sparams.ignore_eos && llama_token_eos(model) == LLAMA_TOKEN_NULL) {
@@ -961,7 +961,7 @@ struct llama_init_result llama_init_from_gpt_params(gpt_params & params) {
 return iparams;
 }

-void llama_lora_adapters_apply(struct llama_context * ctx, std::vector<llama_lora_adapter_container> & lora_adapters) {
+void common_lora_adapters_apply(struct llama_context * ctx, std::vector<common_lora_adapter_container> & lora_adapters) {
 llama_lora_adapter_clear(ctx);
 for (auto & la : lora_adapters) {
 if (la.scale != 0.0f) {
@@ -970,7 +970,7 @@ void llama_lora_adapters_apply(struct llama_context * ctx, std::vector<llama_lor
 }
 }

-struct llama_model_params llama_model_params_from_gpt_params(const gpt_params & params) {
+struct llama_model_params common_model_params_from_gpt_params(const gpt_params & params) {
 auto mparams = llama_model_default_params();

 if (params.n_gpu_layers != -1) {
@@ -1022,7 +1022,7 @@ static ggml_type kv_cache_type_from_str(const std::string & s) {
 throw std::runtime_error("Invalid cache type: " + s);
 }

-struct llama_context_params llama_context_params_from_gpt_params(const gpt_params & params) {
+struct llama_context_params common_context_params_from_gpt_params(const gpt_params & params) {
 auto cparams = llama_context_default_params();

 cparams.n_ctx = params.n_ctx;
@@ -1430,7 +1430,7 @@ struct llama_model * llama_load_model_from_hf(

 #else

-struct llama_model * llama_load_model_from_url(
+struct llama_model * common_load_model_from_url(
 const char * /*model_url*/,
 const char * /*path_model*/,
 const char * /*hf_token*/,
@@ -1439,7 +1439,7 @@ struct llama_model * llama_load_model_from_url(
 return nullptr;
 }

-struct llama_model * llama_load_model_from_hf(
+struct llama_model * common_load_model_from_hf(
 const char * /*repo*/,
 const char * /*model*/,
 const char * /*path_model*/,
@@ -1455,11 +1455,11 @@ struct llama_model * llama_load_model_from_hf(
 // Batch utils
 //

-void llama_batch_clear(struct llama_batch & batch) {
+void common_batch_clear(struct llama_batch & batch) {
 batch.n_tokens = 0;
 }

-void llama_batch_add(
+void common_batch_add(
 struct llama_batch & batch,
 llama_token id,
 llama_pos pos,
@@ -1482,15 +1482,15 @@ void llama_batch_add(
 // Vocab utils
 //

-std::vector<llama_token> llama_tokenize(
+std::vector<llama_token> common_tokenize(
 const struct llama_context * ctx,
 const std::string & text,
 bool add_special,
 bool parse_special) {
-return llama_tokenize(llama_get_model(ctx), text, add_special, parse_special);
+return common_tokenize(llama_get_model(ctx), text, add_special, parse_special);
 }

-std::vector<llama_token> llama_tokenize(
+std::vector<llama_token> common_tokenize(
 const struct llama_model * model,
 const std::string & text,
 bool add_special,
@@ -1509,7 +1509,7 @@ std::vector<llama_token> llama_tokenize(
 return result;
 }

-std::string llama_token_to_piece(const struct llama_context * ctx, llama_token token, bool special) {
+std::string common_token_to_piece(const struct llama_context * ctx, llama_token token, bool special) {
 std::string piece;
 piece.resize(piece.capacity()); // using string internal cache, 15 bytes + '\n'
 const int n_chars = llama_token_to_piece(llama_get_model(ctx), token, &piece[0], piece.size(), 0, special);
@@ -1525,7 +1525,7 @@ std::string llama_token_to_piece(const struct llama_context * ctx, llama_token t
 return piece;
 }

-std::string llama_detokenize(llama_context * ctx, const std::vector<llama_token> & tokens, bool special) {
+std::string common_detokenize(llama_context * ctx, const std::vector<llama_token> & tokens, bool special) {
 std::string text;
 text.resize(std::max(text.capacity(), tokens.size()));
 int32_t n_chars = llama_detokenize(llama_get_model(ctx), tokens.data(), (int32_t)tokens.size(), &text[0], (int32_t)text.size(), false, special);
@@ -1551,9 +1551,9 @@ bool llama_chat_verify_template(const std::string & tmpl) {
 return res >= 0;
 }

-std::string llama_chat_apply_template(const struct llama_model * model,
+std::string common_chat_apply_template(const struct llama_model * model,
 const std::string & tmpl,
-const std::vector<llama_chat_msg> & msgs,
+const std::vector<common_chat_msg> & msgs,
 bool add_ass) {
 int alloc_size = 0;
 bool fallback = false; // indicate if we must fallback to default chatml
@@ -1595,42 +1595,42 @@ std::string llama_chat_apply_template(const struct llama_model * model,
 return formatted_chat;
 }

-std::string llama_chat_format_single(const struct llama_model * model,
+std::string common_chat_format_single(const struct llama_model * model,
 const std::string & tmpl,
-const std::vector<llama_chat_msg> & past_msg,
-const llama_chat_msg & new_msg,
+const std::vector<common_chat_msg> & past_msg,
+const common_chat_msg & new_msg,
 bool add_ass) {
 std::ostringstream ss;
-auto fmt_past_msg = past_msg.empty() ? "" : llama_chat_apply_template(model, tmpl, past_msg, false);
-std::vector<llama_chat_msg> chat_new(past_msg);
+auto fmt_past_msg = past_msg.empty() ? "" : common_chat_apply_template(model, tmpl, past_msg, false);
+std::vector<common_chat_msg> chat_new(past_msg);
 // if the past_msg ends with a newline, we must preserve it in the formatted version
 if (add_ass && !fmt_past_msg.empty() && fmt_past_msg.back() == '\n') {
 ss << "\n";
 };
 // format chat with new_msg
 chat_new.push_back(new_msg);
-auto fmt_new_msg = llama_chat_apply_template(model, tmpl, chat_new, add_ass);
+auto fmt_new_msg = common_chat_apply_template(model, tmpl, chat_new, add_ass);
 // get the diff part
 ss << fmt_new_msg.substr(fmt_past_msg.size(), fmt_new_msg.size() - fmt_past_msg.size());
 return ss.str();
 }

-std::string llama_chat_format_example(const struct llama_model * model,
+std::string common_chat_format_example(const struct llama_model * model,
 const std::string & tmpl) {
-std::vector<llama_chat_msg> msgs = {
+std::vector<common_chat_msg> msgs = {
 {"system", "You are a helpful assistant"},
 {"user", "Hello"},
 {"assistant", "Hi there"},
 {"user", "How are you?"},
 };
-return llama_chat_apply_template(model, tmpl, msgs, true);
+return common_chat_apply_template(model, tmpl, msgs, true);
 }

 //
 // KV cache utils
 //

-void llama_kv_cache_dump_view(const llama_kv_cache_view & view, int row_size) {
+void common_kv_cache_dump_view(const llama_kv_cache_view & view, int row_size) {
 static const char slot_chars[] = ".123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz+";

 printf("=== Dumping KV cache. total cells %d, max sequences per cell %d, populated cells %d, total tokens in cache %d, largest empty slot=%d @ %d",
@@ -1653,7 +1653,7 @@ void llama_kv_cache_dump_view(const llama_kv_cache_view & view, int row_size) {
 printf("\n=== Done dumping\n");
 }

-void llama_kv_cache_dump_view_seqs(const llama_kv_cache_view & view, int row_size) {
+void common_kv_cache_dump_view_seqs(const llama_kv_cache_view & view, int row_size) {
 static const char slot_chars[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";

 printf("=== Dumping KV cache. total cells %d, max sequences per cell %d, populated cells %d, total tokens in cache %d, largest empty slot=%d @ %d\n",
@@ -1705,7 +1705,7 @@ void llama_kv_cache_dump_view_seqs(const llama_kv_cache_view & view, int row_siz
 // Embedding utils
 //

-void llama_embd_normalize(const float * inp, float * out, int n, int embd_norm) {
+void common_embd_normalize(const float * inp, float * out, int n, int embd_norm) {
 double sum = 0.0;

 switch (embd_norm) {
@@ -1739,7 +1739,7 @@ void llama_embd_normalize(const float * inp, float * out, int n, int embd_norm)
 }
 }

-float llama_embd_similarity_cos(const float * embd1, const float * embd2, int n){
+float common_embd_similarity_cos(const float * embd1, const float * embd2, int n){
 double sum = 0.0;
 double sum1 = 0.0;
 double sum2 = 0.0;
@@ -1765,8 +1765,8 @@ float llama_embd_similarity_cos(const float * embd1, const float * embd2, int n)
 // Control vector utils
 //

-static llama_control_vector_data llama_control_vector_load_one(const llama_control_vector_load_info & load_info) {
-llama_control_vector_data result = { -1, {} };
+static common_control_vector_data llama_control_vector_load_one(const common_control_vector_load_info & load_info) {
+common_control_vector_data result = { -1, {} };

 ggml_context * ctx = nullptr;
 struct gguf_init_params meta_gguf_params = {
@@ -1850,8 +1850,8 @@ static llama_control_vector_data llama_control_vector_load_one(const llama_contr
 return result;
 }

-llama_control_vector_data llama_control_vector_load(const std::vector<llama_control_vector_load_info> & load_infos) {
-llama_control_vector_data result = { -1, {} };
+common_control_vector_data llama_control_vector_load(const std::vector<common_control_vector_load_info> & load_infos) {
+common_control_vector_data result = { -1, {} };

 for (const auto & info : load_infos) {
 auto cur = llama_control_vector_load_one(info);
@@ -24,12 +24,12 @@

 #define DEFAULT_MODEL_PATH "models/7B/ggml-model-f16.gguf"

-struct llama_lora_adapter_info {
+struct common_lora_adapter_info {
 std::string path;
 float scale;
 };

-struct llama_lora_adapter_container : llama_lora_adapter_info {
+struct common_lora_adapter_container : common_lora_adapter_info {
 struct llama_lora_adapter * adapter;
 };

@@ -39,7 +39,7 @@ extern char const * LLAMA_COMMIT;
 extern char const * LLAMA_COMPILER;
 extern char const * LLAMA_BUILD_TARGET;

-struct llama_control_vector_load_info;
+struct common_control_vector_load_info;

 //
 // CPU utils
@@ -208,9 +208,9 @@ struct gpt_params {
 std::vector<llama_model_kv_override> kv_overrides;

 bool lora_init_without_apply = false; // only load lora to memory, but do not apply it to ctx (user can manually apply lora later using llama_lora_adapter_apply)
-std::vector<llama_lora_adapter_info> lora_adapters; // lora adapter path with user defined scale
+std::vector<common_lora_adapter_info> lora_adapters; // lora adapter path with user defined scale

-std::vector<llama_control_vector_load_info> control_vectors; // control vector with user defined scale
+std::vector<common_control_vector_load_info> control_vectors; // control vector with user defined scale

 int32_t verbosity = 0;
 int32_t control_vector_layer_start = -1; // layer range for control vector
@@ -404,29 +404,29 @@ std::string fs_get_cache_file(const std::string & filename);
 // Model utils
 //

-struct llama_init_result {
+struct common_init_result {
 struct llama_model * model = nullptr;
 struct llama_context * context = nullptr;
-std::vector<llama_lora_adapter_container> lora_adapters;
+std::vector<common_lora_adapter_container> lora_adapters;
 };

-struct llama_init_result llama_init_from_gpt_params(gpt_params & params);
+struct common_init_result llama_init_from_gpt_params(gpt_params & params);

-struct llama_model_params llama_model_params_from_gpt_params (const gpt_params & params);
-struct llama_context_params llama_context_params_from_gpt_params (const gpt_params & params);
+struct llama_model_params common_model_params_from_gpt_params (const gpt_params & params);
+struct llama_context_params common_context_params_from_gpt_params (const gpt_params & params);
 struct ggml_threadpool_params ggml_threadpool_params_from_cpu_params(const cpu_params & params);

-struct llama_model * llama_load_model_from_url(const char * model_url, const char * path_model, const char * hf_token, const struct llama_model_params & params);
-struct llama_model * llama_load_model_from_hf(const char * repo, const char * file, const char * path_model, const char * hf_token, const struct llama_model_params & params);
+struct llama_model * common_load_model_from_url(const char * model_url, const char * path_model, const char * hf_token, const struct llama_model_params & params);
+struct llama_model * common_load_model_from_hf(const char * repo, const char * file, const char * path_model, const char * hf_token, const struct llama_model_params & params);

 // clear LoRA adapters from context, then apply new list of adapters
-void llama_lora_adapters_apply(struct llama_context * ctx, std::vector<llama_lora_adapter_container> & lora_adapters);
+void common_lora_adapters_apply(struct llama_context * ctx, std::vector<common_lora_adapter_container> & lora_adapters);

 // Batch utils

-void llama_batch_clear(struct llama_batch & batch);
+void common_batch_clear(struct llama_batch & batch);

-void llama_batch_add(
+void common_batch_add(
 struct llama_batch & batch,
 llama_token id,
 llama_pos pos,
@@ -439,13 +439,13 @@ void llama_batch_add(

 // tokenizes a string into a vector of tokens
 // should work similar to Python's `tokenizer.encode`
-std::vector<llama_token> llama_tokenize(
+std::vector<llama_token> common_tokenize(
 const struct llama_context * ctx,
 const std::string & text,
 bool add_special,
 bool parse_special = false);

-std::vector<llama_token> llama_tokenize(
+std::vector<llama_token> common_tokenize(
 const struct llama_model * model,
 const std::string & text,
 bool add_special,
@@ -453,7 +453,7 @@ std::vector<llama_token> llama_tokenize(

 // tokenizes a token into a piece, optionally renders special/control tokens
 // should work similar to Python's `tokenizer.id_to_piece`
-std::string llama_token_to_piece(
+std::string common_token_to_piece(
 const struct llama_context * ctx,
 llama_token token,
 bool special = true);
@@ -461,7 +461,7 @@ std::string llama_token_to_piece(
 // detokenizes a vector of tokens into a string
 // should work similar to Python's `tokenizer.decode`
 // optionally renders special/control tokens
-std::string llama_detokenize(
+std::string common_detokenize(
 llama_context * ctx,
 const std::vector<llama_token> & tokens,
 bool special = true);
@@ -471,7 +471,7 @@ std::string llama_detokenize(
 //

 // same with llama_chat_message, but uses std::string
-struct llama_chat_msg {
+struct common_chat_msg {
 std::string role;
 std::string content;
 };
@@ -482,20 +482,20 @@ bool llama_chat_verify_template(const std::string & tmpl);
 // CPP wrapper for llama_chat_apply_template
 // If the built-in template is not supported, we default to chatml
 // If the custom "tmpl" is not supported, we throw an error
-std::string llama_chat_apply_template(const struct llama_model * model,
+std::string common_chat_apply_template(const struct llama_model * model,
 const std::string & tmpl,
-const std::vector<llama_chat_msg> & chat,
+const std::vector<common_chat_msg> & chat,
 bool add_ass);

 // Format single message, while taking into account the position of that message in chat history
-std::string llama_chat_format_single(const struct llama_model * model,
+std::string common_chat_format_single(const struct llama_model * model,
 const std::string & tmpl,
-const std::vector<llama_chat_msg> & past_msg,
-const llama_chat_msg & new_msg,
+const std::vector<common_chat_msg> & past_msg,
+const common_chat_msg & new_msg,
 bool add_ass);

 // Returns an example of formatted chat
-std::string llama_chat_format_example(const struct llama_model * model,
+std::string common_chat_format_example(const struct llama_model * model,
 const std::string & tmpl);

 //
@@ -503,31 +503,31 @@ std::string llama_chat_format_example(const struct llama_model * model,
 //

 // Dump the KV cache view with the number of sequences per cell.
-void llama_kv_cache_dump_view(const llama_kv_cache_view & view, int row_size = 80);
+void common_kv_cache_dump_view(const llama_kv_cache_view & view, int row_size = 80);

 // Dump the KV cache view showing individual sequences in each cell (long output).
-void llama_kv_cache_dump_view_seqs(const llama_kv_cache_view & view, int row_size = 40);
+void common_kv_cache_dump_view_seqs(const llama_kv_cache_view & view, int row_size = 40);

 //
 // Embedding utils
 //

-void llama_embd_normalize(const float * inp, float * out, int n, int embd_norm = 2);
+void common_embd_normalize(const float * inp, float * out, int n, int embd_norm = 2);

-float llama_embd_similarity_cos(const float * embd1, const float * embd2, int n);
+float common_embd_similarity_cos(const float * embd1, const float * embd2, int n);

 //
 // Control vector utils
 //

-struct llama_control_vector_data {
+struct common_control_vector_data {
 int n_embd;

 // stores data for layers [1, n_layer] where n_layer = data.size() / n_embd
 std::vector<float> data;
 };

-struct llama_control_vector_load_info {
+struct common_control_vector_load_info {
 float strength;

 std::string fname;
@@ -535,7 +535,7 @@ struct llama_control_vector_load_info {

 // Load control vectors, scale each by strength, and add them together.
 // On error, returns {-1, empty}
-llama_control_vector_data llama_control_vector_load(const std::vector<llama_control_vector_load_info> & load_infos);
+common_control_vector_data llama_control_vector_load(const std::vector<common_control_vector_load_info> & load_infos);

 //
 // Split utils
@@ -358,7 +358,7 @@ std::string gpt_sampler_prev_str(gpt_sampler * gsmpl, llama_context * ctx_main,

 GGML_ASSERT(id != LLAMA_TOKEN_NULL && "null token in the sampling history - should not happen");

-result += llama_token_to_piece(ctx_main, id);
+result += common_token_to_piece(ctx_main, id);
 }

 return result;
@@ -36,7 +36,7 @@ int main(int argc, char ** argv) {

 // initialize the model

-llama_model_params model_params = llama_model_params_from_gpt_params(params);
+llama_model_params model_params = common_model_params_from_gpt_params(params);

 llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params);

@@ -45,7 +45,7 @@ int main(int argc, char ** argv) {
 return 1;
 }

-llama_context_params ctx_params = llama_context_params_from_gpt_params(params);
+llama_context_params ctx_params = common_context_params_from_gpt_params(params);

 // ensure enough sequences are available
 ctx_params.n_seq_max = n_pl.empty() ? 1 : *std::max_element(n_pl.begin(), n_pl.end());
@@ -92,7 +92,7 @@ int main(int argc, char ** argv) {
 // warm up
 {
 for (int i = 0; i < 16; ++i) {
-llama_batch_add(batch, 0, i, { 0 }, false);
+common_batch_add(batch, 0, i, { 0 }, false);
 }

 if (!decode_helper(ctx, batch, ctx_params.n_batch)) {
@@ -122,11 +122,11 @@ int main(int argc, char ** argv) {
 continue;
 }

-llama_batch_clear(batch);
+common_batch_clear(batch);

 for (int i = 0; i < pp; ++i) {
 for (int j = 0; j < (is_pp_shared ? 1 : pl); ++j) {
-llama_batch_add(batch, 0, i, { j }, false);
+common_batch_add(batch, 0, i, { j }, false);
 }
 }
 batch.logits[batch.n_tokens - 1] = true;
@@ -151,10 +151,10 @@ int main(int argc, char ** argv) {
 const auto t_tg_start = ggml_time_us();

 for (int i = 0; i < tg; ++i) {
-llama_batch_clear(batch);
+common_batch_clear(batch);

 for (int j = 0; j < pl; ++j) {
-llama_batch_add(batch, 0, pp + i, { j }, true);
+common_batch_add(batch, 0, pp + i, { j }, true);
 }

 if (!decode_helper(ctx, batch, ctx_params.n_batch)) {
@@ -39,7 +39,7 @@ int main(int argc, char ** argv) {

 // initialize the model

-llama_model_params model_params = llama_model_params_from_gpt_params(params);
+llama_model_params model_params = common_model_params_from_gpt_params(params);

 llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params);

@@ -51,13 +51,13 @@ int main(int argc, char ** argv) {
 // tokenize the prompt

 std::vector<llama_token> tokens_list;
-tokens_list = ::llama_tokenize(model, params.prompt, true);
+tokens_list = ::common_tokenize(model, params.prompt, true);

 const int n_kv_req = tokens_list.size() + (n_predict - tokens_list.size())*n_parallel;

 // initialize the context

-llama_context_params ctx_params = llama_context_params_from_gpt_params(params);
+llama_context_params ctx_params = common_context_params_from_gpt_params(params);

 ctx_params.n_ctx = n_kv_req;
 ctx_params.n_batch = std::max(n_predict, n_parallel);
@@ -94,7 +94,7 @@ int main(int argc, char ** argv) {
 LOG("\n");

 for (auto id : tokens_list) {
-LOG("%s", llama_token_to_piece(ctx, id).c_str());
+LOG("%s", common_token_to_piece(ctx, id).c_str());
 }

 // create a llama_batch
@@ -108,7 +108,7 @@ int main(int argc, char ** argv) {

 // evaluate the initial prompt
 for (size_t i = 0; i < tokens_list.size(); ++i) {
-llama_batch_add(batch, tokens_list[i], i, seq_ids, false);
+common_batch_add(batch, tokens_list[i], i, seq_ids, false);
 }
 GGML_ASSERT(batch.n_tokens == (int) tokens_list.size());

@@ -123,8 +123,8 @@ int main(int argc, char ** argv) {
 decoder_start_token_id = llama_token_bos(model);
 }

-llama_batch_clear(batch);
-llama_batch_add(batch, decoder_start_token_id, 0, seq_ids, false);
+common_batch_clear(batch);
+common_batch_add(batch, decoder_start_token_id, 0, seq_ids, false);
 }

 // llama_decode will output logits only for the last token of the prompt
@@ -161,7 +161,7 @@ int main(int argc, char ** argv) {

 while (n_cur <= n_predict) {
 // prepare the next batch
-llama_batch_clear(batch);
+common_batch_clear(batch);

 // sample the next token for each parallel sequence / stream
 for (int32_t i = 0; i < n_parallel; ++i) {
@@ -185,15 +185,15 @@ int main(int argc, char ** argv) {

 // if there is only one stream, we print immediately to stdout
 if (n_parallel == 1) {
-LOG("%s", llama_token_to_piece(ctx, new_token_id).c_str());
+LOG("%s", common_token_to_piece(ctx, new_token_id).c_str());
 }

-streams[i] += llama_token_to_piece(ctx, new_token_id);
+streams[i] += common_token_to_piece(ctx, new_token_id);

 i_batch[i] = batch.n_tokens;

 // push this new token for next evaluation
-llama_batch_add(batch, new_token_id, n_cur, { i }, true);
+common_batch_add(batch, new_token_id, n_cur, { i }, true);

 n_decode += 1;
 }
@@ -31,7 +31,7 @@ template <class Iter>
 static std::string tokens_to_str(llama_context * ctx, Iter begin, Iter end) {
 std::string ret;
 for (; begin != end; ++begin) {
-ret += llama_token_to_piece(ctx, *begin);
+ret += common_token_to_piece(ctx, *begin);
 }

 return ret;
@@ -272,8 +272,8 @@ struct tokenized_prompt {

 tokenized_prompt(llama_context * ctx, std::string pos, std::string neg) {
 const bool add_bos = llama_add_bos_token(llama_get_model(ctx));
-tokens_pos = ::llama_tokenize(ctx, pos, add_bos, true);
-tokens_neg = ::llama_tokenize(ctx, neg, add_bos, true);
+tokens_pos = ::common_tokenize(ctx, pos, add_bos, true);
+tokens_neg = ::common_tokenize(ctx, neg, add_bos, true);
 max_seq_len = std::max(tokens_pos.size(), tokens_neg.size());
 padding_seq(ctx, tokens_pos, max_seq_len);
 padding_seq(ctx, tokens_neg, max_seq_len);
@@ -281,7 +281,7 @@ struct tokenized_prompt {

 void padding_seq(llama_context * ctx, std::vector<llama_token> & tokens, size_t len) {
 // TODO: customize padding token
-std::vector<llama_token> pad_tokens = ::llama_tokenize(ctx, " ", false);
+std::vector<llama_token> pad_tokens = ::common_tokenize(ctx, " ", false);
 llama_token pad_tok = pad_tokens.back();
 while (tokens.size() < len) {
 tokens.push_back(pad_tok);
@@ -413,7 +413,7 @@ int main(int argc, char ** argv) {
 llama_numa_init(params.numa);

 // load the model to get hparams
-llama_init_result llama_init = llama_init_from_gpt_params(params);
+common_init_result llama_init = llama_init_from_gpt_params(params);

 llama_model * model = llama_init.model;
 llama_context * ctx = llama_init.context;
@@ -28,7 +28,7 @@ static std::vector<std::string> split_lines(const std::string & s, const std::st
 static void batch_add_seq(llama_batch & batch, const std::vector<int32_t> & tokens, llama_seq_id seq_id) {
 size_t n_tokens = tokens.size();
 for (size_t i = 0; i < n_tokens; i++) {
-llama_batch_add(batch, tokens[i], i, { seq_id }, true);
+common_batch_add(batch, tokens[i], i, { seq_id }, true);
 }
 }

@@ -74,7 +74,7 @@ static void batch_decode(llama_context * ctx, llama_batch & batch, float * outpu
 }

 float * out = output + embd_pos * n_embd;
-llama_embd_normalize(embd, out, n_embd, embd_norm);
+common_embd_normalize(embd, out, n_embd, embd_norm);
 }
 }

@@ -95,7 +95,7 @@ int main(int argc, char ** argv) {
 llama_numa_init(params.numa);

 // load the model
-llama_init_result llama_init = llama_init_from_gpt_params(params);
+common_init_result llama_init = llama_init_from_gpt_params(params);

 llama_model * model = llama_init.model;
 llama_context * ctx = llama_init.context;
@@ -135,7 +135,7 @@ int main(int argc, char ** argv) {
 // tokenize the prompts and trim
 std::vector<std::vector<int32_t>> inputs;
 for (const auto & prompt : prompts) {
-auto inp = ::llama_tokenize(ctx, prompt, true, true);
+auto inp = ::common_tokenize(ctx, prompt, true, true);
 if (inp.size() > n_batch) {
 LOG_ERR("%s: number of tokens in input line (%lld) exceeds batch size (%lld), increase batch size and re-run\n",
 __func__, (long long int) inp.size(), (long long int) n_batch);
@@ -159,7 +159,7 @@ int main(int argc, char ** argv) {
 LOG_INF("%s: prompt %d: '%s'\n", __func__, i, prompts[i].c_str());
 LOG_INF("%s: number of tokens in prompt = %zu\n", __func__, inputs[i].size());
 for (int j = 0; j < (int) inputs[i].size(); j++) {
-LOG("%6d -> '%s'\n", inputs[i][j], llama_token_to_piece(ctx, inputs[i][j]).c_str());
+LOG("%6d -> '%s'\n", inputs[i][j], common_token_to_piece(ctx, inputs[i][j]).c_str());
 }
 LOG("\n\n");
 }
@@ -199,7 +199,7 @@ int main(int argc, char ** argv) {
 batch_decode(ctx, batch, out, s, n_embd, params.embd_normalize);
 e += pooling_type == LLAMA_POOLING_TYPE_NONE ? batch.n_tokens : s;
 s = 0;
-llama_batch_clear(batch);
+common_batch_clear(batch);
 }

 // add to batch
@@ -263,7 +263,7 @@ int main(int argc, char ** argv) {
 LOG("\n");
 for (int i = 0; i < n_prompts; i++) {
 for (int j = 0; j < n_prompts; j++) {
-float sim = llama_embd_similarity_cos(emb + i * n_embd, emb + j * n_embd, n_embd);
+float sim = common_embd_similarity_cos(emb + i * n_embd, emb + j * n_embd, n_embd);
 LOG("%6.2f ", sim);
 }
 LOG("%1.10s", prompts[i].c_str());
@@ -296,7 +296,7 @@ int main(int argc, char ** argv) {
 for (int i = 0;;) { // at least two iteration (n_embd_count > 1)
 LOG(" [");
 for (int j = 0;;) { // at least two iteration (n_embd_count > 1)
-float sim = llama_embd_similarity_cos(emb + i * n_embd, emb + j * n_embd, n_embd);
+float sim = common_embd_similarity_cos(emb + i * n_embd, emb + j * n_embd, n_embd);
 LOG("%6.2f", sim);
 j++;
 if (j < n_embd_count) LOG(", "); else break;
@@ -129,7 +129,7 @@ static bool ggml_debug(struct ggml_tensor * t, bool ask, void * user_data) {
 static bool run(llama_context * ctx, const gpt_params & params) {
 const bool add_bos = llama_add_bos_token(llama_get_model(ctx));

-std::vector<llama_token> tokens = ::llama_tokenize(ctx, params.prompt, add_bos);
+std::vector<llama_token> tokens = ::common_tokenize(ctx, params.prompt, add_bos);

 if (llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size(), 0, 0))) {
 LOG_ERR("%s : failed to eval\n", __func__);
@@ -160,7 +160,7 @@ int main(int argc, char ** argv) {
 params.warmup = false;

 // init
-llama_init_result llama_init = llama_init_from_gpt_params(params);
+common_init_result llama_init = llama_init_from_gpt_params(params);

 llama_model * model = llama_init.model;
 llama_context * ctx = llama_init.context;
@@ -128,7 +128,7 @@ struct lora_merge_ctx {

 lora_merge_ctx(
 std::string & base_fname,
-std::vector<llama_lora_adapter_info> & lora_files,
+std::vector<common_lora_adapter_info> & lora_files,
 std::string & outfile,
 int n_threads) : base_model(base_fname, 0), n_threads(n_threads), fout(outfile, std::ios::binary) {
 fout.exceptions(std::ofstream::failbit); // fail fast on write errors
@@ -15,11 +15,11 @@ static std::vector<std::vector<float>> encode(llama_context * ctx, const std::ve
 llama_batch batch = llama_batch_init(llama_n_batch(ctx), 0, 1);

 for (uint64_t i = 0; i < sentences.size(); i++) {
-llama_batch_clear(batch);
+common_batch_clear(batch);

 const std::string input_string = instruction + sentences[i];

-std::vector<llama_token> inputs = llama_tokenize(model, input_string, true, false);
+std::vector<llama_token> inputs = common_tokenize(model, input_string, true, false);

 const int32_t n_toks = inputs.size();

@@ -28,7 +28,7 @@ static std::vector<std::vector<float>> encode(llama_context * ctx, const std::ve
 // inputs.push_back(llama_token_eos(model));

 // we want to ignore instruction tokens for mean pooling
-const int32_t n_inst = llama_tokenize(model, instruction, true, false).size();
+const int32_t n_inst = common_tokenize(model, instruction, true, false).size();

 #ifdef GRIT_DEBUG
 // debug tokens - should be matching as referenced in the GritLM sample
@@ -40,7 +40,7 @@ static std::vector<std::vector<float>> encode(llama_context * ctx, const std::ve

 // add input to batch (this increments n_tokens)
 for (int32_t j = 0; j < n_toks; j++) {
-llama_batch_add(batch, inputs[j], j, { 0 }, j >= n_inst);
+common_batch_add(batch, inputs[j], j, { 0 }, j >= n_inst);
 }

 // clear previous kv_cache values (irrelevant for embeddings)
@@ -75,7 +75,7 @@ static std::vector<std::vector<float>> encode(llama_context * ctx, const std::ve
 }

 std::vector<float> emb_norm(emb_unorm.size());
-llama_embd_normalize(emb_unorm.data(), emb_norm.data(), n_embd);
+common_embd_normalize(emb_unorm.data(), emb_norm.data(), n_embd);
 result.push_back(emb_norm);

 #ifdef GRIT_DEBUG
@@ -105,16 +105,16 @@ static std::string generate(llama_context * ctx, llama_sampler * smpl, const std

 llama_batch bat = llama_batch_init(llama_n_batch(ctx), 0, 1);

-std::vector<llama_token> inputs = llama_tokenize(model, prompt, false, true);
+std::vector<llama_token> inputs = common_tokenize(model, prompt, false, true);
 int32_t i_current_token = 0;

 while (true) {
-llama_batch_clear(bat);
+common_batch_clear(bat);
 {
 const int32_t n_inputs = inputs.size();

 for (int32_t i = 0; i < n_inputs; i++) {
-llama_batch_add(bat, inputs[i], i_current_token++, { 0 }, i == n_inputs - 1);
+common_batch_add(bat, inputs[i], i_current_token++, { 0 }, i == n_inputs - 1);
 }
 }
 inputs.clear();
@@ -127,7 +127,7 @@ static std::string generate(llama_context * ctx, llama_sampler * smpl, const std
 break;
 }

-std::string piece = llama_token_to_piece(ctx, token);
+std::string piece = common_token_to_piece(ctx, token);
 if (stream) {
 std::printf("%s", piece.c_str());
 std::fflush(stdout);
@@ -160,8 +160,8 @@ int main(int argc, char * argv[]) {

 gpt_init();

-llama_model_params mparams = llama_model_params_from_gpt_params(params);
-llama_context_params cparams = llama_context_params_from_gpt_params(params);
+llama_model_params mparams = common_model_params_from_gpt_params(params);
+llama_context_params cparams = common_context_params_from_gpt_params(params);

 llama_backend_init();

@@ -199,10 +199,10 @@ int main(int argc, char * argv[]) {

 const int n_embd = llama_n_embd(model);

-const float cosine_sim_q0_d0 = llama_embd_similarity_cos(q_rep[0].data(), d_rep[0].data(), n_embd);
-const float cosine_sim_q0_d1 = llama_embd_similarity_cos(q_rep[0].data(), d_rep[1].data(), n_embd);
-const float cosine_sim_q1_d0 = llama_embd_similarity_cos(q_rep[1].data(), d_rep[0].data(), n_embd);
-const float cosine_sim_q1_d1 = llama_embd_similarity_cos(q_rep[1].data(), d_rep[1].data(), n_embd);
+const float cosine_sim_q0_d0 = common_embd_similarity_cos(q_rep[0].data(), d_rep[0].data(), n_embd);
+const float cosine_sim_q0_d1 = common_embd_similarity_cos(q_rep[0].data(), d_rep[1].data(), n_embd);
+const float cosine_sim_q1_d0 = common_embd_similarity_cos(q_rep[1].data(), d_rep[0].data(), n_embd);
+const float cosine_sim_q1_d1 = common_embd_similarity_cos(q_rep[1].data(), d_rep[1].data(), n_embd);

 std::printf("Cosine similarity between \"%.50s\" and \"%.50s\" is: %.3f\n", queries[0].c_str(), documents[0].c_str(), cosine_sim_q0_d0);
 std::printf("Cosine similarity between \"%.50s\" and \"%.50s\" is: %.3f\n", queries[0].c_str(), documents[1].c_str(), cosine_sim_q0_d1);
@@ -436,7 +436,7 @@ static bool compute_imatrix(llama_context * ctx, const gpt_params & params) {
 auto tim1 = std::chrono::high_resolution_clock::now();
 LOG_INF("%s: tokenizing the input ..\n", __func__);

-std::vector<llama_token> tokens = ::llama_tokenize(ctx, params.prompt, true);
+std::vector<llama_token> tokens = ::common_tokenize(ctx, params.prompt, true);

 auto tim2 = std::chrono::high_resolution_clock::now();
 LOG_INF("%s: tokenization took %g ms\n",__func__,1e-3*std::chrono::duration_cast<std::chrono::microseconds>(tim2-tim1).count());
@@ -607,7 +607,7 @@ int main(int argc, char ** argv) {
 params.warmup = false;

 // init
-llama_init_result llama_init = llama_init_from_gpt_params(params);
+common_init_result llama_init = llama_init_from_gpt_params(params);

 llama_model * model = llama_init.model;
 llama_context * ctx = llama_init.context;
@ -174,7 +174,7 @@ int main(int argc, char ** argv) {
|
||||||
|
|
||||||
// load the model and apply lora adapter, if any
|
// load the model and apply lora adapter, if any
|
||||||
LOG_INF("%s: load the model and apply lora adapter, if any\n", __func__);
|
LOG_INF("%s: load the model and apply lora adapter, if any\n", __func__);
|
||||||
llama_init_result llama_init = llama_init_from_gpt_params(params);
|
common_init_result llama_init = llama_init_from_gpt_params(params);
|
||||||
|
|
||||||
model = llama_init.model;
|
model = llama_init.model;
|
||||||
ctx = llama_init.context;
|
ctx = llama_init.context;
|
||||||
|
@@ -202,8 +202,8 @@ int main(int argc, char ** argv) {

std::vector<llama_token> embd_inp;
std::vector<llama_token> embd_end;
-std::vector<llama_token> inp_pfx = ::llama_tokenize(ctx, params.input_prefix, false);
-std::vector<llama_token> inp_sfx = ::llama_tokenize(ctx, params.input_suffix, false);
+std::vector<llama_token> inp_pfx = ::common_tokenize(ctx, params.input_prefix, false);
+std::vector<llama_token> inp_sfx = ::common_tokenize(ctx, params.input_suffix, false);

GGML_ASSERT(llama_token_prefix(model) >= 0);
GGML_ASSERT(llama_token_suffix(model) >= 0);
@@ -257,13 +257,13 @@ int main(int argc, char ** argv) {
LOG_INF("%s: prompt: '%s'\n", __func__, params.prompt.c_str());
LOG_INF("%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
for (int i = 0; i < (int) embd_inp.size(); i++) {
-LOG_INF("%6d -> '%s'\n", embd_inp[i], llama_token_to_piece(ctx, embd_inp[i]).c_str());
+LOG_INF("%6d -> '%s'\n", embd_inp[i], common_token_to_piece(ctx, embd_inp[i]).c_str());
}

if (params.n_keep > 0) {
LOG_INF("%s: static prompt based on n_keep: '", __func__);
for (int i = 0; i < params.n_keep; i++) {
-LOG_CNT("%s", llama_token_to_piece(ctx, embd_inp[i]).c_str());
+LOG_CNT("%s", common_token_to_piece(ctx, embd_inp[i]).c_str());
}
LOG_CNT("'\n");
}
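The verbose-prompt loops above show the renamed tokenizer round trip: `::common_tokenize` turns text into tokens and `common_token_to_piece` turns a single token back into text. A minimal sketch of that round trip, assuming only the argument order visible in the diff (context, text, add-special flag):

// Sketch: tokenize a prompt and print each token next to its text piece.
std::vector<llama_token> toks = ::common_tokenize(ctx, params.prompt, true);
for (int i = 0; i < (int) toks.size(); i++) {
    LOG_INF("%6d -> '%s'\n", toks[i], common_token_to_piece(ctx, toks[i]).c_str());
}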
@@ -446,7 +446,7 @@ int main(int argc, char ** argv) {
// display text
if (input_echo) {
for (auto id : embd) {
-const std::string token_str = llama_token_to_piece(ctx, id);
+const std::string token_str = common_token_to_piece(ctx, id);
LOG("%s", token_str.c_str());

if (embd.size() > 1) {
@@ -468,7 +468,7 @@ int main(int argc, char ** argv) {
if ((gpt_sampler_last(smpl) == llama_token_eot(model) || is_interacting) && params.interactive){
if (is_interacting && !params.interactive_first) {
// print an eot token
-LOG("%s", llama_token_to_piece(ctx, llama_token_eot(model)).c_str());
+LOG("%s", common_token_to_piece(ctx, llama_token_eot(model)).c_str());
}
LOG("\n");
console::set_display(console::user_input);
@@ -505,8 +505,8 @@ int main(int argc, char ** argv) {
}

// tokenize new prefix and suffix
-std::vector<llama_token> inp_pfx = ::llama_tokenize(ctx, params.input_prefix, false);
-std::vector<llama_token> inp_sfx = ::llama_tokenize(ctx, params.input_suffix, false);
+std::vector<llama_token> inp_pfx = ::common_tokenize(ctx, params.input_prefix, false);
+std::vector<llama_token> inp_sfx = ::common_tokenize(ctx, params.input_suffix, false);

inp_pfx.insert(inp_pfx.begin(), llama_token_prefix(model));
inp_sfx.insert(inp_sfx.begin(), llama_token_suffix(model));
@@ -579,7 +579,7 @@ int main(int argc, char ** argv) {

const size_t original_size = embd_inp.size();

-const auto line_inp = ::llama_tokenize(ctx, buffer, false);
+const auto line_inp = ::common_tokenize(ctx, buffer, false);
LOG_DBG("input tokens: %s\n", string_from(ctx, line_inp).c_str());

embd_inp.insert(embd_inp.end(), line_inp.begin(), line_inp.end());
@@ -587,7 +587,7 @@ int main(int argc, char ** argv) {
for (size_t i = original_size; i < embd_inp.size(); ++i) {
const llama_token token = embd_inp[i];
output_tokens.push_back(token);
-output_ss << llama_token_to_piece(ctx, token);
+output_ss << common_token_to_piece(ctx, token);
}

n_remain -= line_inp.size();
@@ -620,7 +620,7 @@ int main(int argc, char ** argv) {
}
}
if (!params.interactive && n_remain <= 0) {
-LOG("%s", llama_token_to_piece(ctx, llama_token_eot(model)).c_str());
+LOG("%s", common_token_to_piece(ctx, llama_token_eot(model)).c_str());
}

LOG("\n");
@@ -37,7 +37,7 @@ static bool eval_id(struct llama_context * ctx_llama, int id, int * n_past) {

static bool eval_string(struct llama_context * ctx_llama, const char* str, int n_batch, int * n_past, bool add_bos){
std::string str2 = str;
-std::vector<llama_token> embd_inp = ::llama_tokenize(ctx_llama, str2, add_bos, true);
+std::vector<llama_token> embd_inp = ::common_tokenize(ctx_llama, str2, add_bos, true);
eval_tokens(ctx_llama, embd_inp, n_batch, n_past);
return true;
}
@@ -51,7 +51,7 @@ static const char * sample(struct gpt_sampler * smpl,
if (llama_token_is_eog(llama_get_model(ctx_llama), id)) {
ret = "</s>";
} else {
-ret = llama_token_to_piece(ctx_llama, id);
+ret = common_token_to_piece(ctx_llama, id);
}
eval_id(ctx_llama, id, n_past);
return ret.c_str();
@@ -159,16 +159,16 @@ static void process_prompt(struct llava_context * ctx_llava, struct llava_image_
user_prompt = prompt.substr(image_pos + std::string("<image>").length());
LOG_INF("system_prompt: %s\n", system_prompt.c_str());
if (params->verbose_prompt) {
-auto tmp = ::llama_tokenize(ctx_llava->ctx_llama, system_prompt, true, true);
+auto tmp = ::common_tokenize(ctx_llava->ctx_llama, system_prompt, true, true);
for (int i = 0; i < (int) tmp.size(); i++) {
-LOG_INF("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
+LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
}
}
LOG_INF("user_prompt: %s\n", user_prompt.c_str());
if (params->verbose_prompt) {
-auto tmp = ::llama_tokenize(ctx_llava->ctx_llama, user_prompt, true, true);
+auto tmp = ::common_tokenize(ctx_llava->ctx_llama, user_prompt, true, true);
for (int i = 0; i < (int) tmp.size(); i++) {
-LOG_INF("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
+LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
}
}
} else {
@@ -176,9 +176,9 @@ static void process_prompt(struct llava_context * ctx_llava, struct llava_image_
system_prompt = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\nUSER:";
user_prompt = prompt + "\nASSISTANT:";
if (params->verbose_prompt) {
-auto tmp = ::llama_tokenize(ctx_llava->ctx_llama, user_prompt, true, true);
+auto tmp = ::common_tokenize(ctx_llava->ctx_llama, user_prompt, true, true);
for (int i = 0; i < (int) tmp.size(); i++) {
-LOG_INF("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
+LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
}
}
}
@@ -219,7 +219,7 @@ static struct llama_model * llava_init(gpt_params * params) {
llama_backend_init();
llama_numa_init(params->numa);

-llama_model_params model_params = llama_model_params_from_gpt_params(*params);
+llama_model_params model_params = common_model_params_from_gpt_params(*params);

llama_model * model = llama_load_model_from_file(params->model.c_str(), model_params);
if (model == NULL) {
@@ -240,7 +240,7 @@ static struct llava_context * llava_init_context(gpt_params * params, llama_mode
auto ctx_clip = clip_model_load(clip_path, /*verbosity=*/ 1);


-llama_context_params ctx_params = llama_context_params_from_gpt_params(*params);
+llama_context_params ctx_params = common_context_params_from_gpt_params(*params);
ctx_params.n_ctx = params->n_ctx < 2048 ? 2048 : params->n_ctx; // we need a longer context size to process image embeddings

llama_context * ctx_llama = llama_new_context_with_model(model, ctx_params);
@@ -29,7 +29,7 @@ static struct llama_model * llava_init(gpt_params * params) {
llama_backend_init();
llama_numa_init(params->numa);

-llama_model_params model_params = llama_model_params_from_gpt_params(*params);
+llama_model_params model_params = common_model_params_from_gpt_params(*params);

llama_model * model = llama_load_model_from_file(params->model.c_str(), model_params);
if (model == NULL) {
@@ -45,7 +45,7 @@ static struct llava_context * llava_init_context(gpt_params * params, llama_mode
prompt = "describe the image in detail.";
}

-llama_context_params ctx_params = llama_context_params_from_gpt_params(*params);
+llama_context_params ctx_params = common_context_params_from_gpt_params(*params);
if (params->n_ctx < 2048) {
// warn user here, "Image processing requires at least 2048 context, setting context to 2048"
LOG_WRN("%s: Image processing requires at least 2048 context, setting context to 2048\n" , __func__);
@@ -114,7 +114,7 @@ static bool eval_id(struct llama_context * ctx_llama, int id, int * n_past) {

static bool eval_string(struct llama_context * ctx_llama, const char* str, int n_batch, int * n_past, bool add_bos){
std::string str2 = str;
-std::vector<llama_token> embd_inp = ::llama_tokenize(ctx_llama, str2, add_bos, true);
+std::vector<llama_token> embd_inp = ::common_tokenize(ctx_llama, str2, add_bos, true);
return eval_tokens(ctx_llama, embd_inp, n_batch, n_past);
}

@@ -171,7 +171,7 @@ static const char * sample(struct gpt_sampler * smpl,
if (llama_token_is_eog(llama_get_model(ctx_llama), id)) {
ret = "</s>";
} else {
-ret = llama_token_to_piece(ctx_llama, id);
+ret = common_token_to_piece(ctx_llama, id);
}
eval_id(ctx_llama, id, n_past);
return ret.c_str();
@@ -56,7 +56,7 @@ int main(int argc, char ** argv) {
llama_numa_init(params.numa);

// load the target model
-llama_init_result llama_init = llama_init_from_gpt_params(params);
+common_init_result llama_init = llama_init_from_gpt_params(params);

llama_model * model = llama_init.model;
llama_context * ctx = llama_init.context;
@@ -65,7 +65,7 @@ int main(int argc, char ** argv) {
std::vector<llama_token> inp;
std::vector<llama_token> all;

-inp = ::llama_tokenize(ctx, params.prompt, true, true);
+inp = ::common_tokenize(ctx, params.prompt, true, true);
all = inp;

const int max_context_size = llama_n_ctx(ctx);
@@ -79,7 +79,7 @@ int main(int argc, char ** argv) {
LOG("\n\n");

for (auto id : inp) {
-LOG("%s", llama_token_to_piece(ctx, id).c_str());
+LOG("%s", common_token_to_piece(ctx, id).c_str());
}

fflush(stderr);
@@ -161,7 +161,7 @@ int main(int argc, char ** argv) {
gpt_sampler_accept(smpl, id, true);

{
-const std::string token_str = llama_token_to_piece(ctx, id);
+const std::string token_str = common_token_to_piece(ctx, id);

LOG("%s", token_str.c_str());
fflush(stdout);
@@ -172,7 +172,7 @@ int main(int argc, char ** argv) {
// debug
if (dump_kv_cache) {
llama_kv_cache_view_update(ctx, &kvc_view);
-llama_kv_cache_dump_view_seqs(kvc_view, 40);
+common_kv_cache_dump_view_seqs(kvc_view, 40);
}

// build the mask from https://lmsys.org/blog/2023-11-21-lookahead-decoding/
@@ -201,10 +201,10 @@ int main(int argc, char ** argv) {
// V V V V V V
// id
{
-llama_batch_clear(batch);
+common_batch_clear(batch);

// current token - first token of the first level
-llama_batch_add(batch, id, n_past, seq_id_all, true);
+common_batch_add(batch, id, n_past, seq_id_all, true);

// verification n-grams - queue this before the lookahead tokens for less KV cache fragmentation
{
@@ -229,7 +229,7 @@ int main(int argc, char ** argv) {
ngrams_cur[g].tokens [j + 1] = t;
ngrams_cur[g].i_batch[j + 1] = batch.n_tokens;

-llama_batch_add(batch, t, n_past + j + 1, { W + 1 + g }, true);
+common_batch_add(batch, t, n_past + j + 1, { W + 1 + g }, true);
}
}
}
@@ -241,13 +241,13 @@ int main(int argc, char ** argv) {
seq_id_look[j] = i + j + 1;
}

-llama_batch_add(batch, tokens_j[0][i], n_past + i, seq_id_look, false);
+common_batch_add(batch, tokens_j[0][i], n_past + i, seq_id_look, false);
}

// fill the rest of the levels
for (int j = 1; j < N - 1; j++) {
for (int i = 0; i < W; i++) {
-llama_batch_add(batch, tokens_j[j][i], n_past + j + i, { i + 1 }, j == N - 2);
+common_batch_add(batch, tokens_j[j][i], n_past + j + i, { i + 1 }, j == N - 2);
}
}
}
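The batch helpers in the lookahead hunks follow the same rename: `common_batch_clear` resets a `llama_batch`, and `common_batch_add` appends (token, position, sequence-id list, want-logits), exactly as the old `llama_batch_*` calls did. A minimal sketch of the post-rename idiom for queuing a prompt into sequence 0; the `llama_batch_init` capacity and the error message are assumptions of this sketch, not taken from the diff:

// Sketch: feed a tokenized prompt through the renamed batch helpers.
llama_batch batch = llama_batch_init(512, 0, 1); // capacity is an assumed example value

common_batch_clear(batch);
for (size_t i = 0; i < tokens_prompt.size(); ++i) {
    const bool want_logits = i == tokens_prompt.size() - 1; // logits only for the last token
    common_batch_add(batch, tokens_prompt[i], i, { 0 }, want_logits);
}

if (llama_decode(ctx, batch) != 0) {
    LOG_ERR("%s: llama_decode() failed\n", __func__);
}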
@@ -287,7 +287,7 @@ int main(int argc, char ** argv) {

// print
{
-const std::string token_str = llama_token_to_piece(ctx, id);
+const std::string token_str = common_token_to_piece(ctx, id);

if (v == 0) {
LOG("%s", token_str.c_str());
@@ -327,7 +327,7 @@ int main(int argc, char ** argv) {
// print known n-grams starting with token id (debug)
if (0 && v == 0) {
if (ngrams_observed.cnt[id] > 0) {
-LOG("\n - %d n-grams starting with '%s'\n", ngrams_observed.cnt[id], llama_token_to_piece(ctx, id).c_str());
+LOG("\n - %d n-grams starting with '%s'\n", ngrams_observed.cnt[id], common_token_to_piece(ctx, id).c_str());
}

for (int i = 0; i < ngrams_observed.cnt[id]; i++) {
@@ -336,7 +336,7 @@ int main(int argc, char ** argv) {
const int idx = id*(N - 1)*G + i*(N - 1);

for (int j = 0; j < N - 1; j++) {
-const std::string token_str = llama_token_to_piece(ctx, ngrams_observed.tokens[idx + j]);
+const std::string token_str = common_token_to_piece(ctx, ngrams_observed.tokens[idx + j]);

LOG("%s", token_str.c_str());
}
@@ -23,7 +23,7 @@ int main(int argc, char ** argv){
llama_numa_init(params.numa);

// load the model
-llama_init_result llama_init = llama_init_from_gpt_params(params);
+common_init_result llama_init = llama_init_from_gpt_params(params);

llama_model * model = llama_init.model;
llama_context * ctx = llama_init.context;
@@ -31,7 +31,7 @@ int main(int argc, char ** argv){

// tokenize the prompt
std::vector<llama_token> inp;
-inp = ::llama_tokenize(ctx, params.prompt, true, true);
+inp = ::common_tokenize(ctx, params.prompt, true, true);
fprintf(stderr, "%s: tokenization done\n", __func__);


@@ -28,14 +28,14 @@ int main(int argc, char ** argv){
llama_numa_init(params.numa);

// load the model
-llama_init_result llama_init = llama_init_from_gpt_params(params);
+common_init_result llama_init = llama_init_from_gpt_params(params);

llama_model * model = llama_init.model;
llama_context * ctx = llama_init.context;

// tokenize the prompt
std::vector<llama_token> inp;
-inp = ::llama_tokenize(ctx, params.prompt, true, true);
+inp = ::common_tokenize(ctx, params.prompt, true, true);

llama_ngram_cache ngram_cache_context;
llama_ngram_cache ngram_cache_dynamic;
@@ -31,14 +31,14 @@ int main(int argc, char ** argv){
llama_numa_init(params.numa);

// load the model
-llama_init_result llama_init = llama_init_from_gpt_params(params);
+common_init_result llama_init = llama_init_from_gpt_params(params);

llama_model * model = llama_init.model;
llama_context * ctx = llama_init.context;

// tokenize the prompt
std::vector<llama_token> inp;
-inp = ::llama_tokenize(ctx, params.prompt, true, true);
+inp = ::common_tokenize(ctx, params.prompt, true, true);

llama_ngram_cache ngram_cache_context;
llama_ngram_cache ngram_cache_dynamic;
@@ -80,7 +80,7 @@ int main(int argc, char ** argv){
LOG("\n\n");

for (auto id : inp) {
-LOG("%s", llama_token_to_piece(ctx, id).c_str());
+LOG("%s", common_token_to_piece(ctx, id).c_str());
}

fflush(stderr);
@@ -117,7 +117,7 @@ int main(int argc, char ** argv){
// debug
if (dump_kv_cache) {
llama_kv_cache_view_update(ctx, &kvc_view);
-llama_kv_cache_dump_view_seqs(kvc_view, 40);
+common_kv_cache_dump_view_seqs(kvc_view, 40);
}

// print current draft sequence
@@ -130,7 +130,7 @@ int main(int argc, char ** argv){

gpt_sampler_accept(smpl, id, true);

-const std::string token_str = llama_token_to_piece(ctx, id);
+const std::string token_str = common_token_to_piece(ctx, id);

if (!params.use_color) {
LOG("%s", token_str.c_str());
@@ -192,8 +192,8 @@ int main(int argc, char ** argv){
// clean the cache of draft tokens that weren't accepted
llama_kv_cache_seq_rm(ctx, 0, n_past, -1);

-llama_batch_clear(batch_tgt);
-llama_batch_add(batch_tgt, draft[0], n_past, { 0 }, true);
+common_batch_clear(batch_tgt);
+common_batch_add(batch_tgt, draft[0], n_past, { 0 }, true);

// Draft already contains a single token sampled from the model:
GGML_ASSERT(draft.size() == 1);
@@ -203,7 +203,7 @@ int main(int argc, char ** argv){
llama_ngram_cache_draft(inp, draft, n_draft, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX, ngram_cache_context, ngram_cache_dynamic, ngram_cache_static);

for (size_t i = 1; i < draft.size(); ++i) {
-llama_batch_add(batch_tgt, draft[i], n_past + i, { 0 }, true);
+common_batch_add(batch_tgt, draft[i], n_past + i, { 0 }, true);
}

t_draft_us += ggml_time_us() - t_start_draft_us;
@@ -127,9 +127,9 @@ static void sigint_handler(int signo) {
}
#endif

-static std::string chat_add_and_format(struct llama_model * model, std::vector<llama_chat_msg> & chat_msgs, const std::string & role, const std::string & content) {
-llama_chat_msg new_msg{role, content};
-auto formatted = llama_chat_format_single(model, g_params->chat_template, chat_msgs, new_msg, role == "user");
+static std::string chat_add_and_format(struct llama_model * model, std::vector<common_chat_msg> & chat_msgs, const std::string & role, const std::string & content) {
+common_chat_msg new_msg{role, content};
+auto formatted = common_chat_format_single(model, g_params->chat_template, chat_msgs, new_msg, role == "user");
chat_msgs.push_back({role, content});
LOG_DBG("formatted: '%s'\n", formatted.c_str());
return formatted;
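This is the chat-template side of the rename: the message struct becomes `common_chat_msg` and the per-message formatter becomes `common_chat_format_single`, with the argument order shown above (model, template, history, new message, flag). A minimal usage sketch; the message text is hypothetical, and passing an empty template string to mean "use the model's built-in template" is an assumption of this sketch, not something the diff states:

// Sketch: format one user turn against the running chat history (example values).
std::vector<common_chat_msg> chat_msgs;

common_chat_msg new_msg{"user", "Hello there"};
// "" is assumed to select the model's default chat template.
auto formatted = common_chat_format_single(model, "", chat_msgs, new_msg, true);

chat_msgs.push_back(new_msg);
LOG_DBG("formatted: '%s'\n", formatted.c_str());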
@@ -189,7 +189,7 @@ int main(int argc, char ** argv) {
llama_context * ctx = nullptr;
gpt_sampler * smpl = nullptr;

-std::vector<llama_chat_msg> chat_msgs;
+std::vector<common_chat_msg> chat_msgs;

g_model = &model;
g_ctx = &ctx;
@@ -197,7 +197,7 @@ int main(int argc, char ** argv) {

// load the model and apply lora adapter, if any
LOG_INF("%s: load the model and apply lora adapter, if any\n", __func__);
-llama_init_result llama_init = llama_init_from_gpt_params(params);
+common_init_result llama_init = llama_init_from_gpt_params(params);

model = llama_init.model;
ctx = llama_init.context;
@@ -246,7 +246,7 @@ int main(int argc, char ** argv) {
// print chat template example in conversation mode
if (params.conversation) {
if (params.enable_chat_template) {
-LOG_INF("%s: chat template example:\n%s\n", __func__, llama_chat_format_example(model, params.chat_template).c_str());
+LOG_INF("%s: chat template example:\n%s\n", __func__, common_chat_format_example(model, params.chat_template).c_str());
} else {
LOG_INF("%s: in-suffix/prefix is specified, chat template will be disabled\n", __func__);
}
@@ -296,7 +296,7 @@ int main(int argc, char ** argv) {
: params.prompt;
if (params.interactive_first || !params.prompt.empty() || session_tokens.empty()) {
LOG_DBG("tokenize the prompt\n");
-embd_inp = ::llama_tokenize(ctx, prompt, true, true);
+embd_inp = ::common_tokenize(ctx, prompt, true, true);
} else {
LOG_DBG("use session tokens\n");
embd_inp = session_tokens;
@@ -379,13 +379,13 @@ int main(int argc, char ** argv) {
LOG_INF("%s: prompt: '%s'\n", __func__, params.prompt.c_str());
LOG_INF("%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
for (int i = 0; i < (int) embd_inp.size(); i++) {
-LOG_INF("%6d -> '%s'\n", embd_inp[i], llama_token_to_piece(ctx, embd_inp[i]).c_str());
+LOG_INF("%6d -> '%s'\n", embd_inp[i], common_token_to_piece(ctx, embd_inp[i]).c_str());
}

if (params.n_keep > add_bos) {
LOG_INF("%s: static prompt based on n_keep: '", __func__);
for (int i = 0; i < params.n_keep; i++) {
-LOG_CNT("%s", llama_token_to_piece(ctx, embd_inp[i]).c_str());
+LOG_CNT("%s", common_token_to_piece(ctx, embd_inp[i]).c_str());
}
LOG_CNT("'\n");
}
@@ -415,9 +415,9 @@ int main(int argc, char ** argv) {
for (const auto & antiprompt : params.antiprompt) {
LOG_INF("Reverse prompt: '%s'\n", antiprompt.c_str());
if (params.verbose_prompt) {
-auto tmp = ::llama_tokenize(ctx, antiprompt, false, true);
+auto tmp = ::common_tokenize(ctx, antiprompt, false, true);
for (int i = 0; i < (int) tmp.size(); i++) {
-LOG_INF("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx, tmp[i]).c_str());
+LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx, tmp[i]).c_str());
}
}
}
@@ -430,9 +430,9 @@ int main(int argc, char ** argv) {
if (!params.input_prefix.empty()) {
LOG_INF("Input prefix: '%s'\n", params.input_prefix.c_str());
if (params.verbose_prompt) {
-auto tmp = ::llama_tokenize(ctx, params.input_prefix, true, true);
+auto tmp = ::common_tokenize(ctx, params.input_prefix, true, true);
for (int i = 0; i < (int) tmp.size(); i++) {
-LOG_INF("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx, tmp[i]).c_str());
+LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx, tmp[i]).c_str());
}
}
}
@@ -440,9 +440,9 @@ int main(int argc, char ** argv) {
if (!params.input_suffix.empty()) {
LOG_INF("Input suffix: '%s'\n", params.input_suffix.c_str());
if (params.verbose_prompt) {
-auto tmp = ::llama_tokenize(ctx, params.input_suffix, false, true);
+auto tmp = ::common_tokenize(ctx, params.input_suffix, false, true);
for (int i = 0; i < (int) tmp.size(); i++) {
-LOG_INF("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx, tmp[i]).c_str());
+LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx, tmp[i]).c_str());
}
}
}
@@ -521,7 +521,7 @@ int main(int argc, char ** argv) {

antiprompt_ids.reserve(params.antiprompt.size());
for (const std::string & antiprompt : params.antiprompt) {
-antiprompt_ids.emplace_back(::llama_tokenize(ctx, antiprompt, false, true));
+antiprompt_ids.emplace_back(::common_tokenize(ctx, antiprompt, false, true));
}

if (llama_model_has_encoder(model)) {
@@ -714,7 +714,7 @@ int main(int argc, char ** argv) {
// display text
if (input_echo && display) {
for (auto id : embd) {
-const std::string token_str = llama_token_to_piece(ctx, id, params.special);
+const std::string token_str = common_token_to_piece(ctx, id, params.special);

// Console/Stream Output
LOG("%s", token_str.c_str());
@@ -788,7 +788,7 @@ int main(int argc, char ** argv) {
if (params.interactive) {
if (!params.antiprompt.empty()) {
// tokenize and inject first reverse prompt
-const auto first_antiprompt = ::llama_tokenize(ctx, params.antiprompt.front(), false, true);
+const auto first_antiprompt = ::common_tokenize(ctx, params.antiprompt.front(), false, true);
embd_inp.insert(embd_inp.end(), first_antiprompt.begin(), first_antiprompt.end());
is_antiprompt = true;
}
@@ -804,7 +804,7 @@ int main(int argc, char ** argv) {
// if current token is not EOG, we add it to current assistant message
if (params.conversation) {
const auto id = gpt_sampler_last(smpl);
-assistant_ss << llama_token_to_piece(ctx, id, false);
+assistant_ss << common_token_to_piece(ctx, id, false);
}

if (n_past > 0 && is_interacting) {
@@ -862,9 +862,9 @@ int main(int argc, char ** argv) {
? chat_add_and_format(model, chat_msgs, "user", std::move(buffer))
: std::move(buffer);
// TODO: one inconvenient of current chat template implementation is that we can't distinguish between user input and special tokens (prefix/postfix)
-const auto line_pfx = ::llama_tokenize(ctx, params.input_prefix, false, true);
-const auto line_inp = ::llama_tokenize(ctx, user_inp, false, format_chat);
-const auto line_sfx = ::llama_tokenize(ctx, params.input_suffix, false, true);
+const auto line_pfx = ::common_tokenize(ctx, params.input_prefix, false, true);
+const auto line_inp = ::common_tokenize(ctx, user_inp, false, format_chat);
+const auto line_sfx = ::common_tokenize(ctx, params.input_suffix, false, true);

LOG_DBG("input tokens: %s\n", string_from(ctx, line_inp).c_str());

@@ -882,7 +882,7 @@ int main(int argc, char ** argv) {
for (size_t i = original_size; i < embd_inp.size(); ++i) {
const llama_token token = embd_inp[i];
output_tokens.push_back(token);
-output_ss << llama_token_to_piece(ctx, token);
+output_ss << common_token_to_piece(ctx, token);
}

// reset assistant message
@@ -130,7 +130,7 @@ int main(int argc, char ** argv) {
llama_numa_init(params.numa);

// load the target model
-llama_init_result llama_init = llama_init_from_gpt_params(params);
+common_init_result llama_init = llama_init_from_gpt_params(params);

llama_model * model = llama_init.model;
llama_context * ctx = llama_init.context;
@@ -164,7 +164,7 @@ int main(int argc, char ** argv) {
}

std::vector<llama_token> tokens_system;
-tokens_system = ::llama_tokenize(ctx, k_system, true);
+tokens_system = ::common_tokenize(ctx, k_system, true);
const int32_t n_tokens_system = tokens_system.size();

llama_seq_id g_seq_id = 0;
@@ -189,7 +189,7 @@ int main(int argc, char ** argv) {
LOG_INF("%s: Evaluating the system prompt ...\n", __func__);

for (int32_t i = 0; i < n_tokens_system; ++i) {
-llama_batch_add(batch, tokens_system[i], i, { 0 }, false);
+common_batch_add(batch, tokens_system[i], i, { 0 }, false);
}

if (llama_decode(ctx, batch) != 0) {
@@ -210,10 +210,10 @@ int main(int argc, char ** argv) {
while (true) {
if (dump_kv_cache) {
llama_kv_cache_view_update(ctx, &kvc_view);
-llama_kv_cache_dump_view_seqs(kvc_view, 40);
+common_kv_cache_dump_view_seqs(kvc_view, 40);
}

-llama_batch_clear(batch);
+common_batch_clear(batch);

// decode any currently ongoing sequences
for (auto & client : clients) {
@@ -223,7 +223,7 @@ int main(int argc, char ** argv) {

client.i_batch = batch.n_tokens;

-llama_batch_add(batch, client.sampled, n_tokens_system + client.n_prompt + client.n_decoded, { client.id + 1 }, true);
+common_batch_add(batch, client.sampled, n_tokens_system + client.n_prompt + client.n_decoded, { client.id + 1 }, true);

client.n_decoded += 1;
}
@@ -256,10 +256,10 @@ int main(int argc, char ** argv) {

// do not prepend BOS because we have a system prompt!
std::vector<llama_token> tokens_prompt;
-tokens_prompt = ::llama_tokenize(ctx, client.prompt, false);
+tokens_prompt = ::common_tokenize(ctx, client.prompt, false);

for (size_t i = 0; i < tokens_prompt.size(); ++i) {
-llama_batch_add(batch, tokens_prompt[i], i + n_tokens_system, { client.id + 1 }, false);
+common_batch_add(batch, tokens_prompt[i], i + n_tokens_system, { client.id + 1 }, false);
}

// extract the logits only for the last token
@@ -350,7 +350,7 @@ int main(int argc, char ** argv) {
client.t_start_gen = ggml_time_us();
}

-const std::string token_str = llama_token_to_piece(ctx, id);
+const std::string token_str = common_token_to_piece(ctx, id);

client.response += token_str;
client.sampled = id;
@@ -61,7 +61,7 @@ int main(int argc, char ** argv) {

// initialize the model

-llama_model_params model_params = llama_model_params_from_gpt_params(params);
+llama_model_params model_params = common_model_params_from_gpt_params(params);

llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params);

@@ -72,7 +72,7 @@ int main(int argc, char ** argv) {

// initialize the context

-llama_context_params ctx_params = llama_context_params_from_gpt_params(params);
+llama_context_params ctx_params = common_context_params_from_gpt_params(params);

ctx_params.n_ctx = llama_n_ctx_train(model)*n_grp + n_keep;
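Examples that set things up manually instead of going through `llama_init_from_gpt_params` use the renamed parameter converters, while the core `llama_*` loading calls keep their names. A minimal sketch of that path inside an example's main(), using only calls that appear in these hunks; the error messages are illustrative:

// Sketch: manual model + context setup with the renamed parameter helpers.
llama_model_params model_params = common_model_params_from_gpt_params(params);
llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params);
if (model == NULL) {
    LOG_ERR("%s: failed to load the model\n", __func__);
    return 1;
}

llama_context_params ctx_params = common_context_params_from_gpt_params(params);
llama_context * ctx = llama_new_context_with_model(model, ctx_params);
if (ctx == NULL) {
    LOG_ERR("%s: failed to create the context\n", __func__);
    return 1;
}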
@@ -92,10 +92,10 @@ int main(int argc, char ** argv) {

// tokenize the prompt
std::vector<llama_token> tokens_list;
-tokens_list = ::llama_tokenize(ctx, params.prompt, true);
+tokens_list = ::common_tokenize(ctx, params.prompt, true);

// tokenize the prefix and use it as a sink
-const int n_tokens_prefix = ::llama_tokenize(ctx, prompt_prefix, true).size();
+const int n_tokens_prefix = ::common_tokenize(ctx, prompt_prefix, true).size();

const int n_tokens_all = tokens_list.size();

@@ -137,10 +137,10 @@ int main(int argc, char ** argv) {
n_past = llama_kv_cache_seq_pos_max(ctx, 0) + 1;
}

-llama_batch_clear(batch);
+common_batch_clear(batch);

for (int j = 0; j < n_batch && i + j < n_tokens_all; j++) {
-llama_batch_add(batch, tokens_list[i + j], n_past++, { 0 }, false);
+common_batch_add(batch, tokens_list[i + j], n_past++, { 0 }, false);
}

if (i + n_batch >= n_tokens_all) {
@@ -171,10 +171,10 @@ int main(int argc, char ** argv) {

n_past = llama_kv_cache_seq_pos_max(ctx, 0) + 1;

-llama_batch_clear(batch);
+common_batch_clear(batch);

for (int j = 0; j < n_batch && i + j < n_tokens_all; j++) {
-llama_batch_add(batch, tokens_list[i + j], n_past++, { 0 }, false);
+common_batch_add(batch, tokens_list[i + j], n_past++, { 0 }, false);
}

if (i + n_batch >= n_tokens_all) {
@@ -229,15 +229,15 @@ int main(int argc, char ** argv) {
break;
}

-LOG("%s", llama_token_to_piece(ctx, new_token_id).c_str());
+LOG("%s", common_token_to_piece(ctx, new_token_id).c_str());

n_decode += 1;

// prepare the next batch
-llama_batch_clear(batch);
+common_batch_clear(batch);

// push this new token for next evaluation
-llama_batch_add(batch, new_token_id, n_past++, { 0 }, true);
+common_batch_add(batch, new_token_id, n_past++, { 0 }, true);
}

n_cur += 1;
@@ -348,7 +348,7 @@ static results_perplexity perplexity_v2(llama_context * ctx, const gpt_params &

LOG_INF("%s: tokenizing the input ..\n", __func__);

-std::vector<llama_token> tokens = ::llama_tokenize(ctx, params.prompt, true);
+std::vector<llama_token> tokens = ::common_tokenize(ctx, params.prompt, true);

const int n_ctx = llama_n_ctx(ctx);

@@ -500,7 +500,7 @@ static results_perplexity perplexity(llama_context * ctx, const gpt_params & par
auto tim1 = std::chrono::high_resolution_clock::now();
LOG_INF("%s: tokenizing the input ..\n", __func__);

-std::vector<llama_token> tokens = ::llama_tokenize(ctx, params.prompt, true);
+std::vector<llama_token> tokens = ::common_tokenize(ctx, params.prompt, true);

auto tim2 = std::chrono::high_resolution_clock::now();
LOG_INF("%s: tokenization took %g ms\n",__func__,1e-3*std::chrono::duration_cast<std::chrono::microseconds>(tim2-tim1).count());
@@ -844,7 +844,7 @@ static void hellaswag_score(llama_context * ctx, const gpt_params & params) {
hs_cur.gold_ending_idx = std::stoi( prompt_lines[idx*6+1] );
for (size_t j = 0; j < 4; j++) {
hs_cur.ending[j] = prompt_lines[idx*6+2+j];
-hs_cur.seq_tokens[j] = ::llama_tokenize(ctx, hs_cur.context + " " + hs_cur.ending[j], true);
+hs_cur.seq_tokens[j] = ::common_tokenize(ctx, hs_cur.context + " " + hs_cur.ending[j], true);
}

// determine the common prefix of the endings
@@ -900,7 +900,7 @@ static void hellaswag_score(llama_context * ctx, const gpt_params & params) {
size_t i1 = i0;
size_t i_logits = 0; // this tells us how many logits were needed before this point in the batch

-llama_batch_clear(batch);
+common_batch_clear(batch);

// batch as much tasks as possible into the available context
// each task has 4 unique sequence ids - one for each ending
@@ -916,7 +916,7 @@ static void hellaswag_score(llama_context * ctx, const gpt_params & params) {
}

for (size_t i = 0; i < hs_cur.common_prefix; ++i) {
-llama_batch_add(batch, hs_cur.seq_tokens[0][i], i, { s0 + 0, s0 + 1, s0 + 2, s0 + 3 }, false);
+common_batch_add(batch, hs_cur.seq_tokens[0][i], i, { s0 + 0, s0 + 1, s0 + 2, s0 + 3 }, false);
}
batch.logits[batch.n_tokens - 1] = true; // we need logits for the last token of the common prefix
n_logits += 1;
@@ -926,7 +926,7 @@ static void hellaswag_score(llama_context * ctx, const gpt_params & params) {
// TODO: don't evaluate the last token of each sequence
for (size_t i = hs_cur.common_prefix; i < seq_tokens_size; ++i) {
const bool needs_logits = i < seq_tokens_size - 1;
-llama_batch_add(batch, hs_cur.seq_tokens[s][i], i, { s0 + s }, needs_logits);
+common_batch_add(batch, hs_cur.seq_tokens[s][i], i, { s0 + s }, needs_logits);
n_logits += needs_logits;
}
}
@@ -1136,8 +1136,8 @@ static void winogrande_score(llama_context * ctx, const gpt_params & params) {
LOG_INF("%s : tokenizing selected tasks\n", __func__);

for (auto & task : data) {
-task.seq_tokens[0] = ::llama_tokenize(ctx, task.first + task.choices[0] + task.second, true);
-task.seq_tokens[1] = ::llama_tokenize(ctx, task.first + task.choices[1] + task.second, true);
+task.seq_tokens[0] = ::common_tokenize(ctx, task.first + task.choices[0] + task.second, true);
+task.seq_tokens[1] = ::common_tokenize(ctx, task.first + task.choices[1] + task.second, true);

task.common_prefix = 0;
for (size_t k = 0; k < task.seq_tokens[0].size(); k++) {
@@ -1152,8 +1152,8 @@ static void winogrande_score(llama_context * ctx, const gpt_params & params) {
task.seq_tokens[0].size() - task.common_prefix +
task.seq_tokens[1].size() - task.common_prefix;

-task.n_base1 = ::llama_tokenize(ctx, task.first + task.choices[0], true).size();
-task.n_base2 = ::llama_tokenize(ctx, task.first + task.choices[1], true).size();
+task.n_base1 = ::common_tokenize(ctx, task.first + task.choices[0], true).size();
+task.n_base2 = ::common_tokenize(ctx, task.first + task.choices[1], true).size();
}

LOG_INF("%s : calculating winogrande score over selected tasks.\n", __func__);
@@ -1184,7 +1184,7 @@ static void winogrande_score(llama_context * ctx, const gpt_params & params) {
size_t i1 = i0;
size_t i_logits = 0;

-llama_batch_clear(batch);
+common_batch_clear(batch);

while (n_cur + (int) data[i1].required_tokens <= n_ctx) {
int n_logits = 0;
@@ -1194,7 +1194,7 @@ static void winogrande_score(llama_context * ctx, const gpt_params & params) {
}

for (size_t i = 0; i < data[i1].common_prefix; ++i) {
-llama_batch_add(batch, data[i1].seq_tokens[0][i], i, { s0 + 0, s0 + 1 }, false);
+common_batch_add(batch, data[i1].seq_tokens[0][i], i, { s0 + 0, s0 + 1 }, false);
}
batch.logits[batch.n_tokens - 1] = true;
n_logits += 1;
@@ -1202,7 +1202,7 @@ static void winogrande_score(llama_context * ctx, const gpt_params & params) {
for (int s = 0; s < 2; ++s) {
// TODO: end before the last token, no need to predict past the end of the sequences
for (size_t i = data[i1].common_prefix; i < data[i1].seq_tokens[s].size(); ++i) {
-llama_batch_add(batch, data[i1].seq_tokens[s][i], i, { s0 + s }, true);
+common_batch_add(batch, data[i1].seq_tokens[s][i], i, { s0 + s }, true);
n_logits += 1;
}
}
@@ -1359,7 +1359,7 @@ static bool multiple_choice_prepare_one_task(llama_context * ctx, multiple_choic
}
return false;
}
-task.seq_tokens.emplace_back(::llama_tokenize(ctx, task.question + " " + answer, true));
+task.seq_tokens.emplace_back(::common_tokenize(ctx, task.question + " " + answer, true));
}
auto min_len = task.seq_tokens.front().size();
for (auto& seq : task.seq_tokens) {
@@ -1536,7 +1536,7 @@ static void multiple_choice_score(llama_context * ctx, const gpt_params & params
size_t i1 = i0;
size_t i_logits = 0; // this tells us how many logits were needed before this point in the batch

-llama_batch_clear(batch);
+common_batch_clear(batch);

// batch as much tasks as possible into the available context
// each task has 4 unique sequence ids - one for each ending
@@ -1559,7 +1559,7 @@ static void multiple_choice_score(llama_context * ctx, const gpt_params & params

for (size_t i = 0; i < cur_task.common_prefix; ++i) {
//llama_batch_add(batch, cur_task.seq_tokens[0][i], i, { s0 + 0, s0 + 1, s0 + 2, s0 + 3}, false);
-llama_batch_add(batch, cur_task.seq_tokens[0][i], i, batch_indeces, false);
+common_batch_add(batch, cur_task.seq_tokens[0][i], i, batch_indeces, false);
}
batch.logits[batch.n_tokens - 1] = true; // we need logits for the last token of the common prefix
n_logits += 1;
@@ -1569,7 +1569,7 @@ static void multiple_choice_score(llama_context * ctx, const gpt_params & params
// TODO: don't evaluate the last token of each sequence
for (size_t i = cur_task.common_prefix; i < seq_tokens_size; ++i) {
const bool needs_logits = i < seq_tokens_size - 1;
-llama_batch_add(batch, cur_task.seq_tokens[s][i], i, { s0 + s }, needs_logits);
+common_batch_add(batch, cur_task.seq_tokens[s][i], i, { s0 + s }, needs_logits);
n_logits += needs_logits;
}
}
@@ -2004,7 +2004,7 @@ int main(int argc, char ** argv) {
llama_numa_init(params.numa);

// load the model and apply lora adapter, if any
-llama_init_result llama_init = llama_init_from_gpt_params(params);
+common_init_result llama_init = llama_init_from_gpt_params(params);

llama_model * model = llama_init.model;
llama_context * ctx = llama_init.context;
@@ -77,7 +77,7 @@ static std::vector<chunk> chunk_file(const std::string & filename, int chunk_siz
static void batch_add_seq(llama_batch & batch, const std::vector<int32_t> & tokens, llama_seq_id seq_id) {
size_t n_tokens = tokens.size();
for (size_t i = 0; i < n_tokens; i++) {
-llama_batch_add(batch, tokens[i], i, { seq_id }, true);
+common_batch_add(batch, tokens[i], i, { seq_id }, true);
}
}

@@ -107,7 +107,7 @@ static void batch_decode(llama_context * ctx, llama_batch & batch, float * outpu
}

float * out = output + batch.seq_id[i][0] * n_embd;
-llama_embd_normalize(embd, out, n_embd);
+common_embd_normalize(embd, out, n_embd);
}
}
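The embedding utilities are renamed in the same way: `common_embd_normalize` writes a normalized copy of a raw embedding, and `common_embd_similarity_cos` (seen at the top of this section) returns the cosine similarity of two vectors of length `n_embd`. A minimal sketch of scoring a query embedding against one document embedding; the `query_emb`/`doc_emb` buffers are assumed to come from a `batch_decode`-style helper as above, and are not part of this diff:

// Sketch: normalize two raw embeddings and score them with cosine similarity.
std::vector<float> query_norm(n_embd);
std::vector<float> doc_norm(n_embd);

common_embd_normalize(query_emb.data(), query_norm.data(), n_embd);
common_embd_normalize(doc_emb.data(), doc_norm.data(), n_embd);

const float sim = common_embd_similarity_cos(query_norm.data(), doc_norm.data(), n_embd);
std::printf("cosine similarity: %.3f\n", sim);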
@@ -149,7 +149,7 @@ int main(int argc, char ** argv) {
llama_numa_init(params.numa);

// load the model
-llama_init_result llama_init = llama_init_from_gpt_params(params);
+common_init_result llama_init = llama_init_from_gpt_params(params);

llama_model * model = llama_init.model;
llama_context * ctx = llama_init.context;
@@ -185,7 +185,7 @@ int main(int argc, char ** argv) {

// tokenize the prompts and trim
for (auto & chunk : chunks) {
-auto inp = ::llama_tokenize(ctx, chunk.textdata, true, false);
+auto inp = ::common_tokenize(ctx, chunk.textdata, true, false);
if (inp.size() > n_batch) {
LOG_ERR("%s: chunk size (%lld) exceeds batch size (%lld), increase batch size and re-run\n",
__func__, (long long int) inp.size(), (long long int) n_batch);
@@ -204,7 +204,7 @@ int main(int argc, char ** argv) {
LOG_INF("%s: prompt %d: '%s'\n", __func__, i, chunks[i].textdata.c_str());
LOG_INF("%s: number of tokens in prompt = %zu\n", __func__, chunks[i].tokens.size());
for (int j = 0; j < (int) chunks[i].tokens.size(); j++) {
-LOG_INF("%6d -> '%s'\n", chunks[i].tokens[j], llama_token_to_piece(ctx, chunks[i].tokens[j]).c_str());
+LOG_INF("%6d -> '%s'\n", chunks[i].tokens[j], common_token_to_piece(ctx, chunks[i].tokens[j]).c_str());
}
LOG_INF("\n\n");
}
@@ -232,7 +232,7 @@ int main(int argc, char ** argv) {
if (batch.n_tokens + n_toks > n_batch) {
float * out = emb + p * n_embd;
batch_decode(ctx, batch, out, s, n_embd);
-llama_batch_clear(batch);
+common_batch_clear(batch);
p += s;
s = 0;
}
@@ -260,20 +260,20 @@ int main(int argc, char ** argv) {
while (true) {
LOG("Enter query: ");
std::getline(std::cin, query);
-std::vector<int32_t> query_tokens = llama_tokenize(ctx, query, true);
+std::vector<int32_t> query_tokens = common_tokenize(ctx, query, true);

batch_add_seq(query_batch, query_tokens, 0);

std::vector<float> query_emb(n_embd, 0);
batch_decode(ctx, query_batch, query_emb.data(), 1, n_embd);

-llama_batch_clear(query_batch);
+common_batch_clear(query_batch);

// compute cosine similarities
{
std::vector<std::pair<int, float>> similarities;
for (int i = 0; i < n_chunks; i++) {
-float sim = llama_embd_similarity_cos(chunks[i].embedding.data(), query_emb.data(), n_embd);
|
float sim = common_embd_similarity_cos(chunks[i].embedding.data(), query_emb.data(), n_embd);
|
||||||
similarities.push_back(std::make_pair(i, sim));
|
similarities.push_back(std::make_pair(i, sim));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
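The retrieval hunks above touch two renamed embedding helpers. A small illustrative sketch (not part of the diff) of how they combine, assuming two embeddings of length n_embd have already been extracted from the context:

    // illustrative sketch: score a chunk against a query with the renamed helpers,
    // using the same 3-argument calls shown in the retrieval hunks above
    #include <vector>
    #include "common.h"

    static float score_chunk(const float * query_embd, const float * chunk_embd, int n_embd) {
        std::vector<float> q(n_embd);
        std::vector<float> c(n_embd);
        common_embd_normalize(query_embd, q.data(), n_embd); // normalize, as batch_decode() does
        common_embd_normalize(chunk_embd, c.data(), n_embd);
        return common_embd_similarity_cos(q.data(), c.data(), n_embd); // cosine similarity in [-1, 1]
    }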
@@ -28,7 +28,7 @@ int main(int argc, char ** argv) {
 std::string result2;

 // init
-llama_init_result llama_init = llama_init_from_gpt_params(params);
+common_init_result llama_init = llama_init_from_gpt_params(params);

 llama_model * model = llama_init.model;
 llama_context * ctx = llama_init.context;

@@ -46,7 +46,7 @@ int main(int argc, char ** argv) {
 llama_sampler_chain_add(smpl, llama_sampler_init_dist(params.sparams.seed));

 // tokenize prompt
-auto tokens = llama_tokenize(ctx, params.prompt, true);
+auto tokens = common_tokenize(ctx, params.prompt, true);

 // evaluate prompt
 llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size(), n_past, 0));

@@ -72,7 +72,7 @@ int main(int argc, char ** argv) {

 for (auto i = 0; i < params.n_predict; i++) {
 auto next_token = llama_sampler_sample(smpl, ctx, -1);
-auto next_token_str = llama_token_to_piece(ctx, next_token);
+auto next_token_str = common_token_to_piece(ctx, next_token);

 printf("%s", next_token_str.c_str());
 result0 += next_token_str;

@@ -92,7 +92,7 @@ int main(int argc, char ** argv) {
 llama_free(ctx);

 // make new context
-auto * ctx2 = llama_new_context_with_model(model, llama_context_params_from_gpt_params(params));
+auto * ctx2 = llama_new_context_with_model(model, common_context_params_from_gpt_params(params));

 llama_sampler * smpl2 = llama_sampler_chain_init(sparams);

@@ -128,7 +128,7 @@ int main(int argc, char ** argv) {
 // second run
 for (auto i = 0; i < params.n_predict; i++) {
 auto next_token = llama_sampler_sample(smpl2, ctx2, -1);
-auto next_token_str = llama_token_to_piece(ctx2, next_token);
+auto next_token_str = common_token_to_piece(ctx2, next_token);

 printf("%s", next_token_str.c_str());
 result1 += next_token_str;

@@ -152,7 +152,7 @@ int main(int argc, char ** argv) {
 }

 // make new context
-auto * ctx3 = llama_new_context_with_model(model, llama_context_params_from_gpt_params(params));
+auto * ctx3 = llama_new_context_with_model(model, common_context_params_from_gpt_params(params));

 llama_sampler * smpl3 = llama_sampler_chain_init(sparams);

@@ -216,7 +216,7 @@ int main(int argc, char ** argv) {
 // third run with seq 1 instead of 0
 for (auto i = 0; i < params.n_predict; i++) {
 auto next_token = llama_sampler_sample(smpl3, ctx3, -1);
-auto next_token_str = llama_token_to_piece(ctx3, next_token);
+auto next_token_str = common_token_to_piece(ctx3, next_token);

 printf("%s", next_token_str.c_str());
 result2 += next_token_str;
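The hunks above repeat the same generation idiom three times (sample, convert to text, accumulate). A condensed illustrative sketch of that idiom with the renamed helper; the sampler, context and n_past bookkeeping are assumed to be set up as in the hunks above:

    // illustrative sketch: the sample-and-print loop from the hunks above, as a helper
    #include <cstdio>
    #include <string>
    #include "common.h"
    #include "llama.h"

    static std::string generate(llama_sampler * smpl, llama_context * ctx, int n_past, int n_predict) {
        std::string result;
        for (int i = 0; i < n_predict; i++) {
            llama_token next_token     = llama_sampler_sample(smpl, ctx, -1);
            std::string next_token_str = common_token_to_piece(ctx, next_token);

            printf("%s", next_token_str.c_str());
            result += next_token_str;

            // feed the token back for the next step (4-argument llama_batch_get_one, as in this commit)
            llama_decode(ctx, llama_batch_get_one(&next_token, 1, n_past, 0));
            n_past += 1;
        }
        return result;
    }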
@@ -611,7 +611,7 @@ struct server_response {
 struct server_context {
 llama_model * model = nullptr;
 llama_context * ctx = nullptr;
-std::vector<llama_lora_adapter_container> loras;
+std::vector<common_lora_adapter_container> loras;

 gpt_params params;

@@ -668,7 +668,7 @@ struct server_context {
 // dedicate one sequence to the system prompt
 params.n_parallel += 1;

-llama_init_result llama_init = llama_init_from_gpt_params(params);
+common_init_result llama_init = llama_init_from_gpt_params(params);

 model = llama_init.model;
 ctx = llama_init.context;

@@ -771,10 +771,10 @@ struct server_context {

 std::vector<llama_token> p;
 if (first) {
-p = ::llama_tokenize(ctx, s, add_special, TMP_FORCE_SPECIAL);
+p = ::common_tokenize(ctx, s, add_special, TMP_FORCE_SPECIAL);
 first = false;
 } else {
-p = ::llama_tokenize(ctx, s, false, TMP_FORCE_SPECIAL);
+p = ::common_tokenize(ctx, s, false, TMP_FORCE_SPECIAL);
 }

 prompt_tokens.insert(prompt_tokens.end(), p.begin(), p.end());

@@ -788,7 +788,7 @@ struct server_context {
 }
 } else {
 auto s = json_prompt.template get<std::string>();
-prompt_tokens = ::llama_tokenize(ctx, s, add_special, TMP_FORCE_SPECIAL);
+prompt_tokens = ::common_tokenize(ctx, s, add_special, TMP_FORCE_SPECIAL);
 }

 return prompt_tokens;

@@ -999,7 +999,7 @@ struct server_context {
 slot.sparams.logit_bias.push_back({tok, bias});
 }
 } else if (el[0].is_string()) {
-auto toks = llama_tokenize(model, el[0].get<std::string>(), false);
+auto toks = common_tokenize(model, el[0].get<std::string>(), false);
 for (auto tok : toks) {
 slot.sparams.logit_bias.push_back({tok, bias});
 }

@@ -1073,7 +1073,7 @@ struct server_context {
 system_tokens.clear();

 if (!system_prompt.empty()) {
-system_tokens = ::llama_tokenize(ctx, system_prompt, true);
+system_tokens = ::common_tokenize(ctx, system_prompt, true);

 const int32_t n_batch = llama_n_batch(ctx);
 const int32_t n_tokens_prompt = system_tokens.size();

@@ -1081,10 +1081,10 @@ struct server_context {
 for (int32_t i = 0; i < n_tokens_prompt; i += n_batch) {
 const int32_t n_tokens = std::min(n_batch, n_tokens_prompt - i);

-llama_batch_clear(batch);
+common_batch_clear(batch);

 for (int32_t j = 0; j < n_tokens; ++j) {
-llama_batch_add(batch, system_tokens[i + j], i + j, { 0 }, false);
+common_batch_add(batch, system_tokens[i + j], i + j, { 0 }, false);
 }

 if (llama_decode(ctx, batch) != 0) {

@@ -1113,7 +1113,7 @@ struct server_context {

 bool process_token(completion_token_output & result, server_slot & slot) {
 // remember which tokens were sampled - used for repetition penalties during sampling
-const std::string token_str = llama_token_to_piece(ctx, result.tok, params.special);
+const std::string token_str = common_token_to_piece(ctx, result.tok, params.special);
 slot.sampled = result.tok;

 // search stop word and delete it

@@ -1297,7 +1297,7 @@ struct server_context {
 };

 if (slot.sparams.n_probs > 0) {
-const std::vector<llama_token> to_send_toks = llama_tokenize(ctx, tkn.text_to_send, false);
+const std::vector<llama_token> to_send_toks = common_tokenize(ctx, tkn.text_to_send, false);
 const size_t probs_pos = std::min(slot.n_sent_token_probs, slot.generated_token_probs.size());
 const size_t probs_stop_pos = std::min(slot.n_sent_token_probs + to_send_toks.size(), slot.generated_token_probs.size());

@@ -1347,7 +1347,7 @@ struct server_context {
 if (slot.sparams.n_probs > 0) {
 std::vector<completion_token_output> probs;
 if (!slot.params.stream && slot.stopped_word) {
-const std::vector<llama_token> stop_word_toks = llama_tokenize(ctx, slot.stopping_word, false);
+const std::vector<llama_token> stop_word_toks = common_tokenize(ctx, slot.stopping_word, false);

 size_t safe_offset = std::min(slot.generated_token_probs.size(), stop_word_toks.size());
 probs = std::vector<completion_token_output>(

@@ -1401,7 +1401,7 @@ struct server_context {
 continue;
 }

-llama_embd_normalize(embd, embd_res.data(), n_embd);
+common_embd_normalize(embd, embd_res.data(), n_embd);

 res.data = json {
 {"embedding", embd_res},

@@ -1835,7 +1835,7 @@ struct server_context {
 } break;
 case SERVER_TASK_TYPE_SET_LORA:
 {
-llama_lora_adapters_apply(ctx, loras);
+common_lora_adapters_apply(ctx, loras);
 server_task_result result;
 result.id = task.id;
 result.stop = true;

@@ -1921,7 +1921,7 @@ struct server_context {
 }

 // start populating the batch for this iteration
-llama_batch_clear(batch);
+common_batch_clear(batch);

 // frist, add sampled tokens from any ongoing sequences
 for (auto & slot : slots) {

@@ -1935,7 +1935,7 @@ struct server_context {

 // TODO: we always have to take into account the "system_tokens"
 // this is not great and needs to be improved somehow
-llama_batch_add(batch, slot.sampled, system_tokens.size() + slot_npast, { slot.id + 1 }, true);
+common_batch_add(batch, slot.sampled, system_tokens.size() + slot_npast, { slot.id + 1 }, true);

 slot.n_past += 1;

@@ -2184,7 +2184,7 @@ struct server_context {
 }
 }

-llama_batch_add(batch, prompt_tokens[slot.n_past], system_tokens.size() + slot_npast, { slot.id + 1 }, false);
+common_batch_add(batch, prompt_tokens[slot.n_past], system_tokens.size() + slot_npast, { slot.id + 1 }, false);

 if (slot.params.cache_prompt) {
 slot.cache_tokens.push_back(prompt_tokens[slot.n_past]);

@@ -3014,7 +3014,7 @@ int main(int argc, char ** argv) {

 if (with_pieces) {
 for (const auto& token : tokens) {
-std::string piece = llama_token_to_piece(ctx_server.ctx, token);
+std::string piece = common_token_to_piece(ctx_server.ctx, token);
 json piece_json;

 // Check if the piece is valid UTF-8

@@ -3357,7 +3357,7 @@ int main(int argc, char ** argv) {
 }

 // print sample chat example to make it clear which template is used
-LOG_INF("%s: chat template, built_in: %d, chat_example: '%s'\n", __func__, params.chat_template.empty(), llama_chat_format_example(ctx_server.model, params.chat_template).c_str());
+LOG_INF("%s: chat template, built_in: %d, chat_example: '%s'\n", __func__, params.chat_template.empty(), common_chat_format_example(ctx_server.model, params.chat_template).c_str());

 ctx_server.queue_tasks.on_new_task(std::bind(
 &server_context::process_single_task, &ctx_server, std::placeholders::_1));
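Several of the server hunks above rename the batch helpers used to decode a long prompt in n_batch-sized pieces (see the system-prompt hunk at line 1081). A condensed illustrative sketch of that pattern, with error handling trimmed:

    // illustrative sketch: decode `tokens` into sequence 0 in chunks of n_batch,
    // mirroring the common_batch_clear / common_batch_add usage in the server hunks above
    #include <algorithm>
    #include <vector>
    #include "common.h"
    #include "llama.h"

    static bool decode_prompt(llama_context * ctx, llama_batch & batch, const std::vector<llama_token> & tokens) {
        const int32_t n_batch         = llama_n_batch(ctx);
        const int32_t n_tokens_prompt = (int32_t) tokens.size();

        for (int32_t i = 0; i < n_tokens_prompt; i += n_batch) {
            const int32_t n_tokens = std::min(n_batch, n_tokens_prompt - i);

            common_batch_clear(batch);
            for (int32_t j = 0; j < n_tokens; ++j) {
                // prompt tokens do not need logits
                common_batch_add(batch, tokens[i + j], i + j, { 0 }, false);
            }

            if (llama_decode(ctx, batch) != 0) {
                return false;
            }
        }
        return true;
    }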
@@ -57,7 +57,7 @@ static T json_value(const json & body, const std::string & key, const T & defaul

 // Format given chat. If tmpl is empty, we take the template from model metadata
 inline std::string format_chat(const struct llama_model * model, const std::string & tmpl, const std::vector<json> & messages) {
-std::vector<llama_chat_msg> chat;
+std::vector<common_chat_msg> chat;

 for (size_t i = 0; i < messages.size(); ++i) {
 const auto & curr_msg = messages[i];

@@ -84,7 +84,7 @@ inline std::string format_chat(const struct llama_model * model, const std::stri
 chat.push_back({role, content});
 }

-const auto formatted_chat = llama_chat_apply_template(model, tmpl, chat, true);
+const auto formatted_chat = common_chat_apply_template(model, tmpl, chat, true);
 LOG_DBG("formatted_chat: '%s'\n", formatted_chat.c_str());

 return formatted_chat;

@@ -246,7 +246,7 @@ template <class Iter>
 static std::string tokens_to_str(llama_context * ctx, Iter begin, Iter end) {
 std::string ret;
 for (; begin != end; ++begin) {
-ret += llama_token_to_piece(ctx, *begin);
+ret += common_token_to_piece(ctx, *begin);
 }

 return ret;

@@ -254,7 +254,7 @@ static std::string tokens_to_str(llama_context * ctx, Iter begin, Iter end) {

 // format incomplete utf-8 multibyte character for output
 static std::string tokens_to_output_formatted_string(const llama_context * ctx, const llama_token token) {
-std::string out = token == -1 ? "" : llama_token_to_piece(ctx, token);
+std::string out = token == -1 ? "" : common_token_to_piece(ctx, token);

 // if the size is 1 and first bit is 1, meaning it's a partial character
 // (size > 1 meaning it's already a known token)
@@ -33,7 +33,7 @@ int main(int argc, char ** argv) {

 // initialize the model

-llama_model_params model_params = llama_model_params_from_gpt_params(params);
+llama_model_params model_params = common_model_params_from_gpt_params(params);

 llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params);

@@ -44,7 +44,7 @@ int main(int argc, char ** argv) {

 // initialize the context

-llama_context_params ctx_params = llama_context_params_from_gpt_params(params);
+llama_context_params ctx_params = common_context_params_from_gpt_params(params);

 llama_context * ctx = llama_new_context_with_model(model, ctx_params);

@@ -64,7 +64,7 @@ int main(int argc, char ** argv) {
 // tokenize the prompt

 std::vector<llama_token> tokens_list;
-tokens_list = ::llama_tokenize(ctx, params.prompt, true);
+tokens_list = ::common_tokenize(ctx, params.prompt, true);

 const int n_ctx = llama_n_ctx(ctx);
 const int n_kv_req = tokens_list.size() + (n_predict - tokens_list.size());

@@ -84,7 +84,7 @@ int main(int argc, char ** argv) {
 LOG("\n");

 for (auto id : tokens_list) {
-LOG("%s", llama_token_to_piece(ctx, id).c_str());
+LOG("%s", common_token_to_piece(ctx, id).c_str());
 }

 // create a llama_batch with size 512

@@ -94,7 +94,7 @@ int main(int argc, char ** argv) {

 // evaluate the initial prompt
 for (size_t i = 0; i < tokens_list.size(); i++) {
-llama_batch_add(batch, tokens_list[i], i, { 0 }, false);
+common_batch_add(batch, tokens_list[i], i, { 0 }, false);
 }

 // llama_decode will output logits only for the last token of the prompt

@@ -124,14 +124,14 @@ int main(int argc, char ** argv) {
 break;
 }

-LOG("%s", llama_token_to_piece(ctx, new_token_id).c_str());
+LOG("%s", common_token_to_piece(ctx, new_token_id).c_str());
 fflush(stdout);

 // prepare the next batch
-llama_batch_clear(batch);
+common_batch_clear(batch);

 // push this new token for next evaluation
-llama_batch_add(batch, new_token_id, n_cur, { 0 }, true);
+common_batch_add(batch, new_token_id, n_cur, { 0 }, true);

 n_decode += 1;
 }
@@ -66,7 +66,7 @@ int main(int argc, char ** argv) {
 llama_context * ctx_dft = NULL;

 // load the target model
-llama_init_result llama_init_tgt = llama_init_from_gpt_params(params);
+common_init_result llama_init_tgt = llama_init_from_gpt_params(params);
 model_tgt = llama_init_tgt.model;
 ctx_tgt = llama_init_tgt.context;

@@ -78,7 +78,7 @@ int main(int argc, char ** argv) {
 }

 params.cpuparams_batch.n_threads = params.draft_cpuparams_batch.n_threads;
-llama_init_result llama_init_dft = llama_init_from_gpt_params(params);
+common_init_result llama_init_dft = llama_init_from_gpt_params(params);
 model_dft = llama_init_dft.model;
 ctx_dft = llama_init_dft.context;

@@ -124,8 +124,8 @@ int main(int argc, char ** argv) {
 if (std::strcmp(token_text_tgt, token_text_dft) != 0) {
 LOG_ERR("%s: draft model vocab must match target model to use speculation but ", __func__);
 LOG_ERR("token %d content differs - target '%s', draft '%s'\n", i,
-llama_token_to_piece(ctx_tgt, i).c_str(),
-llama_token_to_piece(ctx_dft, i).c_str());
+common_token_to_piece(ctx_tgt, i).c_str(),
+common_token_to_piece(ctx_dft, i).c_str());
 return 1;
 }
 }

@@ -134,7 +134,7 @@ int main(int argc, char ** argv) {

 // Tokenize the prompt
 std::vector<llama_token> inp;
-inp = ::llama_tokenize(ctx_tgt, params.prompt, true, true);
+inp = ::common_tokenize(ctx_tgt, params.prompt, true, true);

 const int max_context_size = llama_n_ctx(ctx_tgt);
 const int max_tokens_list_size = max_context_size - 4;

@@ -147,7 +147,7 @@ int main(int argc, char ** argv) {
 LOG("\n\n");

 for (auto id : inp) {
-LOG("%s", llama_token_to_piece(ctx_tgt, id).c_str());
+LOG("%s", common_token_to_piece(ctx_tgt, id).c_str());
 }

 const int n_input = inp.size();

@@ -277,13 +277,13 @@ int main(int argc, char ** argv) {
 s_keep = s;
 accept = true;
 token_id = drafts[s].tokens[i_dft];
-token_str = llama_token_to_piece(ctx_tgt, token_id);
+token_str = common_token_to_piece(ctx_tgt, token_id);
 gpt_sampler_accept(smpl, token_id, true);

 LOG_DBG("draft token %d of sequence %d (%d, '%s') accepted\n", i_dft, s, token_id, token_str.c_str());
 break;
 } else {
-LOG_DBG("draft token %d of sequence %d (%d, '%s') rejected\n", i_dft, s, drafts[s].tokens[i_dft], llama_token_to_piece(ctx_tgt, drafts[s].tokens[i_dft]).c_str());
+LOG_DBG("draft token %d of sequence %d (%d, '%s') rejected\n", i_dft, s, drafts[s].tokens[i_dft], common_token_to_piece(ctx_tgt, drafts[s].tokens[i_dft]).c_str());
 drafts[s].active = false;

 // calculate residual probability

@@ -350,7 +350,7 @@ int main(int argc, char ** argv) {

 token_id = dist_tgt.data[idx].id;
 gpt_sampler_accept(smpl, token_id, true);
-token_str = llama_token_to_piece(ctx_tgt, token_id);
+token_str = common_token_to_piece(ctx_tgt, token_id);
 }
 } else {
 // greedy verification

@@ -361,7 +361,7 @@ int main(int argc, char ** argv) {

 gpt_sampler_accept(smpl, token_id, true);

-token_str = llama_token_to_piece(ctx_tgt, token_id);
+token_str = common_token_to_piece(ctx_tgt, token_id);

 for (int s = 0; s < n_seq_dft; ++s) {
 if (!drafts[s].active) {

@@ -431,8 +431,8 @@ int main(int argc, char ** argv) {
 drafts[0].dists.push_back(std::vector<llama_token_data>());
 drafts[0].i_batch_tgt.push_back(0);

-llama_batch_clear(batch_dft);
-llama_batch_add (batch_dft, token_id, n_past_dft, { 0 }, true);
+common_batch_clear(batch_dft);
+common_batch_add (batch_dft, token_id, n_past_dft, { 0 }, true);

 llama_kv_cache_seq_rm(ctx_dft, 0, n_past_dft, -1);
 // LOG_DBG("dft batch: %s\n", LOG_BATCH_TOSTR_PRETTY(ctx_dft, batch_dft).c_str());

@@ -461,8 +461,8 @@ int main(int argc, char ** argv) {
 drafts[0].drafting = true;
 drafts[0].i_batch_dft = 0;

-llama_batch_clear(batch_tgt);
-llama_batch_add (batch_tgt, drafts[0].tokens[0], n_past_tgt, { 0 }, true);
+common_batch_clear(batch_tgt);
+common_batch_add (batch_tgt, drafts[0].tokens[0], n_past_tgt, { 0 }, true);

 // sample n_draft tokens from the draft model using tree-based sampling
 for (int i = 0; i < n_draft; ++i) {

@@ -483,7 +483,7 @@ int main(int argc, char ** argv) {

 for (int k = 0; k < std::min(n_seq_dft + 3, (int) cur_p->size); ++k) {
 LOG_DBG(" - draft candidate %3d for seq %3d, pos %3d: %6d (%8.3f) '%s'\n",
-k, s, i, cur_p->data[k].id, cur_p->data[k].p, llama_token_to_piece(ctx_dft, cur_p->data[k].id).c_str());
+k, s, i, cur_p->data[k].id, cur_p->data[k].p, common_token_to_piece(ctx_dft, cur_p->data[k].id).c_str());
 }

 std::vector<int> sa(1, s);

@@ -545,12 +545,12 @@ int main(int argc, char ** argv) {
 // add unique drafted tokens to the target batch
 drafts[s].i_batch_tgt.push_back(batch_tgt.n_tokens);

-llama_batch_add(batch_tgt, id, n_past_tgt + i + 1, { s }, true);
+common_batch_add(batch_tgt, id, n_past_tgt + i + 1, { s }, true);

 // add the token to the batch for batched decoding with the draft model
 drafts[s].i_batch_dft = batch_dft.n_tokens;

-llama_batch_add(batch_dft, id, n_past_cur, { s }, true);
+common_batch_add(batch_dft, id, n_past_cur, { s }, true);

 if (batch_tgt.n_tokens > n_draft) {
 drafts[s].drafting = false;
@@ -365,7 +365,7 @@ int main(int raw_argc, char ** raw_argv) {
 const bool parse_special = !no_parse_special;

 std::vector<llama_token> tokens;
-tokens = ::llama_tokenize(model, prompt, add_bos, parse_special);
+tokens = ::common_tokenize(model, prompt, add_bos, parse_special);

 if (printing_ids) {
 printf("[");

@@ -380,7 +380,7 @@ int main(int raw_argc, char ** raw_argv) {
 } else {
 bool invalid_utf8 = false;
 printf("%6d -> '", tokens[i]);
-write_utf8_cstr_to_stdout(llama_token_to_piece(ctx, tokens[i]).c_str(), invalid_utf8);
+write_utf8_cstr_to_stdout(common_token_to_piece(ctx, tokens[i]).c_str(), invalid_utf8);
 if (invalid_utf8) {
 printf("' (utf-8 decode failure)\n");
 } else {
@@ -140,11 +140,11 @@ int main(void) {

 // test llama_chat_format_single for system message
 printf("\n\n=== llama_chat_format_single (system message) ===\n\n");
-std::vector<llama_chat_msg> chat2;
-llama_chat_msg sys_msg{"system", "You are a helpful assistant"};
+std::vector<common_chat_msg> chat2;
+common_chat_msg sys_msg{"system", "You are a helpful assistant"};

 auto fmt_sys = [&](std::string tmpl) {
-auto output = llama_chat_format_single(nullptr, tmpl, chat2, sys_msg, false);
+auto output = common_chat_format_single(nullptr, tmpl, chat2, sys_msg, false);
 printf("fmt_sys(%s) : %s\n", tmpl.c_str(), output.c_str());
 printf("-------------------------\n");
 return output;

@@ -160,10 +160,10 @@ int main(void) {
 chat2.push_back({"system", "You are a helpful assistant"});
 chat2.push_back({"user", "Hello"});
 chat2.push_back({"assistant", "I am assistant"});
-llama_chat_msg new_msg{"user", "How are you"};
+common_chat_msg new_msg{"user", "How are you"};

 auto fmt_single = [&](std::string tmpl) {
-auto output = llama_chat_format_single(nullptr, tmpl, chat2, new_msg, true);
+auto output = common_chat_format_single(nullptr, tmpl, chat2, new_msg, true);
 printf("fmt_single(%s) : %s\n", tmpl.c_str(), output.c_str());
 printf("-------------------------\n");
 return output;
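For reference, an illustrative sketch (not part of the diff) of the renamed chat helpers exercised by the test above; the messages are the ones used in the test, while the template name "chatml" is only an assumed example value:

    // illustrative sketch: build a conversation and format it with the renamed helpers;
    // passing nullptr for the model means the template string/name itself is used
    #include <cstdio>
    #include <string>
    #include <vector>
    #include "common.h"

    static void chat_template_example() {
        std::vector<common_chat_msg> chat;
        chat.push_back({"system",    "You are a helpful assistant"});
        chat.push_back({"user",      "Hello"});
        chat.push_back({"assistant", "I am assistant"});

        // format the whole conversation, appending the assistant prefix
        std::string prompt = common_chat_apply_template(nullptr, "chatml", chat, true);

        // format only the delta introduced by one additional message
        common_chat_msg new_msg{"user", "How are you"};
        std::string delta = common_chat_format_single(nullptr, "chatml", chat, new_msg, true);

        printf("%s\n---\n%s\n", prompt.c_str(), delta.c_str());
    }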
@@ -202,7 +202,7 @@ int main(int argc, char **argv) {
 for (int i = 0; i < nthread; i++) {
 threads[i] = std::thread([&, i]() {
 for (const auto & test_kv : k_tests) {
-const std::vector<llama_token> res = llama_tokenize(ctx, test_kv.first, add_special, false);
+const std::vector<llama_token> res = common_tokenize(ctx, test_kv.first, add_special, false);

 // here only print the result of the first thread
 // because the other threads are running the same tests

@@ -212,7 +212,7 @@ int main(int argc, char **argv) {

 printf("\n");
 printf("src: '%s'\n", test_kv.first.c_str());
-printf("res: '%s'\n", llama_detokenize(ctx, res).c_str());
+printf("res: '%s'\n", common_detokenize(ctx, res).c_str());
 printf("tok: ");
 for (const auto & tok : res) {
 printf("%d ", tok);

@@ -229,16 +229,16 @@ int main(int argc, char **argv) {
 if (!correct) {
 fprintf(stderr, "%s : failed test: '%s'\n", __func__, test_kv.first.c_str());
 fprintf(stderr, "%s : detokenized to: '%s' instead of '%s'\n", __func__,
-llama_detokenize(ctx, res).c_str(),
-llama_detokenize(ctx, test_kv.second).c_str());
+common_detokenize(ctx, res).c_str(),
+common_detokenize(ctx, test_kv.second).c_str());
 fprintf(stderr, "%s : expected tokens: ", __func__);
 for (const auto & t : test_kv.second) {
-fprintf(stderr, "%6d '%s', ", t, llama_token_to_piece(ctx, t).c_str());
+fprintf(stderr, "%6d '%s', ", t, common_token_to_piece(ctx, t).c_str());
 }
 fprintf(stderr, "\n");
 fprintf(stderr, "%s : got tokens: ", __func__);
 for (const auto & t : res) {
-fprintf(stderr, "%6d '%s', ", t, llama_token_to_piece(ctx, t).c_str());
+fprintf(stderr, "%6d '%s', ", t, common_token_to_piece(ctx, t).c_str());
 }
 fprintf(stderr, "\n");

@@ -273,7 +273,7 @@ int main(int argc, char **argv) {
 {
 const auto t_start = ggml_time_us();

-res = llama_tokenize(ctx, text, add_special, false);
+res = common_tokenize(ctx, text, add_special, false);

 const auto t_end = ggml_time_us();
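The tokenizer tests above all check the same property: tokenizing a string and detokenizing the result should give the original string back. A compact illustrative sketch of that round-trip check with the renamed helpers:

    // illustrative sketch: the round-trip property verified by the tokenizer tests above
    #include <string>
    #include <vector>
    #include "common.h"

    static bool round_trips(llama_context * ctx, const std::string & text) {
        std::vector<llama_token> tokens = common_tokenize(ctx, text, /*add_special*/ false, /*parse_special*/ true);
        std::string check = common_detokenize(ctx, tokens);
        return check == text;
    }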
@@ -78,10 +78,10 @@ int main(int argc, char **argv) {
 const int n_vocab = llama_n_vocab(model);

 for (int i = 0; i < n_vocab; ++i) {
-std::string str = llama_detokenize(ctx, std::vector<int>(1, i));
+std::string str = common_detokenize(ctx, std::vector<int>(1, i));
 try {
 auto cps = unicode_cpts_from_utf8(str);
-std::vector<llama_token> tokens = llama_tokenize(ctx, str, false, true);
+std::vector<llama_token> tokens = common_tokenize(ctx, str, false, true);
 if (ignore_merges && tokens.size() > 1) {
 fprintf(stderr,
 "%s : error: token %d detokenizes to '%s'(%zu) but "

@@ -94,7 +94,7 @@ int main(int argc, char **argv) {
 fprintf(stderr, "]\n");
 return 2;
 }
-std::string check = llama_detokenize(ctx, tokens);
+std::string check = common_detokenize(ctx, tokens);
 if (check != str) {
 fprintf(stderr, "%s : error: token %d detokenizes to '%s'(%zu) but tokenization of this detokenizes to '%s'(%zu)\n",
 __func__, i, str.c_str(), str.length(), check.c_str(), check.length());

@@ -123,8 +123,8 @@ int main(int argc, char **argv) {
 }

 std::string str = unicode_cpt_to_utf8(cp);
-std::vector<llama_token> tokens = llama_tokenize(ctx, str, false);
-std::string check = llama_detokenize(ctx, tokens);
+std::vector<llama_token> tokens = common_tokenize(ctx, str, false);
+std::string check = common_detokenize(ctx, tokens);
 if (cp != 9601 && str != check) {
 fprintf(stderr, "error: codepoint 0x%x detokenizes to '%s'(%zu) instead of '%s'(%zu)\n",
 cp, check.c_str(), check.length(), str.c_str(), str.length());
@@ -66,9 +66,9 @@ int main(int argc, char ** argv) {
 const int n_vocab = llama_n_vocab(model);

 for (int i = 0; i < n_vocab; ++i) {
-std::string str = llama_detokenize(ctx, std::vector<int>(1, i), true);
-std::vector<llama_token> tokens = llama_tokenize(ctx, str, false, true);
-std::string check = llama_detokenize(ctx, tokens);
+std::string str = common_detokenize(ctx, std::vector<int>(1, i), true);
+std::vector<llama_token> tokens = common_tokenize(ctx, str, false, true);
+std::string check = common_detokenize(ctx, tokens);
 if (check != str) {
 fprintf(stderr, "%s : error: token %d detokenizes to '%s'(%zu) but tokenization of this detokenizes to '%s'(%zu)\n",
 __func__, i, str.c_str(), str.length(), check.c_str(), check.length());

@@ -93,8 +93,8 @@ int main(int argc, char ** argv) {
 }

 std::string str = unicode_cpt_to_utf8(cp);
-std::vector<llama_token> tokens = llama_tokenize(ctx, str, false, true);
-std::string check = llama_detokenize(ctx, tokens);
+std::vector<llama_token> tokens = common_tokenize(ctx, str, false, true);
+std::string check = common_detokenize(ctx, tokens);
 if (cp != 9601 && str != check) {
 fprintf(stderr, "error: codepoint 0x%x detokenizes to '%s'(%zu) instead of '%s'(%zu)\n",
 cp, check.c_str(), check.length(), str.c_str(), str.length());