server : normalize id vars

Georgi Gerganov 2024-03-05 14:57:15 +02:00
parent 134f5fec22
commit ad1d746caa
2 changed files with 283 additions and 288 deletions
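
Summary: identifier variables now follow a uniform id_<noun> naming scheme — task_id becomes id_task, multitask_id becomes id_multi, target_id becomes id_target — and the slot_id JSON key becomes id_slot (the C++ member server_slot::id is unchanged). request_completion() also takes id_multi right after id_task instead of as a trailing parameter. The sketch below only illustrates the convention; it is not part of the commit, and task_server_sketch is a hypothetical stand-in for the task_server struct in the diff:

    // sketch.cpp - illustrative only; task_server_sketch mirrors the id_* fields
    // that task_server has after this commit, nothing more.
    #include <cstdio>

    struct task_server_sketch {
        int id        = -1; // to be filled by the task queue
        int id_multi  = -1; // parent multitask, or -1 when the task stands alone
        int id_target = -1; // task referenced by e.g. a cancel request
    };

    int main() {
        task_server_sketch task;
        task.id = 0; // pretend the queue assigned id 0
        std::printf("id=%d id_multi=%d id_target=%d\n", task.id, task.id_multi, task.id_target);
        return 0;
    }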


@@ -81,7 +81,8 @@ struct server_params {
 struct server_slot {
     int id;
-    int task_id = -1;
+    int id_task = -1;
+    int id_multi = -1;
 
     struct slot_params params;
@@ -141,9 +142,6 @@ struct server_slot {
     double t_prompt_processing; // ms
     double t_token_generation; // ms
 
-    // multitasks
-    int multitask_id = -1;
-
     void reset() {
         n_prompt_tokens = 0;
         generated_text = "";
@@ -254,8 +252,8 @@ struct server_slot {
                 t_token, n_tokens_second);
 
         LOG_INFO(buffer, {
-            {"slot_id", id},
-            {"task_id", task_id},
+            {"id_slot", id},
+            {"id_task", id_task},
             {"t_prompt_processing", t_prompt_processing},
             {"n_prompt_tokens_processed", n_prompt_tokens_processed},
             {"t_token", t_token},
@@ -270,8 +268,8 @@ struct server_slot {
                 t_token, n_tokens_second);
 
         LOG_INFO(buffer, {
-            {"slot_id", id},
-            {"task_id", task_id},
+            {"id_slot", id},
+            {"id_task", id_task},
             {"t_token_generation", t_token_generation},
             {"n_decoded", n_decoded},
             {"t_token", t_token},
@@ -281,8 +279,8 @@ struct server_slot {
         snprintf(buffer, 512, " total time = %10.2f ms", t_prompt_processing + t_token_generation);
 
         LOG_INFO(buffer, {
-            {"slot_id", id},
-            {"task_id", task_id},
+            {"id_slot", id},
+            {"id_task", id_task},
             {"t_prompt_processing", t_prompt_processing},
             {"t_token_generation", t_token_generation},
             {"t_total", t_prompt_processing + t_token_generation},
@@ -405,7 +403,7 @@ struct llama_server_context {
             slot.n_predict = params.n_predict;
 
             LOG_INFO("new slot", {
-                {"slot_id", slot.id},
+                {"id_slot", slot.id},
                 {"n_ctx_slot", slot.n_ctx}
             });
@@ -419,7 +417,7 @@ struct llama_server_context {
                 //GGML_ASSERT(n_ctx >= n_ctx_train * ga_n && "n_ctx must be at least n_ctx_train * ga_n"); // NOLINT
 
                 LOG_INFO("slot self-extend", {
-                    {"slot_id", slot.id},
+                    {"id_slot", slot.id},
                     {"ga_n", ga_n},
                     {"ga_w", ga_w}
                 });
@@ -591,7 +589,7 @@ struct llama_server_context {
                 slot.sparams.use_penalty_prompt_tokens = true;
 
                 LOG_VERBOSE("penalty_prompt_tokens", {
-                    {"slot_id", slot.id},
+                    {"id_slot", slot.id},
                     {"tokens", slot.sparams.penalty_prompt_tokens},
                 });
             }
@@ -611,7 +609,7 @@ struct llama_server_context {
                 slot.sparams.use_penalty_prompt_tokens = true;
 
                 LOG_VERBOSE("penalty_prompt_tokens", {
-                    {"slot_id", slot.id},
+                    {"id_slot", slot.id},
                     {"tokens", slot.sparams.penalty_prompt_tokens},
                 });
             }
@@ -686,8 +684,8 @@ struct llama_server_context {
         slot.command = LOAD_PROMPT;
 
         LOG_INFO("slot is processing task", {
-            {"slot_id", slot.id},
-            {"task_id", slot.task_id},
+            {"id_slot", slot.id},
+            {"id_task", slot.id_task},
         });
 
         return true;
@@ -843,7 +841,7 @@ struct llama_server_context {
             slot.has_next_token = false;
 
             LOG_VERBOSE("stopped by limit", {
-                {"slot_id", slot.id},
+                {"id_slot", slot.id},
                 {"n_decoded", slot.n_decoded},
                 {"n_predict", slot.params.n_predict},
             });
@@ -876,7 +874,7 @@ struct llama_server_context {
         task_result res;
         res.id = task.id;
-        res.multitask_id = task.multitask_id;
+        res.id_multi = task.id_multi;
         res.stop = false;
         res.error = true;
         res.result_json = { { "content", error } };
@@ -932,14 +930,14 @@ struct llama_server_context {
     void send_partial_response(server_slot & slot, completion_token_output tkn) {
         task_result res;
-        res.id = slot.task_id;
-        res.multitask_id = slot.multitask_id;
+        res.id = slot.id_task;
+        res.id_multi = slot.id_multi;
         res.error = false;
         res.stop = false;
 
         res.result_json = json {
             {"content", tkn.text_to_send},
             {"stop", false},
-            {"slot_id", slot.id},
+            {"id_slot", slot.id},
             {"multimodal", false}
         };
@@ -969,13 +967,13 @@ struct llama_server_context {
     void send_final_response(const server_slot & slot) {
         task_result res;
-        res.id = slot.task_id;
-        res.multitask_id = slot.multitask_id;
+        res.id = slot.id_task;
+        res.id_multi = slot.id_multi;
         res.error = false;
         res.stop = true;
 
         res.result_json = json {
             {"content", !slot.params.stream ? slot.generated_text : ""},
-            {"slot_id", slot.id},
+            {"id_slot", slot.id},
             {"stop", true},
             {"model", params.model_alias},
             {"tokens_predicted", slot.n_decoded},
@@ -1018,8 +1016,8 @@ struct llama_server_context {
     void send_embedding(const server_slot & slot, const llama_batch & batch) {
         task_result res;
-        res.id = slot.task_id;
-        res.multitask_id = slot.multitask_id;
+        res.id = slot.id_task;
+        res.id_multi = slot.id_multi;
         res.error = false;
         res.stop = true;
@@ -1040,8 +1038,13 @@ struct llama_server_context {
             const float * embd = llama_get_embeddings_seq(ctx, batch.seq_id[i][0]);
             if (embd == NULL) {
                 embd = llama_get_embeddings_ith(ctx, i);
+            }
 
             if (embd == NULL) {
-                LOG_ERROR("failed to get embeddings for token", {{"token", batch.token[i]}, {"seq_id", batch.seq_id[i][0]}});
+                LOG_ERROR("failed to get embeddings", {
+                    {"token",  batch.token [i]},
+                    {"seq_id", batch.seq_id[i][0]}
+                });
 
                 res.result_json = json {
                     {"embedding", std::vector<float>(n_embd, 0.0f)},
@@ -1049,7 +1052,6 @@ struct llama_server_context {
                 continue;
             }
-            }
 
             res.result_json = json {
                 {"embedding", std::vector<float>(embd, embd + n_embd)},
@@ -1060,15 +1062,15 @@ struct llama_server_context {
         queue_results.send(res);
     }
 
-    void request_completion(int task_id, json data, bool infill, bool embedding, int multitask_id) {
+    void request_completion(int id_task, int id_multi, json data, bool infill, bool embedding) {
         task_server task;
-        task.id = task_id;
-        task.target_id = 0;
+        task.id = id_task;
+        task.id_multi = id_multi;
+        task.id_target = 0;
         task.data = std::move(data);
         task.infill_mode = infill;
         task.embedding_mode = embedding;
         task.type = TASK_TYPE_COMPLETION;
-        task.multitask_id = multitask_id;
 
         // when a completion task's prompt array is not a singleton, we split it into multiple requests
         // otherwise, it's a single-prompt task, we actually queue it
@@ -1090,22 +1092,22 @@ struct llama_server_context {
             if (numbers) {
                 queue_tasks.post(task);
             } else {
-                split_multiprompt_task(task_id, task);
+                split_multiprompt_task(id_task, task);
             }
         } else {
             queue_tasks.post(task);
         }
     }
 
-    void request_cancel(int task_id)
+    void request_cancel(int id_task)
     {
         task_server task;
         task.type = TASK_TYPE_CANCEL;
-        task.target_id = task_id;
+        task.id_target = id_task;
 
         queue_tasks.post(task);
     }
 
-    void split_multiprompt_task(int multitask_id, task_server& multiprompt_task)
+    void split_multiprompt_task(int id_multi, task_server& multiprompt_task)
     {
         int prompt_count = multiprompt_task.data.at("prompt").size();
         if (prompt_count <= 1) {
@@ -1121,7 +1123,7 @@ struct llama_server_context {
         }
 
         // queue up the multitask so we can track its subtask progression
-        queue_tasks.add_multitask(multitask_id, subtask_ids);
+        queue_tasks.add_multitask(id_multi, subtask_ids);
 
         // add subtasks
         for (int i = 0; i < prompt_count; i++)
@@ -1130,7 +1132,7 @@ struct llama_server_context {
             subtask_data["prompt"] = subtask_data["prompt"][i];
 
             // subtasks inherit everything else (infill mode, embedding mode, etc.)
-            request_completion(subtask_ids[i], subtask_data, multiprompt_task.infill_mode, multiprompt_task.embedding_mode, multitask_id);
+            request_completion(subtask_ids[i], id_multi, subtask_data, multiprompt_task.infill_mode, multiprompt_task.embedding_mode);
         }
     }
@@ -1138,10 +1140,10 @@ struct llama_server_context {
         switch (task.type)
         {
            case TASK_TYPE_COMPLETION: {
-                server_slot * slot = get_slot(json_value(task.data, "slot_id", -1));
+                server_slot * slot = get_slot(json_value(task.data, "id_slot", -1));
                if (slot == nullptr) {
                    // if no slot is available, we defer this task for processing later
-                    LOG_VERBOSE("no slot is available", {{"task_id", task.id}});
+                    LOG_VERBOSE("no slot is available", {{"id_task", task.id}});
                    queue_tasks.defer(task);
                    break;
                }
@@ -1160,10 +1162,10 @@ struct llama_server_context {
                slot->reset();
 
+                slot->id_task = task.id;
+                slot->id_multi = task.id_multi;
                slot->infill = task.infill_mode;
                slot->embedding = task.embedding_mode;
-                slot->task_id = task.id;
-                slot->multitask_id = task.multitask_id;
 
                if (!launch_slot_with_data(*slot, task.data))
                {
@@ -1174,7 +1176,7 @@ struct llama_server_context {
            } break;
            case TASK_TYPE_CANCEL: { // release slot linked with the task id
                for (auto & slot : slots) {
-                    if (slot.task_id == task.target_id) {
+                    if (slot.id_task == task.id_target) {
                        slot.release();
                        break;
                    }
@@ -1188,10 +1190,10 @@ struct llama_server_context {
                int n_idle_slots = 0;
                int n_processing_slots = 0;
 
-                for (server_slot &slot: slots) {
+                for (server_slot & slot: slots) {
                    json slot_data = get_formated_generation(slot);
                    slot_data["id"] = slot.id;
-                    slot_data["task_id"] = slot.task_id;
+                    slot_data["id_task"] = slot.id_task;
                    slot_data["state"] = slot.state;
                    slot_data["prompt"] = slot.prompt;
                    slot_data["next_token"] = {
@@ -1211,19 +1213,21 @@ struct llama_server_context {
                    slots_data.push_back(slot_data);
                }
                LOG_INFO("slot data", {
-                    {"task_id", task.id},
+                    {"id_task", task.id},
                    {"n_idle_slots", n_idle_slots},
                    {"n_processing_slots", n_processing_slots}
                });
+
                LOG_VERBOSE("slot data", {
-                    {"task_id", task.id},
+                    {"id_task", task.id},
                    {"n_idle_slots", n_idle_slots},
                    {"n_processing_slots", n_processing_slots},
                    {"slots", slots_data}
                });
+
                task_result res;
                res.id = task.id;
-                res.multitask_id = task.multitask_id;
+                res.id_multi = task.id_multi;
                res.stop = true;
                res.error = false;
                res.result_json = {
@@ -1244,6 +1248,7 @@ struct llama_server_context {
                    { "slots", slots_data },
                };
                metrics.reset_bucket();
+
                queue_results.send(res);
            } break;
@@ -1286,8 +1291,8 @@ struct llama_server_context {
                slot.t_last_used = ggml_time_us();
 
                LOG_INFO("slot released", {
-                    {"slot_id", slot.id},
-                    {"task_id", slot.task_id},
+                    {"id_slot", slot.id},
+                    {"id_task", slot.id_task},
                    {"n_ctx", n_ctx},
                    {"n_past", slot.n_past},
                    {"n_system_tokens", system_tokens.size()},
@@ -1299,6 +1304,7 @@ struct llama_server_context {
            }
        }
 
+        // check if all slots are idle
        {
            bool all_slots_are_idle = true;
@@ -1324,7 +1330,7 @@ struct llama_server_context {
        {
            task_server task;
            task.type = TASK_TYPE_NEXT_RESPONSE;
-            task.target_id = -1;
+            task.id_target = -1;
            queue_tasks.post(task);
        }
@@ -1338,8 +1344,8 @@ struct llama_server_context {
                const int n_discard = n_left / 2;
 
                LOG_INFO("slot context shift", {
-                    {"slot_id", slot.id},
-                    {"task_id", slot.task_id},
+                    {"id_slot", slot.id},
+                    {"id_task", slot.id_task},
                    {"n_keep", n_keep},
                    {"n_left", n_left},
                    {"n_discard", n_discard},
@@ -1381,8 +1387,8 @@ struct llama_server_context {
            slot.n_past += 1;
 
            LOG_VERBOSE("slot decode token", {
-                {"slot_id", slot.id},
-                {"task_id", slot.task_id},
+                {"id_slot", slot.id},
+                {"id_task", slot.id_task},
                {"n_ctx", n_ctx},
                {"n_past", slot.n_past},
                {"n_system_tokens", system_tokens.size()},
@@ -1533,8 +1539,8 @@ struct llama_server_context {
                    }
 
                    LOG_INFO("slot progression", {
-                        { "slot_id", slot.id },
-                        { "task_id", slot.task_id },
+                        { "id_slot", slot.id },
+                        { "id_task", slot.id_task },
                        { "n_past", slot.n_past },
                        { "n_past_se", slot.n_past_se },
                        { "ga_i", slot.ga_i },
@@ -1548,12 +1554,12 @@ struct llama_server_context {
                    {
                        // we have to evaluate at least 1 token to generate logits.
                        LOG_INFO("we have to evaluate at least 1 token to generate logits", {
-                            { "slot_id", slot.id },
-                            { "task_id", slot.task_id }
+                            { "id_slot", slot.id },
+                            { "id_task", slot.id_task }
                        });
+
                        slot.n_past--;
-                        if (slot.ga_i > 0)
-                        {
+                        if (slot.ga_i > 0) {
                            slot.n_past_se--;
                        }
                    }
@@ -1562,8 +1568,8 @@ struct llama_server_context {
                    llama_kv_cache_seq_rm(ctx, slot.id, p0, -1);
 
                    LOG_INFO("kv cache rm [p0, end)", {
-                        { "slot_id", slot.id },
-                        { "task_id", slot.task_id },
+                        { "id_slot", slot.id },
+                        { "id_task", slot.id_task },
                        { "p0", p0 }
                    });
@@ -2389,13 +2395,12 @@ static void server_params_parse(int argc, char ** argv, server_params & sparams,
 /* llama.cpp completion api semantics */
 static json format_partial_response(
-    llama_server_context &llama, server_slot *slot, const std::string &content, const std::vector<completion_token_output> &probs
+    llama_server_context & llama, server_slot * slot, const std::string & content, const std::vector<completion_token_output> & probs
 ) {
-    json res = json
-    {
+    json res = json {
        {"content", content},
        {"stop", false},
-        {"slot_id", slot->id},
+        {"id_slot", slot->id},
        {"multimodal", false},
    };
@@ -2527,7 +2532,7 @@ int main(int argc, char ** argv) {
        task_server task;
        task.id = llama.queue_tasks.get_new_id();
        task.type = TASK_TYPE_METRICS;
-        task.target_id = -1;
+        task.id_target = -1;
 
        llama.queue_results.add_waiting_task_id(task.id);
        llama.queue_tasks.post(task);
@@ -2574,7 +2579,7 @@ int main(int argc, char ** argv) {
        task_server task;
        task.id = llama.queue_tasks.get_new_id();
        task.type = TASK_TYPE_METRICS;
-        task.target_id = -1;
+        task.id_target = -1;
 
        llama.queue_results.add_waiting_task_id(task.id);
        llama.queue_tasks.post(task);
@@ -2594,7 +2599,7 @@ int main(int argc, char ** argv) {
        task_server task;
        task.id = llama.queue_tasks.get_new_id();
        task.type = TASK_TYPE_METRICS;
-        task.target_id = -1;
+        task.id_target = -1;
 
        llama.queue_results.add_waiting_task_id(task.id);
        llama.queue_tasks.post(task);
@@ -2822,12 +2827,12 @@ int main(int argc, char ** argv) {
            return;
        }
        json data = json::parse(req.body);
-        const int task_id = llama.queue_tasks.get_new_id();
-        llama.queue_results.add_waiting_task_id(task_id);
-        llama.request_completion(task_id, data, false, false, -1);
+        const int id_task = llama.queue_tasks.get_new_id();
+        llama.queue_results.add_waiting_task_id(id_task);
+        llama.request_completion(id_task, -1, data, false, false);
        if (!json_value(data, "stream", false)) {
            std::string completion_text;
-            task_result result = llama.queue_results.recv(task_id);
+            task_result result = llama.queue_results.recv(id_task);
            if (!result.error && result.stop) {
                res.set_content(result.result_json.dump(-1, ' ', false, json::error_handler_t::replace), "application/json; charset=utf-8");
            }
@@ -2836,13 +2841,13 @@ int main(int argc, char ** argv) {
                res.status = 404;
                res.set_content(result.result_json["content"], "text/plain; charset=utf-8");
            }
-            llama.queue_results.remove_waiting_task_id(task_id);
+            llama.queue_results.remove_waiting_task_id(id_task);
        } else {
-            const auto chunked_content_provider = [task_id, &llama](size_t, httplib::DataSink & sink)
+            const auto chunked_content_provider = [id_task, &llama](size_t, httplib::DataSink & sink)
            {
                while (true)
                {
-                    task_result result = llama.queue_results.recv(task_id);
+                    task_result result = llama.queue_results.recv(id_task);
                    if (!result.error) {
                        const std::string str =
                            "data: " +
@@ -2853,7 +2858,7 @@ int main(int argc, char ** argv) {
                            });
                        if (!sink.write(str.c_str(), str.size()))
                        {
-                            llama.queue_results.remove_waiting_task_id(task_id);
+                            llama.queue_results.remove_waiting_task_id(id_task);
                            return false;
                        }
                        if (result.stop) {
@@ -2869,23 +2874,23 @@ int main(int argc, char ** argv) {
                            });
                        if (!sink.write(str.c_str(), str.size()))
                        {
-                            llama.queue_results.remove_waiting_task_id(task_id);
+                            llama.queue_results.remove_waiting_task_id(id_task);
                            return false;
                        }
                        break;
                    }
                }
 
-                llama.queue_results.remove_waiting_task_id(task_id);
+                llama.queue_results.remove_waiting_task_id(id_task);
                sink.done();
                return true;
            };
 
-            auto on_complete = [task_id, &llama] (bool)
+            auto on_complete = [id_task, &llama] (bool)
            {
                // cancel
-                llama.request_cancel(task_id);
-                llama.queue_results.remove_waiting_task_id(task_id);
+                llama.request_cancel(id_task);
+                llama.queue_results.remove_waiting_task_id(id_task);
            };
 
            res.set_chunked_content_provider("text/event-stream", chunked_content_provider, on_complete);
@@ -2921,13 +2926,13 @@ int main(int argc, char ** argv) {
        }
        json data = oaicompat_completion_params_parse(llama.model, json::parse(req.body), sparams.chat_template);
 
-        const int task_id = llama.queue_tasks.get_new_id();
-        llama.queue_results.add_waiting_task_id(task_id);
-        llama.request_completion(task_id, data, false, false, -1);
+        const int id_task = llama.queue_tasks.get_new_id();
+        llama.queue_results.add_waiting_task_id(id_task);
+        llama.request_completion(id_task, -1, data, false, false);
 
        if (!json_value(data, "stream", false)) {
            std::string completion_text;
-            task_result result = llama.queue_results.recv(task_id);
+            task_result result = llama.queue_results.recv(id_task);
 
            if (!result.error && result.stop) {
                json oaicompat_result = format_final_response_oaicompat(data, result);
@@ -2939,11 +2944,11 @@ int main(int argc, char ** argv) {
                res.status = 500;
                res.set_content(result.result_json["content"], "text/plain; charset=utf-8");
            }
-            llama.queue_results.remove_waiting_task_id(task_id);
+            llama.queue_results.remove_waiting_task_id(id_task);
        } else {
-            const auto chunked_content_provider = [task_id, &llama](size_t, httplib::DataSink &sink) {
+            const auto chunked_content_provider = [id_task, &llama](size_t, httplib::DataSink &sink) {
                while (true) {
-                    task_result llama_result = llama.queue_results.recv(task_id);
+                    task_result llama_result = llama.queue_results.recv(id_task);
                    if (!llama_result.error) {
                        std::vector<json> result_array = format_partial_response_oaicompat( llama_result);
@@ -2956,7 +2961,7 @@ int main(int argc, char ** argv) {
                                "\n\n";
                        LOG_VERBOSE("data stream", {{"to_send", str}});
                        if (!sink.write(str.c_str(), str.size())) {
-                            llama.queue_results.remove_waiting_task_id(task_id);
+                            llama.queue_results.remove_waiting_task_id(id_task);
                            return false;
                        }
                    }
@@ -2972,21 +2977,21 @@ int main(int argc, char ** argv) {
                            "\n\n";
                        LOG_VERBOSE("data stream", {{"to_send", str}});
                        if (!sink.write(str.c_str(), str.size())) {
-                            llama.queue_results.remove_waiting_task_id(task_id);
+                            llama.queue_results.remove_waiting_task_id(id_task);
                            return false;
                        }
                        break;
                    }
                }
                sink.done();
-                llama.queue_results.remove_waiting_task_id(task_id);
+                llama.queue_results.remove_waiting_task_id(id_task);
                return true;
            };
 
-            auto on_complete = [task_id, &llama](bool) {
+            auto on_complete = [id_task, &llama](bool) {
                // cancel request
-                llama.request_cancel(task_id);
-                llama.queue_results.remove_waiting_task_id(task_id);
+                llama.request_cancel(id_task);
+                llama.queue_results.remove_waiting_task_id(id_task);
            };
 
            res.set_chunked_content_provider("text/event-stream", chunked_content_provider, on_complete);
@@ -3003,12 +3008,12 @@ int main(int argc, char ** argv) {
            return;
        }
        json data = json::parse(req.body);
-        const int task_id = llama.queue_tasks.get_new_id();
-        llama.queue_results.add_waiting_task_id(task_id);
-        llama.request_completion(task_id, data, true, false, -1);
+        const int id_task = llama.queue_tasks.get_new_id();
+        llama.queue_results.add_waiting_task_id(id_task);
+        llama.request_completion(id_task, -1, data, true, false);
        if (!json_value(data, "stream", false)) {
            std::string completion_text;
-            task_result result = llama.queue_results.recv(task_id);
+            task_result result = llama.queue_results.recv(id_task);
            if (!result.error && result.stop)
            {
                res.set_content(result.result_json.dump(-1, ' ', false, json::error_handler_t::replace), "application/json; charset=utf-8");
@@ -3018,12 +3023,12 @@ int main(int argc, char ** argv) {
                res.status = 404;
                res.set_content(result.result_json["content"], "text/plain; charset=utf-8");
            }
-            llama.queue_results.remove_waiting_task_id(task_id);
+            llama.queue_results.remove_waiting_task_id(id_task);
        } else {
-            const auto chunked_content_provider = [task_id, &llama](size_t, httplib::DataSink & sink) {
+            const auto chunked_content_provider = [id_task, &llama](size_t, httplib::DataSink & sink) {
                while (true)
                {
-                    task_result result = llama.queue_results.recv(task_id);
+                    task_result result = llama.queue_results.recv(id_task);
                    if (!result.error) {
                        const std::string str =
                            "data: " +
@@ -3034,7 +3039,7 @@ int main(int argc, char ** argv) {
                            });
                        if (!sink.write(str.c_str(), str.size()))
                        {
-                            llama.queue_results.remove_waiting_task_id(task_id);
+                            llama.queue_results.remove_waiting_task_id(id_task);
                            return false;
                        }
                        if (result.stop)
@@ -3048,15 +3053,15 @@ int main(int argc, char ** argv) {
                    }
                }
 
-                llama.queue_results.remove_waiting_task_id(task_id);
+                llama.queue_results.remove_waiting_task_id(id_task);
                sink.done();
                return true;
            };
 
-            auto on_complete = [task_id, &llama] (bool)
+            auto on_complete = [id_task, &llama] (bool)
            {
                // cancel
-                llama.request_cancel(task_id);
+                llama.request_cancel(id_task);
            };
 
            res.set_chunked_content_provider("text/event-stream", chunked_content_provider, on_complete);
@@ -3109,13 +3114,13 @@ int main(int argc, char ** argv) {
        }
 
        // create and queue the task
-        const int task_id = llama.queue_tasks.get_new_id();
-        llama.queue_results.add_waiting_task_id(task_id);
-        llama.request_completion(task_id, { {"prompt", prompt}, { "n_predict", 0} }, false, true, -1);
+        const int id_task = llama.queue_tasks.get_new_id();
+        llama.queue_results.add_waiting_task_id(id_task);
+        llama.request_completion(id_task, -1, { {"prompt", prompt}, { "n_predict", 0} }, false, true);
 
        // get the result
-        task_result result = llama.queue_results.recv(task_id);
-        llama.queue_results.remove_waiting_task_id(task_id);
+        task_result result = llama.queue_results.recv(id_task);
+        llama.queue_results.remove_waiting_task_id(id_task);
 
        // send the result
        return res.set_content(result.result_json.dump(), "application/json; charset=utf-8");
@@ -3135,13 +3140,13 @@ int main(int argc, char ** argv) {
            json data = json::array();
            int i = 0;
            for (const json &elem : prompt) {
-                const int task_id = llama.queue_tasks.get_new_id();
-                llama.queue_results.add_waiting_task_id(task_id);
-                llama.request_completion(task_id, { {"prompt", elem}, { "n_predict", 0} }, false, true, -1);
+                const int id_task = llama.queue_tasks.get_new_id();
+                llama.queue_results.add_waiting_task_id(id_task);
+                llama.request_completion(id_task, -1, { {"prompt", elem}, { "n_predict", 0} }, false, true);
 
                // get the result
-                task_result result = llama.queue_results.recv(task_id);
-                llama.queue_results.remove_waiting_task_id(task_id);
+                task_result result = llama.queue_results.recv(id_task);
+                llama.queue_results.remove_waiting_task_id(id_task);
 
                json embedding = json{
                    {"embedding", json_value(result.result_json, "embedding", json::array())},
} }
// create and queue the task // create and queue the task
const int task_id = llama.queue_tasks.get_new_id(); const int id_task = llama.queue_tasks.get_new_id();
llama.queue_results.add_waiting_task_id(task_id); llama.queue_results.add_waiting_task_id(id_task);
llama.request_completion(task_id, { {"prompt", prompt}, { "n_predict", 0}}, false, true, -1); llama.request_completion(id_task, -1, { {"prompt", prompt}, { "n_predict", 0}}, false, true);
// get the result // get the result
task_result result = llama.queue_results.recv(task_id); task_result result = llama.queue_results.recv(id_task);
llama.queue_results.remove_waiting_task_id(task_id); llama.queue_results.remove_waiting_task_id(id_task);
json data = json::array({json{ json data = json::array({json{
{"embedding", json_value(result.result_json, "embedding", json::array())}, {"embedding", json_value(result.result_json, "embedding", json::array())},


@@ -52,32 +52,36 @@ enum task_type {
 struct task_server {
     int id = -1; // to be filled by llama_server_queue
-    int target_id;
+    int id_multi = -1;
+    int id_target = -1;
+
     task_type type;
     json data;
+
     bool infill_mode = false;
     bool embedding_mode = false;
-    int multitask_id = -1;
 };
 
 struct task_result {
-    int id;
-    int multitask_id = -1;
+    int id = -1;
+    int id_multi = -1;
+
+    json result_json;
+
     bool stop;
     bool error;
-    json result_json;
 };
 
 struct task_multi {
-    int id;
-    std::set<int> subtasks_remaining{};
-    std::vector<task_result> results{};
+    int id = -1;
+
+    std::set<int> subtasks_remaining;
+    std::vector<task_result> results;
 };
 
 // completion token output with probabilities
 struct completion_token_output {
-    struct token_prob
-    {
+    struct token_prob {
         llama_token tok;
         float prob;
     };
@@ -102,18 +106,18 @@ static inline void server_log(const char *level, const char *function, int line,
    };
 
    if (server_log_json) {
-        log.merge_patch(
-            {
+        log.merge_patch( {
            {"level", level},
            {"function", function},
            {"line", line},
            {"msg", message},
        });
+
        if (!extra.empty()) {
            log.merge_patch(extra);
        }
 
-        std::cout << log.dump(-1, ' ', false, json::error_handler_t::replace) << "\n" << std::flush;
+        printf("%s\n", log.dump(-1, ' ', false, json::error_handler_t::replace).c_str());
    } else {
        char buf[1024];
        snprintf(buf, 1024, "%4s [%24s] %s", level, function, message);
@@ -163,7 +167,7 @@ inline std::string format_chat(const struct llama_model * model, const std::stri
    std::vector<llama_chat_message> chat(messages.size());
 
    for (size_t i = 0; i < messages.size(); ++i) {
-        auto &curr_msg = messages[i];
+        const auto & curr_msg = messages[i];
        str[i*2 + 0] = json_value(curr_msg, "role", std::string(""));
        str[i*2 + 1] = json_value(curr_msg, "content", std::string(""));
        alloc_size += str[i*2 + 1].length();
@@ -195,8 +199,10 @@
 struct llama_server_queue {
     int id = 0;
-    std::mutex mutex_tasks;
     bool running;
+
+    std::mutex mutex_tasks;
+
     // queues
     std::vector<task_server> queue_tasks;
     std::vector<task_server> queue_tasks_deferred;
@@ -235,17 +241,17 @@
     // Register function to process a new task
     void on_new_task(std::function<void(task_server&)> callback) {
-        callback_new_task = callback;
+        callback_new_task = std::move(callback);
     }
 
     // Register function to process a multitask when it is finished
     void on_finish_multitask(std::function<void(task_multi&)> callback) {
-        callback_finish_multitask = callback;
+        callback_finish_multitask = std::move(callback);
     }
 
     // Register the function to be called when all slots data is ready to be processed
     void on_run_slots(std::function<void(void)> callback) {
-        callback_run_slots = callback;
+        callback_run_slots = std::move(callback);
     }
 
     // Call when the state of one slot is changed
@@ -259,8 +265,7 @@
     }
 
     // end the start_loop routine
-    void terminate() {
-        {
+    void terminate() { {
         std::unique_lock<std::mutex> lock(mutex_tasks);
         running = false;
     }
@@ -276,9 +281,10 @@
     */
     void start_loop() {
         running = true;
+
         while (true) {
             LOG_VERBOSE("new task may arrive", {});
-            {
+
             while (true)
             {
                 std::unique_lock<std::mutex> lock(mutex_tasks);
@@ -289,33 +295,32 @@
                 task_server task = queue_tasks.front();
                 queue_tasks.erase(queue_tasks.begin());
                 lock.unlock();
-                LOG_VERBOSE("callback_new_task", {{"task_id", task.id}});
+                LOG_VERBOSE("callback_new_task", {{"id_task", task.id}});
                 callback_new_task(task);
             }
 
             LOG_VERBOSE("update_multitasks", {});
 
             // check if we have any finished multitasks
             auto queue_iterator = queue_multitasks.begin();
-            while (queue_iterator != queue_multitasks.end())
-            {
-                if (queue_iterator->subtasks_remaining.empty())
-                {
+            while (queue_iterator != queue_multitasks.end()) {
+                if (queue_iterator->subtasks_remaining.empty()) {
                     // all subtasks done == multitask is done
                     task_multi current_multitask = *queue_iterator;
                     callback_finish_multitask(current_multitask);
                     // remove this multitask
                     queue_iterator = queue_multitasks.erase(queue_iterator);
-                }
-                else
-                {
+                } else {
                     ++queue_iterator;
                 }
             }
 
             // all tasks in the current loop is processed, slots data is now ready
             LOG_VERBOSE("callback_run_slots", {});
             callback_run_slots();
-            }
 
             LOG_VERBOSE("wait for new task", {});
+            // wait for new task
             {
                 std::unique_lock<std::mutex> lock(mutex_tasks);
                 if (queue_tasks.empty()) {
@@ -336,22 +341,21 @@
     //
 
     // add a multitask by specifying the id of all subtask (subtask is a task_server)
-    void add_multitask(int multitask_id, std::vector<int>& sub_ids)
-    {
+    void add_multitask(int id_multi, std::vector<int>& sub_ids) {
         std::lock_guard<std::mutex> lock(mutex_tasks);
         task_multi multi;
-        multi.id = multitask_id;
+        multi.id = id_multi;
         std::copy(sub_ids.begin(), sub_ids.end(), std::inserter(multi.subtasks_remaining, multi.subtasks_remaining.end()));
         queue_multitasks.push_back(multi);
     }
 
     // updatethe remaining subtasks, while appending results to multitask
-    void update_multitask(int multitask_id, int subtask_id, task_result& result)
+    void update_multitask(int id_multi, int subtask_id, task_result& result)
     {
         std::lock_guard<std::mutex> lock(mutex_tasks);
         for (auto& multitask : queue_multitasks)
         {
-            if (multitask.id == multitask_id)
+            if (multitask.id == id_multi)
             {
                 multitask.subtasks_remaining.erase(subtask_id);
                 multitask.results.push_back(result);
@@ -363,41 +367,43 @@
 struct llama_server_response {
     typedef std::function<void(int, int, task_result&)> callback_multitask_t;
     callback_multitask_t callback_update_multitask;
 
     // for keeping track of all tasks waiting for the result
     std::set<int> waiting_task_ids;
 
     // the main result queue
     std::vector<task_result> queue_results;
     std::mutex mutex_results;
     std::condition_variable condition_results;
 
-    // add the task_id to the list of tasks waiting for response
-    void add_waiting_task_id(int task_id) {
-        LOG_VERBOSE("waiting for task id", {{"task_id", task_id}});
+    // add the id_task to the list of tasks waiting for response
+    void add_waiting_task_id(int id_task) {
+        LOG_VERBOSE("waiting for task id", {{"id_task", id_task}});
+
         std::unique_lock<std::mutex> lock(mutex_results);
-        waiting_task_ids.insert(task_id);
+        waiting_task_ids.insert(id_task);
     }
 
     // when the request is finished, we can remove task associated with it
-    void remove_waiting_task_id(int task_id) {
-        LOG_VERBOSE("remove waiting for task id", {{"task_id", task_id}});
+    void remove_waiting_task_id(int id_task) {
+        LOG_VERBOSE("remove waiting for task id", {{"id_task", id_task}});
+
         std::unique_lock<std::mutex> lock(mutex_results);
-        waiting_task_ids.erase(task_id);
+        waiting_task_ids.erase(id_task);
     }
 
-    // This function blocks the thread until there is a response for this task_id
-    task_result recv(int task_id) {
-        while (true)
-        {
+    // This function blocks the thread until there is a response for this id_task
+    task_result recv(int id_task) {
+        while (true) {
             std::unique_lock<std::mutex> lock(mutex_results);
             condition_results.wait(lock, [&]{
                 return !queue_results.empty();
             });
 
-            for (int i = 0; i < (int) queue_results.size(); i++)
-            {
-                if (queue_results[i].id == task_id)
-                {
-                    assert(queue_results[i].multitask_id == -1);
+            for (int i = 0; i < (int) queue_results.size(); i++) {
+                if (queue_results[i].id == id_task) {
+                    assert(queue_results[i].id_multi == -1);
                     task_result res = queue_results[i];
                     queue_results.erase(queue_results.begin() + i);
                     return res;
@@ -410,26 +416,25 @@
     // Register the function to update multitask
     void on_multitask_update(callback_multitask_t callback) {
-        callback_update_multitask = callback;
+        callback_update_multitask = std::move(callback);
     }
 
-    // Send a new result to a waiting task_id
+    // Send a new result to a waiting id_task
     void send(task_result result) {
+        LOG_VERBOSE("send new result", {{"id_task", result.id}});
+
         std::unique_lock<std::mutex> lock(mutex_results);
-        LOG_VERBOSE("send new result", {{"task_id", result.id}});
-        for (auto& task_id : waiting_task_ids) {
-            // LOG_TEE("waiting task id %i \n", task_id);
+        for (const auto & id_task : waiting_task_ids) {
+            // LOG_TEE("waiting task id %i \n", id_task);
             // for now, tasks that have associated parent multitasks just get erased once multitask picks up the result
-            if (result.multitask_id == task_id)
-            {
-                LOG_VERBOSE("callback_update_multitask", {{"task_id", task_id}});
-                callback_update_multitask(task_id, result.id, result);
+            if (result.id_multi == id_task) {
+                LOG_VERBOSE("callback_update_multitask", {{"id_task", id_task}});
+                callback_update_multitask(id_task, result.id, result);
                 continue;
             }
 
-            if (result.id == task_id)
-            {
-                LOG_VERBOSE("queue_results.push_back", {{"task_id", task_id}});
+            if (result.id == id_task) {
+                LOG_VERBOSE("queue_results.push_back", {{"id_task", id_task}});
                 queue_results.push_back(result);
                 condition_results.notify_all();
                 return;
@@ -447,13 +452,11 @@ static const std::string base64_chars =
     "abcdefghijklmnopqrstuvwxyz"
     "0123456789+/";
 
-static inline bool is_base64(uint8_t c)
-{
+static inline bool is_base64(uint8_t c) {
    return (isalnum(c) || (c == '+') || (c == '/'));
 }
 
-static inline std::vector<uint8_t> base64_decode(const std::string & encoded_string)
-{
+static inline std::vector<uint8_t> base64_decode(const std::string & encoded_string) {
    int i = 0;
    int j = 0;
    int in_ = 0;
@@ -516,8 +519,7 @@ static inline std::vector<uint8_t> base64_decode(const std::string & encoded_str
 // random string / id
 //
 
-static std::string random_string()
-{
+static std::string random_string() {
    static const std::string str("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz");
 
    std::random_device rd;
@@ -532,10 +534,10 @@ static std::string random_string()
    return result;
 }
 
-static std::string gen_chatcmplid()
-{
+static std::string gen_chatcmplid() {
    std::stringstream chatcmplid;
    chatcmplid << "chatcmpl-" << random_string();
+
    return chatcmplid.str();
 }
@@ -543,91 +545,79 @@ static std::string gen_chatcmplid()
 //
 // other common utils
 //
 
-static size_t common_part(const std::vector<llama_token> &a, const std::vector<llama_token> &b)
-{
+static size_t common_part(const std::vector<llama_token> & a, const std::vector<llama_token> & b) {
    size_t i;
-    for (i = 0; i < a.size() && i < b.size() && a[i] == b[i]; i++)
-    {
-    }
+    for (i = 0; i < a.size() && i < b.size() && a[i] == b[i]; i++) {}
+
    return i;
 }
 
-static bool ends_with(const std::string &str, const std::string &suffix)
-{
-    return str.size() >= suffix.size() &&
-           0 == str.compare(str.size() - suffix.size(), suffix.size(), suffix);
+static bool ends_with(const std::string & str, const std::string & suffix) {
+    return str.size() >= suffix.size() && 0 == str.compare(str.size() - suffix.size(), suffix.size(), suffix);
 }
 
-static size_t find_partial_stop_string(const std::string &stop,
-                                       const std::string &text)
-{
-    if (!text.empty() && !stop.empty())
-    {
+static size_t find_partial_stop_string(const std::string &stop, const std::string &text) {
+    if (!text.empty() && !stop.empty()) {
        const char text_last_char = text.back();
-        for (int64_t char_index = stop.size() - 1; char_index >= 0; char_index--)
-        {
-            if (stop[char_index] == text_last_char)
-            {
+        for (int64_t char_index = stop.size() - 1; char_index >= 0; char_index--) {
+            if (stop[char_index] == text_last_char) {
                const std::string current_partial = stop.substr(0, char_index + 1);
-                if (ends_with(text, current_partial))
-                {
+                if (ends_with(text, current_partial)) {
                    return text.size() - char_index - 1;
                }
            }
        }
    }
 
    return std::string::npos;
 }
 
 // TODO: reuse llama_detokenize
 template <class Iter>
-static std::string tokens_to_str(llama_context *ctx, Iter begin, Iter end)
-{
+static std::string tokens_to_str(llama_context * ctx, Iter begin, Iter end) {
    std::string ret;
-    for (; begin != end; ++begin)
-    {
+    for (; begin != end; ++begin) {
        ret += llama_token_to_piece(ctx, *begin);
    }
 
    return ret;
 }
 
 // format incomplete utf-8 multibyte character for output
-static std::string tokens_to_output_formatted_string(const llama_context *ctx, const llama_token token)
-{
+static std::string tokens_to_output_formatted_string(const llama_context * ctx, const llama_token token) {
    std::string out = token == -1 ? "" : llama_token_to_piece(ctx, token);
 
    // if the size is 1 and first bit is 1, meaning it's a partial character
    // (size > 1 meaning it's already a known token)
-    if (out.size() == 1 && (out[0] & 0x80) == 0x80)
-    {
+    if (out.size() == 1 && (out[0] & 0x80) == 0x80) {
        std::stringstream ss;
        ss << std::hex << (out[0] & 0xff);
        std::string res(ss.str());
        out = "byte: \\x" + res;
    }
 
    return out;
 }
 
 // convert a vector of completion_token_output to json
-static json probs_vector_to_json(const llama_context *ctx, const std::vector<completion_token_output> &probs)
-{
+static json probs_vector_to_json(const llama_context * ctx, const std::vector<completion_token_output> & probs) {
    json out = json::array();
-    for (const auto &prob : probs)
-    {
+
+    for (const auto & prob : probs) {
        json probs_for_token = json::array();
-        for (const auto &p : prob.probs)
-        {
-            std::string tok_str = tokens_to_output_formatted_string(ctx, p.tok);
-            probs_for_token.push_back(json
-            {
+
+        for (const auto & p : prob.probs) {
+            const std::string tok_str = tokens_to_output_formatted_string(ctx, p.tok);
+            probs_for_token.push_back(json {
                {"tok_str", tok_str},
                {"prob", p.prob},
            });
        }
-        std::string tok_str = tokens_to_output_formatted_string(ctx, prob.tok);
-        out.push_back(json{
+
+        const std::string tok_str = tokens_to_output_formatted_string(ctx, prob.tok);
+        out.push_back(json {
            {"content", tok_str},
            {"probs", probs_for_token},
        });
    }
 
    return out;
 }
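
For reference, the result-routing rule that llama_server_response::send() implements after the rename reduces to a few lines. A self-contained sketch, where result_sketch and the sample ids are illustrative; the real code additionally holds mutex_results and notifies condition_results:

    #include <cstdio>
    #include <set>
    #include <vector>

    struct result_sketch {
        int id       = -1; // the task this result answers
        int id_multi = -1; // parent multitask, or -1
    };

    int main() {
        const std::set<int> waiting_task_ids = {7, 42}; // registered via add_waiting_task_id()
        std::vector<result_sketch> queue_results;

        const result_sketch result = {42, -1};

        for (const int id_task : waiting_task_ids) {
            if (result.id_multi == id_task) {
                // subtask of a waiting multitask: forwarded to callback_update_multitask
                std::printf("update multitask %d with subtask %d\n", id_task, result.id);
                continue;
            }
            if (result.id == id_task) {
                // direct answer: queued so a blocked recv(id_task) can pick it up
                queue_results.push_back(result);
                std::printf("queued result for id_task=%d\n", id_task);
            }
        }
        return 0;
    }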