common : use common_ prefix for common library functions (#9805)

* common : use common_ prefix for common library functions

---------

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
Diego Devesa, 2024-10-10 22:57:42 +02:00, committed by GitHub
parent 0e9f760eb1
commit 7eee341bee
45 changed files with 1284 additions and 1284 deletions
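
Within the file shown below, the change is a mechanical one-to-one rename of the old gpt_/llama_-prefixed common-library helpers to their common_-prefixed replacements:

    gpt_params                            -> common_params
    gpt_params_parse                      -> common_params_parse
    llama_init_result                     -> common_init_result
    llama_init_from_gpt_params            -> common_init_from_params
    llama_tokenize                        -> common_tokenize
    llama_token_to_piece                  -> common_token_to_piece
    llama_context_params_from_gpt_params  -> common_context_params_to_llama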

examples/save-load-state/save-load-state.cpp

@@ -6,12 +6,12 @@
 #include <cstdio>
 
 int main(int argc, char ** argv) {
-    gpt_params params;
+    common_params params;
 
     params.prompt = "The quick brown fox";
     params.sparams.seed = 1234;
 
-    if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON)) {
+    if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON)) {
         return 1;
     }
@@ -28,7 +28,7 @@ int main(int argc, char ** argv) {
     std::string result2;
 
     // init
-    llama_init_result llama_init = llama_init_from_gpt_params(params);
+    common_init_result llama_init = common_init_from_params(params);
 
     llama_model * model = llama_init.model;
     llama_context * ctx = llama_init.context;
@@ -46,7 +46,7 @@ int main(int argc, char ** argv) {
     llama_sampler_chain_add(smpl, llama_sampler_init_dist(params.sparams.seed));
 
     // tokenize prompt
-    auto tokens = llama_tokenize(ctx, params.prompt, true);
+    auto tokens = common_tokenize(ctx, params.prompt, true);
 
     // evaluate prompt
     llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size(), n_past, 0));
@@ -72,7 +72,7 @@ int main(int argc, char ** argv) {
     for (auto i = 0; i < params.n_predict; i++) {
         auto next_token = llama_sampler_sample(smpl, ctx, -1);
-        auto next_token_str = llama_token_to_piece(ctx, next_token);
+        auto next_token_str = common_token_to_piece(ctx, next_token);
 
         printf("%s", next_token_str.c_str());
         result0 += next_token_str;
@@ -92,7 +92,7 @@ int main(int argc, char ** argv) {
     llama_free(ctx);
 
     // make new context
-    auto * ctx2 = llama_new_context_with_model(model, llama_context_params_from_gpt_params(params));
+    auto * ctx2 = llama_new_context_with_model(model, common_context_params_to_llama(params));
 
     llama_sampler * smpl2 = llama_sampler_chain_init(sparams);
@@ -128,7 +128,7 @@ int main(int argc, char ** argv) {
     // second run
     for (auto i = 0; i < params.n_predict; i++) {
         auto next_token = llama_sampler_sample(smpl2, ctx2, -1);
-        auto next_token_str = llama_token_to_piece(ctx2, next_token);
+        auto next_token_str = common_token_to_piece(ctx2, next_token);
 
         printf("%s", next_token_str.c_str());
         result1 += next_token_str;
@@ -152,7 +152,7 @@ int main(int argc, char ** argv) {
     }
 
     // make new context
-    auto * ctx3 = llama_new_context_with_model(model, llama_context_params_from_gpt_params(params));
+    auto * ctx3 = llama_new_context_with_model(model, common_context_params_to_llama(params));
 
     llama_sampler * smpl3 = llama_sampler_chain_init(sparams);
@@ -216,7 +216,7 @@ int main(int argc, char ** argv) {
     // third run with seq 1 instead of 0
     for (auto i = 0; i < params.n_predict; i++) {
         auto next_token = llama_sampler_sample(smpl3, ctx3, -1);
-        auto next_token_str = llama_token_to_piece(ctx3, next_token);
+        auto next_token_str = common_token_to_piece(ctx3, next_token);
 
         printf("%s", next_token_str.c_str());
         result2 += next_token_str;
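
For reference, a downstream caller ported to the renamed API might look like the sketch below. It is assembled from the calls visible in this diff; the header names ("arg.h", "common.h", "llama.h") and the backend init/cleanup calls are assumptions based on the surrounding example code, not part of this commit.

#include "arg.h"    // assumed: argument-parsing header used by the examples
#include "common.h" // assumed: common-library header providing the common_ helpers
#include "llama.h"

#include <cstdio>

int main(int argc, char ** argv) {
    common_params params;                    // was: gpt_params
    params.prompt = "The quick brown fox";
    params.sparams.seed = 1234;

    // was: gpt_params_parse
    if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON)) {
        return 1;
    }

    llama_backend_init(); // assumed: done explicitly, as in the examples

    // load the model and create a context in one call
    // was: llama_init_result / llama_init_from_gpt_params
    common_init_result llama_init = common_init_from_params(params);
    llama_model * model = llama_init.model;
    llama_context * ctx = llama_init.context;

    // sampling chain that draws from the distribution, seeded from params
    llama_sampler * smpl = llama_sampler_chain_init(llama_sampler_chain_default_params());
    llama_sampler_chain_add(smpl, llama_sampler_init_dist(params.sparams.seed));

    // tokenize and evaluate the prompt (was: llama_tokenize)
    auto tokens = common_tokenize(ctx, params.prompt, true);
    llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size(), 0, 0));

    // sample one continuation token and print it (was: llama_token_to_piece)
    auto next_token = llama_sampler_sample(smpl, ctx, -1);
    auto next_token_str = common_token_to_piece(ctx, next_token);
    printf("%s\n", next_token_str.c_str());

    llama_sampler_free(smpl);
    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();
    return 0;
}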