From 7a4f712a29a2e65648b3eed20582ee73173a3181 Mon Sep 17 00:00:00 2001
From: SuperUserNameMan
Date: Wed, 14 Jun 2023 08:58:18 +0200
Subject: [PATCH] removed trailing white spaces simple.cpp

---
 examples/simple/simple.cpp | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/examples/simple/simple.cpp b/examples/simple/simple.cpp
index 6593fdbc8..45ed4b8b9 100644
--- a/examples/simple/simple.cpp
+++ b/examples/simple/simple.cpp
@@ -54,9 +54,9 @@ int main(int argc, char ** argv)
 
     if ( argc >= 3 )
     {
-        params.prompt = argv[2]; 
+        params.prompt = argv[2];
     }
-    
+
     if ( params.prompt.empty() )
     {
         params.prompt = "Hello my name is";
@@ -71,8 +71,8 @@ int main(int argc, char ** argv)
     llama_context * ctx ;
 
     ctx = llama_init_from_gpt_params( params );
-    
-    if ( ctx == NULL ) 
+
+    if ( ctx == NULL )
     {
         fprintf( stderr , "%s: error: unable to load model\n" , __func__ );
         return 1;
@@ -84,13 +84,13 @@ int main(int argc, char ** argv)
 
     std::vector<llama_token> tokens_list;
     tokens_list = ::llama_tokenize( ctx , params.prompt , true );
-    
+
    const int max_context_size = llama_n_ctx( ctx );
    const int max_tokens_list_size = max_context_size - 4 ;
 
-    if ( (int)tokens_list.size() > max_tokens_list_size ) 
+    if ( (int)tokens_list.size() > max_tokens_list_size )
     {
-        fprintf( stderr , "%s: error: prompt too long (%d tokens, max %d)\n" , 
+        fprintf( stderr , "%s: error: prompt too long (%d tokens, max %d)\n" ,
             __func__ , (int)tokens_list.size() , max_tokens_list_size );
         return 1;
     }
@@ -99,7 +99,7 @@ int main(int argc, char ** argv)
 
     // Print the tokens from the prompt :
 
-    for( auto id : tokens_list ) 
+    for( auto id : tokens_list )
     {
         printf( "%s" , llama_token_to_str( ctx , id ) );
     }
@@ -115,19 +115,19 @@ int main(int argc, char ** argv)
     // tokens (see "infinite text generation via context swapping" in the main example), but in this minimalist
     // example, we will just going to stop the loop.
 
-    while ( llama_get_kv_cache_token_count( ctx ) < max_context_size ) 
+    while ( llama_get_kv_cache_token_count( ctx ) < max_context_size )
     {
         //---------------------------------
         // Evaluate the tokens :
         //---------------------------------
 
-        if ( llama_eval( ctx , tokens_list.data() , tokens_list.size() , llama_get_kv_cache_token_count( ctx ) , params.n_threads ) ) 
+        if ( llama_eval( ctx , tokens_list.data() , tokens_list.size() , llama_get_kv_cache_token_count( ctx ) , params.n_threads ) )
         {
             fprintf( stderr, "%s : failed to eval\n" , __func__ );
             return 1;
         }
 
-        
+
         tokens_list.clear();
 
         //---------------------------------
@@ -135,9 +135,9 @@ int main(int argc, char ** argv)
         //---------------------------------
 
         llama_token new_token_id = 0;
-        
+
         auto logits = llama_get_logits( ctx );
-        auto n_vocab = llama_n_vocab( ctx ); 
+        auto n_vocab = llama_n_vocab( ctx );
 
         std::vector<llama_token_data> candidates;
         candidates.reserve( n_vocab );