only activate pp_threads for main for now
parent 590feeac1d
commit 215e2f21d0
7 changed files with 11 additions and 11 deletions
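
As the title says, the separate prompt-processing thread count (pp_threads) stays wired up only in the main example for now; every other call site below now passes params.n_threads for both thread arguments. For orientation, a sketch of the eval entry points as they appear at these call sites (shapes inferred from the diff itself, not copied from llama.h — the last two arguments are the generation and prompt-processing thread counts):

    // Inferred from the call sites in this commit and from the
    // llama_eval_internal parameter list in the final hunk.
    int llama_eval     (struct llama_context * ctx, const llama_token * tokens,
                        int n_tokens, int n_past, int n_threads, int pp_threads);
    int llama_eval_embd(struct llama_context * ctx, const float * embd,
                        int n_tokens, int n_past, int n_threads, int pp_threads);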
@@ -83,7 +83,7 @@ bool eval_float(void * model, float * input, int N){
         if (n_eval > n_batch) {
             n_eval = n_batch;
         }
-        if (llama_eval_embd(ctx, (input+i*n_emb), n_eval, n_past, params.n_threads, params.pp_threads)) {
+        if (llama_eval_embd(ctx, (input+i*n_emb), n_eval, n_past, params.n_threads, params.n_threads)) {
             fprintf(stderr, "%s : failed to eval\n", __func__);
             return false;
         }

@@ -104,7 +104,7 @@ bool eval_tokens(void * model, std::vector<llama_token> tokens) {
         if (n_eval > params.n_batch) {
             n_eval = params.n_batch;
         }
-        if (llama_eval(ctx, &tokens[i], n_eval, n_past, params.n_threads, params.pp_threads)) {
+        if (llama_eval(ctx, &tokens[i], n_eval, n_past, params.n_threads, params.n_threads)) {
             fprintf(stderr, "%s : failed to eval\n", __func__);
             return false;
         }

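Both wrappers above apply the same mechanical change used throughout this commit: the trailing params.pp_threads argument becomes a second params.n_threads. A minimal self-contained sketch of the pattern, with eval_batch as a hypothetical stand-in for llama_eval and gpt_params reduced to the two fields involved:

    #include <cstdio>

    // Hypothetical stand-in for llama_eval: reports the thread counts it receives.
    static int eval_batch(int n_threads, int pp_threads) {
        std::printf("generation: %d threads, prompt processing: %d threads\n",
                    n_threads, pp_threads);
        return 0; // llama_eval-style: 0 on success
    }

    struct gpt_params_sketch { int n_threads; int pp_threads; };

    int main() {
        gpt_params_sketch params = { 8, 4 };
        // before: eval_batch(params.n_threads, params.pp_threads);
        // after:  pp_threads is forwarded only in main, so every other caller
        //         sends n_threads twice and the configured pp_threads sits inert.
        return eval_batch(params.n_threads, params.n_threads);
    }
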
@@ -74,7 +74,7 @@ int main(int argc, char ** argv) {

     if (params.embedding){
         if (embd_inp.size() > 0) {
-            if (llama_eval(ctx, embd_inp.data(), embd_inp.size(), n_past, params.n_threads, params.pp_threads)) {
+            if (llama_eval(ctx, embd_inp.data(), embd_inp.size(), n_past, params.n_threads, params.n_threads)) {
                 fprintf(stderr, "%s : failed to eval\n", __func__);
                 return 1;
             }

@@ -66,7 +66,7 @@ void perplexity(llama_context * ctx, const gpt_params & params) {
             tokens[batch_start] = llama_token_bos();
         }

-        if (llama_eval(ctx, tokens.data() + batch_start, batch_size, j * n_batch, params.n_threads, params.pp_threads)) {
+        if (llama_eval(ctx, tokens.data() + batch_start, batch_size, j * n_batch, params.n_threads, params.n_threads)) {
             fprintf(stderr, "%s : failed to eval\n", __func__);
             return;
         }

@@ -233,7 +233,7 @@ void hellaswag_score(llama_context * ctx, const gpt_params & params) {
         }

         // Evaluate the query
-        if (llama_eval(ctx, query_embd.data(), query_embd.size(), 0, params.n_threads, params.pp_threads)) {
+        if (llama_eval(ctx, query_embd.data(), query_embd.size(), 0, params.n_threads, params.n_threads)) {
             fprintf(stderr, "%s : failed to eval\n", __func__);
             return;
         }

@@ -56,7 +56,7 @@ int main(int argc, char ** argv) {
     }

     // evaluate prompt
-    llama_eval(ctx, tokens.data(), n_prompt_tokens, n_past, params.n_threads, params.pp_threads);
+    llama_eval(ctx, tokens.data(), n_prompt_tokens, n_past, params.n_threads, params.n_threads);

     last_n_tokens_data.insert(last_n_tokens_data.end(), tokens.data(), tokens.data() + n_prompt_tokens);
     n_past += n_prompt_tokens;

@@ -93,7 +93,7 @@ int main(int argc, char ** argv) {
         last_n_tokens_data.push_back(next_token);

         printf("%s", next_token_str);
-        if (llama_eval(ctx, &next_token, 1, n_past, params.n_threads, params.pp_threads)) {
+        if (llama_eval(ctx, &next_token, 1, n_past, params.n_threads, params.n_threads)) {
             fprintf(stderr, "\n%s : failed to evaluate\n", __func__);
             llama_free(ctx);
             llama_free_model(model);

@@ -153,7 +153,7 @@ int main(int argc, char ** argv) {
         last_n_tokens_data.push_back(next_token);

         printf("%s", next_token_str);
-        if (llama_eval(ctx2, &next_token, 1, n_past, params.n_threads, params.pp_threads)) {
+        if (llama_eval(ctx2, &next_token, 1, n_past, params.n_threads, params.n_threads)) {
             fprintf(stderr, "\n%s : failed to evaluate\n", __func__);
             llama_free(ctx2);
             llama_free_model(model);

@@ -350,7 +350,7 @@ struct llama_server_context
             {
                 n_eval = params.n_batch;
             }
-            if (llama_eval(ctx, &embd[n_past], n_eval, n_past, params.n_threads, params.pp_threads))
+            if (llama_eval(ctx, &embd[n_past], n_eval, n_past, params.n_threads, params.n_threads))
             {
                 LOG_ERROR("failed to eval", {
                     {"n_eval", n_eval},

@@ -123,7 +123,7 @@ int main(int argc, char ** argv)
     // Evaluate the tokens :
     //---------------------------------

-    if ( llama_eval( ctx , tokens_list.data() , int(tokens_list.size()) , llama_get_kv_cache_token_count( ctx ) , params.n_threads , params.pp_threads ) )
+    if ( llama_eval( ctx , tokens_list.data() , int(tokens_list.size()) , llama_get_kv_cache_token_count( ctx ) , params.n_threads , params.n_threads ) )
     {
         fprintf( stderr, "%s : failed to eval\n" , __func__ );
         return 1;

@@ -1782,7 +1782,7 @@ static bool llama_eval_internal(
             int n_tokens,
             int n_past,
             int n_threads,
             int pp_threads,
     const char * cgraph_fname) {

     LLAMA_ASSERT((!tokens && embd) || (tokens && !embd));
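
Note that llama_eval_internal itself keeps its pp_threads parameter, so the plumbing stays in place even though only main exercises it for now. A hedged sketch of the kind of per-phase thread selection such a parameter enables (assumed semantics, not taken from this diff):

    #include <cstdio>

    // Assumed semantics: a batch of more than one token is prompt processing,
    // a single-token batch is a generation step.
    static int pick_thread_count(int n_tokens, int n_threads, int pp_threads) {
        return n_tokens > 1 ? pp_threads : n_threads;
    }

    int main() {
        const int n_threads  = 8; // generation
        const int pp_threads = 4; // prompt processing
        std::printf("prompt batch of 32 tokens -> %d threads\n",
                    pick_thread_count(32, n_threads, pp_threads));
        std::printf("generation step of 1 token -> %d threads\n",
                    pick_thread_count(1, n_threads, pp_threads));
        return 0;
    }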