revert llama_eval, create main example

commit 8209b5d6a2
parent 471e469ae2
Author: netrunnereve
Date:   2023-08-24 20:26:19 -04:00
3 changed files with 15 additions and 20 deletions

(file 1 of 3)

@@ -144,7 +144,7 @@ int main(int argc, char ** argv) {
         fprintf(stderr, "%s: testing memory usage for n_batch = %d, n_ctx = %d\n", __func__, params.n_batch, params.n_ctx);

         const std::vector<llama_token> tmp(params.n_batch, llama_token_bos());
-        llama_eval(ctx, tmp.data(), tmp.size(), params.n_ctx, params.n_threads, params.pp_threads);
+        llama_eval(ctx, tmp.data(), tmp.size(), params.n_ctx, params.n_threads);
     }

     llama_print_timings(ctx);
@@ -406,7 +406,7 @@ int main(int argc, char ** argv) {
     // do one empty run to warm up the model
     {
         const std::vector<llama_token> tmp = { llama_token_bos(), };
-        llama_eval(ctx, tmp.data(), tmp.size(), 0, params.n_threads, params.pp_threads);
+        llama_eval(ctx, tmp.data(), tmp.size(), 0, params.n_threads);
         llama_reset_timings(ctx);
     }

@@ -513,7 +513,8 @@ int main(int argc, char ** argv) {
                 for (int i = 0; i < input_size; i += params.n_batch) {
                     int n_eval = std::min(input_size - i, params.n_batch);
-                    if (llama_eval(ctx_guidance, input_buf + i, n_eval, n_past_guidance, params.n_threads, params.pp_threads)) {
+                    int eval_thr = n_eval > 1 ? params.pp_threads : params.n_threads;
+                    if (llama_eval(ctx_guidance, input_buf + i, n_eval, n_past_guidance, eval_thr)) {
                         fprintf(stderr, "%s : failed to eval\n", __func__);
                         return 1;
                     }

@@ -527,7 +528,8 @@
                 if (n_eval > params.n_batch) {
                     n_eval = params.n_batch;
                 }
-                if (llama_eval(ctx, &embd[i], n_eval, n_past, params.n_threads, params.pp_threads)) {
+                int eval_thr = n_eval > 1 ? params.pp_threads : params.n_threads;
+                if (llama_eval(ctx, &embd[i], n_eval, n_past, eval_thr)) {
                     fprintf(stderr, "%s : failed to eval\n", __func__);
                     return 1;
                 }
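
The pattern the two hunks above introduce can be read as a small helper; a minimal sketch under assumed names (pick_eval_threads is hypothetical and not part of the llama.cpp API; pp_threads and n_threads stand for the example's command-line settings):

// Choose a thread count per llama_eval call: a batch of more than one
// token is prompt processing, a single token per call is generation.
// pick_eval_threads is a hypothetical helper, not library code.
static int pick_eval_threads(int n_eval, int pp_threads, int n_threads) {
    return n_eval > 1 ? pp_threads : n_threads;
}

Each call site then passes pick_eval_threads(n_eval, params.pp_threads, params.n_threads), reproducing on the caller side the split the library no longer provides.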

(file 2 of 3)

@@ -1787,7 +1787,6 @@ static struct ggml_cgraph * llama_build_graph(
 //   - n_tokens   number of tokens
 //   - n_past:    the context size so far
 //   - n_threads: number of threads to use for inference
-//   - pp_threads: number of threads to use for prompt processing
 //
 static bool llama_eval_internal(
          llama_context & lctx,
@@ -1796,7 +1795,6 @@ static bool llama_eval_internal(
             int   n_tokens,
             int   n_past,
             int   n_threads,
-            int   pp_threads,
      const char * cgraph_fname) {

     LLAMA_ASSERT((!tokens && embd) || (tokens && !embd));
@@ -1840,8 +1838,7 @@

     // for big prompts, if BLAS is enabled, it is better to use only one thread
     // otherwise, the threads are spin-lock waiting for the BLAS calls and are degrading the performance
-    pp_threads = N >= 32 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas() ? 1 : pp_threads;
-    n_threads  = N > 1 ? pp_threads : n_threads;
+    n_threads = N >= 32 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas() ? 1 : n_threads;

     struct ggml_tensor * res = gf->nodes[gf->n_nodes - 1];
     struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 2];
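
The kept heuristic reduces to a pure function of the batch size; a sketch under assumed names (effective_threads and its bool parameters are hypothetical; in llama.cpp the flags come from ggml_cpu_has_blas() and ggml_cpu_has_gpublas()):

// For a batch of 32 or more tokens with CPU BLAS (and no GPU BLAS), BLAS
// already parallelizes the large matrix multiplications; extra ggml threads
// would only spin-wait on those calls, so a single thread performs better.
static int effective_threads(int n_tokens, int n_threads, bool has_blas, bool has_gpublas) {
    return (n_tokens >= 32 && has_blas && !has_gpublas) ? 1 : n_threads;
}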
@@ -3487,7 +3484,7 @@ struct llama_context * llama_new_context_with_model(
         if (ggml_mpi_rank(ctx->ctx_mpi) > 0) {
             // Enter a blocking eval loop with dummy input, letting rank=0 drive the process
             const std::vector<llama_token> tmp(ctx->model.hparams.n_ctx, llama_token_bos());
-            while (!llama_eval(ctx, tmp.data(), tmp.size(), 0, 0, 0)) {};
+            while (!llama_eval(ctx, tmp.data(), tmp.size(), 0, 0)) {};
             llama_backend_free();
             exit(1);
         }
@@ -4179,9 +4176,8 @@ int llama_eval(
   const llama_token * tokens,
                 int   n_tokens,
                 int   n_past,
-                int   n_threads,
-                int   pp_threads) {
-    if (!llama_eval_internal(*ctx, tokens, nullptr, n_tokens, n_past, n_threads, pp_threads, nullptr)) {
+                int   n_threads) {
+    if (!llama_eval_internal(*ctx, tokens, nullptr, n_tokens, n_past, n_threads, nullptr)) {
         LLAMA_LOG_ERROR("%s: failed to eval\n", __func__);
         return 1;
     }
@@ -4202,9 +4198,8 @@ int llama_eval_embd(
         const float * embd,
                 int   n_tokens,
                 int   n_past,
-                int   n_threads,
-                int   pp_threads) {
-    if (!llama_eval_internal(*ctx, nullptr, embd, n_tokens, n_past, n_threads, pp_threads, nullptr)) {
+                int   n_threads) {
+    if (!llama_eval_internal(*ctx, nullptr, embd, n_tokens, n_past, n_threads, nullptr)) {
         LLAMA_LOG_ERROR("%s: failed to eval\n", __func__);
         return 1;
     }
@@ -4225,7 +4220,7 @@ int llama_eval_export(struct llama_context * ctx, const char * fname) {
     const std::vector<llama_token> tmp(n_batch, llama_token_bos());

-    if (!llama_eval_internal(*ctx, tmp.data(), nullptr, tmp.size(), n_ctx, 1, 1, fname)) {
+    if (!llama_eval_internal(*ctx, tmp.data(), nullptr, tmp.size(), n_ctx, 1, fname)) {
         LLAMA_LOG_ERROR("%s: failed to eval\n", __func__);
         return 1;
     }

(file 3 of 3)

@@ -308,8 +308,7 @@ extern "C" {
   const llama_token * tokens,
                 int   n_tokens,
                 int   n_past,
-                int   n_threads,
-                int   pp_threads);
+                int   n_threads);

     // Same as llama_eval, but use float matrix input directly.
     LLAMA_API int llama_eval_embd(
@@ -317,8 +316,7 @@ extern "C" {
         const float * embd,
                 int   n_tokens,
                 int   n_past,
-                int   n_threads,
-                int   pp_threads);
+                int   n_threads);

     // Export a static computation graph for context of 511 and batch size of 1
     // NOTE: since this functionality is mostly for debugging and demonstration purposes, we hardcode these
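
Taken together, a caller of the reverted declaration evaluates a prompt in n_batch-sized chunks; a hedged sketch (evaluate_prompt is a hypothetical helper; tokenization and context setup are elided, and ctx must already hold a loaded model):

#include <algorithm>
#include <cstdio>
#include <vector>
#include "llama.h"

// Feed a tokenized prompt through the 5-argument llama_eval declared above,
// advancing n_past by the number of tokens evaluated in each call.
static int evaluate_prompt(llama_context * ctx, const std::vector<llama_token> & toks,
                           int n_batch, int n_threads) {
    int n_past = 0;
    for (size_t i = 0; i < toks.size(); i += (size_t) n_batch) {
        const int n_eval = std::min((int) (toks.size() - i), n_batch);
        if (llama_eval(ctx, toks.data() + i, n_eval, n_past, n_threads)) {
            fprintf(stderr, "%s: failed to eval\n", __func__);
            return 1;
        }
        n_past += n_eval;
    }
    return 0;
}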