diff --git a/llama.cpp b/llama.cpp
index f58ca7487..fbe04958b 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -1771,7 +1771,7 @@ static struct ggml_cgraph * llama_build_graph(
 //   - embd       embeddings input
 //   - n_tokens   number of tokens
 //   - n_past:    the context size so far
-//   - n_threads: number of threads to use
+//   - n_threads: number of threads to use for inference
 //   - pp_threads: number of threads to use for prompt processing
 //
 static bool llama_eval_internal(
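
The comment change spells out the split between the two thread-count parameters: n_threads for regular (per-token) inference and pp_threads for prompt processing. A minimal sketch of how a caller of llama_eval_internal could select between them, using a hypothetical helper that is not part of the actual llama.cpp code:

// Hypothetical helper (not in llama.cpp): pick the thread count for a batch.
// Assumption: a batch with more than one token is treated as prompt processing,
// so it gets pp_threads; single-token generation steps get n_threads.
static int select_eval_threads(int n_tokens, int n_threads, int pp_threads) {
    return n_tokens > 1 ? pp_threads : n_threads;
}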