Correct typo: rename `llama_sample_context_free_guidance` to `llama_sample_classifier_free_guidance`. "CFG" already means context-free grammar, so the old name was misleading — the feature implements classifier-free guidance.

This commit is contained in:
Bach Le 2023-07-07 23:48:07 +08:00
parent 422a7ffdaf
commit 66eb048470
3 changed files with 3 additions and 3 deletions

View file

@@ -549,7 +549,7 @@ int main(int argc, char ** argv) {
llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false };
if (guidance_ctx) {
llama_sample_context_free_guidance(ctx, &candidates_p, guidance_ctx, params.cfg_scale, params.cfg_smooth_factor);
llama_sample_classifier_free_guidance(ctx, &candidates_p, guidance_ctx, params.cfg_scale, params.cfg_smooth_factor);
}
// Apply penalties

View file

@@ -2157,7 +2157,7 @@ void llama_log_softmax(T * array, int size, LogitAccessor logit_accessor) {
}
}
void llama_sample_context_free_guidance(
void llama_sample_classifier_free_guidance(
struct llama_context * ctx,
llama_token_data_array * candidates,
struct llama_context * guidance_ctx,

View file

@@ -312,7 +312,7 @@ extern "C" {
/// @params guidance_ctx A separate context from the same model. Other than a negative prompt at the beginning, it should have all generated and user input tokens copied from the main context.
/// @params scale Guidance strength. 1.0f means no guidance. Higher values mean stronger guidance.
/// @params smooth_factor Smooth factor between guidance logits and original logits. 1.0f means only use guidance logits. 0.0f means only original logits.
LLAMA_API void llama_sample_context_free_guidance(
LLAMA_API void llama_sample_classifier_free_guidance(
struct llama_context * ctx,
llama_token_data_array * candidates,
struct llama_context * guidance_ctx,