Record sampling time in llama_sample_classifier_free_guidance
This commit is contained in:
parent
66eb048470
commit
8e66e59cdd
1 changed file with 6 additions and 0 deletions
|
@@ -2163,6 +2163,8 @@ void llama_sample_classifier_free_guidance(
|
|||
struct llama_context * guidance_ctx,
|
||||
float scale,
|
||||
float smooth_factor) {
|
||||
int64_t t_start_sample_us = ggml_time_us();
|
||||
|
||||
assert(ctx);
|
||||
auto n_vocab = llama_n_vocab(ctx);
|
||||
assert(n_vocab == (int)candidates->size);
|
||||
|
@@ -2195,6 +2197,10 @@ void llama_sample_classifier_free_guidance(
|
|||
|
||||
candidates->data[i].logit = smooth_factor * guidance_logit + (1.f - smooth_factor) * base_logit;
|
||||
}
|
||||
|
||||
if (ctx) {
|
||||
ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
|
||||
}
|
||||
}
|
||||
|
||||
llama_token llama_sample_token_mirostat(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, int m, float * mu) {
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue