log llama's entropy

Maël Kerbiriou 2023-03-20 12:15:13 +01:00
parent e66962ff63
commit 4547848743

@@ -521,18 +521,26 @@ struct SoftMaxSampler {
         // compute probs for the tokens
         double sum_p = 0.0;
+        double sum_act = 0.0;
+        double entropy = 0.0;
         for (const auto & kv : logits_id) {
+            sum_act += kv.first;
             double logp = kv.first - maxl;
             double p = exp(logp);
             probs.push_back(p);
             sum_p += p;
+            entropy -= p * logp;
         }
         // normalize the probs
         const double scale = 1.0 / sum_p;
+        entropy = entropy * scale + log(sum_p);
         for (auto & p : probs) {
             p *= scale;
         }
+        // Scaled activations stats & distribution info
+        logprintf( "%s: top_sact=%f mean_sact=%f top_p=%f entropy=%f\n", __func__, logits_id[0].first, sum_act / n, probs[0], entropy);
     }
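
The added lines compute the Shannon entropy of the softmax distribution in the same pass that builds the probabilities. The loop accumulates entropy over the unnormalized values q_i = exp(l_i - maxl); with S = sum_i q_i and p_i = q_i / S, the identity H = -sum_i p_i log p_i = (1/S) * (-sum_i q_i log q_i) + log S gives the correction on the `entropy = entropy * scale + log(sum_p)` line, so no second loop over the normalized probabilities is needed. Below is a minimal standalone sketch of the same calculation, assuming a plain vector of logits sorted in descending order (the llama.cpp types, the `logits_id` pairs, `logprintf`, and `n` are not reproduced here):

// Sketch only, not the llama.cpp code itself: single-pass softmax plus
// entropy over a plain vector of logits, sorted descending so probs[0]
// is the top token's probability.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

int main() {
    std::vector<double> logits = {2.0, 1.0, 0.5, -1.0}; // example values

    // subtract the max logit before exponentiating, for numerical stability
    const double maxl = *std::max_element(logits.begin(), logits.end());

    double sum_p   = 0.0; // S = sum_i exp(l_i - maxl)
    double sum_act = 0.0; // sum of raw logits, for the mean-activation stat
    double entropy = 0.0; // accumulates -sum_i q_i * log q_i (unnormalized)

    std::vector<double> probs;
    probs.reserve(logits.size());
    for (const double l : logits) {
        sum_act += l;
        const double logp = l - maxl;       // log q_i
        const double p    = std::exp(logp); // q_i
        probs.push_back(p);
        sum_p   += p;
        entropy -= p * logp;
    }

    // normalize: p_i = q_i / S, and correct the entropy accordingly:
    // H = -sum_i (q_i/S) log(q_i/S) = (1/S) * (-sum_i q_i log q_i) + log S
    const double scale = 1.0 / sum_p;
    entropy = entropy * scale + std::log(sum_p);
    for (double & p : probs) {
        p *= scale;
    }

    std::printf("top_p=%f mean_act=%f entropy=%f\n",
                probs[0], sum_act / (double) logits.size(), entropy);
    return 0;
}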