fix increase of model.train_samples and model.train_tokens

Now that each optimizer iteration gets its own batch, we need to multiply by the number of optimizer iterations.
This commit is contained in:
xaedes 2023-07-03 17:58:09 +02:00
parent 58024d3e5f
commit 17a0898d50
No known key found for this signature in database
GPG key ID: 30030EDD817EA2B1

View file

@ -4145,9 +4145,10 @@ int main(int argc, char ** argv) {
size_t used_mem_after_opt = ggml_used_mem(ctx0);
int n_iter = params.use_adam ? params.adam_n_iter : params.lbfgs_n_iter;
model.train_its = opt->iter;
model.train_samples += n_batch;
model.train_tokens += n_batch * n_tokens;
model.train_samples += n_batch * n_iter;
model.train_tokens += n_batch * n_tokens * n_iter;
if (params.print_info_interval > 0 && ex % params.print_info_interval == 0) {
printf("Example %d, opt iter %d\n", ex, opt->iter);