fix increase of model.train_samples and model.train_tokens
Now that each optimizer iteration gets its own batch, we need to multiply the counters by the number of optimizer iterations.
This commit is contained in:
parent 58024d3e5f
commit 17a0898d50
1 changed file with 3 additions and 2 deletions
```diff
@@ -4145,9 +4145,10 @@ int main(int argc, char ** argv) {
         size_t used_mem_after_opt = ggml_used_mem(ctx0);
 
+        int n_iter = params.use_adam ? params.adam_n_iter : params.lbfgs_n_iter;
         model.train_its = opt->iter;
-        model.train_samples += n_batch;
-        model.train_tokens += n_batch * n_tokens;
+        model.train_samples += n_batch * n_iter;
+        model.train_tokens += n_batch * n_tokens * n_iter;
 
         if (params.print_info_interval > 0 && ex % params.print_info_interval == 0) {
             printf("Example %d, opt iter %d\n", ex, opt->iter);
 
```
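To see what the corrected accounting does in practice, here is a minimal, self-contained sketch (not the actual llama.cpp training loop; the `n_batch`, `n_tokens`, and `n_iter` values are made-up illustrative numbers):

```cpp
#include <cstdio>

// Hypothetical stand-ins for the training state touched by the diff;
// the field names mirror the commit, but this is not the real code.
struct model_state {
    int train_its     = 0; // total optimizer iterations so far
    int train_samples = 0; // total training samples consumed
    int train_tokens  = 0; // total tokens consumed
};

int main() {
    model_state model;

    const int n_batch  = 8;   // samples per batch (assumed value)
    const int n_tokens = 64;  // tokens per sample (assumed value)
    const int n_iter   = 16;  // optimizer iterations per example, e.g. adam_n_iter

    // One training example: the optimizer runs n_iter iterations and,
    // after this change, each iteration consumes its own fresh batch.
    model.train_its     += n_iter;
    model.train_samples += n_batch * n_iter;            // was: += n_batch
    model.train_tokens  += n_batch * n_tokens * n_iter; // was: += n_batch * n_tokens

    // Prints its=16 samples=128 tokens=8192.
    printf("its=%d samples=%d tokens=%d\n",
           model.train_its, model.train_samples, model.train_tokens);
    return 0;
}
```

With these numbers, the old code would have recorded only 8 samples and 512 tokens per example, underreporting both counters by a factor of `n_iter`.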