Commit a13cbe1e authored by Jared Casper
Browse files

Use new api to get loss scale when finetuning.

parent 7381754e
Loading
Loading
Loading
Loading
+2 −1
Original line number Diff line number Diff line
@@ -186,7 +186,8 @@ def _train(model, optimizer, lr_scheduler, forward_step,
            # Logging.
            report_memory_flag = training_log(losses_dict, losses_dict_sum,
                                              optimizer.param_groups[0]['lr'],
                                              iteration, optimizer.loss_scale,
                                              iteration,
                                              optimizer.get_loss_scale().item(),
                                              report_memory_flag, skipped_iter)

            # Autoresume