Commit bd315c35 authored by Jared Casper

Merge branch 'lmcafee/empty-cache' into 'main'

Flag to call empty_cache() each iteration, to reduce fragmentation

See merge request ADLR/megatron-lm!306
parents 5ca20cdd 52b2296b
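
Usage note (not part of the commit itself): the new flag would be passed on the training command line. A minimal sketch, assuming a standard Megatron-LM launch; the script name and placeholder arguments are illustrative, only --empty-unused-memory-level comes from this change:

    python pretrain_gpt.py \
        <existing model/data arguments> \
        --empty-unused-memory-level 1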
megatron/arguments.py +5 −0
@@ -601,6 +601,11 @@ def _add_distributed_args(parser):
     group.add_argument('--use-cpu-initialization', action='store_true',
                        default=None, help='If set, affine parallel weights '
                        'initialization uses CPU' )
+    group.add_argument('--empty-unused-memory-level', default=0, type=int,
+                       choices=[0, 1, 2],
+                       help='Call torch.cuda.empty_cache() each iteration '
+                       '(training and eval), to reduce fragmentation. '
+                       '0=off, 1=moderate, 2=aggressive.')
     return parser
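
For reference, a minimal self-contained sketch of the gating semantics the hunks below implement; the helper name maybe_empty_cache is hypothetical, only the level thresholds come from this change:

    import torch

    def maybe_empty_cache(level, aggressive_site=False):
        # Level 1 frees the caching allocator's unused blocks at the
        # moderate call sites each iteration; level 2 additionally
        # frees at the aggressive call site.
        if level >= (2 if aggressive_site else 1):
            torch.cuda.empty_cache()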


megatron/training.py +12 −0
@@ -362,6 +362,10 @@ def train_step(forward_step_func, data_iterator,
         forward_step_func, data_iterator, model,
         optimizer, timers, forward_only=False)
 
+    # Empty unused memory
+    if args.empty_unused_memory_level >= 1:
+        torch.cuda.empty_cache()
+
     # All-reduce if needed.
     if args.DDP_impl == 'local':
         timers('backward-params-all-reduce').start()
@@ -408,6 +412,10 @@ def train_step(forward_step_func, data_iterator,
     else:
         skipped_iter = 1
 
+    # Empty unused memory
+    if args.empty_unused_memory_level >= 2:
+        torch.cuda.empty_cache()
+
     if mpu.is_pipeline_last_stage(ignore_virtual=True):
         # Average loss across microbatches.
         loss_reduced = {}
@@ -716,6 +724,10 @@ def evaluate(forward_step_func, data_iterator, model, verbose=False):
                 forward_step_func, data_iterator, model, optimizer=None,
                 timers=None, forward_only=True)
 
+            # Empty unused memory
+            if args.empty_unused_memory_level >= 1:
+                torch.cuda.empty_cache()
+
             if mpu.is_pipeline_last_stage(ignore_virtual=True):
                 # Reduce across processes.
                 for loss_dict in loss_dicts:
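
A standalone way to observe what empty_cache() does to the caching allocator (not from this merge request; uses only public torch.cuda APIs):

    import torch

    x = torch.empty(1024, 1024, device='cuda')
    del x  # freed tensors stay cached by the allocator
    print(torch.cuda.memory_allocated(), torch.cuda.memory_reserved())
    torch.cuda.empty_cache()  # return cached, unused blocks to the driver
    print(torch.cuda.memory_allocated(), torch.cuda.memory_reserved())

Allocated memory is zero after the del in both prints; reserved memory drops once the cache is released, which is the fragmentation headroom this flag trades per-iteration overhead to reclaim.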