From dac1087e3ca74c790eab151b3cdc6f378036d763 Mon Sep 17 00:00:00 2001
From: Haohongxiang <86215757+haohongxiang@users.noreply.github.com>
Date: Wed, 2 Nov 2022 16:08:26 +0800
Subject: [PATCH] rename fw_bw func name of interleave pp (#47571)

---
 .../distributed/fleet/meta_parallel/pipeline_parallel.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/python/paddle/distributed/fleet/meta_parallel/pipeline_parallel.py b/python/paddle/distributed/fleet/meta_parallel/pipeline_parallel.py
index b7d1eb39c0174..cbdee2c875b9d 100755
--- a/python/paddle/distributed/fleet/meta_parallel/pipeline_parallel.py
+++ b/python/paddle/distributed/fleet/meta_parallel/pipeline_parallel.py
@@ -534,7 +534,7 @@ def _backward_step_helper(self, micro_step):
 
         return input_tensor_grad
 
-    def interleave_pipeline(
+    def forward_backward_pipeline(
         self, data, scaler, forward_only=False, compute_loss=True
     ):
         # use interleave scheduling strategy.
@@ -763,7 +763,7 @@ def interleave_pipeline(
     def train_batch(self, data, optimizer, lr_scheduler=None, scaler=None):
         data = self._prepare_training(data, optimizer, lr_scheduler)
         # interleave scheduler for pipeline parallel
-        train_loss = self.interleave_pipeline(data, scaler)
+        train_loss = self.forward_backward_pipeline(data, scaler)
 
         # optimizer
         with paddle.amp.auto_cast(enable=False):
@@ -778,4 +778,4 @@ def eval_batch(self, data, compute_loss=False):
         self._layers.eval()
         self._compute_loss = compute_loss
 
-        return self.interleave_pipeline(data, None, forward_only=True)
+        return self.forward_backward_pipeline(data, None, forward_only=True)
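
Note: the patch only renames the interleave scheduler's entry point; `train_batch` and `eval_batch` keep their signatures and are updated internally. Below is a minimal, hypothetical sketch (not part of this patch) of how external code that called `interleave_pipeline` directly could stay compatible across Paddle versions; the `getattr` fallback and the helper name `run_interleave_schedule` are assumptions for illustration only.

```python
# Hypothetical compatibility shim for callers that invoked the scheduler directly.
# It only illustrates the effect of the rename in this patch.
def run_interleave_schedule(pp_layer, data, scaler=None, forward_only=False):
    # After this patch the method is named forward_backward_pipeline;
    # older releases exposed the same scheduler as interleave_pipeline.
    fn = getattr(
        pp_layer,
        "forward_backward_pipeline",
        getattr(pp_layer, "interleave_pipeline", None),
    )
    if fn is None:
        raise AttributeError("no interleave pipeline scheduler found on this object")
    return fn(data, scaler, forward_only=forward_only)
```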