Describe the behavior with limit_*_batches=1|1.0 #11950

Merged: 5 commits, Feb 21, 2022 (changes shown from 3 of the 5 commits)
2 changes: 2 additions & 0 deletions CHANGELOG.md
@@ -66,6 +66,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
- Added a `LOGGER_REGISTRY` instance to register custom loggers to the `LightningCLI` ([#11533](https://github.com/PyTorchLightning/pytorch-lightning/pull/11533))


+ - Added info message when the `Trainer` arguments `limit_*_batches`, `overfit_batches`, or `val_check_interval` are set to `1` or `1.0` ([#11950](https://github.com/PyTorchLightning/pytorch-lightning/pull/11950))

- Added a `PrecisionPlugin.teardown` method ([#10990](https://github.com/PyTorchLightning/pytorch-lightning/pull/10990))


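For reference, a minimal sketch of the behavior this entry adds (assuming a build of pytorch_lightning that includes this PR; the log text is copied from the messages introduced below):

```python
import logging

from pytorch_lightning import Trainer

# make INFO records visible; may be redundant, since Lightning usually
# configures its own logger at INFO level already
logging.basicConfig(level=logging.INFO)

Trainer(limit_train_batches=1)    # "`Trainer(limit_train_batches=1)` was configured so 1 batch per epoch will be used."
Trainer(limit_train_batches=1.0)  # "`Trainer(limit_train_batches=1.0)` was configured so 100% of the batches per epoch will be used."
Trainer()                         # the default is now None internally, so nothing is logged
```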
6 changes: 5 additions & 1 deletion pytorch_lightning/loops/epoch/prediction_epoch_loop.py
@@ -164,7 +164,11 @@ def _get_batch_indices(self, dataloader_idx: int) -> List[List[int]]:
"""Returns a reference to the seen batch indices if the dataloader has a batch sampler wrapped by our
:class:`~pytorch_lightning.overrides.distributed.IndexBatchSamplerWrapper`."""
# the batch_sampler may not be defined in the case of CombinedDataLoaders
- batch_sampler = getattr(self.trainer.predict_dataloaders[dataloader_idx], "batch_sampler", None)
+ batch_sampler = getattr(
+     self.trainer.predict_dataloaders[dataloader_idx],  # type: ignore[has-type]
+     "batch_sampler",
+     None,
+ )
if isinstance(batch_sampler, IndexBatchSamplerWrapper) and self.should_store_predictions:
return batch_sampler.seen_batch_indices
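As a side note, the `getattr(..., None)` fallback matters because a CombinedDataLoader-style object may not define `batch_sampler` at all. A tiny standalone illustration (`FakeCombinedLoader` is a hypothetical stand-in, not a Lightning type):

```python
class FakeCombinedLoader:
    """Hypothetical loader that does not expose a `batch_sampler` attribute."""

loader = FakeCombinedLoader()

# plain attribute access (loader.batch_sampler) would raise AttributeError;
# getattr with a default degrades gracefully to None instead
batch_sampler = getattr(loader, "batch_sampler", None)
assert batch_sampler is None
```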

1 change: 1 addition & 0 deletions pytorch_lightning/loops/fit_loop.py
@@ -263,6 +263,7 @@ def advance(self) -> None: # type: ignore[override]
log.detail(f"{self.__class__.__name__}: advancing loop")
assert self.trainer.train_dataloader is not None
dataloader = self.trainer.strategy.process_dataloader(self.trainer.train_dataloader)
+ assert self._data_fetcher is not None
self._data_fetcher.setup(
dataloader, batch_to_device=partial(self.trainer._call_strategy_hook, "batch_to_device", dataloader_idx=0)
)
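The added assert exists mainly for the type checker: `assert x is not None` narrows `Optional[T]` to `T`, so mypy accepts the attribute access that follows. A minimal sketch of the pattern (illustrative names, not Lightning's actual classes):

```python
from typing import Optional

class Fetcher:
    def setup(self, dataloader: object) -> None:
        print(f"fetcher ready for {dataloader!r}")

class Loop:
    def __init__(self) -> None:
        # populated later by a separate setup step, hence Optional
        self._data_fetcher: Optional[Fetcher] = None

    def advance(self, dataloader: object) -> None:
        assert self._data_fetcher is not None  # narrows Optional[Fetcher] to Fetcher
        self._data_fetcher.setup(dataloader)   # mypy no longer reports "None has no attribute"

loop = Loop()
loop._data_fetcher = Fetcher()
loop.advance([1, 2, 3])
```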
38 changes: 24 additions & 14 deletions pytorch_lightning/trainer/trainer.py
@@ -157,11 +157,11 @@ def __init__(
max_steps: int = -1,
min_steps: Optional[int] = None,
max_time: Optional[Union[str, timedelta, Dict[str, int]]] = None,
- limit_train_batches: Union[int, float] = 1.0,
- limit_val_batches: Union[int, float] = 1.0,
- limit_test_batches: Union[int, float] = 1.0,
- limit_predict_batches: Union[int, float] = 1.0,
- val_check_interval: Union[int, float] = 1.0,
+ limit_train_batches: Optional[Union[int, float]] = None,
+ limit_val_batches: Optional[Union[int, float]] = None,
+ limit_test_batches: Optional[Union[int, float]] = None,
+ limit_predict_batches: Optional[Union[int, float]] = None,
+ val_check_interval: Optional[Union[int, float]] = None,
flush_logs_every_n_steps: Optional[int] = None,
log_every_n_steps: int = 50,
accelerator: Optional[Union[str, Accelerator]] = None,
@@ -512,6 +512,7 @@ def __init__(
self._call_callback_hooks("on_init_start")

# init data flags
+ self.check_val_every_n_epoch: int
self._data_connector.on_trainer_init(
check_val_every_n_epoch,
reload_dataloaders_every_n_epochs,
@@ -569,6 +570,7 @@ def __init__(
self.logger_connector.on_trainer_init(logger, flush_logs_every_n_steps, log_every_n_steps, move_metrics_to_cpu)

# init debugging flags
+ self.val_check_interval: Union[int, float]
self._init_debugging_flags(
limit_train_batches,
limit_val_batches,
@@ -584,14 +586,14 @@

def _init_debugging_flags(
self,
- limit_train_batches,
- limit_val_batches,
- limit_test_batches,
- limit_predict_batches,
- val_check_interval,
- overfit_batches,
- fast_dev_run,
- ):
+ limit_train_batches: Optional[Union[int, float]],
+ limit_val_batches: Optional[Union[int, float]],
+ limit_test_batches: Optional[Union[int, float]],
+ limit_predict_batches: Optional[Union[int, float]],
+ val_check_interval: Optional[Union[int, float]],
+ overfit_batches: Union[int, float],
+ fast_dev_run: Union[int, bool],
+ ) -> None:
if isinstance(fast_dev_run, int) and (fast_dev_run < 0):
raise MisconfigurationException(
f"fast_dev_run={fast_dev_run} is not a valid configuration. It should be >= 0."
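Switching the defaults from `1.0` to `None` is what makes the new message opt-in: `None` acts as a sentinel meaning "the user never passed this argument". A distilled sketch of the idea (standalone, not the actual Lightning code path):

```python
from typing import Optional, Union

def resolve_limit(limit: Optional[Union[int, float]]) -> Union[int, float]:
    if limit is None:
        # argument left at its default: behave exactly as before, stay silent
        return 1.0
    # the user set a value explicitly, so mentioning it is not noise
    print(f"limit explicitly set to {limit!r}")
    return limit

resolve_limit(None)  # silent, returns 1.0
resolve_limit(1.0)   # prints, returns 1.0
```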
@@ -2640,7 +2642,15 @@ def terminate_on_nan(self, val: bool) -> None:
self._terminate_on_nan = val # : 212


- def _determine_batch_limits(batches: Union[int, float], name: str) -> Union[int, float]:
+ def _determine_batch_limits(batches: Optional[Union[int, float]], name: str) -> Union[int, float]:
+     if batches is None:
+         # `batches` is None unless the user passed a value explicitly, so the
+         # info messages below are shown only to users who set a value themselves
+         return 1.0
+     if isinstance(batches, int) and batches == 1:
+         log.info(f"`Trainer({name}=1)` was configured so 1 batch per epoch will be used.")
+     elif isinstance(batches, float) and batches == 1.0:
+         log.info(f"`Trainer({name}=1.0)` was configured so 100% of the batches per epoch will be used.")
if 0 <= batches <= 1:
return batches
if batches > 1 and batches % 1.0 == 0:
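The reason the two messages are worded differently: an int limit is an absolute number of batches, while a float is a fraction of the dataloader, so `1` and `1.0` mean very different things. A standalone sketch mirroring the arithmetic the existing `test_eval_limit_batches` assertion uses (not Lightning's actual helper):

```python
from typing import Union

def effective_batches(limit: Union[int, float], total: int) -> int:
    """Map a limit_*_batches value onto a dataloader with `total` batches."""
    if isinstance(limit, float):
        return int(limit * total)  # float: a fraction of the dataloader
    return min(limit, total)       # int: an absolute batch count

assert effective_batches(1, 100) == 1       # `1` keeps a single batch
assert effective_batches(1.0, 100) == 100   # `1.0` keeps all 100 batches
assert effective_batches(0.25, 100) == 25
```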
21 changes: 21 additions & 0 deletions tests/trainer/flags/test_limit_batches.py
@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+ import logging

import pytest

@@ -66,3 +67,23 @@ def test_eval_limit_batches(stage, mode, limit_batches):
expected_batches = int(limit_batches * len(eval_loader)) if isinstance(limit_batches, float) else limit_batches
assert loader_num_batches[0] == expected_batches
assert len(dataloaders[0]) == len(eval_loader)


+ @pytest.mark.parametrize(
+     "flag",
+     ("limit_train_batches", "limit_test_batches", "limit_predict_batches", "overfit_batches", "val_check_interval"),
+ )
+ @pytest.mark.parametrize("value", (1, 1.0))
+ def test_limit_batches_info_message(caplog, flag, value):
+     with caplog.at_level(logging.INFO, logger="pytorch_lightning.trainer.trainer"):
+         Trainer(**{flag: value})
+     assert f"`Trainer({flag}={value})` was configured" in caplog.text
+     message = f"configured so {'1' if isinstance(value, int) else '100%'}"
+     assert message in caplog.text
+
+     caplog.clear()
+
+     # the message should not appear by default
+     with caplog.at_level(logging.INFO, logger="pytorch_lightning.trainer.trainer"):
+         Trainer()
+     assert message not in caplog.text