Add a custom PossibleUserWarning category (#10675)
carmocca committed Nov 26, 2021
1 parent 3089dc3 commit 8893072
Showing 4 changed files with 18 additions and 4 deletions.
5 changes: 5 additions & 0 deletions docs/source/guides/speed.rst
@@ -153,6 +153,11 @@ For debugging purposes or for dataloaders that load very small datasets, it is d
warnings.filterwarnings("ignore", ".*Consider increasing the value of the `num_workers` argument*")
# or to ignore all warnings which could be false positives
from pytorch_lightning.utilities.warnings import PossibleUserWarning
warnings.filterwarnings("ignore", category=PossibleUserWarning)
Spawn
"""""
When using ``strategy=ddp_spawn`` or training on TPUs, the way multiple GPUs/TPU cores are used is by calling ``.spawn()`` under the hood.
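The new docs snippet filters by warning class rather than by message regex. Below is a minimal, self-contained sketch of that behaviour; it defines a stand-in PossibleUserWarning instead of importing PyTorch Lightning (the real class, added in pytorch_lightning/utilities/warnings.py further down, is likewise a bare UserWarning subclass):

    import warnings

    # Stand-in for pytorch_lightning.utilities.warnings.PossibleUserWarning.
    class PossibleUserWarning(UserWarning):
        """Warnings that could be false positives."""

    # Ignore only the "possible false positive" category. Plain UserWarnings
    # still surface because the filter matches the subclass, not its parent.
    warnings.filterwarnings("ignore", category=PossibleUserWarning)

    warnings.warn("may be a false positive", category=PossibleUserWarning)  # silenced
    warnings.warn("a regular user warning", category=UserWarning)           # still shown

Filtering by category is coarser but more robust than the message-regex filter shown above it, which breaks whenever the warning text changes.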
7 changes: 5 additions & 2 deletions pytorch_lightning/trainer/data_loading.py
@@ -40,6 +40,7 @@
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.imports import _fault_tolerant_training
from pytorch_lightning.utilities.model_helpers import is_overridden
from pytorch_lightning.utilities.warnings import PossibleUserWarning


class TrainerDataLoadingMixin(ABC):
@@ -109,7 +110,8 @@ def _worker_check(self, dataloader: DataLoader, name: str) -> None:
f"The dataloader, {name}, does not have many workers which may be a bottleneck."
" Consider increasing the value of the `num_workers` argument`"
f" (try {num_cpus} which is the number of cpus on this machine)"
" in the `DataLoader` init to improve performance."
" in the `DataLoader` init to improve performance.",
category=PossibleUserWarning,
)

def _requires_distributed_sampler(self, dataloader) -> bool:
@@ -267,7 +269,8 @@ def reset_train_dataloader(self, model: Optional["pl.LightningModule"] = None) -
rank_zero_warn(
f"The number of training samples ({self.num_training_batches}) is smaller than the logging interval"
f" Trainer(log_every_n_steps={self.log_every_n_steps}). Set a lower value for log_every_n_steps if"
" you want to see logs for the training epoch."
" you want to see logs for the training epoch.",
category=PossibleUserWarning,
)

def _reset_eval_dataloader(
4 changes: 3 additions & 1 deletion pytorch_lightning/trainer/trainer.py
@@ -96,6 +96,7 @@
LRSchedulerTypeUnion,
TRAIN_DATALOADERS,
)
from pytorch_lightning.utilities.warnings import PossibleUserWarning

log = logging.getLogger(__name__)
# warnings to ignore in trainer
@@ -1531,7 +1532,8 @@ def _log_device_info(self) -> None:

if torch.cuda.is_available() and self._device_type != _AcceleratorType.GPU:
rank_zero_warn(
"GPU available but not used. Set the gpus flag in your trainer `Trainer(gpus=1)` or script `--gpus=1`."
"GPU available but not used. Set the gpus flag in your trainer `Trainer(gpus=1)` or script `--gpus=1`.",
category=PossibleUserWarning,
)

if _TPU_AVAILABLE and self._device_type != _AcceleratorType.TPU:
6 changes: 5 additions & 1 deletion pytorch_lightning/utilities/warnings.py
@@ -27,8 +27,12 @@ def rank_zero_warn(*args, stacklevel: int = 4, **kwargs):
_warn(*args, stacklevel=stacklevel, **kwargs)


class PossibleUserWarning(UserWarning):
"""Warnings that could be false positives."""


class LightningDeprecationWarning(DeprecationWarning):
...
"""Deprecation warnings raised by PyTorch Lightning."""


# enable our warnings
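Because rank_zero_warn forwards its keyword arguments through _warn, the category= passed at the trainer and dataloader call sites above reaches warnings.warn unchanged. A stand-alone sketch of that flow, assuming _warn is a thin wrapper around warnings.warn and omitting the rank-zero guard of the real helper:

    import warnings

    # Stand-ins mirroring the shape of the helpers in this file.
    class PossibleUserWarning(UserWarning):
        """Warnings that could be false positives."""

    def _warn(*args, stacklevel: int = 2, **kwargs):
        # Assumed to be a thin pass-through to the stdlib, as in this module.
        warnings.warn(*args, stacklevel=stacklevel, **kwargs)

    def rank_zero_warn(*args, stacklevel: int = 4, **kwargs):
        # `category=...` supplied by callers travels through **kwargs down to
        # warnings.warn, so call sites can opt into the new category without
        # any change to this helper's signature.
        _warn(*args, stacklevel=stacklevel, **kwargs)

    rank_zero_warn("GPU available but not used.", category=PossibleUserWarning)

Since PossibleUserWarning subclasses UserWarning, existing filters targeting UserWarning still catch these messages; the new class only adds the option to silence the false-positive-prone ones specifically.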
