diff --git a/CHANGELOG.md b/CHANGELOG.md
index 167bc7df4a741..ec7e545db501f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -86,6 +86,13 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 - Added a `MisconfigurationException` if user provided `opt_idx` in scheduler config doesn't match with actual optimizer index of its respective optimizer ([#11247](https://github.com/PyTorchLightning/pytorch-lightning/pull/11247))
 
+
+- Added a `loggers` property to `Trainer` which returns a list of loggers provided by the user ([#11683](https://github.com/PyTorchLightning/pytorch-lightning/pull/11683))
+
+
+- Added a `loggers` property to `LightningModule` which retrieves the `loggers` property from `Trainer` ([#11683](https://github.com/PyTorchLightning/pytorch-lightning/pull/11683))
+
+
 - Added support for DDP when using a `CombinedLoader` for the training data ([#11648](https://github.com/PyTorchLightning/pytorch-lightning/pull/11648))
 
 
diff --git a/docs/source/common/lightning_module.rst b/docs/source/common/lightning_module.rst
index b1436290c8c19..18a0fbead4e7d 100644
--- a/docs/source/common/lightning_module.rst
+++ b/docs/source/common/lightning_module.rst
@@ -985,6 +985,19 @@ The current logger being used (tensorboard or other supported logger)
 
     # the particular logger
     tensorboard_logger = self.logger.experiment
 
+loggers
+~~~~~~~
+
+The list of loggers currently being used by the Trainer.
+
+.. code-block:: python
+
+    def training_step(self, batch, batch_idx):
+        # List of LightningLoggerBase objects
+        loggers = self.loggers
+        for logger in loggers:
+            logger.log_metrics({"foo": 1.0})
+
 local_rank
 ~~~~~~~~~~~
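For orientation, a self-contained sketch of how the documented `self.loggers` property reads end to end. The `MultiLoggerModel` class, its layer, and its optimizer are illustrative assumptions, not part of this patch:

```python
import torch
from pytorch_lightning import LightningModule


class MultiLoggerModel(LightningModule):
    """Toy module that fans one metric out to every configured logger."""

    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(32, 2)

    def training_step(self, batch, batch_idx):
        loss = self.layer(batch).sum()
        # `self.loggers` mirrors `trainer.loggers` and falls back to an empty
        # list when no Trainer is attached, so this loop is always safe.
        for logger in self.loggers:
            logger.log_metrics({"train_loss": loss.item()}, step=self.global_step)
        return loss

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.1)
```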
diff --git a/docs/source/common/trainer.rst b/docs/source/common/trainer.rst
index 94c6a86bf393d..2126472b95169 100644
--- a/docs/source/common/trainer.rst
+++ b/docs/source/common/trainer.rst
@@ -1734,16 +1734,28 @@ The current epoch
 
     pass
 
-logger (p)
-**********
+logger
+******
 
 The current logger being used. Here's an example using tensorboard
 
 .. code-block:: python
 
-    def training_step(self, batch, batch_idx):
-        logger = self.trainer.logger
-        tensorboard = logger.experiment
+    logger = trainer.logger
+    tensorboard = logger.experiment
+
+
+loggers
+*******
+
+The list of loggers currently being used by the Trainer.
+
+.. code-block:: python
+
+    # List of LightningLoggerBase objects
+    loggers = trainer.loggers
+    for logger in loggers:
+        logger.log_metrics({"foo": 1.0})
 
 
 logged_metrics
diff --git a/pytorch_lightning/core/lightning.py b/pytorch_lightning/core/lightning.py
index bd0c04d715133..ff6693823b586 100644
--- a/pytorch_lightning/core/lightning.py
+++ b/pytorch_lightning/core/lightning.py
@@ -37,6 +37,7 @@
 from pytorch_lightning.core.mixins import DeviceDtypeModuleMixin, HyperparametersMixin
 from pytorch_lightning.core.optimizer import LightningOptimizer
 from pytorch_lightning.core.saving import ModelIO
+from pytorch_lightning.loggers import LightningLoggerBase
 from pytorch_lightning.trainer.connectors.logger_connector.fx_validator import _FxValidator
 from pytorch_lightning.utilities import _IS_WINDOWS, _TORCH_GREATER_EQUAL_1_10, GradClipAlgorithmType
 from pytorch_lightning.utilities.apply_func import apply_to_collection, convert_to_tensors
@@ -75,6 +76,7 @@ class LightningModule(
         "global_rank",
         "local_rank",
         "logger",
+        "loggers",
         "model_size",
         "automatic_optimization",
         "truncated_bptt_steps",
@@ -247,10 +249,15 @@ def truncated_bptt_steps(self, truncated_bptt_steps: int) -> None:
         self._truncated_bptt_steps = truncated_bptt_steps
 
     @property
-    def logger(self):
+    def logger(self) -> Optional[LightningLoggerBase]:
         """Reference to the logger object in the Trainer."""
         return self.trainer.logger if self.trainer else None
 
+    @property
+    def loggers(self) -> List[LightningLoggerBase]:
+        """Reference to the list of loggers in the Trainer."""
+        return self.trainer.loggers if self.trainer else []
+
     def _apply_batch_transfer_handler(
         self, batch: Any, device: Optional[torch.device] = None, dataloader_idx: int = 0
     ) -> Any:
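The docs and module property above compose with the Trainer changes that follow; a minimal usage sketch, where `CSVLogger` as the second logger and the `logs/*` paths are illustrative choices, not part of the patch:

```python
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import CSVLogger, TensorBoardLogger

# Two loggers: `trainer.loggers` exposes them as a plain list, while
# `trainer.logger` keeps returning a LoggerCollection (with a warning)
# until v1.8.
trainer = Trainer(logger=[TensorBoardLogger("logs/tb"), CSVLogger("logs/csv")])
for logger in trainer.loggers:
    logger.log_metrics({"foo": 1.0}, step=0)
```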
+ " This behavior will change in v1.8 when LoggerCollection is removed, and" + " trainer.logger will return the first logger in trainer.loggers" + ) + return LoggerCollection(self.loggers) + + @logger.setter + def logger(self, logger: Optional[LightningLoggerBase]) -> None: + if not logger: + self.loggers = [] + elif isinstance(logger, LoggerCollection): + self.loggers = list(logger) + else: + self.loggers = [logger] + + @property + def loggers(self) -> List[LightningLoggerBase]: + return self._loggers + + @loggers.setter + def loggers(self, loggers: Optional[List[LightningLoggerBase]]) -> None: + self._loggers = loggers if loggers else [] + @property def callback_metrics(self) -> dict: return self.logger_connector.callback_metrics diff --git a/tests/core/test_lightning_module.py b/tests/core/test_lightning_module.py index 70abaed4492d7..bf19040e04b89 100644 --- a/tests/core/test_lightning_module.py +++ b/tests/core/test_lightning_module.py @@ -76,6 +76,17 @@ def test_property_logger(tmpdir): assert model.logger == logger +def test_property_loggers(tmpdir): + """Test that loggers in LightningModule is accessible via the Trainer.""" + model = BoringModel() + assert model.loggers == [] + + logger = TensorBoardLogger(tmpdir) + trainer = Trainer(logger=logger) + model.trainer = trainer + assert model.loggers == [logger] + + def test_toggle_untoggle_2_optimizers_no_shared_parameters(tmpdir): class TestModel(BoringModel): def training_step(self, batch, batch_idx, optimizer_idx=None): diff --git a/tests/profiler/test_profiler.py b/tests/profiler/test_profiler.py index 8d92c4318b4f2..522b56ca90e3f 100644 --- a/tests/profiler/test_profiler.py +++ b/tests/profiler/test_profiler.py @@ -24,7 +24,7 @@ from pytorch_lightning import Callback, Trainer from pytorch_lightning.callbacks import EarlyStopping, StochasticWeightAveraging -from pytorch_lightning.loggers.base import LoggerCollection +from pytorch_lightning.loggers.base import DummyLogger, LoggerCollection from pytorch_lightning.loggers.tensorboard import TensorBoardLogger from pytorch_lightning.profiler import AdvancedProfiler, PassThroughProfiler, PyTorchProfiler, SimpleProfiler from pytorch_lightning.profiler.pytorch import RegisterRecordFunction, warning_cache @@ -493,7 +493,7 @@ def look_for_trace(trace_dir): model = BoringModel() # Wrap the logger in a list so it becomes a LoggerCollection - logger = [TensorBoardLogger(save_dir=tmpdir)] + logger = [TensorBoardLogger(save_dir=tmpdir), DummyLogger()] trainer = Trainer(default_root_dir=tmpdir, profiler="pytorch", logger=logger, limit_train_batches=5, max_epochs=1) assert isinstance(trainer.logger, LoggerCollection) trainer.fit(model) diff --git a/tests/trainer/properties/test_log_dir.py b/tests/trainer/properties/test_log_dir.py index 277a2f105efd6..71920a6b079bf 100644 --- a/tests/trainer/properties/test_log_dir.py +++ b/tests/trainer/properties/test_log_dir.py @@ -16,6 +16,7 @@ from pytorch_lightning import Trainer from pytorch_lightning.callbacks import ModelCheckpoint from pytorch_lightning.loggers import LoggerCollection, TensorBoardLogger +from pytorch_lightning.loggers.base import DummyLogger from tests.helpers.boring_model import BoringModel @@ -117,7 +118,7 @@ def test_logdir_logger_collection(tmpdir): trainer = Trainer( default_root_dir=default_root_dir, max_steps=2, - logger=[TensorBoardLogger(save_dir=save_dir, name="custom_logs")], + logger=[TensorBoardLogger(save_dir=save_dir, name="custom_logs"), DummyLogger()], ) assert isinstance(trainer.logger, LoggerCollection) assert 
diff --git a/tests/trainer/properties/test_loggers.py b/tests/trainer/properties/test_loggers.py
new file mode 100644
index 0000000000000..606c7b641ae1d
--- /dev/null
+++ b/tests/trainer/properties/test_loggers.py
@@ -0,0 +1,91 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from pytorch_lightning import Trainer
+from pytorch_lightning.loggers import LoggerCollection, TensorBoardLogger
+from tests.loggers.test_base import CustomLogger
+
+
+def test_trainer_loggers_property():
+    """Test for correct initialization of loggers in Trainer."""
+    logger1 = CustomLogger()
+    logger2 = CustomLogger()
+
+    # trainer.loggers should be a copy of the input list
+    trainer = Trainer(logger=[logger1, logger2])
+
+    assert trainer.loggers == [logger1, logger2]
+
+    # trainer.loggers should be a list of size 1
+    trainer = Trainer(logger=logger1)
+
+    assert trainer.loggers == [logger1]
+
+    # trainer.loggers should be an empty list
+    trainer = Trainer(logger=False)
+
+    assert trainer.loggers == []
+
+    # trainer.loggers should be a list of size 1 holding the default logger
+    trainer = Trainer(logger=True)
+
+    assert trainer.loggers == [trainer.logger]
+    assert type(trainer.loggers[0]) == TensorBoardLogger
+
+
+def test_trainer_loggers_setters():
+    """Test the behavior of setters for trainer.logger and trainer.loggers."""
+    logger1 = CustomLogger()
+    logger2 = CustomLogger()
+    logger_collection = LoggerCollection([logger1, logger2])
+    logger_collection_2 = LoggerCollection([logger2])
+
+    trainer = Trainer()
+    assert type(trainer.logger) == TensorBoardLogger
+    assert trainer.loggers == [trainer.logger]
+
+    # Test setters for trainer.logger
+    trainer.logger = logger1
+    assert trainer.logger == logger1
+    assert trainer.loggers == [logger1]
+
+    trainer.logger = logger_collection
+    assert trainer.logger._logger_iterable == logger_collection._logger_iterable
+    assert trainer.loggers == [logger1, logger2]
+
+    # LoggerCollection of size 1 should result in trainer.logger becoming the contained logger.
+    trainer.logger = logger_collection_2
+    assert trainer.logger == logger2
+    assert trainer.loggers == [logger2]
+
+    trainer.logger = None
+    assert trainer.logger is None
+    assert trainer.loggers == []
+
+    # Test setters for trainer.loggers
+    trainer.loggers = [logger1, logger2]
+    assert trainer.loggers == [logger1, logger2]
+    assert trainer.logger._logger_iterable == logger_collection._logger_iterable
+
+    trainer.loggers = [logger1]
+    assert trainer.loggers == [logger1]
+    assert trainer.logger == logger1
+
+    trainer.loggers = []
+    assert trainer.loggers == []
+    assert trainer.logger is None
+
+    trainer.loggers = None
+    assert trainer.loggers == []
+    assert trainer.logger is None
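`CustomLogger` is imported from tests/loggers/test_base.py, which is outside this diff. For orientation, a minimal logger satisfying the `LightningLoggerBase` interface looks roughly like the sketch below; this is an assumed stand-in, not the actual test helper:

```python
from pytorch_lightning.loggers.base import LightningLoggerBase, rank_zero_experiment
from pytorch_lightning.utilities import rank_zero_only


class CustomLogger(LightningLoggerBase):
    @property
    @rank_zero_experiment
    def experiment(self):
        return None  # no backing experiment object

    @rank_zero_only
    def log_hyperparams(self, params):
        pass  # discard hyperparameters

    @rank_zero_only
    def log_metrics(self, metrics, step=None):
        pass  # discard metrics

    @property
    def name(self):
        return "custom"

    @property
    def version(self):
        return "0"
```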