From 2883fb2bb6da41af28f1d0cadf9a80817399538c Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Mon, 31 Jan 2022 16:04:51 -0800 Subject: [PATCH 01/56] Implement logger property, replace self.logger --- .../logger_connector/logger_connector.py | 19 ++++---- pytorch_lightning/trainer/trainer.py | 45 ++++++++++++------- 2 files changed, 39 insertions(+), 25 deletions(-) diff --git a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py index 54e8fd676294c..1f043f68071ee 100644 --- a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py +++ b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py @@ -78,17 +78,15 @@ def should_update_logs(self) -> bool: def configure_logger(self, logger: Union[bool, LightningLoggerBase, Iterable[LightningLoggerBase]]) -> None: if isinstance(logger, bool): # default logger - self.trainer.logger = ( - TensorBoardLogger( - save_dir=self.trainer.default_root_dir, version=SLURMEnvironment.job_id(), name="lightning_logs" - ) - if logger - else None - ) + if logger == True: + self.trainer.logger = TensorBoardLogger(save_dir=self.trainer.default_root_dir, version=SLURMEnvironment.job_id(), name="lightning_logs") + self.trainer._loggers = [self.trainer.logger] elif isinstance(logger, Iterable): self.trainer.logger = LoggerCollection(logger) + self.trainer._loggers = list(logger) else: self.trainer.logger = logger + self.trainer._loggers = [self.trainer.logger] def log_metrics(self, metrics: _OUT_DICT, step: Optional[int] = None) -> None: """Logs the metric dict passed in. If `step` parameter is None and `step` key is presented is metrics, uses @@ -99,7 +97,7 @@ def log_metrics(self, metrics: _OUT_DICT, step: Optional[int] = None) -> None: step: Step for which metrics should be logged. Default value is `self.global_step` during training or the total validation / test log step count during validation and testing. 
""" - if self.trainer.logger is None or not metrics: + if self.trainer.loggers is None or not metrics: return self._logged_metrics.update(metrics) @@ -116,8 +114,9 @@ def log_metrics(self, metrics: _OUT_DICT, step: Optional[int] = None) -> None: step = self.trainer.global_step # log actual metrics - self.trainer.logger.agg_and_log_metrics(scalar_metrics, step=step) - self.trainer.logger.save() + for logger in self.trainer.loggers: + self.trainer.loggers.agg_and_log_metrics(scalar_metrics, step=step) + self.trainer.loggers.save() """ Evaluation metric updates diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index ac01227fd00ac..f3da1b5c41075 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -565,6 +565,9 @@ def __init__( # init logger flags self.logger: Optional[LightningLoggerBase] + + self._loggers: Optional[List[LightningLoggerBase]] + self.logger_connector.on_trainer_init(logger, flush_logs_every_n_steps, log_every_n_steps, move_metrics_to_cpu) # init debugging flags @@ -613,7 +616,9 @@ def _init_debugging_flags( self.fit_loop.max_epochs = 1 val_check_interval = 1.0 self.check_val_every_n_epoch = 1 - self.logger = DummyLogger() if self.logger is not None else None + if self.logger is not None: + self.logger = DummyLogger() + self.loggers = [self.logger] rank_zero_info( "Running in fast_dev_run mode: will run a full train," @@ -1201,7 +1206,7 @@ def _log_hyperparams(self) -> None: # log hyper-parameters hparams_initial = None - if self.logger is not None: + if self.loggers is not None: # save exp to get started (this is where the first experiment logs are written) datamodule_log_hyperparams = self.datamodule._log_hyperparams if self.datamodule is not None else False @@ -1229,10 +1234,11 @@ def _log_hyperparams(self) -> None: elif datamodule_log_hyperparams: hparams_initial = self.datamodule.hparams_initial - if hparams_initial is not None: - self.logger.log_hyperparams(hparams_initial) - self.logger.log_graph(self.lightning_module) - self.logger.save() + for logger in self.loggers: + if hparams_initial is not None: + logger.log_hyperparams(hparams_initial) + logger.log_graph(self.lightning_module) + logger.save() def _teardown(self): """This is the Trainer's internal teardown, unrelated to the `teardown` hooks in LightningModule and @@ -1463,8 +1469,9 @@ def _call_teardown_hook(self) -> None: # todo: TPU 8 cores hangs in flush with TensorBoard. Might do for all loggers. # It might be related to xla tensors blocked when moving the cpu kill loggers. - if self.logger is not None: - self.logger.finalize("success") + if self.loggers is not None: + for logger in self.loggers: + logger.finalize("success") # summarize profile results self.profiler.describe() @@ -1833,7 +1840,7 @@ def reset_train_dataloader(self, model: Optional["pl.LightningModule"] = None) - self.val_check_batch = int(self.num_training_batches * self.val_check_interval) self.val_check_batch = max(1, self.val_check_batch) - if self.logger and self.num_training_batches < self.log_every_n_steps: + if self.loggers and self.num_training_batches < self.log_every_n_steps: rank_zero_warn( f"The number of training samples ({self.num_training_batches}) is smaller than the logging interval" f" Trainer(log_every_n_steps={self.log_every_n_steps}). 
Set a lower value for log_every_n_steps if" @@ -2082,14 +2089,18 @@ def model(self, model: torch.nn.Module) -> None: @property def log_dir(self) -> Optional[str]: - if self.logger is None: - dirpath = self.default_root_dir - elif isinstance(self.logger, TensorBoardLogger): - dirpath = self.logger.log_dir - elif isinstance(self.logger, LoggerCollection): + if self.loggers is None: dirpath = self.default_root_dir + elif len(self.loggers == 1): + logger = self.loggers[0] + if isinstance(logger, TensorBoardLogger): + dirpath = logger.log_dir + elif isinstance(logger, LoggerCollection): + dirpath = self.default_root_dir + else: + dirpath = logger.save_dir else: - dirpath = self.logger.save_dir + dirpath = self.default_root_dir dirpath = self.strategy.broadcast(dirpath) return dirpath @@ -2471,6 +2482,10 @@ def _active_loop(self) -> Optional[Union[FitLoop, EvaluationLoop, PredictionLoop Logging properties """ + @property + def loggers(self) -> List[LightningLoggerBase]: + return self._loggers + @property def callback_metrics(self) -> dict: return self.logger_connector.callback_metrics From 3ae0a6efcbcb54c60e6c1f864a5665d708553b0b Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Mon, 31 Jan 2022 16:31:12 -0800 Subject: [PATCH 02/56] Fix small bugs --- .../trainer/connectors/logger_connector/logger_connector.py | 6 +++--- pytorch_lightning/trainer/trainer.py | 5 +++-- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py index 1f043f68071ee..60eddc9530434 100644 --- a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py +++ b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py @@ -78,7 +78,7 @@ def should_update_logs(self) -> bool: def configure_logger(self, logger: Union[bool, LightningLoggerBase, Iterable[LightningLoggerBase]]) -> None: if isinstance(logger, bool): # default logger - if logger == True: + if logger: self.trainer.logger = TensorBoardLogger(save_dir=self.trainer.default_root_dir, version=SLURMEnvironment.job_id(), name="lightning_logs") self.trainer._loggers = [self.trainer.logger] elif isinstance(logger, Iterable): @@ -115,8 +115,8 @@ def log_metrics(self, metrics: _OUT_DICT, step: Optional[int] = None) -> None: # log actual metrics for logger in self.trainer.loggers: - self.trainer.loggers.agg_and_log_metrics(scalar_metrics, step=step) - self.trainer.loggers.save() + logger.agg_and_log_metrics(scalar_metrics, step=step) + logger.save() """ Evaluation metric updates diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index f3da1b5c41075..25dde295e1c9b 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -618,7 +618,7 @@ def _init_debugging_flags( self.check_val_every_n_epoch = 1 if self.logger is not None: self.logger = DummyLogger() - self.loggers = [self.logger] + self._loggers = [self.logger] rank_zero_info( "Running in fast_dev_run mode: will run a full train," @@ -2091,7 +2091,8 @@ def model(self, model: torch.nn.Module) -> None: def log_dir(self) -> Optional[str]: if self.loggers is None: dirpath = self.default_root_dir - elif len(self.loggers == 1): + elif len(self.loggers) == 1: + print(type(self.loggers)) logger = self.loggers[0] if isinstance(logger, TensorBoardLogger): dirpath = logger.log_dir From 6d917a44b5dceca076ce4d5bdb13e8a5a6a8d296 Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Mon, 
31 Jan 2022 16:36:44 -0800 Subject: [PATCH 03/56] Fixed initialization bug --- .../trainer/connectors/logger_connector/logger_connector.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py index 60eddc9530434..2645f7f9e6a8a 100644 --- a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py +++ b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py @@ -81,6 +81,9 @@ def configure_logger(self, logger: Union[bool, LightningLoggerBase, Iterable[Lig if logger: self.trainer.logger = TensorBoardLogger(save_dir=self.trainer.default_root_dir, version=SLURMEnvironment.job_id(), name="lightning_logs") self.trainer._loggers = [self.trainer.logger] + else: + self.trainer.logger = None + self.trainer._loggers = None elif isinstance(logger, Iterable): self.trainer.logger = LoggerCollection(logger) self.trainer._loggers = list(logger) From 30d3a579b528ef4a305ca0935697f91f0d66e714 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 1 Feb 2022 00:45:19 +0000 Subject: [PATCH 04/56] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../trainer/connectors/logger_connector/logger_connector.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py index 2645f7f9e6a8a..b1f45e9a306a7 100644 --- a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py +++ b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py @@ -79,7 +79,9 @@ def configure_logger(self, logger: Union[bool, LightningLoggerBase, Iterable[Lig if isinstance(logger, bool): # default logger if logger: - self.trainer.logger = TensorBoardLogger(save_dir=self.trainer.default_root_dir, version=SLURMEnvironment.job_id(), name="lightning_logs") + self.trainer.logger = TensorBoardLogger( + save_dir=self.trainer.default_root_dir, version=SLURMEnvironment.job_id(), name="lightning_logs" + ) self.trainer._loggers = [self.trainer.logger] else: self.trainer.logger = None From 62c93355818625f2ded73baecee242804c86d7dc Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Mon, 31 Jan 2022 18:57:51 -0800 Subject: [PATCH 05/56] Fix for case where creating a LoggerCollection of size 1 --- .../trainer/connectors/logger_connector/logger_connector.py | 5 ++++- pytorch_lightning/trainer/trainer.py | 1 - 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py index b1f45e9a306a7..f00796cd1503f 100644 --- a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py +++ b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py @@ -88,7 +88,10 @@ def configure_logger(self, logger: Union[bool, LightningLoggerBase, Iterable[Lig self.trainer.logger = LoggerCollection(logger) - self.trainer._loggers = list(logger) + if(len(logger) < 1): + self.trainer._loggers = list(logger) + else: + self.trainer._loggers = [self.trainer.logger] else: self.trainer.logger = logger self.trainer._loggers = [self.trainer.logger] diff --git
a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 25dde295e1c9b..73600afd95694 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -2092,7 +2092,6 @@ def log_dir(self) -> Optional[str]: if self.loggers is None: dirpath = self.default_root_dir elif len(self.loggers) == 1: - print(type(self.loggers)) logger = self.loggers[0] if isinstance(logger, TensorBoardLogger): dirpath = logger.log_dir From 99a4d98c5fb6339747507002877aedc1b73af911 Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Mon, 31 Jan 2022 19:42:07 -0800 Subject: [PATCH 06/56] Better fix for the LoggerCollection of size 1 issue --- .../trainer/connectors/logger_connector/logger_connector.py | 5 +---- pytorch_lightning/trainer/trainer.py | 6 +++--- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py index f00796cd1503f..b1f45e9a306a7 100644 --- a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py +++ b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py @@ -88,10 +88,7 @@ def configure_logger(self, logger: Union[bool, LightningLoggerBase, Iterable[Lig self.trainer._loggers = None elif isinstance(logger, Iterable): self.trainer.logger = LoggerCollection(logger) - if(len(logger) < 1): - self.trainer._loggers = list(logger) - else: - self.trainer._loggers = [self.trainer.logger] + self.trainer._loggers = list(logger) else: self.trainer.logger = logger self.trainer._loggers = [self.trainer.logger] diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 73600afd95694..a526f97c5f05f 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -2093,10 +2093,10 @@ def log_dir(self) -> Optional[str]: dirpath = self.default_root_dir elif len(self.loggers) == 1: logger = self.loggers[0] - if isinstance(logger, TensorBoardLogger): - dirpath = logger.log_dir - elif isinstance(logger, LoggerCollection): + if isinstance(self.logger, LoggerCollection): dirpath = self.default_root_dir + elif isinstance(logger, TensorBoardLogger): + dirpath = logger.log_dir else: dirpath = logger.save_dir else: From 01287bab7728ca295ba23f331655acb0d0e58f39 Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Tue, 1 Feb 2022 12:35:43 -0800 Subject: [PATCH 07/56] Change trainer.loggers from a property to an instance variable --- .../connectors/logger_connector/logger_connector.py | 8 ++++---- pytorch_lightning/trainer/trainer.py | 8 ++------ 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py index b1f45e9a306a7..c6e3d2081e028 100644 --- a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py +++ b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py @@ -82,16 +82,16 @@ def configure_logger(self, logger: Union[bool, LightningLoggerBase, Iterable[Lig self.trainer.logger = TensorBoardLogger( save_dir=self.trainer.default_root_dir, version=SLURMEnvironment.job_id(), name="lightning_logs" ) - self.trainer._loggers = [self.trainer.logger] + self.trainer.loggers = [self.trainer.logger] else: self.trainer.logger = None - self.trainer._loggers = None + self.trainer.loggers = None elif isinstance(logger, Iterable): 
self.trainer.logger = LoggerCollection(logger) - self.trainer._loggers = list(logger) + self.trainer.loggers = list(logger) else: self.trainer.logger = logger - self.trainer._loggers = [self.trainer.logger] + self.trainer.loggers = [self.trainer.logger] def log_metrics(self, metrics: _OUT_DICT, step: Optional[int] = None) -> None: """Logs the metric dict passed in. If `step` parameter is None and `step` key is presented is metrics, uses diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index a526f97c5f05f..80c39e61679d1 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -566,7 +566,7 @@ def __init__( # init logger flags self.logger: Optional[LightningLoggerBase] - self._loggers: Optional[List[LightningLoggerBase]] + self.loggers: Optional[List[LightningLoggerBase]] self.logger_connector.on_trainer_init(logger, flush_logs_every_n_steps, log_every_n_steps, move_metrics_to_cpu) @@ -618,7 +618,7 @@ def _init_debugging_flags( self.check_val_every_n_epoch = 1 if self.logger is not None: self.logger = DummyLogger() - self._loggers = [self.logger] + self.loggers = [self.logger] rank_zero_info( "Running in fast_dev_run mode: will run a full train," @@ -2482,10 +2482,6 @@ def _active_loop(self) -> Optional[Union[FitLoop, EvaluationLoop, PredictionLoop Logging properties """ - @property - def loggers(self) -> List[LightningLoggerBase]: - return self._loggers - @property def callback_metrics(self) -> dict: return self.logger_connector.callback_metrics From 41cbab0b3245695edde7f6253ee5997d914ab442 Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Tue, 1 Feb 2022 13:04:14 -0800 Subject: [PATCH 08/56] Revert all instances of trainer.loggers being used --- .../logger_connector/logger_connector.py | 7 ++-- pytorch_lightning/trainer/trainer.py | 34 ++++++++----------- 2 files changed, 17 insertions(+), 24 deletions(-) diff --git a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py index c6e3d2081e028..0dbc6a37cf535 100644 --- a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py +++ b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py @@ -102,7 +102,7 @@ def log_metrics(self, metrics: _OUT_DICT, step: Optional[int] = None) -> None: step: Step for which metrics should be logged. Default value is `self.global_step` during training or the total validation / test log step count during validation and testing. 
""" - if self.trainer.loggers is None or not metrics: + if self.trainer.logger is None or not metrics: return self._logged_metrics.update(metrics) @@ -119,9 +119,8 @@ def log_metrics(self, metrics: _OUT_DICT, step: Optional[int] = None) -> None: step = self.trainer.global_step # log actual metrics - for logger in self.trainer.loggers: - logger.agg_and_log_metrics(scalar_metrics, step=step) - logger.save() + self.trainer.logger.agg_and_log_metrics(scalar_metrics, step=step) + self.trainer.logger.save() """ Evaluation metric updates diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 80c39e61679d1..6be2dfc0d67c8 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -1206,7 +1206,7 @@ def _log_hyperparams(self) -> None: # log hyper-parameters hparams_initial = None - if self.loggers is not None: + if self.logger is not None: # save exp to get started (this is where the first experiment logs are written) datamodule_log_hyperparams = self.datamodule._log_hyperparams if self.datamodule is not None else False @@ -1234,11 +1234,10 @@ def _log_hyperparams(self) -> None: elif datamodule_log_hyperparams: hparams_initial = self.datamodule.hparams_initial - for logger in self.loggers: - if hparams_initial is not None: - logger.log_hyperparams(hparams_initial) - logger.log_graph(self.lightning_module) - logger.save() + if hparams_initial is not None: + self.logger.log_hyperparams(hparams_initial) + self.logger.log_graph(self.lightning_module) + self.logger.save() def _teardown(self): """This is the Trainer's internal teardown, unrelated to the `teardown` hooks in LightningModule and @@ -1469,9 +1468,8 @@ def _call_teardown_hook(self) -> None: # todo: TPU 8 cores hangs in flush with TensorBoard. Might do for all loggers. # It might be related to xla tensors blocked when moving the cpu kill loggers. - if self.loggers is not None: - for logger in self.loggers: - logger.finalize("success") + if self.logger is not None: + self.logger.finalize("success") # summarize profile results self.profiler.describe() @@ -1840,7 +1838,7 @@ def reset_train_dataloader(self, model: Optional["pl.LightningModule"] = None) - self.val_check_batch = int(self.num_training_batches * self.val_check_interval) self.val_check_batch = max(1, self.val_check_batch) - if self.loggers and self.num_training_batches < self.log_every_n_steps: + if self.logger and self.num_training_batches < self.log_every_n_steps: rank_zero_warn( f"The number of training samples ({self.num_training_batches}) is smaller than the logging interval" f" Trainer(log_every_n_steps={self.log_every_n_steps}). 
Set a lower value for log_every_n_steps if" @@ -2089,18 +2087,14 @@ def model(self, model: torch.nn.Module) -> None: @property def log_dir(self) -> Optional[str]: - if self.loggers is None: + if self.logger is None: dirpath = self.default_root_dir - elif len(self.loggers) == 1: - logger = self.loggers[0] - if isinstance(self.logger, LoggerCollection): - dirpath = self.default_root_dir - elif isinstance(logger, TensorBoardLogger): - dirpath = logger.log_dir - else: - dirpath = logger.save_dir - else: + elif isinstance(self.logger, TensorBoardLogger): + dirpath = self.logger.log_dir + elif isinstance(self.logger, LoggerCollection): dirpath = self.default_root_dir + else: + dirpath = self.logger.save_dir dirpath = self.strategy.broadcast(dirpath) return dirpath From d78d98fd14728f8b6ca387ad3252c4d28c1e9c52 Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Tue, 1 Feb 2022 13:28:09 -0800 Subject: [PATCH 09/56] Use logger param to initialize trainer.loggers --- .../trainer/connectors/logger_connector/logger_connector.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py index 0dbc6a37cf535..290484df37503 100644 --- a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py +++ b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py @@ -82,7 +82,7 @@ def configure_logger(self, logger: Union[bool, LightningLoggerBase, Iterable[Lig self.trainer.logger = TensorBoardLogger( save_dir=self.trainer.default_root_dir, version=SLURMEnvironment.job_id(), name="lightning_logs" ) - self.trainer.loggers = [self.trainer.logger] + self.trainer.loggers = [logger] else: self.trainer.logger = None self.trainer.loggers = None @@ -91,7 +91,7 @@ def configure_logger(self, logger: Union[bool, LightningLoggerBase, Iterable[Lig self.trainer.loggers = list(logger) else: self.trainer.logger = logger - self.trainer.loggers = [self.trainer.logger] + self.trainer.loggers = [logger] def log_metrics(self, metrics: _OUT_DICT, step: Optional[int] = None) -> None: """Logs the metric dict passed in. 
If `step` parameter is None and `step` key is presented is metrics, uses From 7f809fffeb5254cc457acfca9889b255a084e8c6 Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Tue, 1 Feb 2022 13:30:23 -0800 Subject: [PATCH 10/56] Remove unneeded newlines --- pytorch_lightning/trainer/trainer.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 6be2dfc0d67c8..c89017d220c74 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -565,9 +565,7 @@ def __init__( # init logger flags self.logger: Optional[LightningLoggerBase] - self.loggers: Optional[List[LightningLoggerBase]] - self.logger_connector.on_trainer_init(logger, flush_logs_every_n_steps, log_every_n_steps, move_metrics_to_cpu) # init debugging flags From 179f3d4f03c2f94f03ad748be20271cf23fe3f88 Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Tue, 1 Feb 2022 13:55:02 -0800 Subject: [PATCH 11/56] Implement unit test for loggers property --- .../logger_connector/logger_connector.py | 5 ++-- tests/loggers/test_base.py | 24 +++++++++++++++++++ 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py index 290484df37503..71f0b335662b8 100644 --- a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py +++ b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py @@ -79,10 +79,11 @@ def configure_logger(self, logger: Union[bool, LightningLoggerBase, Iterable[Lig if isinstance(logger, bool): # default logger if logger: - self.trainer.logger = TensorBoardLogger( + default_logger = TensorBoardLogger( save_dir=self.trainer.default_root_dir, version=SLURMEnvironment.job_id(), name="lightning_logs" ) - self.trainer.loggers = [logger] + self.trainer.logger = default_logger + self.trainer.loggers = [default_logger] else: self.trainer.logger = None self.trainer.loggers = None diff --git a/tests/loggers/test_base.py b/tests/loggers/test_base.py index cb5721d6a9622..159e69c924dc7 100644 --- a/tests/loggers/test_base.py +++ b/tests/loggers/test_base.py @@ -175,6 +175,30 @@ def training_step(self, batch, batch_idx): assert logger2.metrics_logged != {} assert logger2.finalized_status == "success" +def test_loggers_property(tmpdir): + logger1 = CustomLogger() + logger2 = CustomLogger() + + # trainer.loggers should be a copy of the input list + trainer = Trainer(max_steps=2, log_every_n_steps=1, logger=[logger1, logger2], default_root_dir=tmpdir) + + assert trainer.loggers == [logger1, logger2] + + # trainer.loggers should create a list of size 1 + trainer = Trainer(max_steps=2, log_every_n_steps=1, logger=logger1, default_root_dir=tmpdir) + + assert trainer.loggers == [logger1] + + # trainer.loggers should be None + trainer = Trainer(max_steps=2, log_every_n_steps=1, logger=False, default_root_dir=tmpdir) + + assert trainer.loggers is None + + # trainer.loggers should be a list of size 1 holding the default logger + trainer = Trainer(max_steps=2, log_every_n_steps=1, logger=True, default_root_dir=tmpdir) + + assert trainer.loggers == [trainer.logger] + assert type(trainer.loggers[0]) == TensorBoardLogger def test_multiple_loggers_pickle(tmpdir): """Verify that pickling trainer with multiple loggers works.""" From 1ad542a53ce3e86d1c7ec5f0147760816b71be83 Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Tue, 1 Feb 2022 14:01:55 -0800 Subject: [PATCH 
12/56] make trainer.loggers by default an empty list --- .../trainer/connectors/logger_connector/logger_connector.py | 2 +- pytorch_lightning/trainer/trainer.py | 2 +- tests/loggers/test_base.py | 4 +++- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py index 71f0b335662b8..fadb52d66ac18 100644 --- a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py +++ b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py @@ -86,7 +86,7 @@ def configure_logger(self, logger: Union[bool, LightningLoggerBase, Iterable[Lig self.trainer.loggers = [default_logger] else: self.trainer.logger = None - self.trainer.loggers = None + self.trainer.loggers = [] elif isinstance(logger, Iterable): self.trainer.logger = LoggerCollection(logger) self.trainer.loggers = list(logger) diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index c89017d220c74..0cc9329cda932 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -565,7 +565,7 @@ def __init__( # init logger flags self.logger: Optional[LightningLoggerBase] - self.loggers: Optional[List[LightningLoggerBase]] + self.loggers: List[LightningLoggerBase] self.logger_connector.on_trainer_init(logger, flush_logs_every_n_steps, log_every_n_steps, move_metrics_to_cpu) # init debugging flags diff --git a/tests/loggers/test_base.py b/tests/loggers/test_base.py index 159e69c924dc7..bb64b7a63fbed 100644 --- a/tests/loggers/test_base.py +++ b/tests/loggers/test_base.py @@ -175,6 +175,7 @@ def training_step(self, batch, batch_idx): assert logger2.metrics_logged != {} assert logger2.finalized_status == "success" + def test_loggers_property(tmpdir): logger1 = CustomLogger() logger2 = CustomLogger() @@ -192,7 +193,7 @@ def test_loggers_property(tmpdir): # trainer.loggers should be None trainer = Trainer(max_steps=2, log_every_n_steps=1, logger=False, default_root_dir=tmpdir) - assert trainer.loggers is None + assert trainer.loggers is [] # trainer.loggers should be a list of size 1 holding the default logger trainer = Trainer(max_steps=2, log_every_n_steps=1, logger=True, default_root_dir=tmpdir) @@ -200,6 +201,7 @@ def test_loggers_property(tmpdir): assert trainer.loggers == [trainer.logger] assert type(trainer.loggers[0]) == TensorBoardLogger + def test_multiple_loggers_pickle(tmpdir): """Verify that pickling trainer with multiple loggers works.""" From c5df0adf7e5bdae8ee197169902184152f71e130 Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Tue, 1 Feb 2022 14:19:34 -0800 Subject: [PATCH 13/56] Update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index aa7c4f9b056bc..0b8adfa7e2bae 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -71,6 +71,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Added a `MisconfigurationException` if user provided `opt_idx` in scheduler config doesn't match with actual optimizer index of its respective optimizer ([#11247](https://github.com/PyTorchLightning/pytorch-lightning/pull/11247)) +- Added a `loggers` property to `Trainer` which returns a list of loggers as provided by the user ([#11683](https://github.com/PyTorchLightning/pytorch-lightning/pull/11683)) + ### Changed From 9d564d8e17699291eb35fba919d0ebd3a390e11b Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Tue, 1 Feb 2022 14:22:28 -0800 Subject: [PATCH 14/56] fix unit test according to suggestions --- tests/loggers/test_base.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/loggers/test_base.py b/tests/loggers/test_base.py index bb64b7a63fbed..3d416a600f955 100644 --- a/tests/loggers/test_base.py +++ b/tests/loggers/test_base.py @@ -176,7 +176,7 @@ def training_step(self, batch, batch_idx): assert logger2.finalized_status == "success" -def test_loggers_property(tmpdir): +def test_trainer_loggers_property(tmpdir): logger1 = CustomLogger() logger2 = CustomLogger() @@ -190,10 +190,10 @@ def test_loggers_property(tmpdir): assert trainer.loggers == [logger1] - # trainer.loggers should be None + # trainer.loggers should be an empty list trainer = Trainer(max_steps=2, log_every_n_steps=1, logger=False, default_root_dir=tmpdir) - assert trainer.loggers is [] + assert trainer.loggers == [] # trainer.loggers should be a list of size 1 holding the default logger trainer = Trainer(max_steps=2, log_every_n_steps=1, logger=True, default_root_dir=tmpdir) From 9d7c1bfdedc4f26139051b468e9d8a452cd423aa Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Tue, 1 Feb 2022 14:30:40 -0800 Subject: [PATCH 15/56] Update CHANGELOG.md Co-authored-by: ananthsub --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0b8adfa7e2bae..846a9e9b2f404 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -71,7 +71,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Added a `MisconfigurationException` if user provided `opt_idx` in scheduler config doesn't match with actual optimizer index of its respective optimizer ([#11247](https://github.com/PyTorchLightning/pytorch-lightning/pull/11247)) -- Added a `loggers` property to `Trainer` which returns a list of loggers as provided by the user ([#11683](https://github.com/PyTorchLightning/pytorch-lightning/pull/11683)) +- Added a `loggers` property to `Trainer` which returns a list of loggers provided by the user ([#11683](https://github.com/PyTorchLightning/pytorch-lightning/pull/11683)) ### Changed From d263659ab202f9cfaac2ac3de9683b53a8e8e327 Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Tue, 1 Feb 2022 16:22:46 -0800 Subject: [PATCH 16/56] Remove unnecessary Trainer params --- tests/loggers/test_base.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/loggers/test_base.py b/tests/loggers/test_base.py index 3d416a600f955..deedf4a634f6e 100644 --- a/tests/loggers/test_base.py +++ b/tests/loggers/test_base.py @@ -181,22 +181,22 @@ def test_trainer_loggers_property(tmpdir): logger2 = CustomLogger() # trainer.loggers should be a copy of the input list - trainer = Trainer(max_steps=2, log_every_n_steps=1, logger=[logger1, logger2], default_root_dir=tmpdir) + trainer = Trainer(logger=[logger1, logger2]) assert trainer.loggers == [logger1, logger2] # trainer.loggers should create a list of size 1 - trainer = Trainer(max_steps=2, log_every_n_steps=1, logger=logger1, default_root_dir=tmpdir) + trainer = Trainer(logger=logger1) assert trainer.loggers == [logger1] # trainer.loggers should be an empty list - trainer = Trainer(max_steps=2, log_every_n_steps=1, logger=False, default_root_dir=tmpdir) + trainer = Trainer(logger=False) assert trainer.loggers == [] # trainer.loggers should be a list of size 1 holding the default logger - trainer = Trainer(max_steps=2, log_every_n_steps=1, logger=True, default_root_dir=tmpdir) + trainer = Trainer(logger=True) assert trainer.loggers == [trainer.logger] assert type(trainer.loggers[0]) == TensorBoardLogger From befad11d23b5d6245b4cdde844321cc59196b407 Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Tue, 1 Feb 2022 16:47:17 -0800 Subject: [PATCH 17/56] Remove tmpdir parameter for unit test MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Carlos Mocholí --- tests/loggers/test_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/loggers/test_base.py b/tests/loggers/test_base.py index deedf4a634f6e..6f3176a6de332 100644 --- a/tests/loggers/test_base.py +++ b/tests/loggers/test_base.py @@ -176,7 +176,7 @@ def training_step(self, batch, batch_idx): assert logger2.finalized_status == "success" -def test_trainer_loggers_property(tmpdir): +def test_trainer_loggers_property(): logger1 = CustomLogger() logger2 = CustomLogger() From 4773c26d49d65f402c176eeda67c776ebf3931c2 Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Wed, 2 Feb 2022 11:38:55 -0800 Subject: [PATCH 18/56] Write setters for logger and loggers --- pytorch_lightning/trainer/trainer.py | 35 ++++++++++++++++++++++++++-- 1 file changed, 33 insertions(+), 2 deletions(-) diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 0cc9329cda932..bf98cf59e6b96 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -564,8 +564,8 @@ def __init__( self.__init_profiler(profiler) # init logger flags - self.logger:
Optional[LightningLoggerBase] - self.loggers: List[LightningLoggerBase] + self._logger: Optional[LightningLoggerBase] + self._loggers: List[LightningLoggerBase] self.logger_connector.on_trainer_init(logger, flush_logs_every_n_steps, log_every_n_steps, move_metrics_to_cpu) # init debugging flags @@ -2474,6 +2474,37 @@ def _active_loop(self) -> Optional[Union[FitLoop, EvaluationLoop, PredictionLoop Logging properties """ + @property + def logger(self) -> Optional[LightningLoggerBase]: + return self._logger + + @logger.setter + def logger(self, new_logger: Union[LightningLoggerBase, None]) -> None: + if new_logger: + if isinstance(new_logger, LoggerCollection): + self._logger = new_logger + self._loggers = [_logger for _logger in new_logger] + else: + self._logger = new_logger + self._loggers = [new_logger] + else: + self._logger = None + self._loggers = [] + + @property + def loggers(self) -> List[LightningLoggerBase]: + return self._loggers + + @loggers.setter + def loggers(self, new_loggers: Union[Iterable[LightningLoggerBase], None]) -> None: + if new_loggers: + self._loggers = [_logger for _logger in new_loggers] + self._logger = LoggerCollection(self._loggers) + else: + self._logger = None + self._loggers = [] + + @property def callback_metrics(self) -> dict: return self.logger_connector.callback_metrics From 8871c36841f727b819e07107c74ac549df7d95cd Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Wed, 2 Feb 2022 11:53:27 -0800 Subject: [PATCH 19/56] Unit test for setters --- tests/loggers/test_base.py | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/tests/loggers/test_base.py b/tests/loggers/test_base.py index 6f3176a6de332..00d27fdae3788 100644 --- a/tests/loggers/test_base.py +++ b/tests/loggers/test_base.py @@ -201,6 +201,44 @@ def test_trainer_loggers_property(): assert trainer.loggers == [trainer.logger] assert type(trainer.loggers[0]) == TensorBoardLogger +def test_trainer_logger_setters(): + logger1 = CustomLogger() + logger2 = CustomLogger() + logger_collection = LoggerCollection([logger1, logger2]) + + trainer = Trainer() + assert trainer.logger is None + assert trainer.loggers == [] + + # Test setters for trainer.logger + trainer.logger = logger1 + assert trainer.logger == logger1 + assert trainer.loggers == [logger1] + + trainer.logger = logger_collection + assert trainer.logger == logger_collection + assert trainer.loggers == [logger1, logger2] + + trainer.logger = None + assert trainer.logger is None + assert trainer.loggers == [] + + # Test setters for trainer.loggers + trainer.loggers = [logger1, logger2] + assert trainer.loggers == [logger1, logger2] + assert trainer.logger == logger_collection + + trainer.loggers = logger_collection + assert trainer.loggers == [logger1, logger2] + assert trainer.logger == logger_collection + + trainer.loggers = [] + assert trainer.loggers == [] + assert trainer.logger == None + + trainer.loggers = None + assert trainer.loggers == [] + assert trainer.logger == None def test_multiple_loggers_pickle(tmpdir): """Verify that pickling trainer with multiple loggers works.""" From 65ae649a32c9b2a3ecba38330ab36ff46d8feab4 Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Wed, 2 Feb 2022 12:07:44 -0800 Subject: [PATCH 20/56] Fix bug where logger setter is called twice --- .../trainer/connectors/logger_connector/logger_connector.py | 6 +----- pytorch_lightning/trainer/trainer.py | 5 ++++- tests/loggers/test_base.py | 4 ++++ 3 files changed, 9 insertions(+), 6 deletions(-) diff --git 
a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py index fadb52d66ac18..921554d8d7510 100644 --- a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py +++ b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py @@ -82,16 +82,12 @@ def configure_logger(self, logger: Union[bool, LightningLoggerBase, Iterable[Lig default_logger = TensorBoardLogger( save_dir=self.trainer.default_root_dir, version=SLURMEnvironment.job_id(), name="lightning_logs" ) - self.trainer.logger = default_logger self.trainer.loggers = [default_logger] else: - self.trainer.logger = None self.trainer.loggers = [] elif isinstance(logger, Iterable): - self.trainer.logger = LoggerCollection(logger) - self.trainer.loggers = list(logger) + self.trainer.loggers = logger else: - self.trainer.logger = logger self.trainer.loggers = [logger] def log_metrics(self, metrics: _OUT_DICT, step: Optional[int] = None) -> None: diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index bf98cf59e6b96..4b2a5eb540982 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -2499,7 +2499,10 @@ def loggers(self) -> List[LightningLoggerBase]: def loggers(self, new_loggers: Union[Iterable[LightningLoggerBase], None]) -> None: if new_loggers: self._loggers = [_logger for _logger in new_loggers] - self._logger = LoggerCollection(self._loggers) + if len(self._loggers) == 1: + self._logger = self._loggers[0] + else: + self._logger = LoggerCollection(self._loggers) else: self._logger = None self._loggers = [] diff --git a/tests/loggers/test_base.py b/tests/loggers/test_base.py index 00d27fdae3788..49099380c7a09 100644 --- a/tests/loggers/test_base.py +++ b/tests/loggers/test_base.py @@ -228,6 +228,10 @@ def test_trainer_logger_setters(): assert trainer.loggers == [logger1, logger2] assert trainer.logger == logger_collection + trainer.loggers = [logger1] + assert trainer.loggers == [logger1, logger2] + assert trainer.logger == logger1 + trainer.loggers = logger_collection assert trainer.loggers == [logger1, logger2] assert trainer.logger == logger_collection From df992de0a692b1ab05ad047d439d496be673fa85 Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Wed, 2 Feb 2022 12:13:08 -0800 Subject: [PATCH 21/56] Fix initialization bug with trainer test --- tests/loggers/test_base.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/loggers/test_base.py b/tests/loggers/test_base.py index 49099380c7a09..daf988bbd328a 100644 --- a/tests/loggers/test_base.py +++ b/tests/loggers/test_base.py @@ -207,8 +207,8 @@ def test_trainer_logger_setters(): logger_collection = LoggerCollection([logger1, logger2]) trainer = Trainer() - assert trainer.logger is None - assert trainer.loggers == [] + assert type(trainer.logger) == TensorBoardLogger + assert trainer.loggers == [trainer.logger] # Test setters for trainer.logger trainer.logger = logger1 @@ -226,15 +226,15 @@ def test_trainer_logger_setters(): # Test setters for trainer.loggers trainer.loggers = [logger1, logger2] assert trainer.loggers == [logger1, logger2] - assert trainer.logger == logger_collection + assert trainer.logger._logger_iterable == logger_collection._logger_iterable trainer.loggers = [logger1] - assert trainer.loggers == [logger1, logger2] + assert trainer.loggers == [logger1] assert trainer.logger == logger1 trainer.loggers = logger_collection assert 
trainer.loggers == [logger1, logger2] - assert trainer.logger == logger_collection + assert trainer.logger._logger_iterable == logger_collection._logger_iterable trainer.loggers = [] assert trainer.loggers == [] From 5e47ef40df3a98ef3898ca95787e7f7255f1593c Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 2 Feb 2022 20:19:21 +0000 Subject: [PATCH 22/56] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- pytorch_lightning/trainer/trainer.py | 1 - tests/loggers/test_base.py | 2 ++ 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 4b2a5eb540982..2c3c4990d3bc2 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -2507,7 +2507,6 @@ def loggers(self, new_loggers: Union[Iterable[LightningLoggerBase], None]) -> No self._logger = None self._loggers = [] - @property def callback_metrics(self) -> dict: return self.logger_connector.callback_metrics diff --git a/tests/loggers/test_base.py b/tests/loggers/test_base.py index daf988bbd328a..e4262e90a1f33 100644 --- a/tests/loggers/test_base.py +++ b/tests/loggers/test_base.py @@ -201,6 +201,7 @@ def test_trainer_loggers_property(): assert trainer.loggers == [trainer.logger] assert type(trainer.loggers[0]) == TensorBoardLogger + def test_trainer_logger_setters(): logger1 = CustomLogger() logger2 = CustomLogger() @@ -244,6 +245,7 @@ def test_trainer_logger_setters(): assert trainer.loggers == [] assert trainer.logger == None + def test_multiple_loggers_pickle(tmpdir): """Verify that pickling trainer with multiple loggers works.""" From d55f6268bb35ef61032c9bc82aa8e38def9ec306 Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Wed, 2 Feb 2022 12:21:08 -0800 Subject: [PATCH 23/56] Get rid of extra DummyLogger assignment --- pytorch_lightning/trainer/trainer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 4b2a5eb540982..015fb559813cb 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -615,8 +615,8 @@ def _init_debugging_flags( val_check_interval = 1.0 self.check_val_every_n_epoch = 1 if self.logger is not None: - self.logger = DummyLogger() - self.loggers = [self.logger] + dummy_logger = DummyLogger() + self.loggers = [dummy_logger] rank_zero_info( "Running in fast_dev_run mode: will run a full train," From 29778b1e8744d5e276f6478924c44c6ec243c55f Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Wed, 2 Feb 2022 12:35:28 -0800 Subject: [PATCH 24/56] flake and mypy fixes --- .../trainer/connectors/logger_connector/logger_connector.py | 2 +- tests/loggers/test_base.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py index 921554d8d7510..35366ac209279 100644 --- a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py +++ b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py @@ -86,7 +86,7 @@ def configure_logger(self, logger: Union[bool, LightningLoggerBase, Iterable[Lig else: self.trainer.loggers = [] elif isinstance(logger, Iterable): - self.trainer.loggers = logger + self.trainer.loggers = list(logger) else: self.trainer.loggers = [logger] diff 
--git a/tests/loggers/test_base.py b/tests/loggers/test_base.py index e4262e90a1f33..47570ea822bc5 100644 --- a/tests/loggers/test_base.py +++ b/tests/loggers/test_base.py @@ -239,11 +239,11 @@ def test_trainer_logger_setters(): trainer.loggers = [] assert trainer.loggers == [] - assert trainer.logger == None + assert trainer.logger is None trainer.loggers = None assert trainer.loggers == [] - assert trainer.logger == None + assert trainer.logger is None def test_multiple_loggers_pickle(tmpdir): From 506e5fd4c61c412a0b27570edf56f906bdddb84e Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Wed, 2 Feb 2022 12:58:11 -0800 Subject: [PATCH 25/56] Flake fix did not commit properly --- .../trainer/connectors/logger_connector/logger_connector.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py index 35366ac209279..9f86787167b97 100644 --- a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py +++ b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py @@ -16,7 +16,7 @@ import torch import pytorch_lightning as pl -from pytorch_lightning.loggers import LightningLoggerBase, LoggerCollection, TensorBoardLogger +from pytorch_lightning.loggers import LightningLoggerBase, TensorBoardLogger from pytorch_lightning.plugins.environments.slurm_environment import SLURMEnvironment from pytorch_lightning.trainer.connectors.logger_connector.result import _METRICS, _OUT_DICT, _PBAR_DICT from pytorch_lightning.trainer.states import RunningStage From 144169b63ddacba7c90e95b4329d27026a6fb39f Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Wed, 2 Feb 2022 13:41:16 -0800 Subject: [PATCH 26/56] Small changes based on suggestions --- pytorch_lightning/trainer/trainer.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index a91b50f4ed3ae..f386ca28c3146 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -2479,11 +2479,11 @@ def logger(self) -> Optional[LightningLoggerBase]: return self._logger @logger.setter - def logger(self, new_logger: Union[LightningLoggerBase, None]) -> None: + def logger(self, new_logger: Optional[LightningLoggerBase]) -> None: if new_logger: if isinstance(new_logger, LoggerCollection): self._logger = new_logger - self._loggers = [_logger for _logger in new_logger] + self._loggers = list(new_loggers) else: self._logger = new_logger self._loggers = [new_logger] @@ -2496,9 +2496,9 @@ def loggers(self) -> List[LightningLoggerBase]: return self._loggers @loggers.setter - def loggers(self, new_loggers: Union[Iterable[LightningLoggerBase], None]) -> None: + def loggers(self, new_loggers: Optional[Iterable[LightningLoggerBase]]) -> None: if new_loggers: - self._loggers = [_logger for _logger in new_loggers] + self._loggers = list(new_loggers) if len(self._loggers) == 1: self._logger = self._loggers[0] else: From bdcbcfb02d1aebd172b899f5faf4adcba3de5636 Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Wed, 2 Feb 2022 14:15:59 -0800 Subject: [PATCH 27/56] Shorten setters and update unit test --- pytorch_lightning/trainer/trainer.py | 17 +++-------------- tests/loggers/test_base.py | 2 +- 2 files changed, 4 insertions(+), 15 deletions(-) diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 
f386ca28c3146..209acb50c4fef 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -2480,16 +2480,8 @@ def logger(self) -> Optional[LightningLoggerBase]: @logger.setter def logger(self, new_logger: Optional[LightningLoggerBase]) -> None: - if new_logger: - if isinstance(new_logger, LoggerCollection): - self._logger = new_logger - self._loggers = list(new_loggers) - else: - self._logger = new_logger - self._loggers = [new_logger] - else: - self._logger = None - self._loggers = [] + self._logger = new_logger + self.loggers = [new_logger] if new_logger and not isinstance(new_logger, LoggerCollection) else new_logger @property def loggers(self) -> List[LightningLoggerBase]: @@ -2499,10 +2491,7 @@ def loggers(self) -> List[LightningLoggerBase]: def loggers(self, new_loggers: Optional[Iterable[LightningLoggerBase]]) -> None: if new_loggers: self._loggers = list(new_loggers) - if len(self._loggers) == 1: - self._logger = self._loggers[0] - else: - self._logger = LoggerCollection(self._loggers) + self._logger = self._loggers[0] if len(self._loggers) == 1 else LoggerCollection(self._loggers) else: self._logger = None self._loggers = [] diff --git a/tests/loggers/test_base.py b/tests/loggers/test_base.py index 47570ea822bc5..1e76beb79a554 100644 --- a/tests/loggers/test_base.py +++ b/tests/loggers/test_base.py @@ -217,7 +217,7 @@ def test_trainer_logger_setters(): assert trainer.loggers == [logger1] trainer.logger = logger_collection - assert trainer.logger == logger_collection + assert trainer.logger._logger_iterable == logger_collection._logger_iterable assert trainer.loggers == [logger1, logger2] trainer.logger = None From d42ce9042678fa2d5cdd4de48d6dabf23400bfe4 Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Wed, 2 Feb 2022 14:35:30 -0800 Subject: [PATCH 28/56] Move unit test to a new file --- tests/loggers/test_base.py | 68 ------------------ tests/trainer/properties/test_loggers.py | 91 ++++++++++++++++++++++++ 2 files changed, 91 insertions(+), 68 deletions(-) create mode 100644 tests/trainer/properties/test_loggers.py diff --git a/tests/loggers/test_base.py b/tests/loggers/test_base.py index 1e76beb79a554..00ddb84ed9c16 100644 --- a/tests/loggers/test_base.py +++ b/tests/loggers/test_base.py @@ -176,74 +176,6 @@ def training_step(self, batch, batch_idx): assert logger2.finalized_status == "success" -def test_trainer_loggers_property(): - logger1 = CustomLogger() - logger2 = CustomLogger() - - # trainer.loggers should be a copy of the input list - trainer = Trainer(logger=[logger1, logger2]) - - assert trainer.loggers == [logger1, logger2] - - # trainer.loggers should create a list of size 1 - trainer = Trainer(logger=logger1) - - assert trainer.loggers == [logger1] - - # trainer.loggers should be an empty list - trainer = Trainer(logger=False) - - assert trainer.loggers == [] - - # trainer.loggers should be a list of size 1 holding the default logger - trainer = Trainer(logger=True) - - assert trainer.loggers == [trainer.logger] - assert type(trainer.loggers[0]) == TensorBoardLogger - - -def test_trainer_logger_setters(): - logger1 = CustomLogger() - logger2 = CustomLogger() - logger_collection = LoggerCollection([logger1, logger2]) - - trainer = Trainer() - assert type(trainer.logger) == TensorBoardLogger - assert trainer.loggers == [trainer.logger] - - # Test setters for trainer.logger - trainer.logger = logger1 - assert trainer.logger == logger1 - assert trainer.loggers == [logger1] - - trainer.logger = logger_collection - assert 
trainer.logger._logger_iterable == logger_collection._logger_iterable - assert trainer.loggers == [logger1, logger2] - - trainer.logger = None - assert trainer.logger is None - assert trainer.loggers == [] - - # Test setters for trainer.loggers - trainer.loggers = [logger1, logger2] - assert trainer.loggers == [logger1, logger2] - assert trainer.logger._logger_iterable == logger_collection._logger_iterable - - trainer.loggers = [logger1] - assert trainer.loggers == [logger1] - assert trainer.logger == logger1 - - trainer.loggers = logger_collection - assert trainer.loggers == [logger1, logger2] - assert trainer.logger._logger_iterable == logger_collection._logger_iterable - - trainer.loggers = [] - assert trainer.loggers == [] - assert trainer.logger is None - - trainer.loggers = None - assert trainer.loggers == [] - assert trainer.logger is None def test_multiple_loggers_pickle(tmpdir): diff --git a/tests/trainer/properties/test_loggers.py b/tests/trainer/properties/test_loggers.py new file mode 100644 index 0000000000000..b75fab9f44059 --- /dev/null +++ b/tests/trainer/properties/test_loggers.py @@ -0,0 +1,91 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os + +from pytorch_lightning import Trainer +from pytorch_lightning.callbacks import ModelCheckpoint +from pytorch_lightning.loggers import LoggerCollection, TensorBoardLogger +from tests.loggers.test_base import CustomLogger +from tests.helpers.boring_model import BoringModel + +def test_trainer_loggers_property(): + """Test for correct initialization of loggers in Trainer""" + logger1 = CustomLogger() + logger2 = CustomLogger() + + # trainer.loggers should be a copy of the input list + trainer = Trainer(logger=[logger1, logger2]) + + assert trainer.loggers == [logger1, logger2] + + # trainer.loggers should create a list of size 1 + trainer = Trainer(logger=logger1) + + assert trainer.loggers == [logger1] + + # trainer.loggers should be an empty list + trainer = Trainer(logger=False) + + assert trainer.loggers == [] + + # trainer.loggers should be a list of size 1 holding the default logger + trainer = Trainer(logger=True) + + assert trainer.loggers == [trainer.logger] + assert type(trainer.loggers[0]) == TensorBoardLogger + + +def test_trainer_logger_setters(): + """Test the behavior of setters for trainer.logger and trainer.loggers""" + logger1 = CustomLogger() + logger2 = CustomLogger() + logger_collection = LoggerCollection([logger1, logger2]) + + trainer = Trainer() + assert type(trainer.logger) == TensorBoardLogger + assert trainer.loggers == [trainer.logger] + + # Test setters for trainer.logger + trainer.logger = logger1 + assert trainer.logger == logger1 + assert trainer.loggers == [logger1] + + trainer.logger = logger_collection + assert trainer.logger._logger_iterable == logger_collection._logger_iterable + assert trainer.loggers == [logger1, logger2] + + trainer.logger = None + assert trainer.logger is None + assert trainer.loggers == [] + + # Test setters for trainer.loggers 
+ trainer.loggers = [logger1, logger2] + assert trainer.loggers == [logger1, logger2] + assert trainer.logger._logger_iterable == logger_collection._logger_iterable + + trainer.loggers = [logger1] + assert trainer.loggers == [logger1] + assert trainer.logger == logger1 + + trainer.loggers = logger_collection + assert trainer.loggers == [logger1, logger2] + assert trainer.logger._logger_iterable == logger_collection._logger_iterable + + trainer.loggers = [] + assert trainer.loggers == [] + assert trainer.logger is None + + trainer.loggers = None + assert trainer.loggers == [] + assert trainer.logger is None From 4001efc366f34e4042234983d58cfe9c868cf249 Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Wed, 2 Feb 2022 14:39:16 -0800 Subject: [PATCH 29/56] flake and mypy fixes --- tests/loggers/test_base.py | 2 -- tests/trainer/properties/test_loggers.py | 4 +--- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/tests/loggers/test_base.py b/tests/loggers/test_base.py index 00ddb84ed9c16..cb5721d6a9622 100644 --- a/tests/loggers/test_base.py +++ b/tests/loggers/test_base.py @@ -176,8 +176,6 @@ def training_step(self, batch, batch_idx): assert logger2.finalized_status == "success" - - def test_multiple_loggers_pickle(tmpdir): """Verify that pickling trainer with multiple loggers works.""" diff --git a/tests/trainer/properties/test_loggers.py b/tests/trainer/properties/test_loggers.py index b75fab9f44059..1fc4eec7df59e 100644 --- a/tests/trainer/properties/test_loggers.py +++ b/tests/trainer/properties/test_loggers.py @@ -11,13 +11,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import os from pytorch_lightning import Trainer -from pytorch_lightning.callbacks import ModelCheckpoint from pytorch_lightning.loggers import LoggerCollection, TensorBoardLogger from tests.loggers.test_base import CustomLogger -from tests.helpers.boring_model import BoringModel + def test_trainer_loggers_property(): """Test for correct initialization of loggers in Trainer""" From 9401fb75e1da3f493e05949ab99cf21e1059fd93 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 2 Feb 2022 22:42:21 +0000 Subject: [PATCH 30/56] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- tests/trainer/properties/test_loggers.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/trainer/properties/test_loggers.py b/tests/trainer/properties/test_loggers.py index 1fc4eec7df59e..a574414bce9f4 100644 --- a/tests/trainer/properties/test_loggers.py +++ b/tests/trainer/properties/test_loggers.py @@ -18,7 +18,7 @@ def test_trainer_loggers_property(): - """Test for correct initialization of loggers in Trainer""" + """Test for correct initialization of loggers in Trainer.""" logger1 = CustomLogger() logger2 = CustomLogger() @@ -45,7 +45,7 @@ def test_trainer_loggers_property(): def test_trainer_logger_setters(): - """Test the behavior of setters for trainer.logger and trainer.loggers""" + """Test the behavior of setters for trainer.logger and trainer.loggers.""" logger1 = CustomLogger() logger2 = CustomLogger() logger_collection = LoggerCollection([logger1, logger2]) From e2be787b81c920776c794d78246fe13064edb6a6 Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Wed, 2 Feb 2022 16:42:30 -0800 Subject: [PATCH 31/56] Refactor setter to handle special case of size 1 
LoggerCollection --- .../logger_connector/logger_connector.py | 16 ++++++++-------- pytorch_lightning/trainer/trainer.py | 13 +++++++++---- tests/trainer/properties/test_loggers.py | 2 +- 3 files changed, 18 insertions(+), 13 deletions(-) diff --git a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py index 9f86787167b97..54e8fd676294c 100644 --- a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py +++ b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py @@ -16,7 +16,7 @@ import torch import pytorch_lightning as pl -from pytorch_lightning.loggers import LightningLoggerBase, TensorBoardLogger +from pytorch_lightning.loggers import LightningLoggerBase, LoggerCollection, TensorBoardLogger from pytorch_lightning.plugins.environments.slurm_environment import SLURMEnvironment from pytorch_lightning.trainer.connectors.logger_connector.result import _METRICS, _OUT_DICT, _PBAR_DICT from pytorch_lightning.trainer.states import RunningStage @@ -78,17 +78,17 @@ def should_update_logs(self) -> bool: def configure_logger(self, logger: Union[bool, LightningLoggerBase, Iterable[LightningLoggerBase]]) -> None: if isinstance(logger, bool): # default logger - if logger: - default_logger = TensorBoardLogger( + self.trainer.logger = ( + TensorBoardLogger( save_dir=self.trainer.default_root_dir, version=SLURMEnvironment.job_id(), name="lightning_logs" ) - self.trainer.loggers = [default_logger] - else: - self.trainer.loggers = [] + if logger + else None + ) elif isinstance(logger, Iterable): - self.trainer.loggers = list(logger) + self.trainer.logger = LoggerCollection(logger) else: - self.trainer.loggers = [logger] + self.trainer.logger = logger def log_metrics(self, metrics: _OUT_DICT, step: Optional[int] = None) -> None: """Logs the metric dict passed in. 
If `step` parameter is None and `step` key is presented is metrics, uses diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 209acb50c4fef..dae3f4023e84d 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -2480,8 +2480,7 @@ def logger(self) -> Optional[LightningLoggerBase]: @logger.setter def logger(self, new_logger: Optional[LightningLoggerBase]) -> None: - self._logger = new_logger - self.loggers = [new_logger] if new_logger and not isinstance(new_logger, LoggerCollection) else new_logger + self.loggers = [new_logger] if new_logger else None @property def loggers(self) -> List[LightningLoggerBase]: @@ -2491,10 +2490,16 @@ def loggers(self) -> List[LightningLoggerBase]: def loggers(self, new_loggers: Optional[Iterable[LightningLoggerBase]]) -> None: if new_loggers: self._loggers = list(new_loggers) - self._logger = self._loggers[0] if len(self._loggers) == 1 else LoggerCollection(self._loggers) + if len(self._loggers) == 1: + new_logger = self._loggers[0] + if isinstance(new_logger, LoggerCollection): + self._loggers = list(new_logger) + self._logger = new_logger + else: + self._logger = LoggerCollection(self._loggers) else: - self._logger = None self._loggers = [] + self._logger = None @property def callback_metrics(self) -> dict: diff --git a/tests/trainer/properties/test_loggers.py b/tests/trainer/properties/test_loggers.py index a574414bce9f4..75066be9d28c5 100644 --- a/tests/trainer/properties/test_loggers.py +++ b/tests/trainer/properties/test_loggers.py @@ -44,7 +44,7 @@ def test_trainer_loggers_property(): assert type(trainer.loggers[0]) == TensorBoardLogger -def test_trainer_logger_setters(): +def test_trainer_loggers_setters(): """Test the behavior of setters for trainer.logger and trainer.loggers.""" logger1 = CustomLogger() logger2 = CustomLogger() From 1e71c40d0fc14e4d36178b8391186010eed1f73f Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Wed, 2 Feb 2022 16:46:05 -0800 Subject: [PATCH 32/56] Remove DummyLogger changes --- pytorch_lightning/trainer/trainer.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index dae3f4023e84d..1d5f6cf4fc85b 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -614,9 +614,7 @@ def _init_debugging_flags( self.fit_loop.max_epochs = 1 val_check_interval = 1.0 self.check_val_every_n_epoch = 1 - if self.logger is not None: - dummy_logger = DummyLogger() - self.loggers = [dummy_logger] + self.logger = DummyLogger() if self.logger is not None else None rank_zero_info( "Running in fast_dev_run mode: will run a full train," From c96534b4d68174fa1dc61b129f45db428d52dbda Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Wed, 2 Feb 2022 17:40:04 -0800 Subject: [PATCH 33/56] Commit suggestion to change None to [] Co-authored-by: Danielle Pintz <38207072+daniellepintz@users.noreply.github.com> --- pytorch_lightning/trainer/trainer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 2a1b639b62c8d..99d3c0c319d4f 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -2480,7 +2480,7 @@ def logger(self) -> Optional[LightningLoggerBase]: @logger.setter def logger(self, new_logger: Optional[LightningLoggerBase]) -> None: - self.loggers = [new_logger] if new_logger else None + self.loggers = 
[new_logger] if new_logger else [] @property def loggers(self) -> List[LightningLoggerBase]: From 7fef6fd9352801839251fd4665916668253f618a Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Wed, 2 Feb 2022 20:53:37 -0800 Subject: [PATCH 34/56] Decouple both setters for readability --- pytorch_lightning/trainer/trainer.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 99d3c0c319d4f..8d0026c23ab95 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -2480,7 +2480,13 @@ def logger(self) -> Optional[LightningLoggerBase]: @logger.setter def logger(self, new_logger: Optional[LightningLoggerBase]) -> None: - self.loggers = [new_logger] if new_logger else [] + self._logger = new_logger + if not new_logger: + self._loggers = [] + elif isinstance(new_logger, LoggerCollection): + self._loggers = list(new_logger) + else: + self._loggers = [new_logger] @property def loggers(self) -> List[LightningLoggerBase]: @@ -2490,13 +2496,7 @@ def loggers(self) -> List[LightningLoggerBase]: def loggers(self, new_loggers: Optional[Iterable[LightningLoggerBase]]) -> None: if new_loggers: self._loggers = list(new_loggers) - if len(self._loggers) == 1: - new_logger = self._loggers[0] - if isinstance(new_logger, LoggerCollection): - self._loggers = list(new_logger) - self._logger = new_logger - else: - self._logger = LoggerCollection(self._loggers) + self._logger = [self._loggers[0]] if len(self._loggers) == 1 else LoggerCollection(self._loggers) else: self._loggers = [] self._logger = None From a669a98dd5cb6a401f4943eae60718d3bc3d5a03 Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Wed, 2 Feb 2022 20:59:23 -0800 Subject: [PATCH 35/56] Fix tiny bug in trainer.loggers setter --- pytorch_lightning/trainer/trainer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index adf1d99503dec..12e61bc23813d 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -2502,7 +2502,7 @@ def loggers(self) -> List[LightningLoggerBase]: def loggers(self, new_loggers: Optional[Iterable[LightningLoggerBase]]) -> None: if new_loggers: self._loggers = list(new_loggers) - self._logger = [self._loggers[0]] if len(self._loggers) == 1 else LoggerCollection(self._loggers) + self._logger = self._loggers[0] if len(self._loggers) == 1 else LoggerCollection(self._loggers) else: self._loggers = [] self._logger = None From 8714857df6c99d748896243e0febf3fee7624c76 Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Thu, 3 Feb 2022 13:59:59 -0800 Subject: [PATCH 36/56] Add loggers property to lightning.py --- pytorch_lightning/core/lightning.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/pytorch_lightning/core/lightning.py b/pytorch_lightning/core/lightning.py index 1b280f12d4166..a3a0b7b47a2dd 100644 --- a/pytorch_lightning/core/lightning.py +++ b/pytorch_lightning/core/lightning.py @@ -252,10 +252,15 @@ def truncated_bptt_steps(self, truncated_bptt_steps: int) -> None: self._truncated_bptt_steps = truncated_bptt_steps @property - def logger(self): + def logger(self) -> Optional[LightningLoggerBase]: """Reference to the logger object in the Trainer.""" return self.trainer.logger if self.trainer else None + @property + def loggers(self) -> Optional[List[LightningLoggerBase]]: + """Reference to the loggers object in the Trainer.""" + return 
self.trainer.loggers if self.trainer else None + def _apply_batch_transfer_handler( self, batch: Any, device: Optional[torch.device] = None, dataloader_idx: int = 0 ) -> Any: From a7a47f18aeec50d8d6273dcdb4b4d68b3a9f260a Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Thu, 3 Feb 2022 14:04:17 -0800 Subject: [PATCH 37/56] update changelog --- CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e6de1dfe26de2..e4ec00f37e450 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -80,8 +80,13 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added a `MisconfigurationException` if user provided `opt_idx` in scheduler config doesn't match with actual optimizer index of its respective optimizer ([#11247](https://github.com/PyTorchLightning/pytorch-lightning/pull/11247)) + - Added a `loggers` property to `Trainer` which returns a list of loggers provided by the user ([#11683](https://github.com/PyTorchLightning/pytorch-lightning/pull/11683)) + +- Added a `loggers` property to `LightningModule` which retrieves the `loggers` property from `Trainer` ([#11683](https://github.com/PyTorchLightning/pytorch-lightning/pull/11683)) + + - Added support for DDP when using a `CombinedLoader` for the training data ([#11648](https://github.com/PyTorchLightning/pytorch-lightning/pull/11648)) From 15fa585c359ec1bc852a5a5f6ae789dbbbce40a1 Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Thu, 3 Feb 2022 14:20:04 -0800 Subject: [PATCH 38/56] Add logger property to docs --- docs/source/common/lightning_module.rst | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/docs/source/common/lightning_module.rst b/docs/source/common/lightning_module.rst index e416d329ec4ce..a035bfc4e3f9e 100644 --- a/docs/source/common/lightning_module.rst +++ b/docs/source/common/lightning_module.rst @@ -983,6 +983,20 @@ The current logger being used (tensorboard or other supported logger) # the particular logger tensorboard_logger = self.logger.experiment +loggers +~~~~~~ + +The list of loggers currently being used (tensorboard or other supported loggers) + +.. code-block:: python + + def training_step(self, batch, batch_idx): + # the list of generic logger (same no matter if tensorboard or other supported loggers) + self.loggers + + # the particular logger + tensorboard_logger = self.loggers[0].experiment + local_rank ~~~~~~~~~~~ From 3f195be3a697eb21c2189152c0838f825d039f96 Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Thu, 3 Feb 2022 14:22:41 -0800 Subject: [PATCH 39/56] Fix typo --- docs/source/common/lightning_module.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/common/lightning_module.rst b/docs/source/common/lightning_module.rst index a035bfc4e3f9e..8e9c43a67bc46 100644 --- a/docs/source/common/lightning_module.rst +++ b/docs/source/common/lightning_module.rst @@ -991,7 +991,7 @@ The list of loggers currently being used (tensorboard or other supported loggers .. 
code-block:: python def training_step(self, batch, batch_idx): - # the list of generic logger (same no matter if tensorboard or other supported loggers) + # the list of generic loggers (same no matter if tensorboard or other supported loggers) self.loggers # the particular logger From c944f9a0497f1ac0661dfd6f41ccea3324057b85 Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Thu, 3 Feb 2022 14:46:56 -0800 Subject: [PATCH 40/56] update trainer.rst --- docs/source/common/trainer.rst | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/docs/source/common/trainer.rst b/docs/source/common/trainer.rst index 94c6a86bf393d..f0d175156dac7 100644 --- a/docs/source/common/trainer.rst +++ b/docs/source/common/trainer.rst @@ -1745,6 +1745,17 @@ The current logger being used. Here's an example using tensorboard logger = self.trainer.logger tensorboard = logger.experiment +loggers (p) +********** + +The list of loggers currently being used. Here's an example using tensorboard + +.. code-block:: python + + def training_step(self, batch, batch_idx): + loggers = self.trainer.loggers + tensorboard = loggers[0].experiment + logged_metrics ************** From e3312d546fc1ae8cb8cdb3002f29c7641676bfc9 Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Thu, 3 Feb 2022 14:47:53 -0800 Subject: [PATCH 41/56] correct spacing --- docs/source/common/trainer.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/source/common/trainer.rst b/docs/source/common/trainer.rst index f0d175156dac7..4eac2a56eb65a 100644 --- a/docs/source/common/trainer.rst +++ b/docs/source/common/trainer.rst @@ -1745,6 +1745,7 @@ The current logger being used. Here's an example using tensorboard logger = self.trainer.logger tensorboard = logger.experiment + loggers (p) ********** From 88b3c24287a8e01571b6fbed86f1ab585706823b Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Thu, 3 Feb 2022 14:53:16 -0800 Subject: [PATCH 42/56] remove typing for now --- pytorch_lightning/core/lightning.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pytorch_lightning/core/lightning.py b/pytorch_lightning/core/lightning.py index a3a0b7b47a2dd..24d89477dda51 100644 --- a/pytorch_lightning/core/lightning.py +++ b/pytorch_lightning/core/lightning.py @@ -252,12 +252,12 @@ def truncated_bptt_steps(self, truncated_bptt_steps: int) -> None: self._truncated_bptt_steps = truncated_bptt_steps @property - def logger(self) -> Optional[LightningLoggerBase]: + def logger(self): """Reference to the logger object in the Trainer.""" return self.trainer.logger if self.trainer else None @property - def loggers(self) -> Optional[List[LightningLoggerBase]]: + def loggers(self): """Reference to the loggers object in the Trainer.""" return self.trainer.loggers if self.trainer else None From 85256e7314257236e9aa10409988a3359c8dc252 Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Thu, 3 Feb 2022 15:42:39 -0800 Subject: [PATCH 43/56] Fix jit unused issue --- pytorch_lightning/core/lightning.py | 1 + 1 file changed, 1 insertion(+) diff --git a/pytorch_lightning/core/lightning.py b/pytorch_lightning/core/lightning.py index 5b34af48e9771..a84c6b0bbdc2d 100644 --- a/pytorch_lightning/core/lightning.py +++ b/pytorch_lightning/core/lightning.py @@ -80,6 +80,7 @@ class LightningModule( "global_rank", "local_rank", "logger", + "loggers", "model_size", "automatic_optimization", "truncated_bptt_steps", From 7ab36835cee5097a63a36d623d7a7e29e18ddb19 Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Thu, 3 Feb 2022 15:59:30 -0800 Subject: [PATCH 44/56] Fix 
underlines in docs --- docs/source/common/lightning_module.rst | 2 +- docs/source/common/trainer.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/source/common/lightning_module.rst b/docs/source/common/lightning_module.rst index 8e9c43a67bc46..c4805806df82d 100644 --- a/docs/source/common/lightning_module.rst +++ b/docs/source/common/lightning_module.rst @@ -984,7 +984,7 @@ The current logger being used (tensorboard or other supported logger) tensorboard_logger = self.logger.experiment loggers -~~~~~~ +~~~~~~~ The list of loggers currently being used (tensorboard or other supported loggers) diff --git a/docs/source/common/trainer.rst b/docs/source/common/trainer.rst index 4eac2a56eb65a..fc964a94d6e5d 100644 --- a/docs/source/common/trainer.rst +++ b/docs/source/common/trainer.rst @@ -1747,7 +1747,7 @@ The current logger being used. Here's an example using tensorboard loggers (p) -********** +*********** The list of loggers currently being used. Here's an example using tensorboard From c51f336bfc3583d0f7f253bf047391f11fedf1ff Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Thu, 3 Feb 2022 17:03:58 -0800 Subject: [PATCH 45/56] Updates based on suggestions --- docs/source/common/lightning_module.rst | 7 ++----- docs/source/common/trainer.rst | 8 +++++--- pytorch_lightning/core/lightning.py | 2 +- 3 files changed, 8 insertions(+), 9 deletions(-) diff --git a/docs/source/common/lightning_module.rst b/docs/source/common/lightning_module.rst index c4805806df82d..17754d154956c 100644 --- a/docs/source/common/lightning_module.rst +++ b/docs/source/common/lightning_module.rst @@ -986,17 +986,14 @@ The current logger being used (tensorboard or other supported logger) loggers ~~~~~~~ -The list of loggers currently being used (tensorboard or other supported loggers) +The list of loggers currently being used. .. code-block:: python def training_step(self, batch, batch_idx): - # the list of generic loggers (same no matter if tensorboard or other supported loggers) + # List of LightningLoggerBase objects self.loggers - # the particular logger - tensorboard_logger = self.loggers[0].experiment - local_rank ~~~~~~~~~~~ diff --git a/docs/source/common/trainer.rst b/docs/source/common/trainer.rst index fc964a94d6e5d..7efe01645ef5a 100644 --- a/docs/source/common/trainer.rst +++ b/docs/source/common/trainer.rst @@ -1749,13 +1749,15 @@ The current logger being used. Here's an example using tensorboard loggers (p) *********** -The list of loggers currently being used. Here's an example using tensorboard +The list of loggers currently being used. .. 
code-block:: python def training_step(self, batch, batch_idx): - loggers = self.trainer.loggers - tensorboard = loggers[0].experiment + # List of LightningLoggerBase objects + loggers = trainer.loggers + for logger in loggers: + logger.log_metrics({"foo": 1.0}) logged_metrics diff --git a/pytorch_lightning/core/lightning.py b/pytorch_lightning/core/lightning.py index a84c6b0bbdc2d..ac7c604d57edd 100644 --- a/pytorch_lightning/core/lightning.py +++ b/pytorch_lightning/core/lightning.py @@ -260,7 +260,7 @@ def logger(self): @property def loggers(self): """Reference to the loggers object in the Trainer.""" - return self.trainer.loggers if self.trainer else None + return self.trainer.loggers if self.trainer else [] def _apply_batch_transfer_handler( self, batch: Any, device: Optional[torch.device] = None, dataloader_idx: int = 0 From ec99acd3a40449e79bce19f9b9f82bd5e0df78f9 Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Thu, 3 Feb 2022 17:34:29 -0800 Subject: [PATCH 46/56] More updates to docs based on suggestions --- docs/source/common/trainer.rst | 14 ++++++-------- pytorch_lightning/core/lightning.py | 5 +++-- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/docs/source/common/trainer.rst b/docs/source/common/trainer.rst index 7efe01645ef5a..c5e94d904b1fa 100644 --- a/docs/source/common/trainer.rst +++ b/docs/source/common/trainer.rst @@ -1741,9 +1741,8 @@ The current logger being used. Here's an example using tensorboard .. code-block:: python - def training_step(self, batch, batch_idx): - logger = self.trainer.logger - tensorboard = logger.experiment + logger = trainer.logger + tensorboard = logger.experiment loggers (p) @@ -1753,11 +1752,10 @@ The list of loggers currently being used. .. code-block:: python - def training_step(self, batch, batch_idx): - # List of LightningLoggerBase objects - loggers = trainer.loggers - for logger in loggers: - logger.log_metrics({"foo": 1.0}) + # List of LightningLoggerBase objects + loggers = trainer.loggers + for logger in loggers: + logger.log_metrics({"foo": 1.0}) logged_metrics diff --git a/pytorch_lightning/core/lightning.py b/pytorch_lightning/core/lightning.py index ac7c604d57edd..0af3bbb8a8238 100644 --- a/pytorch_lightning/core/lightning.py +++ b/pytorch_lightning/core/lightning.py @@ -37,6 +37,7 @@ from pytorch_lightning.core.mixins import DeviceDtypeModuleMixin, HyperparametersMixin from pytorch_lightning.core.optimizer import LightningOptimizer from pytorch_lightning.core.saving import ModelIO +from pytorch_lighting.loggers import LightingLoggerBase from pytorch_lightning.trainer.connectors.logger_connector.fx_validator import _FxValidator from pytorch_lightning.utilities import ( _IS_WINDOWS, @@ -253,12 +254,12 @@ def truncated_bptt_steps(self, truncated_bptt_steps: int) -> None: self._truncated_bptt_steps = truncated_bptt_steps @property - def logger(self): + def logger(self) -> Optional[LightningLoggerBase]: """Reference to the logger object in the Trainer.""" return self.trainer.logger if self.trainer else None @property - def loggers(self): + def loggers(self) -> Optional[List[LightingLoggerBase]]: """Reference to the loggers object in the Trainer.""" return self.trainer.loggers if self.trainer else [] From ba09e27f1328476907b42054baefa0712d3c3c59 Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Thu, 3 Feb 2022 17:38:18 -0800 Subject: [PATCH 47/56] Create unit test for lightningmodule loggers property --- tests/core/test_lightning_module.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git 
a/tests/core/test_lightning_module.py b/tests/core/test_lightning_module.py index 63d5ecf670f91..c70bf3845871b 100644 --- a/tests/core/test_lightning_module.py +++ b/tests/core/test_lightning_module.py @@ -76,6 +76,17 @@ def test_property_logger(tmpdir): assert model.logger == logger +def test_property_loggers(tmpdir): + """Test that loggers in LightningModule is accessible via the Trainer.""" + model = BoringModel() + assert model.loggers == [] + + logger = TensorBoardLogger(tmpdir) + trainer = Mock(logger=logger) + model.trainer = trainer + assert model.loggers == [logger] + + def test_params_groups_and_state_are_accessible(tmpdir): class TestModel(BoringModel): def training_step(self, batch, batch_idx, optimizer_idx): From bc6fd725359a3e5a0c0db8cad3eb00c0dfe774c6 Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Thu, 3 Feb 2022 17:45:28 -0800 Subject: [PATCH 48/56] Replace Mock with Trainer --- pytorch_lightning/core/lightning.py | 4 ++-- tests/core/test_lightning_module.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pytorch_lightning/core/lightning.py b/pytorch_lightning/core/lightning.py index 0af3bbb8a8238..e1ba4f9523669 100644 --- a/pytorch_lightning/core/lightning.py +++ b/pytorch_lightning/core/lightning.py @@ -37,7 +37,7 @@ from pytorch_lightning.core.mixins import DeviceDtypeModuleMixin, HyperparametersMixin from pytorch_lightning.core.optimizer import LightningOptimizer from pytorch_lightning.core.saving import ModelIO -from pytorch_lighting.loggers import LightingLoggerBase +from pytorch_lightning.loggers import LightningLoggerBase from pytorch_lightning.trainer.connectors.logger_connector.fx_validator import _FxValidator from pytorch_lightning.utilities import ( _IS_WINDOWS, @@ -259,7 +259,7 @@ def logger(self) -> Optional[LightningLoggerBase]: return self.trainer.logger if self.trainer else None @property - def loggers(self) -> Optional[List[LightingLoggerBase]]: + def loggers(self) -> Optional[List[LightningLoggerBase]]: """Reference to the loggers object in the Trainer.""" return self.trainer.loggers if self.trainer else [] diff --git a/tests/core/test_lightning_module.py b/tests/core/test_lightning_module.py index c70bf3845871b..e353f189bb568 100644 --- a/tests/core/test_lightning_module.py +++ b/tests/core/test_lightning_module.py @@ -82,7 +82,7 @@ def test_property_loggers(tmpdir): assert model.loggers == [] logger = TensorBoardLogger(tmpdir) - trainer = Mock(logger=logger) + trainer = Trainer(logger=logger) model.trainer = trainer assert model.loggers == [logger] From f5b492d23645b00788ea2415cd8953421310f0b9 Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Thu, 3 Feb 2022 20:49:31 -0800 Subject: [PATCH 49/56] Update types --- pytorch_lightning/core/lightning.py | 2 +- pytorch_lightning/trainer/trainer.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pytorch_lightning/core/lightning.py b/pytorch_lightning/core/lightning.py index e1ba4f9523669..6e25b7d9bf282 100644 --- a/pytorch_lightning/core/lightning.py +++ b/pytorch_lightning/core/lightning.py @@ -259,7 +259,7 @@ def logger(self) -> Optional[LightningLoggerBase]: return self.trainer.logger if self.trainer else None @property - def loggers(self) -> Optional[List[LightningLoggerBase]]: + def loggers(self) -> List[LightningLoggerBase]: """Reference to the loggers object in the Trainer.""" return self.trainer.loggers if self.trainer else [] diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 92e50b82351b3..7248a4ebb93ad 100644 --- 
a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -2495,7 +2495,7 @@ def loggers(self) -> List[LightningLoggerBase]: return self._loggers @loggers.setter - def loggers(self, new_loggers: Optional[Iterable[LightningLoggerBase]]) -> None: + def loggers(self, new_loggers: Optional[List[LightningLoggerBase]]) -> None: if new_loggers: self._loggers = list(new_loggers) self._logger = self._loggers[0] if len(self._loggers) == 1 else LoggerCollection(self._loggers) From 8800ec88876d6e1915bb9f6648a4008904e4980d Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Thu, 3 Feb 2022 20:50:06 -0800 Subject: [PATCH 50/56] Remove list cast --- pytorch_lightning/trainer/trainer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 7248a4ebb93ad..3f5f0e48b5e55 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -2497,7 +2497,7 @@ def loggers(self) -> List[LightningLoggerBase]: @loggers.setter def loggers(self, new_loggers: Optional[List[LightningLoggerBase]]) -> None: if new_loggers: - self._loggers = list(new_loggers) + self._loggers = new_loggers self._logger = self._loggers[0] if len(self._loggers) == 1 else LoggerCollection(self._loggers) else: self._loggers = [] From 5852ee2425f4970e0e5e177375fa7ace392c9fcd Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Fri, 4 Feb 2022 00:56:48 -0800 Subject: [PATCH 51/56] Remove unit test for unsupported behavior --- tests/trainer/properties/test_loggers.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tests/trainer/properties/test_loggers.py b/tests/trainer/properties/test_loggers.py index 75066be9d28c5..c32e86b17b5c4 100644 --- a/tests/trainer/properties/test_loggers.py +++ b/tests/trainer/properties/test_loggers.py @@ -76,10 +76,6 @@ def test_trainer_loggers_setters(): assert trainer.loggers == [logger1] assert trainer.logger == logger1 - trainer.loggers = logger_collection - assert trainer.loggers == [logger1, logger2] - assert trainer.logger._logger_iterable == logger_collection._logger_iterable - trainer.loggers = [] assert trainer.loggers == [] assert trainer.logger is None From 0752fb5dc9794556b08a9ceb6cd8bdea1709f8e2 Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Fri, 4 Feb 2022 12:11:40 -0800 Subject: [PATCH 52/56] Handle special case for setter --- pytorch_lightning/trainer/trainer.py | 8 +++++++- tests/trainer/properties/test_loggers.py | 4 ++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 3f5f0e48b5e55..0ca1835b2ce59 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -2498,7 +2498,13 @@ def loggers(self) -> List[LightningLoggerBase]: def loggers(self, new_loggers: Optional[List[LightningLoggerBase]]) -> None: if new_loggers: self._loggers = new_loggers - self._logger = self._loggers[0] if len(self._loggers) == 1 else LoggerCollection(self._loggers) + if len(self._loggers) == 1: + new_logger = self._loggers[0] + self._logger = new_logger + if(isinstance(new_logger, LoggerCollection)): + self._loggers = list(new_logger) + else: + self._logger = LoggerCollection(self._loggers) else: self._loggers = [] self._logger = None diff --git a/tests/trainer/properties/test_loggers.py b/tests/trainer/properties/test_loggers.py index c32e86b17b5c4..39cb6f75e496e 100644 --- a/tests/trainer/properties/test_loggers.py +++ 
b/tests/trainer/properties/test_loggers.py @@ -76,6 +76,10 @@ def test_trainer_loggers_setters(): assert trainer.loggers == [logger1] assert trainer.logger == logger1 + trainer.loggers = [logger_collection] + assert trainer.loggers == [logger1, logger2] + assert trainer.logger._logger_iterable == logger_collection._logger_iterable + trainer.loggers = [] assert trainer.loggers == [] assert trainer.logger is None From 46b5ae15dfc84f7ecb9d5464d30d08b31ec999b5 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 4 Feb 2022 20:56:32 +0000 Subject: [PATCH 53/56] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- pytorch_lightning/trainer/trainer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 0ca1835b2ce59..33342230318ea 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -2501,7 +2501,7 @@ def loggers(self, new_loggers: Optional[List[LightningLoggerBase]]) -> None: if len(self._loggers) == 1: new_logger = self._loggers[0] self._logger = new_logger - if(isinstance(new_logger, LoggerCollection)): + if isinstance(new_logger, LoggerCollection): self._loggers = list(new_logger) else: self._logger = LoggerCollection(self._loggers) From 3480c38cfc2e09c050833e370a9504d8377c5410 Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Wed, 9 Feb 2022 10:44:25 -0800 Subject: [PATCH 54/56] Update docs/source/common/lightning_module.rst Co-authored-by: ananthsub --- docs/source/common/lightning_module.rst | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/source/common/lightning_module.rst b/docs/source/common/lightning_module.rst index 17754d154956c..b241e78d85eb5 100644 --- a/docs/source/common/lightning_module.rst +++ b/docs/source/common/lightning_module.rst @@ -992,7 +992,9 @@ The list of loggers currently being used. def training_step(self, batch, batch_idx): # List of LightningLoggerBase objects - self.loggers + loggers = self.loggers + for logger in loggers: + logger.log_metrics({"foo": 1.0}) local_rank ~~~~~~~~~~~ From cd90c344ba9a87d88fe2eef0766fa548b6045a3d Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Wed, 9 Feb 2022 12:33:25 -0800 Subject: [PATCH 55/56] Refactor docs and trainer according to suggestions --- docs/source/common/lightning_module.rst | 2 +- docs/source/common/trainer.rst | 10 +++--- pytorch_lightning/trainer/trainer.py | 41 ++++++++++++------------- 3 files changed, 25 insertions(+), 28 deletions(-) diff --git a/docs/source/common/lightning_module.rst b/docs/source/common/lightning_module.rst index b241e78d85eb5..f1528d85ae3a5 100644 --- a/docs/source/common/lightning_module.rst +++ b/docs/source/common/lightning_module.rst @@ -986,7 +986,7 @@ The current logger being used (tensorboard or other supported logger) loggers ~~~~~~~ -The list of loggers currently being used. +The list of loggers currently being used by the Trainer. .. code-block:: python diff --git a/docs/source/common/trainer.rst b/docs/source/common/trainer.rst index c5e94d904b1fa..2126472b95169 100644 --- a/docs/source/common/trainer.rst +++ b/docs/source/common/trainer.rst @@ -1734,8 +1734,8 @@ The current epoch pass -logger (p) -********** +logger +******* The current logger being used. Here's an example using tensorboard @@ -1745,10 +1745,10 @@ The current logger being used. 
Here's an example using tensorboard tensorboard = logger.experiment -loggers (p) -*********** +loggers +******** -The list of loggers currently being used. +The list of loggers currently being used by the Trainer. .. code-block:: python diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 33342230318ea..a122ce3035dbd 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -563,7 +563,6 @@ def __init__( self.__init_profiler(profiler) # init logger flags - self._logger: Optional[LightningLoggerBase] self._loggers: List[LightningLoggerBase] self.logger_connector.on_trainer_init(logger, flush_logs_every_n_steps, log_every_n_steps, move_metrics_to_cpu) @@ -2478,36 +2477,34 @@ def _active_loop(self) -> Optional[Union[FitLoop, EvaluationLoop, PredictionLoop @property def logger(self) -> Optional[LightningLoggerBase]: - return self._logger + if len(self.loggers) == 0: + return None + if len(self.loggers) == 1: + return self.loggers[0] + else: + rank_zero_warn( + "Using trainer.logger when Trainer is configured to use multiple loggers." + " This behavior will change in v1.8 when LoggerCollection is removed, and" + " trainer.logger will return the first logger in trainer.loggers" + ) + return LoggerCollection(self.loggers) @logger.setter - def logger(self, new_logger: Optional[LightningLoggerBase]) -> None: - self._logger = new_logger - if not new_logger: - self._loggers = [] - elif isinstance(new_logger, LoggerCollection): - self._loggers = list(new_logger) + def logger(self, logger: Optional[LightningLoggerBase]) -> None: + if not logger: + self.loggers = [] + elif isinstance(logger, LoggerCollection): + self.loggers = list(logger) else: - self._loggers = [new_logger] + self.loggers = [logger] @property def loggers(self) -> List[LightningLoggerBase]: return self._loggers @loggers.setter - def loggers(self, new_loggers: Optional[List[LightningLoggerBase]]) -> None: - if new_loggers: - self._loggers = new_loggers - if len(self._loggers) == 1: - new_logger = self._loggers[0] - self._logger = new_logger - if isinstance(new_logger, LoggerCollection): - self._loggers = list(new_logger) - else: - self._logger = LoggerCollection(self._loggers) - else: - self._loggers = [] - self._logger = None + def loggers(self, loggers: Optional[List[LightningLoggerBase]]) -> None: + self._loggers = loggers if loggers else [] @property def callback_metrics(self) -> dict: From 338ba43420424c2cb164d70919fed1c70a681baf Mon Sep 17 00:00:00 2001 From: Akash Kwatra Date: Wed, 9 Feb 2022 12:58:47 -0800 Subject: [PATCH 56/56] Update unit tests with new behavior --- tests/profiler/test_profiler.py | 4 ++-- tests/trainer/properties/test_log_dir.py | 3 ++- tests/trainer/properties/test_loggers.py | 10 ++++++---- 3 files changed, 10 insertions(+), 7 deletions(-) diff --git a/tests/profiler/test_profiler.py b/tests/profiler/test_profiler.py index 8d92c4318b4f2..522b56ca90e3f 100644 --- a/tests/profiler/test_profiler.py +++ b/tests/profiler/test_profiler.py @@ -24,7 +24,7 @@ from pytorch_lightning import Callback, Trainer from pytorch_lightning.callbacks import EarlyStopping, StochasticWeightAveraging -from pytorch_lightning.loggers.base import LoggerCollection +from pytorch_lightning.loggers.base import DummyLogger, LoggerCollection from pytorch_lightning.loggers.tensorboard import TensorBoardLogger from pytorch_lightning.profiler import AdvancedProfiler, PassThroughProfiler, PyTorchProfiler, SimpleProfiler from pytorch_lightning.profiler.pytorch 
import RegisterRecordFunction, warning_cache @@ -493,7 +493,7 @@ def look_for_trace(trace_dir): model = BoringModel() # Wrap the logger in a list so it becomes a LoggerCollection - logger = [TensorBoardLogger(save_dir=tmpdir)] + logger = [TensorBoardLogger(save_dir=tmpdir), DummyLogger()] trainer = Trainer(default_root_dir=tmpdir, profiler="pytorch", logger=logger, limit_train_batches=5, max_epochs=1) assert isinstance(trainer.logger, LoggerCollection) trainer.fit(model) diff --git a/tests/trainer/properties/test_log_dir.py b/tests/trainer/properties/test_log_dir.py index 277a2f105efd6..71920a6b079bf 100644 --- a/tests/trainer/properties/test_log_dir.py +++ b/tests/trainer/properties/test_log_dir.py @@ -16,6 +16,7 @@ from pytorch_lightning import Trainer from pytorch_lightning.callbacks import ModelCheckpoint from pytorch_lightning.loggers import LoggerCollection, TensorBoardLogger +from pytorch_lightning.loggers.base import DummyLogger from tests.helpers.boring_model import BoringModel @@ -117,7 +118,7 @@ def test_logdir_logger_collection(tmpdir): trainer = Trainer( default_root_dir=default_root_dir, max_steps=2, - logger=[TensorBoardLogger(save_dir=save_dir, name="custom_logs")], + logger=[TensorBoardLogger(save_dir=save_dir, name="custom_logs"), DummyLogger()], ) assert isinstance(trainer.logger, LoggerCollection) assert trainer.log_dir == default_root_dir diff --git a/tests/trainer/properties/test_loggers.py b/tests/trainer/properties/test_loggers.py index 39cb6f75e496e..606c7b641ae1d 100644 --- a/tests/trainer/properties/test_loggers.py +++ b/tests/trainer/properties/test_loggers.py @@ -49,6 +49,7 @@ def test_trainer_loggers_setters(): logger1 = CustomLogger() logger2 = CustomLogger() logger_collection = LoggerCollection([logger1, logger2]) + logger_collection_2 = LoggerCollection([logger2]) trainer = Trainer() assert type(trainer.logger) == TensorBoardLogger @@ -63,6 +64,11 @@ def test_trainer_loggers_setters(): assert trainer.logger._logger_iterable == logger_collection._logger_iterable assert trainer.loggers == [logger1, logger2] + # LoggerCollection of size 1 should result in trainer.logger becoming the contained logger. + trainer.logger = logger_collection_2 + assert trainer.logger == logger2 + assert trainer.loggers == [logger2] + trainer.logger = None assert trainer.logger is None assert trainer.loggers == [] @@ -76,10 +82,6 @@ def test_trainer_loggers_setters(): assert trainer.loggers == [logger1] assert trainer.logger == logger1 - trainer.loggers = [logger_collection] - assert trainer.loggers == [logger1, logger2] - assert trainer.logger._logger_iterable == logger_collection._logger_iterable - trainer.loggers = [] assert trainer.loggers == [] assert trainer.logger is None
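
Editor's note: the series lands the final behavior in several passes (PATCH 34, 35, 52, and 55), so the resulting contract of the two properties is easiest to read in one place. Below is a minimal, self-contained sketch of the `logger`/`loggers` semantics that PATCH 55/56 converge on. `_Logger`, `_Collection`, and `_Trainer` are hypothetical stand-ins for `LightningLoggerBase`, `LoggerCollection`, and `Trainer`, and the `rank_zero_warn` deprecation message emitted by the real multi-logger getter is elided; treat this as an illustration of the getter/setter invariants exercised by `test_trainer_loggers_setters`, not the actual implementation.

from typing import List, Optional


class _Logger:
    """Stand-in for LightningLoggerBase (hypothetical, for illustration only)."""


class _Collection(_Logger):
    """Stand-in for LoggerCollection: presents several loggers as one object."""

    def __init__(self, loggers):
        self._logger_iterable = list(loggers)

    def __iter__(self):
        return iter(self._logger_iterable)


class _Trainer:
    """Sketch of the property pair; the plain list is the source of truth."""

    def __init__(self) -> None:
        self._loggers: List[_Logger] = []

    @property
    def loggers(self) -> List[_Logger]:
        return self._loggers

    @loggers.setter
    def loggers(self, loggers: Optional[List[_Logger]]) -> None:
        # As of PATCH 55, nothing is mirrored into a separate _logger
        # attribute; the singular property is derived on every access.
        self._loggers = loggers if loggers else []

    @property
    def logger(self) -> Optional[_Logger]:
        if len(self.loggers) == 0:
            return None
        if len(self.loggers) == 1:
            return self.loggers[0]
        # The real getter also emits a rank_zero_warn deprecation message
        # before wrapping multiple loggers in a collection.
        return _Collection(self.loggers)

    @logger.setter
    def logger(self, logger: Optional[_Logger]) -> None:
        if not logger:
            self.loggers = []
        elif isinstance(logger, _Collection):
            # A collection is unrolled so ``loggers`` holds plain loggers.
            self.loggers = list(logger)
        else:
            self.loggers = [logger]


trainer = _Trainer()
a, b = _Logger(), _Logger()

trainer.loggers = [a, b]
assert isinstance(trainer.logger, _Collection)  # derived, not stored

trainer.logger = _Collection([b])
assert trainer.loggers == [b]  # a size-1 collection collapses to the bare logger
assert trainer.logger is b

trainer.logger = None
assert trainer.logger is None and trainer.loggers == []

The design choice worth noting: because `loggers` is the single source of truth and `logger` is recomputed from it on each access, a size-1 `LoggerCollection` assigned to `trainer.logger` collapses to the bare logger rather than round-tripping as a collection, which is exactly why PATCH 51 removed the unit test for assigning a `LoggerCollection` through the `loggers` setter.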