From d54e28d0e6541054fdf87a6e23178bcbcdb7e22c Mon Sep 17 00:00:00 2001
From: Kenza Bouzid <37396332+kenza-bouzid@users.noreply.github.com>
Date: Fri, 28 Oct 2022 15:13:26 +0100
Subject: [PATCH] ENH: Add log every n steps parameter (#645)

Add log_every_n_steps trainer parameter
---
 hi-ml/src/health_ml/deep_learning_config.py | 3 +++
 hi-ml/src/health_ml/model_trainer.py        | 1 +
 2 files changed, 4 insertions(+)

diff --git a/hi-ml/src/health_ml/deep_learning_config.py b/hi-ml/src/health_ml/deep_learning_config.py
index 332ee321..97ee60cb 100644
--- a/hi-ml/src/health_ml/deep_learning_config.py
+++ b/hi-ml/src/health_ml/deep_learning_config.py
@@ -531,6 +531,9 @@ class TrainerParams(param.Parameterized):
     pl_accumulate_grad_batches: int = param.Integer(default=1,
                                                     doc="The number of batches over which gradients are accumulated, "
                                                         "before a parameter update is done.")
+    pl_log_every_n_steps: int = param.Integer(default=50,
+                                              doc="PyTorch Lightning trainer flag 'log_every_n_steps': How often to "
+                                                  "log within steps. Defaults to 50.")
 
     @property
     def use_gpu(self) -> bool:
diff --git a/hi-ml/src/health_ml/model_trainer.py b/hi-ml/src/health_ml/model_trainer.py
index 58d7fbfc..1d3756b8 100644
--- a/hi-ml/src/health_ml/model_trainer.py
+++ b/hi-ml/src/health_ml/model_trainer.py
@@ -177,6 +177,7 @@ def create_lightning_trainer(container: LightningContainer,
                       limit_test_batches=container.pl_limit_test_batches or 1.0,
                       fast_dev_run=container.pl_fast_dev_run,  # type: ignore
                       num_sanity_val_steps=container.pl_num_sanity_val_steps,
+                      log_every_n_steps=container.pl_log_every_n_steps,
                       # check_val_every_n_epoch=container.pl_check_val_every_n_epoch,
                       callbacks=callbacks,
                       logger=loggers,
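
Usage sketch (not part of the patch): the new TrainerParams field can be overridden like any other
container parameter, and create_lightning_trainer then forwards it to the PyTorch Lightning Trainer
as log_every_n_steps. MyContainer below is hypothetical, and the import path is assumed from the
hi-ml package layout.

# Minimal sketch, assuming LightningContainer inherits the TrainerParams fields
# and lives at this import path in hi-ml.
from health_ml.lightning_container import LightningContainer


class MyContainer(LightningContainer):
    def __init__(self) -> None:
        super().__init__()
        # Log metrics every 10 training steps instead of the default 50.
        self.pl_log_every_n_steps = 10

Because pl_log_every_n_steps is a param.Integer field on TrainerParams, it should also be settable
from the command line in the same way as the other pl_* flags (for example --pl_log_every_n_steps=10),
assuming the usual hi-ml argument parsing.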