Bump lightning[pytorch-extra] from 2.0.9.post0 to 2.1.0 in /requirements (#1662)

* Bump lightning[pytorch-extra] from 2.0.9.post0 to 2.1.0 in /requirements

Bumps [lightning[pytorch-extra]](https://github.com/Lightning-AI/lightning) from 2.0.9.post0 to 2.1.0.
- [Release notes](https://github.com/Lightning-AI/lightning/releases)
- [Commits](https://github.com/Lightning-AI/lightning/compare/2.0.9.post0...2.1.0)

---
updated-dependencies:
- dependency-name: lightning[pytorch-extra]
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

* Remove type ignores

* Capture more warnings

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Caleb Robinson <calebrob6@gmail.com>
Co-authored-by: Adam J. Stewart <ajstewart426@gmail.com>
This commit is contained in:
dependabot[bot] 2023-10-13 16:33:37 -05:00 committed by Nils Lehmann
Parent e0917cafae
Commit 01de750885
4 changed files with 14 additions and 13 deletions

View file

@@ -246,7 +246,8 @@ filterwarnings = [
# Expected warnings
# Lightning warns us about using num_workers=0, but it's faster on macOS
"ignore:The dataloader, .*, does not have many workers which may be a bottleneck:UserWarning",
"ignore:The .*dataloader.* does not have many workers which may be a bottleneck:UserWarning:lightning",
"ignore:The .*dataloader.* does not have many workers which may be a bottleneck:lightning.fabric.utilities.warnings.PossibleUserWarning:lightning",
# Lightning warns us about using the CPU when GPU/MPS is available
"ignore:GPU available but not used.:UserWarning",
"ignore:MPS available but not used.:UserWarning",

View file

@@ -164,7 +164,7 @@ class ClassificationTask(BaseTask):
loss: Tensor = self.criterion(y_hat, y)
self.log("train_loss", loss)
self.train_metrics(y_hat_hard, y)
self.log_dict(self.train_metrics) # type: ignore[arg-type]
self.log_dict(self.train_metrics)
return loss
@@ -185,7 +185,7 @@ class ClassificationTask(BaseTask):
loss = self.criterion(y_hat, y)
self.log("val_loss", loss)
self.val_metrics(y_hat_hard, y)
self.log_dict(self.val_metrics) # type: ignore[arg-type]
self.log_dict(self.val_metrics)
if (
batch_idx < 10
@@ -226,7 +226,7 @@ class ClassificationTask(BaseTask):
loss = self.criterion(y_hat, y)
self.log("test_loss", loss)
self.test_metrics(y_hat_hard, y)
self.log_dict(self.test_metrics) # type: ignore[arg-type]
self.log_dict(self.test_metrics)
def predict_step(
self, batch: Any, batch_idx: int, dataloader_idx: int = 0
@@ -288,7 +288,7 @@ class MultiLabelClassificationTask(ClassificationTask):
loss: Tensor = self.criterion(y_hat, y.to(torch.float))
self.log("train_loss", loss)
self.train_metrics(y_hat_hard, y)
self.log_dict(self.train_metrics) # type: ignore[arg-type]
self.log_dict(self.train_metrics)
return loss
@@ -309,7 +309,7 @@ class MultiLabelClassificationTask(ClassificationTask):
loss = self.criterion(y_hat, y.to(torch.float))
self.log("val_loss", loss)
self.val_metrics(y_hat_hard, y)
self.log_dict(self.val_metrics) # type: ignore[arg-type]
self.log_dict(self.val_metrics)
if (
batch_idx < 10
@@ -349,7 +349,7 @@ class MultiLabelClassificationTask(ClassificationTask):
loss = self.criterion(y_hat, y.to(torch.float))
self.log("test_loss", loss)
self.test_metrics(y_hat_hard, y)
self.log_dict(self.test_metrics) # type: ignore[arg-type]
self.log_dict(self.test_metrics)
def predict_step(
self, batch: Any, batch_idx: int, dataloader_idx: int = 0

View file

@@ -157,7 +157,7 @@ class RegressionTask(BaseTask):
loss: Tensor = self.criterion(y_hat, y)
self.log("train_loss", loss)
self.train_metrics(y_hat, y)
self.log_dict(self.train_metrics) # type: ignore[arg-type]
self.log_dict(self.train_metrics)
return loss
@@ -180,7 +180,7 @@ class RegressionTask(BaseTask):
loss = self.criterion(y_hat, y)
self.log("val_loss", loss)
self.val_metrics(y_hat, y)
self.log_dict(self.val_metrics) # type: ignore[arg-type]
self.log_dict(self.val_metrics)
if (
batch_idx < 10
@@ -226,7 +226,7 @@ class RegressionTask(BaseTask):
loss = self.criterion(y_hat, y)
self.log("test_loss", loss)
self.test_metrics(y_hat, y)
self.log_dict(self.test_metrics) # type: ignore[arg-type]
self.log_dict(self.test_metrics)
def predict_step(
self, batch: Any, batch_idx: int, dataloader_idx: int = 0

View file

@@ -220,7 +220,7 @@ class SemanticSegmentationTask(BaseTask):
loss: Tensor = self.criterion(y_hat, y)
self.log("train_loss", loss)
self.train_metrics(y_hat_hard, y)
self.log_dict(self.train_metrics) # type: ignore[arg-type]
self.log_dict(self.train_metrics)
return loss
def validation_step(
@@ -240,7 +240,7 @@ class SemanticSegmentationTask(BaseTask):
loss = self.criterion(y_hat, y)
self.log("val_loss", loss)
self.val_metrics(y_hat_hard, y)
self.log_dict(self.val_metrics) # type: ignore[arg-type]
self.log_dict(self.val_metrics)
if (
batch_idx < 10
@@ -281,7 +281,7 @@ class SemanticSegmentationTask(BaseTask):
loss = self.criterion(y_hat, y)
self.log("test_loss", loss)
self.test_metrics(y_hat_hard, y)
self.log_dict(self.test_metrics) # type: ignore[arg-type]
self.log_dict(self.test_metrics)
def predict_step(
self, batch: Any, batch_idx: int, dataloader_idx: int = 0