From a150ab1acd8a2b9d66b57c5c0bd43e8ce3bac78f Mon Sep 17 00:00:00 2001
From: "deepsource-autofix[bot]" <62050782+deepsource-autofix[bot]@users.noreply.github.com>
Date: Wed, 9 Jun 2021 11:02:35 +0000
Subject: [PATCH] Refactor unnecessary `else` / `elif` when `if` block has a
 `return` statement (#285)

* Refactor unnecessary `else` / `elif` when `if` block has a `return` statement

* Apply suggestions from code review

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

Co-authored-by: deepsource-autofix[bot] <62050782+deepsource-autofix[bot]@users.noreply.github.com>
Co-authored-by: Jirka Borovec
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 tests/classification/test_f_beta.py           |  2 +-
 tests/classification/test_hinge.py            | 13 ++++----
 tests/classification/test_precision_recall.py |  2 +-
 tests/classification/test_specificity.py      | 31 +++++++------------
 tests/classification/test_stat_scores.py      |  2 +-
 tests/retrieval/test_fallout.py               |  3 +-
 tests/retrieval/test_mrr.py                   |  3 +-
 tests/retrieval/test_precision.py             |  3 +-
 tests/retrieval/test_recall.py                |  3 +-
 torchmetrics/classification/accuracy.py       |  5 ++-
 .../classification/binned_precision_recall.py |  3 +-
 .../functional/classification/auroc.py        |  4 +--
 torchmetrics/utilities/distributed.py         |  6 ++--
 torchmetrics/wrappers/bootstrapping.py        |  2 +-
 14 files changed, 33 insertions(+), 49 deletions(-)

diff --git a/tests/classification/test_f_beta.py b/tests/classification/test_f_beta.py
index 83b9907..db0e58b 100644
--- a/tests/classification/test_f_beta.py
+++ b/tests/classification/test_f_beta.py
@@ -75,7 +75,7 @@ def _sk_fbeta_f1_multidim_multiclass(
         target = torch.transpose(target, 1, 2).reshape(-1, target.shape[1])
 
         return _sk_fbeta_f1(preds, target, sk_fn, num_classes, average, False, ignore_index)
-    elif mdmc_average == "samplewise":
+    if mdmc_average == "samplewise":
         scores = []
 
         for i in range(preds.shape[0]):
diff --git a/tests/classification/test_hinge.py b/tests/classification/test_hinge.py
index d4b9af0..443dfdf 100644
--- a/tests/classification/test_hinge.py
+++ b/tests/classification/test_hinge.py
@@ -65,14 +65,13 @@ def _sk_hinge(preds, target, squared, multiclass_mode):
         if squared:
             measures = measures**2
         return measures.mean(axis=0)
-    else:
-        if multiclass_mode == MulticlassMode.ONE_VS_ALL:
-            result = np.zeros(sk_preds.shape[1])
-            for i in range(result.shape[0]):
-                result[i] = sk_hinge(y_true=sk_target[:, i], pred_decision=sk_preds[:, i])
-            return result
+    if multiclass_mode == MulticlassMode.ONE_VS_ALL:
+        result = np.zeros(sk_preds.shape[1])
+        for i in range(result.shape[0]):
+            result[i] = sk_hinge(y_true=sk_target[:, i], pred_decision=sk_preds[:, i])
+        return result
 
-        return sk_hinge(y_true=sk_target, pred_decision=sk_preds)
+    return sk_hinge(y_true=sk_target, pred_decision=sk_preds)
 
 
 @pytest.mark.parametrize(
diff --git a/tests/classification/test_precision_recall.py b/tests/classification/test_precision_recall.py
index 72be11a..064b88a 100644
--- a/tests/classification/test_precision_recall.py
+++ b/tests/classification/test_precision_recall.py
@@ -76,7 +76,7 @@ def _sk_prec_recall_multidim_multiclass(
         target = torch.transpose(target, 1, 2).reshape(-1, target.shape[1])
 
         return _sk_prec_recall(preds, target, sk_fn, num_classes, average, False, ignore_index)
-    elif mdmc_average == "samplewise":
+    if mdmc_average == "samplewise":
         scores = []
 
         for i in range(preds.shape[0]):
diff --git a/tests/classification/test_specificity.py b/tests/classification/test_specificity.py
index e8c6e07..376bbb6 100644
--- a/tests/classification/test_specificity.py
+++ b/tests/classification/test_specificity.py
@@ -112,28 +112,19 @@ def _sk_spec_mdim_mcls(preds, target, reduce, mdmc_reduce, num_classes, multicla
         preds = torch.transpose(preds, 1, 2).reshape(-1, preds.shape[1])
         target = torch.transpose(target, 1, 2).reshape(-1, target.shape[1])
         return _sk_spec(preds, target, reduce, num_classes, False, ignore_index, top_k, mdmc_reduce)
-    else:
-        fp, tn = [], []
-        stats = []
+    fp, tn = [], []
+    stats = []
 
-        for i in range(preds.shape[0]):
-            pred_i = preds[i, ...].T
-            target_i = target[i, ...].T
-            fp_i, tn_i = _sk_stats_score(
-                pred_i,
-                target_i,
-                reduce,
-                num_classes,
-                False,
-                ignore_index,
-                top_k,
-            )
-            fp.append(fp_i)
-            tn.append(tn_i)
+    for i in range(preds.shape[0]):
+        pred_i = preds[i, ...].T
+        target_i = target[i, ...].T
+        fp_i, tn_i = _sk_stats_score(pred_i, target_i, reduce, num_classes, False, ignore_index, top_k)
+        fp.append(fp_i)
+        tn.append(tn_i)
 
-        stats.append(fp)
-        stats.append(tn)
-        return _sk_spec(preds[0], target[0], reduce, num_classes, multiclass, ignore_index, top_k, mdmc_reduce, stats)
+    stats.append(fp)
+    stats.append(tn)
+    return _sk_spec(preds[0], target[0], reduce, num_classes, multiclass, ignore_index, top_k, mdmc_reduce, stats)
 
 
 @pytest.mark.parametrize("metric, fn_metric", [(Specificity, specificity)])
diff --git a/tests/classification/test_stat_scores.py b/tests/classification/test_stat_scores.py
index b85531a..6a85884 100644
--- a/tests/classification/test_stat_scores.py
+++ b/tests/classification/test_stat_scores.py
@@ -85,7 +85,7 @@ def _sk_stat_scores_mdim_mcls(preds, target, reduce, mdmc_reduce, num_classes, m
         target = torch.transpose(target, 1, 2).reshape(-1, target.shape[1])
 
         return _sk_stat_scores(preds, target, reduce, None, False, ignore_index, top_k)
-    elif mdmc_reduce == "samplewise":
+    if mdmc_reduce == "samplewise":
         scores = []
 
         for i in range(preds.shape[0]):
diff --git a/tests/retrieval/test_fallout.py b/tests/retrieval/test_fallout.py
index 2882d51..4f07f16 100644
--- a/tests/retrieval/test_fallout.py
+++ b/tests/retrieval/test_fallout.py
@@ -50,8 +50,7 @@ def _fallout_at_k(target: np.ndarray, preds: np.ndarray, k: int = None):
         order_indexes = np.argsort(preds, axis=0)[::-1]
         relevant = np.sum(target[order_indexes][:k])
         return relevant * 1.0 / target.sum()
-    else:
-        return np.NaN
+    return np.NaN
 
 
 class TestFallOut(RetrievalMetricTester):
diff --git a/tests/retrieval/test_mrr.py b/tests/retrieval/test_mrr.py
index ec5f2f1..6e3dcdc 100644
--- a/tests/retrieval/test_mrr.py
+++ b/tests/retrieval/test_mrr.py
@@ -50,8 +50,7 @@ def _reciprocal_rank(target: np.ndarray, preds: np.ndarray):
     if target.sum() > 0:
         # sklearn `label_ranking_average_precision_score` requires at most 2 dims
         return label_ranking_average_precision_score(np.expand_dims(target, axis=0), np.expand_dims(preds, axis=0))
-    else:
-        return 0.0
+    return 0.0
 
 
 class TestMRR(RetrievalMetricTester):
diff --git a/tests/retrieval/test_precision.py b/tests/retrieval/test_precision.py
index edf8d6b..03bce41 100644
--- a/tests/retrieval/test_precision.py
+++ b/tests/retrieval/test_precision.py
@@ -49,8 +49,7 @@ def _precision_at_k(target: np.ndarray, preds: np.ndarray, k: int = None):
         order_indexes = np.argsort(preds, axis=0)[::-1]
         relevant = np.sum(target[order_indexes][:k])
         return relevant * 1.0 / k
-    else:
-        return np.NaN
+    return np.NaN
 
 
 class TestPrecision(RetrievalMetricTester):
diff --git a/tests/retrieval/test_recall.py b/tests/retrieval/test_recall.py
index d8e522f..050b90e 100644
--- a/tests/retrieval/test_recall.py
+++ b/tests/retrieval/test_recall.py
@@ -48,8 +48,7 @@ def _recall_at_k(target: np.ndarray, preds: np.ndarray, k: int = None):
         order_indexes = np.argsort(preds, axis=0)[::-1]
         relevant = np.sum(target[order_indexes][:k])
         return relevant * 1.0 / target.sum()
-    else:
-        return np.NaN
+    return np.NaN
 
 
 class TestRecall(RetrievalMetricTester):
diff --git a/torchmetrics/classification/accuracy.py b/torchmetrics/classification/accuracy.py
index 639b723..7e41c27 100644
--- a/torchmetrics/classification/accuracy.py
+++ b/torchmetrics/classification/accuracy.py
@@ -268,9 +268,8 @@ class Accuracy(StatScores):
         """
         if self.subset_accuracy:
             return _subset_accuracy_compute(self.correct, self.total)
-        else:
-            tp, fp, tn, fn = self._get_final_stats()
-            return _accuracy_compute(tp, fp, tn, fn, self.average, self.mdmc_reduce, self.mode)
+        tp, fp, tn, fn = self._get_final_stats()
+        return _accuracy_compute(tp, fp, tn, fn, self.average, self.mdmc_reduce, self.mode)
 
     @property
     def is_differentiable(self) -> bool:
diff --git a/torchmetrics/classification/binned_precision_recall.py b/torchmetrics/classification/binned_precision_recall.py
index 702e182..15cdb80 100644
--- a/torchmetrics/classification/binned_precision_recall.py
+++ b/torchmetrics/classification/binned_precision_recall.py
@@ -163,8 +163,7 @@ class BinnedPrecisionRecallCurve(Metric):
         recalls = torch.cat([recalls, t_zeros], dim=1)
         if self.num_classes == 1:
             return (precisions[0, :], recalls[0, :], self.thresholds)
-        else:
-            return (list(precisions), list(recalls), [self.thresholds for _ in range(self.num_classes)])
+        return (list(precisions), list(recalls), [self.thresholds for _ in range(self.num_classes)])
 
 
 class BinnedAveragePrecision(BinnedPrecisionRecallCurve):
diff --git a/torchmetrics/functional/classification/auroc.py b/torchmetrics/functional/classification/auroc.py
index 38f8504..a64a841 100644
--- a/torchmetrics/functional/classification/auroc.py
+++ b/torchmetrics/functional/classification/auroc.py
@@ -100,9 +100,9 @@ def _auroc_compute(
         # calculate average
         if average == AverageMethod.NONE:
             return auc_scores
-        elif average == AverageMethod.MACRO:
+        if average == AverageMethod.MACRO:
             return torch.mean(torch.stack(auc_scores))
-        elif average == AverageMethod.WEIGHTED:
+        if average == AverageMethod.WEIGHTED:
             if mode == DataType.MULTILABEL:
                 support = torch.sum(target, dim=0)
             else:
diff --git a/torchmetrics/utilities/distributed.py b/torchmetrics/utilities/distributed.py
index 85d2696..e412bd8 100644
--- a/torchmetrics/utilities/distributed.py
+++ b/torchmetrics/utilities/distributed.py
@@ -76,11 +76,11 @@ def class_reduce(num: Tensor, denom: Tensor, weights: Tensor, class_reduction: s
 
     if class_reduction == "micro":
         return fraction
-    elif class_reduction == "macro":
+    if class_reduction == "macro":
         return torch.mean(fraction)
-    elif class_reduction == "weighted":
+    if class_reduction == "weighted":
         return torch.sum(fraction * (weights.float() / torch.sum(weights)))
-    elif class_reduction == "none" or class_reduction is None:
+    if class_reduction == "none" or class_reduction is None:
         return fraction
 
     raise ValueError(
diff --git a/torchmetrics/wrappers/bootstrapping.py b/torchmetrics/wrappers/bootstrapping.py
index 244c9fd..9da5b05 100644
--- a/torchmetrics/wrappers/bootstrapping.py
+++ b/torchmetrics/wrappers/bootstrapping.py
@@ -40,7 +40,7 @@ def _bootstrap_sampler(
         p = torch.distributions.Poisson(1)
         n = p.sample((size, ))
         return torch.arange(size).repeat_interleave(n.long(), dim=0)
-    elif sampling_strategy == 'multinomial':
+    if sampling_strategy == 'multinomial':
        idx = torch.multinomial(torch.ones(size), num_samples=size, replacement=True)
        return idx
     raise ValueError('Unknown sampling strategy')
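
For reference, every hunk above applies the same "early return" refactor: once a branch of an `if`/`elif` chain returns, the trailing `else`/`elif` only adds an indentation level and can be flattened without changing behavior. The snippet below is a generic, hypothetical sketch of that pattern; the function and argument names are made up for illustration and do not come from the torchmetrics codebase.

# Before: every branch returns, so the `elif`/`else` keywords are redundant
# and force one extra level of nesting.
def reduce_before(kind: str) -> str:
    if kind == "micro":
        return "micro reduction"
    elif kind == "macro":
        return "macro reduction"
    else:
        return "no reduction"


# After: flat `if` blocks with early returns; behavior is identical.
def reduce_after(kind: str) -> str:
    if kind == "micro":
        return "micro reduction"
    if kind == "macro":
        return "macro reduction"
    return "no reduction"


assert reduce_before("macro") == reduce_after("macro")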