Mirror of https://github.com/microsoft/torchgeo.git
Remove Kornia master install inside Github Actions (#203)
* remove kornia master install in tests action
* removed unused ignore comments
* removed unused ignore comments x2
Parent: f68cec5a2e
Commit: 9b1ecb24e4
@@ -19,8 +19,6 @@ jobs:
           python-version: 3.9
       - name: Install pip dependencies
         run: |
-          # TODO: remove after kornia's next release (>0.5.11)
-          pip install git+https://github.com/kornia/kornia@master  # required for mypy fixes
           pip install cython numpy  # needed for pycocotools
           pip install .[datasets,tests,train]
       - name: Run mypy checks

@@ -165,7 +165,7 @@ def main(args: argparse.Namespace) -> None:
     if isinstance(sampler, RandomBatchGeoSampler):
         dataloader = DataLoader(
             dataset,
-            batch_sampler=sampler,  # type: ignore[arg-type]
+            batch_sampler=sampler,
             num_workers=args.num_workers,
         )
     else:

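Why these `arg-type` ignores existed, and why they could go: `DataLoader`'s stubs type `batch_sampler` to accept a `Sampler` that yields batches of indices, and for a while mypy rejected torchgeo's geospatial samplers there; once the typing fixes this commit had been waiting on shipped in a release, mypy (with `warn_unused_ignores`) began flagging the comments as unused. A self-contained sketch of the pattern, in plain PyTorch with invented toy classes rather than torchgeo code:

from typing import Iterator, List

from torch.utils.data import DataLoader, Dataset, Sampler


class ToyDataset(Dataset):
    """Map-style dataset that returns its own indices."""

    def __getitem__(self, index: int) -> int:
        return index

    def __len__(self) -> int:
        return 10


class ToyBatchSampler(Sampler[List[int]]):
    """Yields whole batches of indices, like a batch geo sampler."""

    def __init__(self, batch_size: int, length: int) -> None:
        self.batch_size = batch_size
        self.length = length

    def __iter__(self) -> Iterator[List[int]]:
        for start in range(0, self.length, self.batch_size):
            yield list(range(start, min(start + self.batch_size, self.length)))

    def __len__(self) -> int:
        return (self.length + self.batch_size - 1) // self.batch_size


# The argument this diff un-ignores: a custom sampler passed as batch_sampler.
dl = DataLoader(ToyDataset(), batch_sampler=ToyBatchSampler(batch_size=4, length=10))
for batch in dl:
    print(batch)  # tensor([0, 1, 2, 3]), tensor([4, 5, 6, 7]), tensor([8, 9])
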
@@ -55,9 +55,7 @@ class TestBatchGeoSampler:
     @pytest.mark.parametrize("num_workers", [0, 1, 2])
     def test_dataloader(self, sampler: CustomBatchGeoSampler, num_workers: int) -> None:
         ds = CustomGeoDataset()
-        dl = DataLoader(
-            ds, batch_sampler=sampler, num_workers=num_workers  # type: ignore[arg-type]
-        )
+        dl = DataLoader(ds, batch_sampler=sampler, num_workers=num_workers)
         for _ in dl:
             continue

@@ -95,8 +93,6 @@ class TestRandomBatchGeoSampler:
     @pytest.mark.parametrize("num_workers", [0, 1, 2])
     def test_dataloader(self, sampler: RandomBatchGeoSampler, num_workers: int) -> None:
         ds = CustomGeoDataset()
-        dl = DataLoader(
-            ds, batch_sampler=sampler, num_workers=num_workers  # type: ignore[arg-type]
-        )
+        dl = DataLoader(ds, batch_sampler=sampler, num_workers=num_workers)
         for _ in dl:
             continue

@@ -54,9 +54,7 @@ class TestGeoSampler:
     @pytest.mark.parametrize("num_workers", [0, 1, 2])
     def test_dataloader(self, sampler: CustomGeoSampler, num_workers: int) -> None:
         ds = CustomGeoDataset()
-        dl = DataLoader(
-            ds, sampler=sampler, num_workers=num_workers  # type: ignore[arg-type]
-        )
+        dl = DataLoader(ds, sampler=sampler, num_workers=num_workers)
         for _ in dl:
             continue

@@ -93,9 +91,7 @@ class TestRandomGeoSampler:
     @pytest.mark.parametrize("num_workers", [0, 1, 2])
     def test_dataloader(self, sampler: RandomGeoSampler, num_workers: int) -> None:
         ds = CustomGeoDataset()
-        dl = DataLoader(
-            ds, sampler=sampler, num_workers=num_workers  # type: ignore[arg-type]
-        )
+        dl = DataLoader(ds, sampler=sampler, num_workers=num_workers)
         for _ in dl:
             continue

@@ -135,9 +131,7 @@ class TestGridGeoSampler:
     @pytest.mark.parametrize("num_workers", [0, 1, 2])
     def test_dataloader(self, sampler: GridGeoSampler, num_workers: int) -> None:
         ds = CustomGeoDataset()
-        dl = DataLoader(
-            ds, sampler=sampler, num_workers=num_workers  # type: ignore[arg-type]
-        )
+        dl = DataLoader(ds, sampler=sampler, num_workers=num_workers)
         for _ in dl:
             continue

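The test hunks above all follow one pattern: parametrize over `num_workers` and drain the loader, which exercises both the sampler logic and its pickling into worker processes. A minimal pytest sketch of that pattern outside torchgeo (toy dataset invented for illustration):

import pytest
from torch.utils.data import DataLoader, Dataset


class ToyDataset(Dataset):
    def __getitem__(self, index: int) -> int:
        return index

    def __len__(self) -> int:
        return 8


@pytest.mark.parametrize("num_workers", [0, 1, 2])
def test_dataloader(num_workers: int) -> None:
    dl = DataLoader(ToyDataset(), batch_size=2, num_workers=num_workers)
    for _ in dl:  # draining the loader without error is the whole assertion
        continue
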
@@ -413,11 +413,11 @@ def dataset_split(
         a list of the subset datasets. Either [train, val] or [train, val, test]
     """
     if test_pct is None:
-        val_length = int(len(dataset) * val_pct)  # type: ignore[arg-type]
-        train_length = len(dataset) - val_length  # type: ignore[arg-type]
+        val_length = int(len(dataset) * val_pct)
+        train_length = len(dataset) - val_length
         return random_split(dataset, [train_length, val_length])
     else:
-        val_length = int(len(dataset) * val_pct)  # type: ignore[arg-type]
-        test_length = int(len(dataset) * test_pct)  # type: ignore[arg-type]
-        train_length = len(dataset) - (val_length + test_length)  # type: ignore[arg-type]  # noqa: E501
+        val_length = int(len(dataset) * val_pct)
+        test_length = int(len(dataset) * test_pct)
+        train_length = len(dataset) - (val_length + test_length)
         return random_split(dataset, [train_length, val_length, test_length])

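The arithmetic in `dataset_split` rounds the val and test lengths down and gives the remainder to train, so the three lengths always sum to `len(dataset)`. A worked sketch using torch's `random_split` directly on a toy 100-sample dataset (this mirrors the function above rather than importing it):

import torch
from torch.utils.data import TensorDataset, random_split

dataset = TensorDataset(torch.arange(100))
val_pct, test_pct = 0.2, 0.1

val_length = int(len(dataset) * val_pct)                  # int(20.0) = 20
test_length = int(len(dataset) * test_pct)                # int(10.0) = 10
train_length = len(dataset) - (val_length + test_length)  # 100 - 30 = 70

train_ds, val_ds, test_ds = random_split(
    dataset, [train_length, val_length, test_length]
)
print(len(train_ds), len(val_ds), len(test_ds))  # 70 20 10
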
@@ -44,7 +44,7 @@ class ChangeMixin(Module):
             num_convs: number of convolution blocks
             scale_factor: number of upsampling factor
         """
-        super().__init__()  # type: ignore[no-untyped-call]
+        super().__init__()
         layers: List[Module] = [
             nn.modules.Sequential(
                 nn.modules.Conv2d(in_channels, inner_channels, 3, 1, 1),

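The remaining model hunks are all this same one-line change. Older torch stubs left `Module.__init__` untyped, so under mypy's `disallow_untyped_calls` every `super().__init__()` in a subclass needed `# type: ignore[no-untyped-call]`; once the installed torch gained the annotation (as the removals here imply), `warn_unused_ignores` turned each of those comments into an error. A minimal sketch of a subclass that now type-checks cleanly (toy class, not torchgeo code):

import torch
import torch.nn as nn


class TinyBlock(nn.Module):
    def __init__(self, in_channels: int, out_channels: int) -> None:
        # No `# type: ignore[no-untyped-call]` needed once Module.__init__
        # carries type annotations in the installed torch.
        super().__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.conv(x)
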
@@ -130,7 +130,7 @@ class ChangeStar(Module):
             ``'t2t1'``: concatenate bitemporal features in the order of t2->t1;
             ``'mean'``: the weighted mean of the output of ``'t1t2'`` and ``'t1t2'``
         """
-        super().__init__()  # type: ignore[no-untyped-call]
+        super().__init__()
         self.dense_feature_extractor = dense_feature_extractor
         self.seg_classifier = seg_classifier
         self.changemixin = changemixin

@@ -64,7 +64,7 @@ class FarSeg(Module):
             classes: number of output segmentation classes
             backbone_pretrained: whether to use pretrained weight for backbone
         """
-        super().__init__()  # type: ignore[no-untyped-call]
+        super().__init__()
         if backbone in ["resnet18", "resnet34"]:
             max_channels = 512
         elif backbone in ["resnet50", "resnet101"]:

@@ -129,7 +129,7 @@ class _FSRelation(Module):
             in_channels_list: a list of input channels
             out_channels: number of output channels
         """
-        super().__init__()  # type: ignore[no-untyped-call]
+        super().__init__()

         self.scene_encoder = ModuleList(
             [

@@ -160,7 +160,7 @@ class _FSRelation(Module):
             )
         )

-        self.normalizer = Sigmoid()  # type: ignore[no-untyped-call]
+        self.normalizer = Sigmoid()

     def forward(self, scene_feature: Tensor, features: List[Tensor]) -> List[Tensor]:
         """Forward pass of the model."""

@@ -202,7 +202,7 @@ class _LightWeightDecoder(Module):
                 levels
             out_feature_output_stride: output stride of output feature maps
         """
-        super().__init__()  # type: ignore[no-untyped-call]
+        super().__init__()

         self.blocks = ModuleList()
         for in_feat_os in in_feature_output_strides:

@@ -37,7 +37,7 @@ class ConvBlock(Module):
             dropout: probability for each dropout layer
             pool: max pool last conv layer output if True
         """
-        super().__init__()  # type: ignore[no-untyped-call]
+        super().__init__()
         layers = []
         for i in range(1, len(channels)):
             layers.extend(

@@ -225,7 +225,7 @@ class FCEF(Module):
             classes: number of output segmentation classes
                 (default=2 for binary segmentation)
         """
-        super().__init__()  # type: ignore[no-untyped-call]
+        super().__init__()
         self.encoder = Encoder(in_channels * t, pool=True)
         self.decoder = Decoder(classes)
         self.upsample = Upsample()

@@ -267,7 +267,7 @@ class FCSiamConc(Module):
             classes: number of output segmentation classes
                 (default=2 for binary segmentation)
         """
-        super().__init__()  # type: ignore[no-untyped-call]
+        super().__init__()
         self.encoder = Encoder(in_channels, pool=False)
         self.decoder = ConcatDecoder(t, classes)
         self.upsample = Upsample()

@@ -318,7 +318,7 @@ class FCSiamDiff(nn.modules.Module):
             classes: number of output segmentation classes
                 (default=2 for binary segmentation)
         """
-        super().__init__()  # type: ignore[no-untyped-call]
+        super().__init__()
         self.encoder = Encoder(in_channels, pool=False)
         self.decoder = Decoder(classes)
         self.upsample = Upsample()

@@ -23,7 +23,7 @@ class FCN(Module):
             classes: Number of filters in the final layer
             num_filters: Number of filters in each convolutional layer
         """
-        super(FCN, self).__init__()  # type: ignore[no-untyped-call]
+        super(FCN, self).__init__()

         conv1 = nn.modules.Conv2d(
             in_channels, num_filters, kernel_size=3, stride=1, padding=1

@@ -43,7 +43,7 @@ class RCF(Module):
             kernel_size: size of the kernel used to compute the RCFs
             bias: bias of the convolutional layer
         """
-        super().__init__()  # type: ignore[no-untyped-call]
+        super().__init__()

         assert features % 2 == 0

@@ -56,7 +56,7 @@ class RandomApply(Module):
             augm: augmentation function to apply
             p: probability with which the augmentation function is applied
         """
-        super().__init__()  # type: ignore[no-untyped-call]
+        super().__init__()
         self.augm = augm
         self.p = p

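For readers of these BYOL hunks: `RandomApply` simply gates an augmentation on a coin flip. A hedged re-implementation of the behavior the docstring above describes (it follows the documented semantics, not necessarily the exact torchgeo source):

import torch
import torch.nn as nn


class RandomApply(nn.Module):
    """Apply `augm` with probability `p`, else pass the input through."""

    def __init__(self, augm: nn.Module, p: float) -> None:
        super().__init__()
        self.augm = augm
        self.p = p

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if torch.rand(1).item() > self.p:
            return x
        return self.augm(x)
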
@@ -90,7 +90,7 @@ class SimCLRAugmentation(Module):
         Args:
             image_size: Tuple of integers defining the image size
         """
-        super().__init__()  # type: ignore[no-untyped-call]
+        super().__init__()
         self.size = image_size

         self.augmentation = Sequential(

@@ -128,7 +128,7 @@ class MLP(Module):
             projection_size: size of the output layer
             hidden_size: size of the hidden layer
         """
-        super().__init__()  # type: ignore[no-untyped-call]
+        super().__init__()
         self.mlp = Sequential(
             Linear(dim, hidden_size),
             BatchNorm1d(hidden_size),  # type: ignore[no-untyped-call]

@@ -174,7 +174,7 @@ class EncoderWrapper(Module):
             hidden_size: size of hidden layer of the projector MLP
             layer: layer from model to project
         """
-        super().__init__()  # type: ignore[no-untyped-call]
+        super().__init__()

         self.model = model
         self.projection_size = projection_size

@@ -275,7 +275,7 @@ class BYOL(Module):
             beta: the speed at which the target encoder is updated using the main
                 encoder
         """
-        super().__init__()  # type: ignore[no-untyped-call]
+        super().__init__()

         self.augment: Module
         if augment_fn is None:

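The `beta` documented in this hunk conventionally controls an exponential moving average from the online encoder into the target encoder. A sketch of that update rule (the actual torchgeo method is not shown in this diff):

import torch
import torch.nn as nn


def ema_update(target: nn.Module, online: nn.Module, beta: float) -> None:
    """target <- beta * target + (1 - beta) * online, parameter-wise."""
    with torch.no_grad():
        for t, o in zip(target.parameters(), online.parameters()):
            t.mul_(beta).add_(o, alpha=1 - beta)
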
@@ -509,7 +509,7 @@ class ChesapeakeCVPRDataModule(LightningDataModule):
         )
         return DataLoader(
             self.train_dataset,
-            batch_sampler=sampler,  # type: ignore[arg-type]
+            batch_sampler=sampler,
             num_workers=self.num_workers,
         )

@@ -523,7 +523,7 @@ class ChesapeakeCVPRDataModule(LightningDataModule):
         return DataLoader(
             self.val_dataset,
             batch_size=self.batch_size,
-            sampler=sampler,  # type: ignore[arg-type]
+            sampler=sampler,
             num_workers=self.num_workers,
         )

@@ -537,6 +537,6 @@ class ChesapeakeCVPRDataModule(LightningDataModule):
         return DataLoader(
             self.test_dataset,
             batch_size=self.batch_size,
-            sampler=sampler,  # type: ignore[arg-type]
+            sampler=sampler,
             num_workers=self.num_workers,
         )

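These datamodule hunks use the two mutually exclusive `DataLoader` shapes: `batch_sampler=` for training (the sampler yields whole index batches, so `batch_size` must not be passed) and `sampler=` plus `batch_size=` for val/test (the loader draws single indices and groups them). A plain-PyTorch sketch of the equivalence:

import torch
from torch.utils.data import BatchSampler, DataLoader, SequentialSampler, TensorDataset

ds = TensorDataset(torch.arange(8))

# sampler= yields single indices; DataLoader forms the batches:
dl1 = DataLoader(ds, batch_size=4, sampler=SequentialSampler(ds))

# batch_sampler= yields ready-made index batches; batch_size stays unset:
dl2 = DataLoader(
    ds,
    batch_sampler=BatchSampler(SequentialSampler(ds), batch_size=4, drop_last=False),
)

assert [b[0].tolist() for b in dl1] == [b[0].tolist() for b in dl2]
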
@@ -325,7 +325,7 @@ class NAIPChesapeakeDataModule(pl.LightningDataModule):
         """Return a DataLoader for training."""
         return DataLoader(
             self.dataset,
-            batch_sampler=self.train_sampler,  # type: ignore[arg-type]
+            batch_sampler=self.train_sampler,
             num_workers=self.num_workers,
         )

@@ -334,7 +334,7 @@ class NAIPChesapeakeDataModule(pl.LightningDataModule):
         return DataLoader(
             self.dataset,
             batch_size=self.batch_size,
-            sampler=self.val_sampler,  # type: ignore[arg-type]
+            sampler=self.val_sampler,
             num_workers=self.num_workers,
         )

@@ -343,6 +343,6 @@ class NAIPChesapeakeDataModule(pl.LightningDataModule):
         return DataLoader(
             self.test_dataset,
             batch_size=self.batch_size,
-            sampler=self.test_sampler,  # type: ignore[arg-type]
+            sampler=self.test_sampler,
             num_workers=self.num_workers,
         )