Mirror of https://github.com/microsoft/torchgeo.git
Bump black[jupyter] from 22.12.0 to 23.1.0 in /requirements (#1080)
* Bump black[jupyter] from 22.12.0 to 23.1.0 in /requirements

  Bumps [black[jupyter]](https://github.com/psf/black) from 22.12.0 to 23.1.0.
  - [Release notes](https://github.com/psf/black/releases)
  - [Changelog](https://github.com/psf/black/blob/main/CHANGES.md)
  - [Commits](https://github.com/psf/black/compare/22.12.0...23.1.0)

  ---
  updated-dependencies:
  - dependency-name: black[jupyter]
    dependency-type: direct:production
    update-type: version-update:semver-major
  ...

  Signed-off-by: dependabot[bot] <support@github.com>

* Style changes

* Resolve discrepancy between black and pydocstyle

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Adam J. Stewart <ajstewart426@gmail.com>
Parent: 50a4068e18
Commit: 2fe0c1cf3e
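The "Style changes" portion of this commit applies black 23.1.0's 2023 stable style. Two rules account for nearly every hunk below: blank lines directly after a block opener (for, if, try, def) are removed, and redundant parentheses around the target tuple of a for loop are dropped. A minimal before/after sketch of both rules (hypothetical snippet, not taken from the repository):

    import itertools

    model_options = ["resnet18", "resnet50"]
    lr_options = [1e-4, 1e-3]

    # black 22.12.0 left this form alone:
    #
    #     for (model, lr) in itertools.product(model_options, lr_options):
    #
    #         print(model, lr)
    #
    # black 23.1.0 strips the parentheses around the loop targets and the
    # blank line directly after the block opener:
    for model, lr in itertools.product(model_options, lr_options):
        print(model, lr)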
@@ -12,7 +12,7 @@ repos:
         additional_dependencies: ["colorama>=0.4.3"]
 
   - repo: https://github.com/psf/black
-    rev: 22.12.0
+    rev: 23.1.0
    hooks:
       - id: black
         args: [--skip-magic-trailing-comma]
@@ -213,7 +213,6 @@ def main(args: argparse.Namespace) -> None:
     model = model.to(device)
 
     if args.task == "etci2021":  # Custom metric setup for testing ETCI2021
-
         metrics = MetricCollection([BinaryAccuracy(), BinaryJaccardIndex()]).to(device)
 
         val_results = run_eval_loop(model, dm.val_dataloader(), device, metrics)
@@ -36,7 +36,7 @@ def do_work(work: "Queue[str]", gpu_idx: int) -> bool:
 if __name__ == "__main__":
     work: "Queue[str]" = Queue()
 
-    for (train_state, model, backbone, lr, loss, weight_init) in itertools.product(
+    for train_state, model, backbone, lr, loss, weight_init in itertools.product(
         training_set_options,
         model_options,
         backbone_options,
@@ -44,7 +44,6 @@ if __name__ == "__main__":
         loss_options,
         weight_init_options,
     ):
-
         experiment_name = f"{train_state}_{model}_{backbone}_{lr}_{loss}_{weight_init}"
 
         output_dir = os.path.join("output", "chesapeake-cvpr_experiments")
@@ -52,7 +51,6 @@ if __name__ == "__main__":
         config_file = os.path.join("conf", "chesapeake_cvpr.yaml")
 
         if not os.path.exists(os.path.join(output_dir, experiment_name)):
-
             command = (
                 "python train.py"
                 + f" config_file={config_file}"
@@ -35,7 +35,7 @@ def do_work(work: "Queue[str]", gpu_idx: int) -> bool:
 if __name__ == "__main__":
     work: "Queue[str]" = Queue()
 
-    for (model, lr, pretrained) in itertools.product(
+    for model, lr, pretrained in itertools.product(
         model_options, lr_options, pretrained_options
     ):
         experiment_name = f"{model}_{lr}_{pretrained}"
@@ -36,7 +36,7 @@ def do_work(work: "Queue[str]", gpu_idx: int) -> bool:
 if __name__ == "__main__":
     work: "Queue[str]" = Queue()
 
-    for (model, lr, pretrained, seed) in itertools.product(
+    for model, lr, pretrained, seed in itertools.product(
         model_options, lr_options, pretrained_options, seeds
     ):
         experiment_name = f"{model}_{lr}_{pretrained}_{seed}"
@@ -35,10 +35,9 @@ def do_work(work: "Queue[str]", gpu_idx: int) -> bool:
 if __name__ == "__main__":
     work: "Queue[str]" = Queue()
 
-    for (model, backbone, lr, loss, weight_init) in itertools.product(
+    for model, backbone, lr, loss, weight_init in itertools.product(
         model_options, backbone_options, lr_options, loss_options, weight_init_options
     ):
-
         experiment_name = f"{model}_{backbone}_{lr}_{loss}_{weight_init}"
 
         output_dir = os.path.join("output", "landcoverai_experiments")
@@ -46,7 +45,6 @@ if __name__ == "__main__":
         config_file = os.path.join("conf", "landcoverai.yaml")
 
         if not os.path.exists(os.path.join(output_dir, experiment_name)):
-
             command = (
                 "python train.py"
                 + f" config_file={config_file}"
@@ -35,10 +35,9 @@ def do_work(work: "Queue[str]", gpu_idx: int) -> bool:
 if __name__ == "__main__":
     work: "Queue[str]" = Queue()
 
-    for (model, backbone, lr, loss, weight_init) in itertools.product(
+    for model, backbone, lr, loss, weight_init in itertools.product(
         model_options, backbone_options, lr_options, loss_options, weight_init_options
     ):
-
         experiment_name = f"{model}_{backbone}_{lr}_{loss}_{weight_init}"
 
         output_dir = os.path.join("output", "landcoverai_experiments")
@@ -46,7 +45,6 @@ if __name__ == "__main__":
         config_file = os.path.join("conf", "landcoverai.yaml")
 
         if not os.path.exists(os.path.join(output_dir, experiment_name)):
-
             command = (
                 "python train.py"
                 + f" config_file={config_file}"
@@ -34,10 +34,9 @@ def do_work(work: "Queue[str]", gpu_idx: int) -> bool:
 if __name__ == "__main__":
     work: "Queue[str]" = Queue()
 
-    for (model, lr, loss, weights) in itertools.product(
+    for model, lr, loss, weights in itertools.product(
         model_options, lr_options, loss_options, weight_options
     ):
-
         experiment_name = f"{model}_{lr}_{loss}_{weights.replace('_','-')}"
 
         output_dir = os.path.join("output", "resisc45_experiments")
@@ -45,7 +44,6 @@ if __name__ == "__main__":
         config_file = os.path.join("conf", "resisc45.yaml")
 
         if not os.path.exists(os.path.join(output_dir, experiment_name)):
-
             command = (
                 "python train.py"
                 + f" config_file={config_file}"
@@ -36,10 +36,9 @@ def do_work(work: "Queue[str]", gpu_idx: int) -> bool:
 if __name__ == "__main__":
     work: "Queue[str]" = Queue()
 
-    for (model, lr, loss, weights, bands) in itertools.product(
+    for model, lr, loss, weights, bands in itertools.product(
         model_options, lr_options, loss_options, weight_options, bands_options
     ):
-
         experiment_name = f"{model}_{lr}_{loss}_byol_{bands}-{weights.split('/')[-2]}"
 
         output_dir = os.path.join("output", "so2sat_experiments")
@@ -47,7 +46,6 @@ if __name__ == "__main__":
         config_file = os.path.join("conf", "so2sat.yaml")
 
         if not os.path.exists(os.path.join(output_dir, experiment_name)):
-
             command = (
                 "python train.py"
                 + f" config_file={config_file}"
@@ -34,10 +34,9 @@ def do_work(work: "Queue[str]", gpu_idx: int) -> bool:
 if __name__ == "__main__":
     work: "Queue[str]" = Queue()
 
-    for (model, lr, loss, weights) in itertools.product(
+    for model, lr, loss, weights in itertools.product(
         model_options, lr_options, loss_options, weight_options
     ):
-
         experiment_name = f"{model}_{lr}_{loss}_{weights.replace('_','-')}"
 
         output_dir = os.path.join("output", "so2sat_experiments")
@@ -45,7 +44,6 @@ if __name__ == "__main__":
         config_file = os.path.join("conf", "so2sat.yaml")
 
         if not os.path.exists(os.path.join(output_dir, experiment_name)):
-
             command = (
                 "python train.py"
                 + f" config_file={config_file}"
@@ -35,10 +35,9 @@ def do_work(work: "Queue[str]", gpu_idx: int) -> bool:
 if __name__ == "__main__":
     work: "Queue[str]" = Queue()
 
-    for (model, lr, loss, weights, seed) in itertools.product(
+    for model, lr, loss, weights, seed in itertools.product(
         model_options, lr_options, loss_options, weight_options, seeds
     ):
-
         experiment_name = f"{model}_{lr}_{loss}_{weights.replace('_','-')}_{seed}"
 
         output_dir = os.path.join("output", "so2sat_seed_experiments")
@@ -46,7 +45,6 @@ if __name__ == "__main__":
         config_file = os.path.join("conf", "so2sat.yaml")
 
         if not os.path.exists(os.path.join(output_dir, experiment_name)):
-
             command = (
                 "python train.py"
                 + f" config_file={config_file}"
@@ -94,7 +94,6 @@ def main(args: argparse.Namespace) -> None:
     )
 
     for experiment_dir in os.listdir(args.input_dir):
-
         checkpoint_fn = None
         for fn in os.listdir(os.path.join(args.input_dir, experiment_dir)):
             if fn.startswith("epoch") and fn.endswith(".ckpt"):
@@ -109,7 +108,6 @@ def main(args: argparse.Namespace) -> None:
         checkpoint_fn = os.path.join(args.input_dir, experiment_dir, checkpoint_fn)
 
         try:
-
             model = ChesapeakeCVPRSegmentationTask.load_from_checkpoint(checkpoint_fn)
             model.freeze()
             model.eval()
@@ -136,7 +134,6 @@ def main(args: argparse.Namespace) -> None:
 
             # Test the loaded model against the test set from all states
             for test_splits in ALL_TEST_SPLITS:
-
                 dm = ChesapeakeCVPRDataModule(
                     root=args.chesapeakecvpr_root,
                     train_splits=["de-train"],
@@ -1,5 +1,5 @@
 # style
-black[jupyter]==22.12.0
+black[jupyter]==23.1.0
 flake8==6.0.0
 isort[colors]==5.12.0
 pydocstyle[toml]==6.3.0
@@ -109,7 +109,7 @@ docs =
     sphinx>=4,<7
 style =
     # black 21.8+ required for Jupyter support
-    black[jupyter]>=21.8,<23
+    black[jupyter]>=21.8,<24
     # flake8 3.8+ depends on pyflakes 2.2+, which fixes a bug with mypy error code ignores:
     # https://github.com/PyCQA/pyflakes/pull/455
     flake8>=3.8,<7
@@ -54,7 +54,6 @@ raster_extensions = [".tif", ".tif.ovr"]
 
 
 if __name__ == "__main__":
-
     for dir in directories:
         filename = dir + ".zip"
 
@@ -229,7 +229,6 @@ def generate_test_data(root: str) -> str:
 
     for prefix in tile_list:
         for suffix, data_profile in layer_data_profiles.items():
-
             img_path = os.path.join(folder_path, f"{prefix}_{suffix}.tif")
             img_dir = os.path.dirname(img_path)
             if not os.path.exists(img_dir):
@@ -273,7 +272,6 @@ def generate_test_data(root: str) -> str:
             schema=schema,
         ) as dst:
             for prefix in tile_list:
-
                 img_path = os.path.join(folder_path, f"{prefix}_a_naip.tif")
                 with rasterio.open(img_path) as f:
                     geom = shapely.geometry.mapping(shapely.geometry.box(*f.bounds))
@@ -41,7 +41,6 @@ def create_file(path: str, dtype: str, num_channels: int) -> None:
 
 
 if __name__ == "__main__":
-
     for measurement, file_paths in files.items():
         zipfilename = f"N00E020_{measurement}.zip"
         files_to_zip = []
@@ -63,7 +63,6 @@ def generate_test_data(root: str, n_samples: int = 2) -> str:
     os.makedirs(timg_dir)
 
     for i in range(n_samples):
-
         dtype_max = np.iinfo(dtype).max
         img = np.random.randint(dtype_max, size=size, dtype=dtype)
         lbl = np.random.randint(2, size=size, dtype=dtype)
@@ -96,7 +96,6 @@ TEST_EXTENT = Extent(
 
 
 def create_raster(path: str, dtype: str, num_channels: int, collection: str) -> None:
-
     if not os.path.exists(os.path.split(path)[0]):
         Path(os.path.split(path)[0]).mkdir(parents=True)
 
@@ -164,7 +163,6 @@ def get_class_label_list(overview: LabelOverview) -> LabelClasses:
 
 
 def get_item_class_overview(label_type: LabelType, asset_path: str) -> LabelOverview:
-
     """Takes a path to an asset based on type and returns the class label
     overview object
 
@@ -224,7 +222,6 @@ def create_label_item() -> Item:
 
 
 if __name__ == "__main__":
-
     # create a geotiff for each s2 band
     for b in BANDS:
         tif_path = os.path.join(
@@ -175,7 +175,6 @@ def main() -> None:
     ROOT_DIR = os.path.dirname(os.path.realpath(__file__))
 
     for dataset in datasets:
-
         collections = list(dataset.collection_md5_dict.keys())
         for collection in collections:
             dataset = cast(SpaceNet, dataset)
@@ -29,7 +29,6 @@ class TestAbovegroundLiveWoodyBiomassDensity:
     def dataset(
         self, monkeypatch: MonkeyPatch, tmp_path: Path
     ) -> AbovegroundLiveWoodyBiomassDensity:
-
         transforms = nn.Identity()
         monkeypatch.setattr(
             torchgeo.datasets.agb_live_woody_density, "download_url", download_url
@@ -18,7 +18,6 @@ from torchgeo.datasets import EUDEM, BoundingBox, IntersectionDataset, UnionData
 class TestEUDEM:
     @pytest.fixture
     def dataset(self, monkeypatch: MonkeyPatch, tmp_path: Path) -> EUDEM:
-
         md5s = {"eu_dem_v11_E30N10.zip": "ef148466c02197a08be169eaad186591"}
         monkeypatch.setattr(EUDEM, "md5s", md5s)
         zipfile = os.path.join("tests", "data", "eudem", "eu_dem_v11_E30N10.zip")
@@ -19,7 +19,6 @@ class TestInriaAerialImageLabeling:
     def dataset(
         self, request: SubRequest, monkeypatch: MonkeyPatch
     ) -> InriaAerialImageLabeling:
-
         root = os.path.join("tests", "data", "inria")
         test_md5 = "478688944e4797c097d9387fd0b3f038"
         monkeypatch.setattr(InriaAerialImageLabeling, "md5", test_md5)
@@ -29,7 +29,6 @@ pd = pytest.importorskip("pandas", minversion="0.23.2")
 class TestOpenBuildings:
     @pytest.fixture
     def dataset(self, monkeypatch: MonkeyPatch, tmp_path: Path) -> OpenBuildings:
-
         root = str(tmp_path)
         shutil.copy(
             os.path.join("tests", "data", "openbuildings", "tiles.geojson"), root
@@ -39,7 +39,6 @@ class TestUSAVars:
     def dataset(
         self, monkeypatch: MonkeyPatch, tmp_path: Path, request: SubRequest
     ) -> USAVars:
-
         monkeypatch.setattr(torchgeo.datasets.usavars, "download_url", download_url)
 
         md5 = "b504580a00bdc27097d5421dec50481b"
@@ -650,7 +650,6 @@ class ChesapeakeCVPR(GeoDataset):
         query_box = shapely.geometry.box(minx, miny, maxx, maxy)
 
         for layer in self.layers:
-
             fn = filenames[layer]
 
             with rasterio.open(os.path.join(self.root, fn)) as f:
@@ -702,10 +701,11 @@ class ChesapeakeCVPR(GeoDataset):
         Raises:
             RuntimeError: if ``download=False`` but dataset is missing or checksum fails
         """
-        # Check if the extracted files already exist
+
         def exists(filename: str) -> bool:
             return os.path.exists(os.path.join(self.root, filename))
 
+        # Check if the extracted files already exist
         if all(map(exists, self.files)):
             return
 
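This hunk (and the matching EnviroAtlas hunk below) is the "Resolve discrepancy between black and pydocstyle" part of the commit: black 23.1.0 wants a blank line between the docstring and the nested helper, while pydocstyle apparently objects when that blank line is followed by a comment rather than a definition, so the comment is moved down to the `if` it describes. A minimal sketch of the resulting pattern (hypothetical function and names, assuming this reading of the two tools' rules):

    import os


    def verify(root: str, files: list[str]) -> None:
        """Verify the integrity of the dataset.

        Raises:
            RuntimeError: if the dataset is missing
        """

        def exists(filename: str) -> bool:
            return os.path.exists(os.path.join(root, filename))

        # Check if the extracted files already exist
        if all(map(exists, files)):
            return

        raise RuntimeError("Dataset not found.")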
@@ -359,7 +359,6 @@ class EnviroAtlas(GeoDataset):
         query_box = shapely.geometry.box(minx, miny, maxx, maxy)
 
         for layer in self.layers:
-
             fn = filenames[layer]
 
             with rasterio.open(
@@ -416,10 +415,11 @@ class EnviroAtlas(GeoDataset):
         Raises:
             RuntimeError: if ``download=False`` but dataset is missing or checksum fails
         """
-        # Check if the extracted files already exist
+
        def exists(filename: str) -> bool:
             return os.path.exists(os.path.join(self.root, "enviroatlas_lotp", filename))
 
+        # Check if the extracted files already exist
         if all(map(exists, self.files)):
             return
 
@@ -144,7 +144,6 @@ class RandomBatchGeoSampler(BatchGeoSampler):
             # Choose random indices within that tile
             batch = []
             for _ in range(self.batch_size):
-
                 bounding_box = get_random_bounding_box(bounds, self.size, self.res)
                 batch.append(bounding_box)
 