Mirror of https://github.com/microsoft/landcover.git

Refactoring and removing old code in servers.py

This commit is contained in:
Parent: a07f3eee11
Commit: 18d0e9f11f
@@ -1,6 +1,4 @@
import sys
import os
import time

import numpy as np
from enum import Enum
@@ -28,10 +26,8 @@ import mercantile

import cv2
import pickle
import glob

from web_tool import ROOT_DIR

from DataLoaderAbstract import DataLoader

# ------------------------------------------------------
@@ -102,10 +98,24 @@ def crop_data_by_extent(src_img, src_bounds, extent):
# ------------------------------------------------------
class DataLoaderCustom(DataLoader):

    @property
    def shapes(self):
        return self._shapes
    @shapes.setter
    def shapes(self, value):
        self._shapes = value

    @property
    def padding(self):
        return self._padding
    @padding.setter
    def padding(self, value):
        self._padding = value

    def __init__(self, data_fn, shapes, padding):
        self.data_fn = data_fn
        self.shapes = shapes
        self.padding = padding
        self._shapes = shapes
        self._padding = padding

    def get_data_from_extent(self, extent):
        f = rasterio.open(os.path.join(ROOT_DIR, self.data_fn), "r")
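Note: each loader now exposes `shapes` and `padding` through the same property/setter pair, so attribute access is routed through `_shapes`/`_padding`. A minimal sketch of the shared pattern (the `ExampleLoader` class is hypothetical, not part of the commit); it also shows why the direct `self._shapes = shapes` assignment in `__init__` duplicates what the setter already did:

    # Hypothetical ExampleLoader demonstrating the property/setter pattern.
    class ExampleLoader:
        @property
        def shapes(self):
            return self._shapes

        @shapes.setter
        def shapes(self, value):
            self._shapes = value

    loader = ExampleLoader()
    loader.shapes = [1, 2, 3]            # goes through the setter
    assert loader._shapes == [1, 2, 3]   # setter already wrote _shapes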
@@ -211,11 +221,23 @@ class USALayerGeoDataTypes(Enum):

class DataLoaderUSALayer(DataLoader):

    @property
    def shapes(self):
        return self._shapes
    @shapes.setter
    def shapes(self, value):
        self._shapes = value

    @property
    def padding(self):
        return self._padding
    @padding.setter
    def padding(self, value):
        self._padding = value

    def __init__(self, shapes, padding):
        self.shapes = shapes
        print("Loading US layer with shapes", self.shapes)
        self.padding = padding
        self._shapes = shapes
        self._padding = padding

    def get_fn_by_geo_data_type(self, naip_fn, geo_data_type):
        fn = None
@@ -285,9 +307,23 @@ class DataLoaderUSALayer(DataLoader):
# ------------------------------------------------------
class DataLoaderBasemap(DataLoader):

    @property
    def shapes(self):
        return self._shapes
    @shapes.setter
    def shapes(self, value):
        self._shapes = value

    @property
    def padding(self):
        return self._padding
    @padding.setter
    def padding(self, value):
        self._padding = value

    def __init__(self, data_url, padding):
        self.data_url = data_url
        self.padding = padding
        self._padding = padding
        self.zoom_level = 17

    def get_image_by_xyz_from_url(self, tile):
@@ -2,15 +2,15 @@ from abc import ABC, abstractmethod

class DataLoader(ABC):

-    # @property
-    # @abstractmethod
-    # def padding(self):
-    #     pass
+    @property
+    @abstractmethod
+    def padding(self):
+        pass

-    # @property
-    # @abstractmethod
-    # def shapes(self):
-    #     pass
+    @property
+    @abstractmethod
+    def shapes(self):
+        pass

    @abstractmethod
    def get_shape_by_extent(self, extent, shape_layer):
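Uncommenting the `@property` + `@abstractmethod` pair makes `padding` and `shapes` part of the loader contract: a concrete subclass that fails to define them can no longer be instantiated. A minimal sketch of the enforcement (the `Base`/`Incomplete`/`Complete` classes are hypothetical, not from the commit):

    from abc import ABC, abstractmethod

    class Base(ABC):
        @property
        @abstractmethod
        def padding(self):
            pass

    class Complete(Base):
        padding = 2   # any definition of the name satisfies the abstract property

    class Incomplete(Base):
        pass

    Complete()        # fine
    try:
        Incomplete()  # raises TypeError: abstract method padding is undefined
    except TypeError as e:
        print(e)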
@@ -9,7 +9,7 @@ from enum import Enum

from web_tool import ROOT_DIR

-from DataLoaderNew import DataLoaderCustom, DataLoaderUSALayer, DataLoaderBasemap
+from DataLoader import DataLoaderCustom, DataLoaderUSALayer, DataLoaderBasemap

class DatasetTypes(Enum):
    CUSTOM = 1
@@ -0,0 +1,235 @@
import sys, os, time, copy

import numpy as np

import sklearn.base
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier

import tensorflow as tf
import keras
import keras.backend as K
import keras.models
import keras.optimizers

from ServerModelsAbstract import BackendModel
from web_tool import ROOT_DIR

AUGMENT_MODEL = MLPClassifier(
    hidden_layer_sizes=(),
    activation='relu',
    alpha=0.001,
    solver='lbfgs',
    tol=0.1,
    verbose=False,
    validation_fraction=0.0,
    n_iter_no_change=10
)
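With `hidden_layer_sizes=()` the `MLPClassifier` has no hidden layers, so the augment model is effectively a multinomial logistic regression fit on deep features (the `activation` setting is then irrelevant). A hedged usage sketch, with made-up feature arrays standing in for real fine-tune-layer activations:

    import numpy as np
    import sklearn.base

    # Hypothetical data: 100 feature vectors (64-d, as if taken from the
    # fine-tune layer) labeled with one of 4 land-cover classes.
    X = np.random.rand(100, 64)
    y = np.random.randint(0, 4, size=100)

    model = sklearn.base.clone(AUGMENT_MODEL)   # fresh, unfitted copy
    model.fit(X, y)
    proba = model.predict_proba(X)              # shape (100, 4), one column per class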
class KerasDenseFineTune(BackendModel):

    def __init__(self, model_fn, gpuid, fine_tune_layer, verbose=False):

        self.model_fn = model_fn

        with tf.device('/gpu:%d' % gpuid):
            tmodel = keras.models.load_model(self.model_fn, compile=False, custom_objects={
                "jaccard_loss": keras.metrics.mean_squared_error,
                "loss": keras.metrics.mean_squared_error
            })

            feature_layer_idx = fine_tune_layer

            self.model = keras.models.Model(inputs=tmodel.inputs, outputs=[tmodel.outputs[0], tmodel.layers[feature_layer_idx].output])
            self.model.compile("sgd", "mse")
            self.model._make_predict_function()  # have to initialize before threading

        self.output_channels = self.model.output_shape[0][3]
        self.output_features = self.model.output_shape[1][3]
        self.input_size = self.model.input_shape[1]

        self.down_weight_padding = 40

        self.stride_x = self.input_size - self.down_weight_padding*2
        self.stride_y = self.input_size - self.down_weight_padding*2

        self.verbose = verbose

        # Seed augmentation model dataset
        self.current_features = None

        self.augment_base_x_train = []
        self.augment_base_y_train = []

        self.augment_x_train = []
        self.augment_y_train = []
        self.augment_model = sklearn.base.clone(AUGMENT_MODEL)
        self.augment_model_trained = False

        self.undo_stack = []

        # seed_x_fn = ""
        # seed_y_fn = ""
        # if superres:
        #     seed_x_fn = ROOT_DIR + "/data/seed_data_hr+sr_x.npy"
        #     seed_y_fn = ROOT_DIR + "/data/seed_data_hr+sr_y.npy"
        # else:
        #     seed_x_fn = ROOT_DIR + "/data/seed_data_hr_x.npy"
        #     seed_y_fn = ROOT_DIR + "/data/seed_data_hr_y.npy"
        # for row in np.load(seed_x_fn):
        #     self.augment_base_x_train.append(row)
        # for row in np.load(seed_y_fn):
        #     self.augment_base_y_train.append(row)

        for row in self.augment_base_x_train:
            self.augment_x_train.append(row)
        for row in self.augment_base_y_train:
            self.augment_y_train.append(row)
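The wrapper model returns two tensors per forward pass: the network's softmax output and the activations of the layer selected by `fine_tune_layer` (negative values index from the end, so the default `-2` is the penultimate layer). A standalone sketch of the same wrapping trick on a small hypothetical network (shapes and layer sizes are assumptions):

    import numpy as np
    import keras
    from keras.layers import Conv2D, Input

    # Hypothetical stand-in for the loaded segmentation network.
    inp = Input(shape=(240, 240, 4))
    x = Conv2D(64, 3, padding="same", activation="relu")(inp)     # feature layer
    out = Conv2D(5, 1, padding="same", activation="softmax")(x)   # class scores
    tmodel = keras.models.Model(inputs=inp, outputs=out)

    # Expose both the predictions and an intermediate layer's activations.
    dual = keras.models.Model(inputs=tmodel.inputs,
                              outputs=[tmodel.outputs[0], tmodel.layers[-2].output])
    preds, feats = dual.predict(np.zeros((1, 240, 240, 4)))
    print(preds.shape, feats.shape)   # (1, 240, 240, 5) (1, 240, 240, 64)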
    def run(self, naip_data, extent, on_tile=False):
        ''' Expects naip_data to have shape (height, width, channels) and have values in the [0, 255] range.
        '''
        naip_data = naip_data / 255.0
        output, output_features = self.run_model_on_tile(naip_data)

        if self.augment_model_trained:
            original_shape = output.shape
            output = output_features.reshape(-1, output_features.shape[2])
            output = self.augment_model.predict_proba(output)
            output = output.reshape(original_shape[0], original_shape[1], -1)

        if not on_tile:
            self.current_features = output_features

        return output
    def run_model_on_batch(self, batch_data, batch_size=32, predict_central_pixel_only=False):
        ''' Expects batch_data to have shape (none, 240, 240, 4) and have values in the [0, 255] range.
        '''
        batch_data = batch_data / 255.0
        output = self.model.predict(batch_data, batch_size=batch_size, verbose=0)
        output, output_features = output
        output = output[:,:,:,1:]

        if self.augment_model_trained:
            num_samples, height, width, num_features = output_features.shape

            if predict_central_pixel_only:
                output_features = output_features[:,120,120,:].reshape(-1, num_features)
                output = self.augment_model.predict_proba(output_features)
                output = output.reshape(num_samples, 4)
            else:
                output_features = output_features.reshape(-1, num_features)
                output = self.augment_model.predict_proba(output_features)
                output = output.reshape(num_samples, height, width, 4)
        else:
            if predict_central_pixel_only:
                output = output[:,120,120,:]

        return output
    def retrain(self, **kwargs):
        x_train = np.concatenate(self.augment_x_train, axis=0)
        y_train = np.concatenate(self.augment_y_train, axis=0)

        vals, counts = np.unique(y_train, return_counts=True)

        if len(vals) >= 4:
            self.augment_model.fit(x_train, y_train)
            self.augment_model_trained = True
            self.undo_stack.append("retrain")

            success = True
            message = "Fit accessory model with %d samples" % (x_train.shape[0])
        else:
            success = False
            message = "Need to include training samples from each class"

        return success, message
    def add_sample(self, tdst_row, bdst_row, tdst_col, bdst_col, class_idx):
        x_samples = self.current_features[tdst_row:bdst_row+1, tdst_col:bdst_col+1, :].copy().reshape(-1, self.current_features.shape[2])
        y_samples = np.zeros((x_samples.shape[0]), dtype=np.uint8)
        y_samples[:] = class_idx
        self.augment_x_train.append(x_samples)
        self.augment_y_train.append(y_samples)
        self.undo_stack.append("sample")
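Together, `add_sample` and `retrain` implement the correction loop: each brush stroke appends the features under a rectangle with a single label, and retraining only proceeds once at least 4 distinct classes have been sampled. A hedged end-to-end sketch (the checkpoint path is a placeholder, and random features stand in for a real `run()` call):

    import numpy as np

    model = KerasDenseFineTune("checkpoint.h5", gpuid=0, fine_tune_layer=-2)  # assumes a real checkpoint
    model.current_features = np.random.rand(240, 240, 64)  # normally set by run()

    # One rectangle of corrections per class; retrain() requires >= 4 classes.
    for class_idx in range(4):
        r = class_idx * 20
        model.add_sample(r, r + 9, 0, 9, class_idx)   # 10x10 pixels -> 100 samples

    success, message = model.retrain()
    print(success, message)   # True "Fit accessory model with 400 samples"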
    def undo(self):
        num_undone = 0
        if len(self.undo_stack) > 0:
            undo = self.undo_stack.pop()
            if undo == "sample":
                self.augment_x_train.pop()
                self.augment_y_train.pop()
                num_undone += 1
                success = True
                message = "Undoing sample"
            elif undo == "retrain":
                while self.undo_stack[-1] == "retrain":
                    self.undo_stack.pop()
                self.augment_x_train.pop()
                self.augment_y_train.pop()
                num_undone += 1
                success = True
                message = "Undoing sample"
            else:
                raise ValueError("This shouldn't happen")
        else:
            success = False
            message = "Nothing to undo"
        return success, message, num_undone
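`undo` pops a "retrain" marker together with any consecutive retrains beneath it, then removes one sample batch. Note that `while self.undo_stack[-1] == "retrain"` indexes without a length check, so a stack consisting only of retrain markers would raise `IndexError`. A guarded variant, as a standalone sketch (an assumption about intent, not part of the commit):

    def safe_undo_retrain(undo_stack, x_train, y_train):
        """Guarded sketch: drain consecutive 'retrain' markers, then
        drop one sample batch if any remain."""
        num_undone = 0
        while undo_stack and undo_stack[-1] == "retrain":
            undo_stack.pop()
        if x_train:
            x_train.pop()
            y_train.pop()
            num_undone += 1
        return num_undone

    print(safe_undo_retrain(["retrain", "retrain"], [], []))   # 0, no IndexError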
    def reset(self):
        self.augment_x_train = []
        self.augment_y_train = []
        self.undo_stack = []
        self.augment_model = sklearn.base.clone(AUGMENT_MODEL)
        self.augment_model_trained = False

        for row in self.augment_base_x_train:
            self.augment_x_train.append(row)
        for row in self.augment_base_y_train:
            self.augment_y_train.append(row)
    def run_model_on_tile(self, naip_tile, batch_size=32):
        ''' Expects naip_tile to have shape (height, width, channels) and have values in the [0, 1] range.
        '''
        height = naip_tile.shape[0]
        width = naip_tile.shape[1]

        output = np.zeros((height, width, self.output_channels), dtype=np.float32)
        output_features = np.zeros((height, width, self.output_features), dtype=np.float32)

        counts = np.zeros((height, width), dtype=np.float32) + 0.000000001
        kernel = np.ones((self.input_size, self.input_size), dtype=np.float32) * 0.1
        kernel[10:-10, 10:-10] = 1
        kernel[self.down_weight_padding:self.down_weight_padding+self.stride_y,
               self.down_weight_padding:self.down_weight_padding+self.stride_x] = 5

        batch = []
        batch_indices = []
        batch_count = 0

        for y_index in (list(range(0, height - self.input_size, self.stride_y)) + [height - self.input_size,]):
            for x_index in (list(range(0, width - self.input_size, self.stride_x)) + [width - self.input_size,]):
                naip_im = naip_tile[y_index:y_index+self.input_size, x_index:x_index+self.input_size, :]

                batch.append(naip_im)
                batch_indices.append((y_index, x_index))
                batch_count += 1

        model_output = self.model.predict(np.array(batch), batch_size=batch_size, verbose=0)

        for i, (y, x) in enumerate(batch_indices):
            output[y:y+self.input_size, x:x+self.input_size] += model_output[0][i] * kernel[..., np.newaxis]
            output_features[y:y+self.input_size, x:x+self.input_size] += model_output[1][i] * kernel[..., np.newaxis]
            counts[y:y+self.input_size, x:x+self.input_size] += kernel

        output = output / counts[..., np.newaxis]
        output = output[:,:,1:5]
        output_features = output_features / counts[..., np.newaxis]

        return output, output_features
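`run_model_on_tile` slides overlapping windows across the tile and blends them with a weight kernel: 0.1 at the outermost 10 pixels, 1 in the interior, 5 in the central stride region, so predictions near window borders contribute little to the final average. A standalone sketch of the same weighted-overlap averaging on 1-D data (all shapes are assumptions for illustration):

    import numpy as np

    def blend_windows(signal_len, window, stride, predict):
        """Weighted average of overlapping window predictions (1-D sketch)."""
        out = np.zeros(signal_len, dtype=np.float32)
        counts = np.zeros(signal_len, dtype=np.float32) + 1e-9
        kernel = np.full(window, 0.1, dtype=np.float32)
        kernel[window // 4 : -window // 4] = 5.0   # down-weight window borders

        starts = list(range(0, signal_len - window, stride)) + [signal_len - window]
        for s in starts:
            out[s:s+window] += predict(s) * kernel
            counts[s:s+window] += kernel
        return out / counts

    result = blend_windows(1000, window=240, stride=160,
                           predict=lambda s: np.ones(240, dtype=np.float32))
    print(result.shape, result.max())   # (1000,) ~1.0 everywhere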
@@ -4,6 +4,7 @@ import numpy as np

import sklearn.base
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier

import tensorflow as tf
import keras
@@ -71,18 +72,18 @@ class KerasDenseFineTune(BackendModel):

        self.undo_stack = []

-        seed_x_fn = ""
-        seed_y_fn = ""
-        if superres:
-            seed_x_fn = ROOT_DIR + "/data/seed_data_hr+sr_x.npy"
-            seed_y_fn = ROOT_DIR + "/data/seed_data_hr+sr_y.npy"
-        else:
-            seed_x_fn = ROOT_DIR + "/data/seed_data_hr_x.npy"
-            seed_y_fn = ROOT_DIR + "/data/seed_data_hr_y.npy"
-        for row in np.load(seed_x_fn):
-            self.augment_base_x_train.append(row)
-        for row in np.load(seed_y_fn):
-            self.augment_base_y_train.append(row)
+        # seed_x_fn = ""
+        # seed_y_fn = ""
+        # if superres:
+        #     seed_x_fn = ROOT_DIR + "/data/seed_data_hr+sr_x.npy"
+        #     seed_y_fn = ROOT_DIR + "/data/seed_data_hr+sr_y.npy"
+        # else:
+        #     seed_x_fn = ROOT_DIR + "/data/seed_data_hr_x.npy"
+        #     seed_y_fn = ROOT_DIR + "/data/seed_data_hr_y.npy"
+        # for row in np.load(seed_x_fn):
+        #     self.augment_base_x_train.append(row)
+        # for row in np.load(seed_y_fn):
+        #     self.augment_base_y_train.append(row)

        for row in self.augment_base_x_train:
            self.augment_x_train.append(row)
@@ -32,12 +32,12 @@ import joblib
from azure.cosmosdb.table.tableservice import TableService
from azure.cosmosdb.table.models import Entity

-from DataLoaderNew import warp_data_to_3857, crop_data_by_extent
+from DataLoader import warp_data_to_3857, crop_data_by_extent
from Heatmap import Heatmap
from Datasets import DATASETS
from Utils import get_random_string, class_prediction_to_img, get_shape_layer_by_name, AtomicCounter

import ServerModelsNIPS
from ServerModelsKerasDense import KerasDenseFineTune

from web_tool import ROOT_DIR
@@ -59,7 +59,6 @@ class Session():
    current_request_counter = AtomicCounter()
    request_list = []

    @staticmethod
    def reset(soft=False, from_cached=None):
        if not soft:
            Session.model.reset()  # can't fail, so don't worry about it
@@ -79,7 +78,6 @@ class Session():
            "base_model": from_cached
        })

    @staticmethod
    def load(encoded_model_fn):
        model_fn = base64.b64decode(encoded_model_fn).decode('utf-8')
@@ -88,7 +86,6 @@ class Session():
        del Session.model
        Session.model = joblib.load(model_fn)

    @staticmethod
    def save(model_name):

        if Session.storage_type is not None:
@@ -116,7 +113,6 @@ class Session():
        else:
            return None

    @staticmethod
    def add_entry(data):
        client_ip = bottle.request.environ.get('HTTP_X_FORWARDED_FOR') or bottle.request.environ.get('REMOTE_ADDR')
        data = data.copy()
@@ -456,7 +452,7 @@ def get_input():

    naip_data, naip_crs, naip_transform, naip_bounds, naip_index = DATASETS[dataset]["data_loader"].get_data_from_extent(extent)
    naip_data = np.rollaxis(naip_data, 0, 3)

    naip_data, new_bounds = warp_data_to_3857(naip_data, naip_crs, naip_transform, naip_bounds)
    naip_data = crop_data_by_extent(naip_data, new_bounds, extent)
@@ -524,13 +520,7 @@ def main():
    parser.add_argument("--port", action="store", dest="port", type=int, help="Port to listen on", default=4444)
    parser.add_argument("--model", action="store", dest="model",
        choices=[
            "cached",
            "iclr_keras",
            "iclr_cntk",
            "nips_sr",
            "existing",
            "nips_hr",
            "group_norm",
            "keras_dense"
        ],
        help="Model to use", required=True
    )
@@ -544,6 +534,9 @@ def main():
        ],
        help="Model to use", required=True
    )
+    parser.add_argument("--fine_tune_layer", action="store", dest="fine_tune_layer", type=int, help="Layer of model to fine tune", default=-2)

    parser.add_argument("--model_fn", action="store", dest="model_fn", type=str, help="Model fn to use", default=None)
    parser.add_argument("--gpu", action="store", dest="gpuid", type=int, help="GPU to use", default=None)
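The new `--fine_tune_layer` flag feeds straight into `KerasDenseFineTune`, selecting which layer's activations become the feature space for the sklearn augment model; the default `-2` picks the penultimate layer. A hedged sketch of how the parsed flags wire together (flag names are from the diff; the checkpoint path is a placeholder):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--model_fn", type=str, default=None)
    parser.add_argument("--gpu", dest="gpuid", type=int, default=None)
    parser.add_argument("--fine_tune_layer", type=int, default=-2)
    args = parser.parse_args(["--model_fn", "checkpoint.h5", "--gpu", "0"])

    # model = KerasDenseFineTune(args.model_fn, args.gpuid, args.fine_tune_layer)
    print(args.fine_tune_layer)   # -2, the penultimate layer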
@@ -553,40 +546,10 @@ def main():
    os.environ["CUDA_VISIBLE_DEVICES"] = "" if args.gpuid is None else str(args.gpuid)

    model = None
-    if args.model == "cached":
-        if args.model_fn not in ["7_10_2018", "1_3_2019"]:
-            print("When using `cached` model you must specify either '7_10_2018', or '1_3_2019'. Exiting...")
-            return
-        model = ServerModelsCachedFormat.CachedModel(args.model_fn)
-    elif args.model == "iclr_keras":
-        model = ServerModelsICLRDynamicFormat.KerasModel(args.model_fn, args.gpuid)
-    elif args.model == "iclr_cntk":
-        model = ServerModelsICLRFormat.CNTKModel(args.model_fn, args.gpuid)
-    elif args.model == "nips_sr":
-        if args.fine_tune == "last_layer":
-            model = ServerModelsNIPS.KerasDenseFineTune(args.model_fn, args.gpuid, superres=True)
-            model1 = ServerModelsNIPS.KerasDenseFineTune(args.model_fn, 1, superres=True)
-        elif args.fine_tune == "last_k_layers":
-            model = ServerModelsNIPS.KerasBackPropFineTune(args.model_fn, args.gpuid, superres=True)
-    elif args.model == "nips_hr":
-        if args.fine_tune == "last_layer":
-            model = ServerModelsNIPS.KerasDenseFineTune(args.model_fn, args.gpuid, superres=False)
-        elif args.fine_tune == "last_k_layers":
-            model = ServerModelsNIPS.KerasBackPropFineTune(args.model_fn, args.gpuid, superres=False)
-    elif args.model == "group_norm":
-        if args.fine_tune == "last_k_layers":
-            model = ServerModelsNIPSGroupNorm.LastKLayersFineTune(args.model_fn, args.gpuid, last_k_layers=1)
-        elif args.fine_tune == "group_params":
-            model = ServerModelsNIPSGroupNorm.UnetgnFineTune(args.model_fn, args.gpuid)
-        elif args.fine_tune == "last_k_plus_group_params":
-            model = ServerModelsNIPSGroupNorm.GroupParamsLastKLayersFineTune(args.model_fn, args.gpuid, last_k_layers=2)
-        elif args.fine_tune == "group_params_then_last_k":
-            model = ServerModelsNIPSGroupNorm.GroupParamsThenLastKLayersFineTune(args.model_fn, args.gpuid, last_k_layers=2)
-    elif args.model == "existing":
-        model = joblib.load(args.model_fn)
+    if args.model == "keras_dense":
+        model = KerasDenseFineTune(args.model_fn, args.gpuid, args.fine_tune_layer)
    else:
-        print("Model isn't implemented, aborting")
-        return
+        raise NotImplementedError("The given model type is not implemented yet.")

    if args.storage_type == "file":
        assert args.storage_path is not None, "You must specify a storage path if you select the 'path' storage type"
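Swapping the `print`/`return` exit for `raise NotImplementedError` turns a silent shutdown into a loud, catchable failure. A toy sketch of the new control flow (the `Args` stand-in is hypothetical):

    class Args:
        model = "iclr_keras"   # hypothetical: any value other than "keras_dense"

    args = Args()
    if args.model == "keras_dense":
        print("would construct KerasDenseFineTune here")
    else:
        raise NotImplementedError("The given model type is not implemented yet.")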