From 1b792e716682254c33ddb5eb845357e84018636d Mon Sep 17 00:00:00 2001 From: James Lamb Date: Wed, 21 Feb 2024 12:15:38 -0600 Subject: [PATCH] [ci] [python-package] enable ruff-format on tests and examples (#6317) --- .pre-commit-config.yaml | 14 +- examples/python-guide/advanced_example.py | 178 +- examples/python-guide/dask/ranking.py | 23 +- .../python-guide/dataset_from_multi_hdf5.py | 34 +- examples/python-guide/logistic_regression.py | 55 +- .../notebooks/interactive_plot_example.ipynb | 121 +- examples/python-guide/plot_example.py | 43 +- examples/python-guide/simple_example.py | 44 +- examples/python-guide/sklearn_example.py | 56 +- python-package/lightgbm/basic.py | 20 +- python-package/lightgbm/callback.py | 10 +- python-package/lightgbm/dask.py | 36 +- python-package/lightgbm/engine.py | 19 +- python-package/lightgbm/sklearn.py | 41 +- python-package/pyproject.toml | 9 +- tests/c_api_test/test_.py | 134 +- tests/cpp_tests/test.py | 2 +- tests/distributed/_test_distributed.py | 94 +- tests/distributed/conftest.py | 4 +- tests/python_package_test/test_arrow.py | 20 +- tests/python_package_test/test_basic.py | 403 +-- tests/python_package_test/test_callback.py | 13 +- tests/python_package_test/test_consistency.py | 87 +- tests/python_package_test/test_dask.py | 1017 ++---- tests/python_package_test/test_dual.py | 2 +- tests/python_package_test/test_engine.py | 2972 ++++++++--------- tests/python_package_test/test_plotting.py | 385 ++- tests/python_package_test/test_sklearn.py | 1134 ++++--- tests/python_package_test/test_utilities.py | 48 +- tests/python_package_test/utils.py | 53 +- 30 files changed, 3222 insertions(+), 3849 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 1ce8e4a58..8e1fac76e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -7,6 +7,12 @@ exclude: | )$ repos: + - repo: https://github.com/pycqa/isort + rev: 5.13.2 + hooks: + - id: isort + name: isort (python) + args: ["--settings-path", "python-package/pyproject.toml"] - repo: https://github.com/astral-sh/ruff-pre-commit # Ruff version. rev: v0.2.1 @@ -14,12 +20,8 @@ repos: # Run the linter. - id: ruff args: ["--config", "python-package/pyproject.toml"] + types_or: [python, jupyter] # Run the formatter. 
- id: ruff-format args: ["--config", "python-package/pyproject.toml"] - - repo: https://github.com/pycqa/isort - rev: 5.13.2 - hooks: - - id: isort - name: isort (python) - args: ["--settings-path", "python-package/pyproject.toml"] + types_or: [python, jupyter] diff --git a/examples/python-guide/advanced_example.py b/examples/python-guide/advanced_example.py index b775b4364..4f0263286 100644 --- a/examples/python-guide/advanced_example.py +++ b/examples/python-guide/advanced_example.py @@ -10,13 +10,13 @@ from sklearn.metrics import roc_auc_score import lightgbm as lgb -print('Loading data...') +print("Loading data...") # load or create your dataset -binary_example_dir = Path(__file__).absolute().parents[1] / 'binary_classification' -df_train = pd.read_csv(str(binary_example_dir / 'binary.train'), header=None, sep='\t') -df_test = pd.read_csv(str(binary_example_dir / 'binary.test'), header=None, sep='\t') -W_train = pd.read_csv(str(binary_example_dir / 'binary.train.weight'), header=None)[0] -W_test = pd.read_csv(str(binary_example_dir / 'binary.test.weight'), header=None)[0] +binary_example_dir = Path(__file__).absolute().parents[1] / "binary_classification" +df_train = pd.read_csv(str(binary_example_dir / "binary.train"), header=None, sep="\t") +df_test = pd.read_csv(str(binary_example_dir / "binary.test"), header=None, sep="\t") +W_train = pd.read_csv(str(binary_example_dir / "binary.train.weight"), header=None)[0] +W_test = pd.read_csv(str(binary_example_dir / "binary.test.weight"), header=None)[0] y_train = df_train[0] y_test = df_test[0] @@ -27,72 +27,72 @@ num_train, num_feature = X_train.shape # create dataset for lightgbm # if you want to re-use data, remember to set free_raw_data=False -lgb_train = lgb.Dataset(X_train, y_train, - weight=W_train, free_raw_data=False) -lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, - weight=W_test, free_raw_data=False) +lgb_train = lgb.Dataset(X_train, y_train, weight=W_train, free_raw_data=False) +lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, weight=W_test, free_raw_data=False) # specify your configurations as a dict params = { - 'boosting_type': 'gbdt', - 'objective': 'binary', - 'metric': 'binary_logloss', - 'num_leaves': 31, - 'learning_rate': 0.05, - 'feature_fraction': 0.9, - 'bagging_fraction': 0.8, - 'bagging_freq': 5, - 'verbose': 0 + "boosting_type": "gbdt", + "objective": "binary", + "metric": "binary_logloss", + "num_leaves": 31, + "learning_rate": 0.05, + "feature_fraction": 0.9, + "bagging_fraction": 0.8, + "bagging_freq": 5, + "verbose": 0, } # generate feature names -feature_name = [f'feature_{col}' for col in range(num_feature)] +feature_name = [f"feature_{col}" for col in range(num_feature)] -print('Starting training...') +print("Starting training...") # feature_name and categorical_feature -gbm = lgb.train(params, - lgb_train, - num_boost_round=10, - valid_sets=lgb_train, # eval training data - feature_name=feature_name, - categorical_feature=[21]) +gbm = lgb.train( + params, + lgb_train, + num_boost_round=10, + valid_sets=lgb_train, # eval training data + feature_name=feature_name, + categorical_feature=[21], +) -print('Finished first 10 rounds...') +print("Finished first 10 rounds...") # check feature name -print(f'7th feature name is: {lgb_train.feature_name[6]}') +print(f"7th feature name is: {lgb_train.feature_name[6]}") -print('Saving model...') +print("Saving model...") # save model to file -gbm.save_model('model.txt') +gbm.save_model("model.txt") -print('Dumping model to JSON...') 
+print("Dumping model to JSON...") # dump model to JSON (and save to file) model_json = gbm.dump_model() -with open('model.json', 'w+') as f: +with open("model.json", "w+") as f: json.dump(model_json, f, indent=4) # feature names -print(f'Feature names: {gbm.feature_name()}') +print(f"Feature names: {gbm.feature_name()}") # feature importances -print(f'Feature importances: {list(gbm.feature_importance())}') +print(f"Feature importances: {list(gbm.feature_importance())}") -print('Loading model to predict...') +print("Loading model to predict...") # load model to predict -bst = lgb.Booster(model_file='model.txt') +bst = lgb.Booster(model_file="model.txt") # can only predict with the best iteration (or the saving iteration) y_pred = bst.predict(X_test) # eval with loaded model auc_loaded_model = roc_auc_score(y_test, y_pred) print(f"The ROC AUC of loaded model's prediction is: {auc_loaded_model}") -print('Dumping and loading model with pickle...') +print("Dumping and loading model with pickle...") # dump model with pickle -with open('model.pkl', 'wb') as fout: +with open("model.pkl", "wb") as fout: pickle.dump(gbm, fout) # load model with pickle to predict -with open('model.pkl', 'rb') as fin: +with open("model.pkl", "rb") as fin: pkl_bst = pickle.load(fin) # can predict with any iteration when loaded in pickle way y_pred = pkl_bst.predict(X_test, num_iteration=7) @@ -104,36 +104,36 @@ print(f"The ROC AUC of pickled model's prediction is: {auc_pickled_model}") # init_model accepts: # 1. model file name # 2. Booster() -gbm = lgb.train(params, - lgb_train, - num_boost_round=10, - init_model='model.txt', - valid_sets=lgb_eval) +gbm = lgb.train(params, lgb_train, num_boost_round=10, init_model="model.txt", valid_sets=lgb_eval) -print('Finished 10 - 20 rounds with model file...') +print("Finished 10 - 20 rounds with model file...") # decay learning rates # reset_parameter callback accepts: # 1. list with length = num_boost_round # 2. function(curr_iter) -gbm = lgb.train(params, - lgb_train, - num_boost_round=10, - init_model=gbm, - valid_sets=lgb_eval, - callbacks=[lgb.reset_parameter(learning_rate=lambda iter: 0.05 * (0.99 ** iter))]) +gbm = lgb.train( + params, + lgb_train, + num_boost_round=10, + init_model=gbm, + valid_sets=lgb_eval, + callbacks=[lgb.reset_parameter(learning_rate=lambda iter: 0.05 * (0.99**iter))], +) -print('Finished 20 - 30 rounds with decay learning rates...') +print("Finished 20 - 30 rounds with decay learning rates...") # change other parameters during training -gbm = lgb.train(params, - lgb_train, - num_boost_round=10, - init_model=gbm, - valid_sets=lgb_eval, - callbacks=[lgb.reset_parameter(bagging_fraction=[0.7] * 5 + [0.6] * 5)]) +gbm = lgb.train( + params, + lgb_train, + num_boost_round=10, + init_model=gbm, + valid_sets=lgb_eval, + callbacks=[lgb.reset_parameter(bagging_fraction=[0.7] * 5 + [0.6] * 5)], +) -print('Finished 30 - 40 rounds with changing bagging_fraction...') +print("Finished 30 - 40 rounds with changing bagging_fraction...") # self-defined objective function @@ -141,9 +141,9 @@ print('Finished 30 - 40 rounds with changing bagging_fraction...') # log likelihood loss def loglikelihood(preds, train_data): labels = train_data.get_label() - preds = 1. / (1. + np.exp(-preds)) + preds = 1.0 / (1.0 + np.exp(-preds)) grad = preds - labels - hess = preds * (1. 
- preds) + hess = preds * (1.0 - preds) return grad, hess @@ -156,22 +156,19 @@ def loglikelihood(preds, train_data): # Keep this in mind when you use the customization def binary_error(preds, train_data): labels = train_data.get_label() - preds = 1. / (1. + np.exp(-preds)) - return 'error', np.mean(labels != (preds > 0.5)), False + preds = 1.0 / (1.0 + np.exp(-preds)) + return "error", np.mean(labels != (preds > 0.5)), False # Pass custom objective function through params params_custom_obj = copy.deepcopy(params) -params_custom_obj['objective'] = loglikelihood +params_custom_obj["objective"] = loglikelihood -gbm = lgb.train(params_custom_obj, - lgb_train, - num_boost_round=10, - init_model=gbm, - feval=binary_error, - valid_sets=lgb_eval) +gbm = lgb.train( + params_custom_obj, lgb_train, num_boost_round=10, init_model=gbm, feval=binary_error, valid_sets=lgb_eval +) -print('Finished 40 - 50 rounds with self-defined objective function and eval metric...') +print("Finished 40 - 50 rounds with self-defined objective function and eval metric...") # another self-defined eval metric @@ -183,24 +180,26 @@ print('Finished 40 - 50 rounds with self-defined objective function and eval met # Keep this in mind when you use the customization def accuracy(preds, train_data): labels = train_data.get_label() - preds = 1. / (1. + np.exp(-preds)) - return 'accuracy', np.mean(labels == (preds > 0.5)), True + preds = 1.0 / (1.0 + np.exp(-preds)) + return "accuracy", np.mean(labels == (preds > 0.5)), True # Pass custom objective function through params params_custom_obj = copy.deepcopy(params) -params_custom_obj['objective'] = loglikelihood +params_custom_obj["objective"] = loglikelihood -gbm = lgb.train(params_custom_obj, - lgb_train, - num_boost_round=10, - init_model=gbm, - feval=[binary_error, accuracy], - valid_sets=lgb_eval) +gbm = lgb.train( + params_custom_obj, + lgb_train, + num_boost_round=10, + init_model=gbm, + feval=[binary_error, accuracy], + valid_sets=lgb_eval, +) -print('Finished 50 - 60 rounds with self-defined objective function and multiple self-defined eval metrics...') +print("Finished 50 - 60 rounds with self-defined objective function and multiple self-defined eval metrics...") -print('Starting a new training job...') +print("Starting a new training job...") # callback @@ -208,17 +207,14 @@ def reset_metrics(): def callback(env): lgb_eval_new = lgb.Dataset(X_test, y_test, reference=lgb_train) if env.iteration - env.begin_iteration == 5: - print('Add a new valid dataset at iteration 5...') - env.model.add_valid(lgb_eval_new, 'new_valid') + print("Add a new valid dataset at iteration 5...") + env.model.add_valid(lgb_eval_new, "new_valid") + callback.before_iteration = True callback.order = 0 return callback -gbm = lgb.train(params, - lgb_train, - num_boost_round=10, - valid_sets=lgb_train, - callbacks=[reset_metrics()]) +gbm = lgb.train(params, lgb_train, num_boost_round=10, valid_sets=lgb_train, callbacks=[reset_metrics()]) -print('Finished first 10 rounds with callback function...') +print("Finished first 10 rounds with callback function...") diff --git a/examples/python-guide/dask/ranking.py b/examples/python-guide/dask/ranking.py index 0e80cfb9f..e812fa39f 100644 --- a/examples/python-guide/dask/ranking.py +++ b/examples/python-guide/dask/ranking.py @@ -10,9 +10,9 @@ import lightgbm as lgb if __name__ == "__main__": print("loading data") - rank_example_dir = Path(__file__).absolute().parents[2] / 'lambdarank' - X, y = load_svmlight_file(str(rank_example_dir / 'rank.train')) - group = 
np.loadtxt(str(rank_example_dir / 'rank.train.query')) + rank_example_dir = Path(__file__).absolute().parents[2] / "lambdarank" + X, y = load_svmlight_file(str(rank_example_dir / "rank.train")) + group = np.loadtxt(str(rank_example_dir / "rank.train.query")) print("initializing a Dask cluster") @@ -32,25 +32,14 @@ if __name__ == "__main__": # a sparse boundary to partition the data X = X.toarray() - dX = da.from_array( - x=X, - chunks=[ - (rows_in_part1, rows_in_part2), - (num_features,) - ] - ) + dX = da.from_array(x=X, chunks=[(rows_in_part1, rows_in_part2), (num_features,)]) dy = da.from_array( x=y, chunks=[ (rows_in_part1, rows_in_part2), - ] - ) - dg = da.from_array( - x=group, - chunks=[ - (100, group.size - 100) - ] + ], ) + dg = da.from_array(x=group, chunks=[(100, group.size - 100)]) print("beginning training") diff --git a/examples/python-guide/dataset_from_multi_hdf5.py b/examples/python-guide/dataset_from_multi_hdf5.py index 41c8bf21c..ae7000ffb 100644 --- a/examples/python-guide/dataset_from_multi_hdf5.py +++ b/examples/python-guide/dataset_from_multi_hdf5.py @@ -34,13 +34,13 @@ def create_dataset_from_multiple_hdf(input_flist, batch_size): data = [] ylist = [] for f in input_flist: - f = h5py.File(f, 'r') - data.append(HDFSequence(f['X'], batch_size)) - ylist.append(f['Y'][:]) + f = h5py.File(f, "r") + data.append(HDFSequence(f["X"], batch_size)) + ylist.append(f["Y"][:]) params = { - 'bin_construct_sample_cnt': 200000, - 'max_bin': 255, + "bin_construct_sample_cnt": 200000, + "max_bin": 255, } y = np.concatenate(ylist) dataset = lgb.Dataset(data, label=y, params=params) @@ -51,7 +51,7 @@ def create_dataset_from_multiple_hdf(input_flist, batch_size): # The reason is that DataFrame column names will be used in Dataset. For a DataFrame with Int64Index # as columns, Dataset will use column names like ["0", "1", "2", ...]. While for numpy array, column names # are using the default one assigned in C++ code (dataset_loader.cpp), like ["Column_0", "Column_1", ...]. - dataset.save_binary('regression.train.from_hdf.bin') + dataset.save_binary("regression.train.from_hdf.bin") def save2hdf(input_data, fname, batch_size): @@ -59,7 +59,7 @@ def save2hdf(input_data, fname, batch_size): Please note chunk size settings in the implementation for I/O performance optimization. """ - with h5py.File(fname, 'w') as f: + with h5py.File(fname, "w") as f: for name, data in input_data.items(): nrow, ncol = data.shape if ncol == 1: @@ -75,12 +75,12 @@ def save2hdf(input_data, fname, batch_size): # Also note that the data is stored in row major order to avoid extra copy when passing to # lightgbm Dataset. chunk = (batch_size, ncol) - f.create_dataset(name, data=data, chunks=chunk, compression='lzf') + f.create_dataset(name, data=data, chunks=chunk, compression="lzf") def generate_hdf(input_fname, output_basename, batch_size): # Save to 2 HDF5 files for demonstration. - df = pd.read_csv(input_fname, header=None, sep='\t') + df = pd.read_csv(input_fname, header=None, sep="\t") mid = len(df) // 2 df1 = df.iloc[:mid] @@ -88,25 +88,23 @@ def generate_hdf(input_fname, output_basename, batch_size): # We can store multiple datasets inside a single HDF5 file. # Separating X and Y for choosing best chunk size for data loading. 
- fname1 = f'{output_basename}1.h5' - fname2 = f'{output_basename}2.h5' - save2hdf({'Y': df1.iloc[:, :1], 'X': df1.iloc[:, 1:]}, fname1, batch_size) - save2hdf({'Y': df2.iloc[:, :1], 'X': df2.iloc[:, 1:]}, fname2, batch_size) + fname1 = f"{output_basename}1.h5" + fname2 = f"{output_basename}2.h5" + save2hdf({"Y": df1.iloc[:, :1], "X": df1.iloc[:, 1:]}, fname1, batch_size) + save2hdf({"Y": df2.iloc[:, :1], "X": df2.iloc[:, 1:]}, fname2, batch_size) return [fname1, fname2] def main(): batch_size = 64 - output_basename = 'regression' + output_basename = "regression" hdf_files = generate_hdf( - str(Path(__file__).absolute().parents[1] / 'regression' / 'regression.train'), - output_basename, - batch_size + str(Path(__file__).absolute().parents[1] / "regression" / "regression.train"), output_basename, batch_size ) create_dataset_from_multiple_hdf(hdf_files, batch_size=batch_size) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/examples/python-guide/logistic_regression.py b/examples/python-guide/logistic_regression.py index 332b52814..ea02382eb 100644 --- a/examples/python-guide/logistic_regression.py +++ b/examples/python-guide/logistic_regression.py @@ -24,23 +24,19 @@ import lightgbm as lgb # single continuous predictor np.random.seed(0) N = 1000 -X = pd.DataFrame({ - 'continuous': range(N), - 'categorical': np.repeat([0, 1, 2, 3, 4], N / 5) -}) +X = pd.DataFrame({"continuous": range(N), "categorical": np.repeat([0, 1, 2, 3, 4], N / 5)}) CATEGORICAL_EFFECTS = [-1, -1, -2, -2, 2] -LINEAR_TERM = np.array([ - -0.5 + 0.01 * X['continuous'][k] - + CATEGORICAL_EFFECTS[X['categorical'][k]] for k in range(X.shape[0]) -]) + np.random.normal(0, 1, X.shape[0]) +LINEAR_TERM = np.array( + [-0.5 + 0.01 * X["continuous"][k] + CATEGORICAL_EFFECTS[X["categorical"][k]] for k in range(X.shape[0])] +) + np.random.normal(0, 1, X.shape[0]) TRUE_PROB = expit(LINEAR_TERM) Y = np.random.binomial(1, TRUE_PROB, size=N) DATA = { - 'X': X, - 'probability_labels': TRUE_PROB, - 'binary_labels': Y, - 'lgb_with_binary_labels': lgb.Dataset(X, Y), - 'lgb_with_probability_labels': lgb.Dataset(X, TRUE_PROB), + "X": X, + "probability_labels": TRUE_PROB, + "binary_labels": Y, + "lgb_with_binary_labels": lgb.Dataset(X, Y), + "lgb_with_probability_labels": lgb.Dataset(X, TRUE_PROB), } @@ -72,34 +68,25 @@ def experiment(objective, label_type, data): np.random.seed(0) nrounds = 5 lgb_data = data[f"lgb_with_{label_type}_labels"] - params = { - 'objective': objective, - 'feature_fraction': 1, - 'bagging_fraction': 1, - 'verbose': -1 - } + params = {"objective": objective, "feature_fraction": 1, "bagging_fraction": 1, "verbose": -1} time_zero = time.time() gbm = lgb.train(params, lgb_data, num_boost_round=nrounds) - y_fitted = gbm.predict(data['X']) + y_fitted = gbm.predict(data["X"]) y_true = data[f"{label_type}_labels"] duration = time.time() - time_zero - return { - 'time': duration, - 'correlation': np.corrcoef(y_fitted, y_true)[0, 1], - 'logloss': log_loss(y_fitted, y_true) - } + return {"time": duration, "correlation": np.corrcoef(y_fitted, y_true)[0, 1], "logloss": log_loss(y_fitted, y_true)} ################# # Observe the behavior of `binary` and `xentropy` objectives -print('Performance of `binary` objective with binary labels:') -print(experiment('binary', label_type='binary', data=DATA)) +print("Performance of `binary` objective with binary labels:") +print(experiment("binary", label_type="binary", data=DATA)) -print('Performance of `xentropy` objective with binary labels:') 
-print(experiment('xentropy', label_type='binary', data=DATA)) +print("Performance of `xentropy` objective with binary labels:") +print(experiment("xentropy", label_type="binary", data=DATA)) -print('Performance of `xentropy` objective with probability labels:') -print(experiment('xentropy', label_type='probability', data=DATA)) +print("Performance of `xentropy` objective with probability labels:") +print(experiment("xentropy", label_type="probability", data=DATA)) # Trying this throws an error on non-binary values of y: # experiment('binary', label_type='probability', DATA) @@ -109,9 +96,7 @@ print(experiment('xentropy', label_type='probability', data=DATA)) # there are reasons to suspect that `binary` should run faster when the # label is an integer instead of a float K = 10 -A = [experiment('binary', label_type='binary', data=DATA)['time'] - for k in range(K)] -B = [experiment('xentropy', label_type='binary', data=DATA)['time'] - for k in range(K)] +A = [experiment("binary", label_type="binary", data=DATA)["time"] for k in range(K)] +B = [experiment("xentropy", label_type="binary", data=DATA)["time"] for k in range(K)] print(f"Best `binary` time: {min(A)}") print(f"Best `xentropy` time: {min(B)}") diff --git a/examples/python-guide/notebooks/interactive_plot_example.ipynb b/examples/python-guide/notebooks/interactive_plot_example.ipynb index 3090f4a65..2cab2ff43 100644 --- a/examples/python-guide/notebooks/interactive_plot_example.ipynb +++ b/examples/python-guide/notebooks/interactive_plot_example.ipynb @@ -31,6 +31,7 @@ " # To enable interactive mode you should install ipywidgets\n", " # https://github.com/jupyter-widgets/ipywidgets\n", " from ipywidgets import interact, SelectMultiple\n", + "\n", " INTERACTIVE = True\n", "except ImportError:\n", " INTERACTIVE = False" @@ -54,9 +55,9 @@ }, "outputs": [], "source": [ - "regression_example_dir = Path().absolute().parents[1] / 'regression'\n", - "df_train = pd.read_csv(str(regression_example_dir / 'regression.train'), header=None, sep='\\t')\n", - "df_test = pd.read_csv(str(regression_example_dir / 'regression.test'), header=None, sep='\\t')\n", + "regression_example_dir = Path().absolute().parents[1] / \"regression\"\n", + "df_train = pd.read_csv(str(regression_example_dir / \"regression.train\"), header=None, sep=\"\\t\")\n", + "df_test = pd.read_csv(str(regression_example_dir / \"regression.test\"), header=None, sep=\"\\t\")\n", "\n", "y_train = df_train[0]\n", "y_test = df_test[0]\n", @@ -99,11 +100,7 @@ }, "outputs": [], "source": [ - "params = {\n", - " 'num_leaves': 5,\n", - " 'metric': ['l1', 'l2'],\n", - " 'verbose': -1\n", - "}" + "params = {\"num_leaves\": 5, \"metric\": [\"l1\", \"l2\"], \"verbose\": -1}" ] }, { @@ -142,16 +139,15 @@ ], "source": [ "evals_result = {} # to record eval results for plotting\n", - "gbm = lgb.train(params,\n", - " lgb_train,\n", - " num_boost_round=100,\n", - " valid_sets=[lgb_train, lgb_test],\n", - " feature_name=[f'f{i + 1}' for i in range(X_train.shape[-1])],\n", - " categorical_feature=[21],\n", - " callbacks=[\n", - " lgb.log_evaluation(10),\n", - " lgb.record_evaluation(evals_result)\n", - " ])" + "gbm = lgb.train(\n", + " params,\n", + " lgb_train,\n", + " num_boost_round=100,\n", + " valid_sets=[lgb_train, lgb_test],\n", + " feature_name=[f\"f{i + 1}\" for i in range(X_train.shape[-1])],\n", + " categorical_feature=[21],\n", + " callbacks=[lgb.log_evaluation(10), lgb.record_evaluation(evals_result)],\n", + ")" ] }, { @@ -173,7 +169,7 @@ "outputs": [], "source": [ "def 
render_metric(metric_name):\n", - " ax = lgb.plot_metric(evals_result, metric=metric_name, figsize=(10, 5))\n", + " lgb.plot_metric(evals_result, metric=metric_name, figsize=(10, 5))\n", " plt.show()" ] }, @@ -184,7 +180,7 @@ "outputs": [ { "data": { - "image/png": "<base64-encoded PNG of the metric plot (lgb.plot_metric output) omitted>\n", + "image/png": "<base64-encoded PNG of the metric plot (lgb.plot_metric output) omitted>\n",
Q4PT4YsBMHwAlOwiOMi4rkt9frovh+u7NODtccs5+5/e+3H5m3f4O2oREZEa52TegXv9SDudcyXAXcC3wDxgmHNujpk9bma9T+K64k+RifCbj6HHvTD1LXjzQtiaD0B8ZBhPXNaSL/7Yg+Z1Yxn03UJ6/P0nrh08ng8n51G0q8TPwYuIiNQMR3wHzsyGH24XkHS0kzvnRgAjDtr2yGHK5hxm+8CjXUeqWFAwnPso1GsPn94OL50OPf8fdPgtBIfQIi2Od/7QhbyCHXw6bRWfTM3nzx/N5OHPZ3NJ6zT+96JmJEaF+fsuREREAtbROjGcAfyGX99T28fwhgmRU1mzSyGlGXx1D4y4Dya/CRf+HbLOACAjMZIB5zThjz0bM3XlZj6asoqPp+QzZtFGnr+uHR0zE/18AyIiIoHpaE2o44EdzrmfD1pyAfUYEG8A4BuHw9VDYHdUUaj/AAAgAElEQVShN9zIsJtgy8r9RcyMDg0S+esVrfjkju6EhwZxzeDxvJS7hNJSDT0iIiJyvI6YwDnnLnTO/XSYfWf6JiQJOGbQvDfcORHO/j9Y+C083wm+ug/WzDigaMt6cXz5xx70almHv38zn9+9NYmC7bv9FLiIiEhg0mT2UnlCa8FZ98Ndk6DFFTD1bXjlTHj5DJj4KuzcDEBMRCjPX9uOJy5ryS+LN3HRv0czfX2JauNERESO0RETODMrNLNtFSyFZratqoKUABOfAZe/BPct8MaLA+8duUHZ8MmtULQBM+OGrg345I7uRIQG8a+puzj3mZ/579hlFBbv8W/8IiIi1dzRmlBjnHOxFSwxzrnYqgpSAlStBOh8M9w2Gm4dBe1vhDmfwsunwxKvZb5lvTi+u+csbm0dTmytUAZ+MZduf/2RgcPnsGzjdj/fgIiISPWkJlSpGnXbwMWD4OYfISIehlwOIwfC3j2EhQTRLS2Ez+48nc/uPJ3zmtfm3Qkr6PnPXP4yYh67Svb6O3oREZFqRQmcVK06LeGWn7zauDHPegMBb16xf3fbjHievaYtYx/sybWd6zN41FL6PD+W+WvVYi8iIrKPEjipemFR0Ps56PsmbFgAL59B2qqvYdevww2mxkTwl8tb8Ub/jmws2kXv/4zltdFL1dFBREQEJXDiTy2v8N6PS23GaYtehmeawYg/w/r5+4v0bFqbb+8+k7OyU3jyq3lc/9oEVm3Z6cegRURE/E8JnPhXQib87humtvsbZF8IU/4LL3aBNy/2OjyUlpIUHc7gGzrw9ytbMSN/C+c98zPP/bCInbv1bpyIiJyalMCJ/5mxLa4ZXDEY7p0H5z4GW/Pgw/7w4U2weztmxjWd6vPt3WeSk53CM98v5OxBuXwyNV/NqiIicspRAifVS1Qy9LgbBkyD856A+V/CGxfAljzAm1/1xes78OFt3UiNDefeYTPo88JYJizd5OfARUREqo4SOKmegoLh9AFw3TCvl+qrZ8PKCft3d8pM5LM7Tudf17RlY9Eurhk8nov+PZp/j1zE/LXbcE61ciIiUnMpgZPqrcl58IeREB4Db10C097dvysoyLisXT1+/FMOj1zSnMiwYP71w0J6/Ws0OYO8MeRm5G3xY/AiIiK+EeLvAESOKiXbGwD4w/7w+R2wZjqcO9AbjgSoFRbM73pk8bseWawvLOb7uev4ds463hy7jMGjltI2I57f9cjiwpZ1CA3W3ywiIhL4lMBJYKiVANd/DN8/DONfhIXfwCXPQuNzDyiWGhPB9V0acH2XBmzduYfh01fx5tjlDHh/GnViI7ixewOu61yf+MgwP92IiIjIyVN1hASO4BDo9Vf47dcQEgHvXAkf/wGKNlRYPK5WKDd0y2TkvWfxRv+ONE6N5ulvFtD1rz/w6OezNZ6ciIgELNXASeBp0B1uGwOjn4HR/4TFI+H8p6BNP6/zw0GCgoyeTWvTs2ltFqwt5PUxS3lv4krem7iSK9qlc3tOIzKTo/xwIyIiIidGNXASmELC4eyH4PaxkNLUezfuH4289+Smvg1b8ys8LLtODE/3bUPun8/mus71+XT6Knr+M5f/GTqNhesKq/YeRERETpBq4CSwpWRD/xEw/wtY+C0s+dGbwQEg+TRvdofT74bIxAMOqxdfi8f6tOTOno15ffQyhoxfwefTV5OTncLvTs/ijCbJmJkfbkhEROTolMBJ4AsKguZ9vMU52LDAS+SW/AC//AemDoFzHoH2Nx7SxJoaE8FDFzXjtrMaMWT8Ct4et4Ib35hIk9Roftcji8vb1SMi9NBmWREREX9SE6rULGaQ2hS63QG/+RhuHQ2pzeDLu73BgPMmVnhYQlQYA85pwtgHz2bQVW0ICQ7ioU9m0e2vP/DM9wvZvH13Fd+IiIjI4SmBk5qtTkvo/xVc+ToUrYfXz4NPb4PCdRUWDw8Jpm+HdEYM6MH7N3elQ4MEnvthEaf//Uee+mou67YVV/ENiIiIHEpNqFLzmUGrvnBaLxg9CH55HuZ9AWf+Gbre7nWIOOQQo1ujJLo1SmLhukJeyl3CG2OX89YvK7iqYzq3ndWIjMRIP9yMiIiIauDkVBIe7c3gcOcEyDwDRj4KL3SB+SO8d+cO47TaMTx7TVt++lMOfTum8+HkfHIG5XLTGxP5dFo+23eVVNktiIiIgI8TODPrZWYLzGyxmT14hHJ9zcyZWcey9fPMbIqZzSr77OnLOOUUk9QIrhvqvSMXHAZDr4Uhl8P6eUc8rH5SJH+5vBWjHzib285qyOL1RdzzwQw6PjmSAe9P44d569izt7SKbkJERE5lPmtCNbNg4AXgPCAfmGRmw51zcw8qFwMMACaU27wRuNQ5t9rMWgLfAvV8FaucohqfC7efBZNeh9y/wEvdoeWV0OMeqN3isIfVjo3gzxc05U/nZTNl5WY+m7aKr2atYfiM1SRHh3FVxwyu61xfTawiIuIzvqyB6wwsds4tdc7tBoYCfSoo9wTwNLD/7XDn3DTn3Oqy1TlAhJkd+qKSyMkKDoWut8Efp0G3O2HB114i9/61kD/5iIcGBRmdMhN56vJWTPzfc3ntxo60r5/AKz8v4cx//MRNb0zk+7nrKFGtnIiIVDJfdmKoB+SVW88HupQvYGbtgAzn3Jdmdt9hznMlMM05t8s3YYoAUUlw/pPQ416YOBjGvwQLRkDWWXDGvd7nEQb2DQsJ4tzmtTm3eW3WbN3J0Il5DJ20kpvfnkzduAj6dkjnyvbpmrJLREQqhbkjvLx9Uic2uwq4wDn3h7L1G4DOzrk/lq0HAT8C/Z1zy80sF7jPOTe53DlaAMOB851zSyq4xi3ALQApKSkdhg0b5pN7Ed8rKioiOjra32HsF1yyg7TV35Ke/znhuzezLaYJK+v3ZWNyZ7Bjq7jeW+qYvmEvP+WVMGfjXhzQOD6IHvVC6FQnhKjQmjPTQ3V7fnLs9OwCm55f4Dr77LOnOOc6nujxvkzgugEDnXMXlK0/BOCc+2vZehywBCgqO6QOUAD0ds5NNrN0vATvt865sUe7XnZ2tluwYEHl34hUidzcXHJycvwdxqH2FMOM92DMv2D
LCm/e1dPv9oYlCQ495tOs3VrMp9NW8fHUfBavL/Jq7JqlkpOdyhlNkqkbV8uHN+F71fb5yVHp2QU2Pb/AZWYnlcD5sgl1EtDEzLKAVUA/4Lp9O51zW4Hkfevla+DMLB74CnjoWJI3EZ8JjYCOv4N2N3pzrI55Fj67DX76C3S+Gdr95pB5VitSJy6C23MacdtZDZmZv5VPpuYzYvZaRsxaC0CT1Gh6NEnmzCYpdGmYSGSYhmgUEZHD89n/JZxzJWZ2F14P0mDgDefcHDN7HJjsnBt+hMPvAhoDD5vZw2XbznfOrfdVvCJHFBwCra/yat4Wfgtj/w3fP+wlcq2vgs63erM+HIWZ0SYjnjYZ8Qzs3YIF6woZvXAjoxdv5L0JK3lz7HLCQoLo2jCJntkpnN00lQZJem9OREQO5NM/851zI4ARB2175DBlc8p9fxJ40pexiZwQM8ju5S1rZsKkV2HmhzD1bajf3evR2vQSCAo+hlMZTevE0rROLDef2ZDiPXuZtLyA3AUb+GnBegZ+MZeBX8ylYXIUZzdN5cr26TRPi62CmxQRkepO7TQiJ6pua+j9Hzj3MZj2jpfMDbsRkhp7Y8m1uhpCwo75dBGhwZzRJIUzmqTw8CXNWb5xO7kL1vPjgg0MGbeC18cso1W9OK7plEHvtmnERhz7O3giIlKzaCotkZMVmQinD4AB0+Gq/0JoLfj8TniuHUx4BXbvOKHTZiZH0f/0LN7+XWcm/t85DLy0OXv2lvL/PptN56dGcu+w6YxauIHiPXsr935ERKTaUw2cSGUJCoYWl0Pzy2DxSBj9DHx9P/z8NHS5DTr9/pg6PFQkPjKM/qdncVP3TGat2srQSXkMn76aT6auIiwkiA71E+jRJJnujZJoVS+OkGD9bSYiUpMpgROpbGbQ5DxvWfGL13P1pydh9D+9Xqvd7oDEhid4aqN1ejyt0+N5+OLmTFi2ibGLNzJm8Sb+8a03jE5sRAi926ZxU7dMmtSOqcw7ExGRakIJnIgvNejuLevnwbjnYepbMOk1aHapVyuX0cXr4XoCaoUFk5PtjSUHsLFoF+OWbOLH+esZNjmfd8avpHujJG7qnsk5TVNVKyciUoMogROpCqnNoM8L0PNhb6quSa/DvOEQGgX1u5Qlej2gXnsIObFpf5Ojw7m0TRqXtknj/13cjA8m5/HOuBXcOmQK9eJrcW3nDPq0rUdGYmQl35yIiFQ1JXAiVSmmDpzziDfn6qLvvCbWFWPhx7JRc0IioH43yL4QTrsAEjJP6DJJ0eHckdOYW85oyMh563nrl+UM+m4hg75bSNuMeC5tk8YlretSOzai8u5NRESqjBI4EX8Ij4aWV3gLwI6CX5O5Rd97nR++vt+buuu0CyD7Iq+51Y5v/tSQ4CB6taxDr5Z1yCvYwVez1jB8+mqe+HIuT341ly5ZifRsmkrnrCRapsWqmVVEJEAogROpDiITodkl3tLrr7BpiTfjw8JvYNwL3swPSU2g0x+g7bUQEXfcl8hIjOS2sxpx21mNWLy+iC9nruarmWv4y4j5AESFBdMhM5EuWd7SKj2O8JCjD0gsIiJVTwmcSHWU1MjrrdrtDijeCvNHeJ0fvnkAfngcWl/tzcVau8UJnb5xajR3n3sad597Guu3FTNhWQETlxUwYdmvvVnDQoJomx5Px8wEOmUl0r5+AnG1NHiwiEh1oAROpLqLiPNq3dpeC6unwcTXYMb7MOVNSO/sbW9xOdRKOKHTp8ZG7O/8ALCpaBeTlm9m8vICJq3YzOBRS3kxdwlm0DglmhZpsbRIi9v/GReppE5EpKopgRMJJGnt4LIX4PwnYPq73hReX94DXz/gdXxocy00PheCTzypSooO3//eHMCO3SVMz9vCpGWbmbVqCxOWFfDZ9NX7y6cn1CKj1m7WRq6ka8MkGiRFYsf5rp6IiBwfJXAigSgyEbr/EbrdBWtmwIyhMOtDmPs5RCZ5iVyH30Jy45O/VFgI3Rsl071R8v5tm4p2MXfNNmav2sasVVsYs2At4z6ZBUDt2HC6ZCXRtWESnbMSaZQSpYRORKSSKYETCWRmkNbWW85/Apb86NXKTXjZGzg460wvkWt6CYSEVdplk6LDOaNJCmc0SQHgp59+IqNFJyYs28T4pQWMX7qJ4TO8Wrrk6HCvY0TDRLpkJdEkNZqgICV0IiInQwmcSE0RHOoNOXLaBVC4DqYNgSlvwUe/hagUaH0NNDrbG2cuLKpSL21mNE6NpnFqNNd3aYBzjuWbdjBh6SYmLPMSuq9mrQEgKSqMrg2T6Nooie6NkmiYrBo6EZHjpQROpCaKqQ1n3gc97vFq5Sa/ARNe8WrlgkKgXkfIOsOrocvocsKzPxyOmZGVHEVWchT9OtfHOUdewU7GL9vE+CWbGFcuoUuNCadHk2QubZ1GjybJhGosOhGRo1ICJ1KTBQVDk/O8Zfd2WDkelo2C5aNh9D9h1D8gPNbb3/QS7zM8ptLDMDPqJ0VSPymSqztm7K+hG1eWzP0wbz2fTF1FQmQoF7euS5+29ehQP0FNrSIih6EETuRUERYFjc/xFvDGl1s+FhaMgAVfw+yPITgMGubAab289+pSmkFY5c+dWr6G7rou9dlVspdRCzcyfMZqPpqSzzvjV5IWF8E5zWrTqWxgYU37JSLyKyVwIqeqiDhoepG3lO6FvAkw70uY/4U3TysABokNvQGDa7eEOq2gXntvTtdKFB4SzHnNa3Ne89ps31XC93PXMXzGaj6Zms+Q8SsAaJAUSafMRDpnJtKufjwNU6IJVg2diJyilMCJiNfU2qC7t1zwFBQshXVzYP1cWDfbW+Z9ATivfExdSGsP9dpBWnuCS3ZVWihR4SFc1q4el7WrR8neUuau2cbEspkifpi3jo+m5AMQHR5Cq3pxtMmIp21GHB0aJJISU7nv8omIVFdK4ETkQGbeVF5JjaB571+37yryErlVU70ZIVZPhQVfAdCDIFjewesUkXmG1zGiEppeQ4KDaJ0eT+v0eP5wRkNKSx1LNxYxI28rM/K3MCNvC6+PWcqevV5i2SY9jrObptKzaSot0+L0Dp2I1FhK4ETk2IRHQ/2u3rJP8VZYNYWVP79Lg9KVMPbfXueI4DCo28Zrek1tAbWbQ2pzbwDikxAUZDROjaFxagxXdkgHYFfJXuau3sbYxRv5cf56/v3DIv41chEpMeGcdVoKrerF0Tg1mkYp0dSODdeQJSJSIyiBE5ETFxEHjXqyLC+IBjk5sKvw156uq6Z6M0NM+e+v5WPSvLHoml/mdZaohMGFw0OCaVc/gXb1E7irZxM2Fe3i54Ub+HH+ekaWa3IFiAkPoWFqNI1TovePW9c4NZr6iZF6n05EAooSOBGpPOExvw5bAuAcFK6F9XNg3Vxv2q95X3rzuEbEQfbF0OIyaHh2pc0UkRQdzhXt07mifTrOOTYU7mLx+iIWbyjyPtcXMXrRBj6e+mtiFxYcRFZyFGnxEaTGRJAaG05KTDipMeHUjo0gKzmK+MjKm8lCRORkKYETEd8xg9i63tL4XG9byS5YmgtzPo
X5X8GM9yAiHtpeDx1/Vynzt/56eSM1NoLU2Ai6N04+YN/WnXtYUpbULVlfxJINRazZWsyc1dvYWLSLUnfguRKjwmiYHEXDlCgapUTTIi2O9g3iiQzTP6MiUvX0L4+IVK2Q8F+n/NqXzE1/Dya+AuNfgKyzoNPvIfsib3owH4mrFUr7+gm0r59wyL69pY6C7btZX1jMmi3FLNu4nSUbili6YTs/zl/PsMle7V1IkNEqPY7OWYl0zUqiQ2YCsRG+i1lEZB8lcCLiP+WTucK1v87fOuxGiK4DTS/2Ok1kdIH4+l6NXhUIDjJSYrxm1BZpcYfs37pjD9PyNjNxWQETlhXwxphlvPLzUsCrqasTG0FafAR142pRJy6C9IRaNEyOJjM5khgleCJSCXyawJlZL+DfQDDwmnPub4cp1xf4EOjknJtctu0h4PfAXmCAc+5bX8YqIn4WUwfO/DP0uBcWfe91fpj5AUx+3dsfXQfqd4F6HSC6NtRK9Hq11krwPiPiqyzBi4sMJSc7lZzsVAB27t7LtLzNTFu5hdVbdrJmazH5m3cyaflmtu7cc8CxKTHhZCVH0TA5ihb14miXEU92nRjNASsix8VnCZyZBQMvAOcB+cAkMxvunJt7ULkYYAAwody25kA/oAWQBow0s9Occ3t9Fa+IVBNBwZDdy1tK93oDCudN8JaVE7yerRWJiIeMzmVLF2+g4fDoKgm5Vlgw3Rsl071R8iH7duwuIa9gJ8s2FrFs446yz+18O2ctQyfleaGHBtEyLY62GfG0rBdHekIt0uJrUTs2Qr1jRaRCvqyB6wwsds4tBTCzoUAfYO5B5Z4AngbuK7etDzDUObcLWGZmi8vON86H8YpIdRMUDHVbe0vnm71tOzfDjgJv2Vn2uWMTbJgP+ZN+nQbMgr1x6Op18Kb/SmsPKU0huGrfHIkMCyG7TgzZdWIO2O6cI3/zTqblbWH6yi1Mz9vM2+NXsLukdH+Z4CCjTmwE9eJrkZEYSYMkb8lMiqJBUqR6xoqcwnz5L1k9IK/cej7QpXwBM2sHZDjnvjSz+w46dvxBx9bzVaAiEkBqJXhLUqOK9+/cDPmTIW+iV2s3+xOY8qa3L6SWlwymtfcSu/QOkJBVZU2v5ZkZGYmRZCRG0rtNGgC7S0pZvmk7q7bsZPX+pZhVm3cydvFGPp5afMA5YiNCyEiMJD2hFukJv36eVtsb206DFovUXL5M4Cr6l2N/x3wzCwKeBfof77HlznELcAtASkoKubm5JxKnVANFRUV6fgGs+j2/UAg6HRqcDvVLqbVzLTGFi4gpXETs1sVEr3qd4AkvAbAnJIZtsU0ojGlMcUQqJSHRlIREsSc0hpKQaPaExlIaXLVzrBreX6z1woHaZQvB7N4byYYdjvU7S1m33fvcuHM7s1YU8tN8x+5yL5nEhxunJQRxWkIwTRKCyIgJIqiChK76PTs5Hnp+py5fJnD5QEa59XRgdbn1GKAlkFv2V2IdYLiZ9T6GYwFwzg0GBgNkZ2e7nJycSgxfqlJubi56foEr4J7f3hLYMA9WTSE0fzJJq6aStPIjcKUVl4/LgOTTICW77LMp1GnpDVxcTTjnDX2St3kns1ZtZdKyAiYtL2DiWq/WLjo85NeBimPCSYkNp3ZMBBsKl3BGs5akRIeTFB1OfK1QzSEbQALuvz2pNL5M4CYBTcwsC1iF1ynhun07nXNbgf1v/JpZLnCfc26yme0E3jOzZ/A6MTQBJvowVhE5lQSHQJ1W3tKhv7dt9w7YsRF2boHiLb9+Fq6DjQth4wKY/AuU7PTKB4VAeidvFomGOd57dj4ct+5ozIyksiSsbUY8N3RtAED+5h1MWl7A9JVbWLO1mPWFu1i2cTsbCnexe6+XsL40Y38fMoKDjMSoMBokRh4w3Vjj1GjS4mopuROpJnyWwDnnSszsLuBbvGFE3nDOzTGzx4HJzrnhRzh2jpkNw+vwUALcqR6oIuJTYZEQVt8bb+5wSktha57XYWLlOG8Q4ty/Qu5fICwGGnSHpMYQnwFx6V7NXXx97509P72P5r0bF8nl7dIP2O6cY8uOPYz4cQxZzVuzqWg3G4t2saloNxsKd7Fs03a+m7tuf09ZgLCQIGrHhpMaE7H/MzU2fH8ni/qJkSRFhendO5Eq4NPuWM65EcCIg7Y9cpiyOQetPwU85bPgRESOV1AQJDTwltMu8LbtKIDlo2HJT7ByvPd9z44DjwuLhoTMQ5fU5hCb5rdOFAlRYdSLCapw+JN9CrbvZvH6IhatL2TFph2s31bMum27mL+2kNELN1K4q+SA8pFhwdQv65yRmRRJZnIUWUlRZCZHUSc2QjV4IpVEMzGIiJyMyERo3sdbAJzzkrqtK2FLnldjt2UlbF4OmxbD4pFQUq43aWSy1zO2TtlwKbVbecldSPUYIiQxKozOWYl0zkqscP/2XSWs3rKTlQU7WFmwg7wC7/uKTdsZtXADu8oNixIeEkRmUhSNUqNomBxNw5QoGqZ4n5qCTOT4KIETEalMZhCV5C1p7Q7d7xwUrYOCpbB2NqydAWtmwrgXoLRs1gYL9mr5khqXLY0gNh2iUyAqBaJSITSiau/rMKLCQ2hSO4YmtQ/t0FFa6lizrZjlG7ezbOP2/Z/z1hTy7Zx17C39dXCB1JhwGqV479o1SomicWoMDZIiSYgKIyosWM2yIgdRAiciUpXMvGnDYup478ztU7Lb6xm7bi4ULPFq6zYthuVjDm2SBQiP9aYUS25SrodstrceEVt193MEQUFGvfha1IuvxemND2ym3V1SysqCHSzdUMSSDdtZsqGIxeuL+GzaqkOaZUODjfjIMBIiQ0mIDCM1NoK6cRHUiY2gTpy3pMXVIjUmXE20cspQAiciUh2EhEHdNt5SnnNQuAa2rYHt62H7Bigq+9y2GjYu8uaOLS0352piI8g601syz/Bq7qqZsJCg/b1by3POsaFwF4vXF5G/eSebd+xm8449bNmx2/u+fQ+z8rfw3ZziA5pn4f+3d+cxctb3Hcff351rZ2Zn7117fWEbjI2BYidACAmRRZMql0oaNcpVNUoPkog0CUkV0ahq1UpV0zZqmyoIiZKDSLkqyEFTStomOEQQwJzmMCZgsHft9YF3Z+/Zndn99o/fs97xHr7wevdZf17So2eeZ54Z/9YPz/rD7wxNtKubc1zQnGNNSxhUsa41rxG0siQpwImILGZmYaBD/Yq5rxmvhD52r+0OI2Q7d8Czd0+tQNF+Kay7Lkyb0rYp1Ngtklq66cyM9vpa2utP3EQ8OYq2u6/Ewf4R9hdLdEZ97/YeHebXe44yXDWzcTaVYH1bngujPnfN+TT1tSkKtUnqsynqa1O01KU1ilZiQwFORCTuEklovShsm94Tzo1XoPtpeOWX8MoD8PidU3PYAdSvhLaNXDycgKGfQiId5rFLpCCRgfoOaF4ftrplCzYNylwmR9E25dNsXjEzjLo7rw2OHWuifenwIC8fGeTxvb3c8/SMeeGPyaYSrGoK06KsjvbLG2pZVl/LsmjalNpUYj5/NJFTogAnIrIUJ
ZJhrddVb4TrPg8T46GW7khUSxftW197FYqPhcA3PgbjozO/K5WLwty6qoEV0ZZrWXThDkLAaytkaCtkeNP6luPeG6tM0F8q0z9SZqBUiV5XODJQorN3hM6eYTp7R9jxSs+M/ngADdkUHQ21XLyswMXL6rh4WYGNywusbsqpmVbOGQU4EZHzQU0ijGZtuRA2vfvY6YemL8XkDuNl6O8KI2V7XpnaH34Bdt93fH+72oYwn13HFlixJexbN4Q/b5FKJ2torcvQWnfiNW7dnb6RMof6RznUX+JQf1jJ4lB/aK6dXptXm6rhguY8KxprWdmUZUU0gKOjIUtjLjTT1meTZFMaVSuvnwKciIhMMQsDKiabT6cbr4Q57o5WjZQ9+Aw8cSc8clu4JpUL89qtujIsN7b66hP34VukzMLo18Zcmo3LZ1/3dnC0wm8ODfDioQFePDTIvp5hDhRHeKqzSO9wedbPJGuM+myK9kKGC1pyrG3Js2ZyHzXZphI18/mjyRKgACciIqcukZwKdxveMXV+YjysGXvgKeh+CvY/AY/+O/z6a+H9+lWw+ipYeWWoqVv+W4t2IMXpqMsk2bqmia1rmma8NzRaobtvhO6+En0joZm2v1SmbyRsh/pKvHxkiPtfOHJsXVoIGbq1LkNH1Peuo6GWtroMjbkUDbk0DdkUjdkUjbkUQ2VnYsLVdHseUoATEZHXryYB7ZeEbcuHw7nKWKid6260mdwAABFKSURBVHoUOh+Frh3w3I+mPtNyUWhy7bgirB2bb5vask1h6bIYy2eSXNRe4KL22WvvJo1POAf7S+w9OsS+o8NhZG1fiYP9JfYdHeaRPUfpL83sizep5hf3hlAXhbumXIrmfIaWujTN+bC15MN7+UySumjLZ5Kkk/H+Oz6fKcCJiMj8SKanBlJc86lwbvBIqKGbrKnb9zA8e9fMz1oNZAphVYqaRDi2mnBc1x4GVDStC8uOTb4udMQy9CWqJjy+9sLZrxmtjEe1eGWKw6EGrzhc5rFndtG6Yg3F4TLFkTBf3uGBUXYfHODo0NiMufKmyyRraMmnaS1kwr4uQ0tdhmX1mWN9+FY2hj586re3uCjAiYjIuVPXFppeq5tfh3vC8mJDR6LttbAv9YFPhG1ifGo/0A37H4fnfgw+NdcbyVpovCAEuub1UwGvaS00rlk0y4+diUwyQXshQXvh+J+hZeAltm3bOOtn3J3hsXF6hsY4OjRG/0iZodEKA6MVhqKtv1Th6OAYrw2OcmRwlF3dAxwdGqU87sd9Vy6doKOhlvpsKtTepZPkMgnqMkla6zJcsbqRLasaachpTdtzRQFOREQWVq45bFxyep8bL0NfF/S+EkbJHtu/Gua+m74EWaEjBLzGNaGJNlMI/fAyhbA0Wb41vNewOsyHF3NmRj5qKl3dnDvlz7k7PUNjHCiW2F8cZn+xxIHiCAeKIwyUKgyOVjjUX2JodJyhsQp9I2U8ynvr2/JsWd3I1jVNrGvJs6w+Q3uhlvpsUjV4Z5kCnIiIxFMiFdW2rYPpTY/uYcmx4t4Q6HqjfXEvdD4MpX4Y7Q+1etNZTZjouHFNCHz1K6L1azuibXmY3DixNP8JNTNaoqbUy1c1nPT6gVKZZ7r6eLKzyJP7ijzw4hF++MT+467JJGtor8+wrBCmWFnZmD22X9WUZU1zXv3xTtPS/K9PRETOb2ZQWBa21VfPfo17qKUr9cPoQGjGLe4NYa+4F4r7YM92GDw4M+jVJMMgjLaNYXmyyX3dcsjUQfLEc8wtJYXaFNde1Mq1F7UCoQZvf3GErt4RDg+Mcrhq/ryDfSUe39vLf+3spjIx1UybTtSweUU9W1Y3HtsuaMmp1u4EFOBEROT8ZAbpfNjogLaLgetmXjcxHvrkDXTDwMGw790bpk05+Azs+s9ZAl4qBLl0ITTRZhtDs+2xfVOoxWtYFZps61fGuo9eNTNjVVOOVU1zN9uOTziH+ktR0Bvmhe4Bnuws8oMdnXzroVcBKNQmacmnZ4ycbchG69bWZWjNp6PawjTN0Sjc82VKFQU4ERGRE6lJRE2oy2d/vzwSJjQ+shuGj4bavLFBGB2M9gMwUgz980Z6w1a9Lu2kfFsIdJPNtNX7fFvoJ5htDoEz5jVTiRpjRWNYreKqtc2wNZyvjE/w4qFBnu4q8vyBfvpGygyOhn533X0lhsYqx0bhzqbGwlJnTfk0Tbk0TbkU9dmwCkZDNrxuiAZi1GWmBmLkM0nq0knymQTJmEyirAAnIiLyeqSysPzysJ2q8khosu3rgmJn2PdF+969YXqVkZ7ZP5tIH6vF2zrq0Lky1PJN1vZl6qJ99blCKGcqG0brVu8T6UUTCJNRU+rmFSee5Lk8PkHv0BivDY5Fo2xH6Rkao3dojN7hMj3D4fX+Yold3QP0l8K6t6cik6wJAa82jLZd3lDL5o56Lumo55KOAmtb8ouilk8BTkRE5FxLZaemOJlLuRRC3uQ2WXs33BO97mH84L7Qh69vf1XN3wDgc3/vDBaFuVpIZkP/vUwd1E5r9q1tDO/VJI/f0rmw0kbDytAsfA7WwU0lamivr6W9/tSbnccnnMFSGDU7MFoOo2ij2r2pfRhZe+xcqUJX7wi/fPEI41GfvWwqwcXLCywrZEItXz7U9FW/bsyFCZQbsikS8xT2FOBEREQWo1QtNF0Qtjns3L6dbdu2HX9yYgLKQ6EJd3QAxgai/TBUSmErj1TtR0OTbmV06nh0AErF0M9vMjiOj528zDXJ0ORbvzKM3q1fER13QKHqOJl+fX83ZyBRYzTkUmc0V12pPM5Lhwd5vrufXd39vHhogL1Hh3mqs0hxuHzcUmjVLGrSvWnbRfzp22ZZW/h1UIATERFZSmpqpppN6Tg73+kehb/RMKhjogIT5bAfHYD+A1EzcBf07w81ggeehN33hs9Nl2+Lgl0U6to2wQXXQvuli3I1jdpUgstWNnDZypnTqkxOmNw7PEbvUDnsoybcnuGwOsb6tvxZL5MCnIiIiJyY2VQfutnM1f/PPdTeDXRDfzcMHKjaHwhBr/PRqf5+tQ2w5toQ5tZcEwZtJDOhiXdyn0gtmj57cPyEyauazt2fqwAnIiIi88NsaqWNZZfOfV1xH+x9CPY+CK8+CC/+94m/N5EOW01y6jVES6+NTy3BNttEzZOfT2UhlYu27NSAjmPfmQqvcy3QciE0Xxj2+bbjA+TEeBhlPNITahvT+WjwSF0InPMUNhXgREREZGE1rgnbFR8KxwMHYf8TMDY01W+vMlrVjFsOS6mNj4WtMhaCktVMbTUJwGYGKPfw+bHhMJFzeSQaFXw4fOfE5PdWwn6kJzQVT0oXQr/E8kh4b6TInINGLBGC3HVfgLd89qz+lSnAiYiIyOJSWA6b3r3QpQjGK2Fljp49cPRl6Hk5TP2SzoUm3sn5+XJRc+/YUNiq5wNs33zWizWvAc7M3gl8FUgAd7j7l6e9/0ngJmAcGARudPfnzSwF3AG8ISrjt9397+ezrCIiIiIzJJKh6bTlQtjwjoUuzTHzNtTDzBLArcC7gM3Ah81s
egT9rrtf7u5bgH8E/jk6/wEg4+6XA28EPmFma+errCIiIiJxMp9jda8GXnL3Pe4+BnwfuKH6AnfvrzrMM9WI7EDezJJAFhgDqq8VEREROW/NZxPqSqCz6rgLeNP0i8zsJuDzQBq4Pjp9FyHsdQM54GZ3n2NNEREREZHzy3wGuNnGzc4YpuHutwK3mtlHgL8EPkaovRsHVgBNwK/M7P/cfc9xf4DZjcCNAG1tbWzfvv2s/gBy7gwODur+xZjuX3zp3sWb7t/5az4DXBewuup4FXDgBNd/H7gtev0R4D53LwOHzexB4ErguADn7rcDtwNs3LjRZywnIrGxfbblYCQ2dP/iS/cu3nT/zl/z2QduB7DBzNaZWRr4EHBP9QVmtqHq8D3Ab6LX+4DrLcgD1wAvzGNZRURERGJj3mrg3L1iZp8GfkaYRuQb7v6cmf0t8Ji73wN82szeDpSBXkLzKYTRq98EniU0xX7T3XfOV1lFRERE4mRe54Fz93uBe6ed+6uq17NOS+zug4SpRERERERkmvlsQhURERGReaAAJyIiIhIzCnAiIiIiMaMAJyIiIhIzCnAiIiIiMaMAJyIiIhIzCnAiIiIiMaMAJyIiIhIzCnAiIiIiMaMAJyIiIhIzCnAiIiIiMaMAJyIiIhIzCnAiIiIiMaMAJyIiIhIzCnAiIiIiMaMAJyIiIhIzCnAiIiIiMaMAJyIiIhIzCnAiIiIiMaMAJyIiIhIzCnAiIiIiMaMAJyIiIhIzCnAiIiIiMaMAJyIiIhIzCnAiIiIiMaMAJyIiIhIzCnAiIiIiMaMAJyIiIhIzCnAiIiIiMWPuvtBlOCvMbADYvdDlkDPWCry20IWQM6b7F1+6d/Gm+xdfG929cKYfTp7Nkiyw3e5+5UIXQs6MmT2m+xdfun/xpXsXb7p/8WVmj72ez6sJVURERCRmFOBEREREYmYpBbjbF7oA8rro/sWb7l986d7Fm+5ffL2ue7dkBjGIiIiInC+WUg2ciIiIyHlhSQQ4M3unme02s5fM7JaFLo/MzcxWm9n9ZrbLzJ4zs89G55vN7H/N7DfRvmmhyypzM7OEmT1pZj+NjteZ2SPR/fuBmaUXuowyOzNrNLO7zOyF6Dl8s56/eDCzm6Pfm8+a2ffMrFbP3uJlZt8ws8Nm9mzVuVmfNQv+LcoxO83sDSf7/tgHODNLALcC7wI2Ax82s80LWyo5gQrwBXe/BLgGuCm6X7cAP3f3DcDPo2NZvD4L7Ko6/gfgX6L71wv88YKUSk7FV4H73H0TcAXhPur5W+TMbCXwGeBKd78MSAAfQs/eYvYt4J3Tzs31rL0L2BBtNwK3nezLYx/ggKuBl9x9j7uPAd8HbljgMskc3L3b3Z+IXg8Q/vFYSbhnd0aX3Qm8b2FKKCdjZquA9wB3RMcGXA/cFV2i+7dImVk98Dbg6wDuPubuRfT8xUUSyJpZEsgB3ejZW7Tc/QGgZ9rpuZ61G4Bve/Aw0GhmHSf6/qUQ4FYCnVXHXdE5WeTMbC2wFXgEWObu3RBCHtC+cCWTk/hX4IvARHTcAhTdvRId6xlcvNYDR4BvRk3gd5hZHj1/i5677we+AuwjBLc+4HH07MXNXM/aaWeZpRDgbJZzGlq7yJlZHXA38Dl371/o8sipMbP3Aofd/fHq07NcqmdwcUoCbwBuc/etwBBqLo2FqK/UDcA6YAWQJzS7TadnL55O+/foUghwXcDqquNVwIEFKoucAjNLEcLbd9z9h9HpQ5PVxdH+8EKVT07oLcDvmtmrhO4K1xNq5BqjZh3QM7iYdQFd7v5IdHwXIdDp+Vv83g684u5H3L0M/BC4Fj17cTPXs3baWWYpBLgdwIZoJE6a0KnzngUuk8wh6i/1dWCXu/9z1Vv3AB+LXn8M+Mm5LpucnLv/hbuvcve1hGftF+7+UeB+4Pejy3T/Fil3Pwh0mtnG6NRvA8+j5y8O9gHXmFku+j06ee/07MXLXM/aPcAfRqNRrwH6Jpta57IkJvI1s3cTagESwDfc/e8WuEgyBzN7K/Ar4Bmm+lB9idAP7j+ANYRfVB9w9+mdP2URMbNtwJ+7+3vNbD2hRq4ZeBL4A3cfXcjyyezMbAthAEoa2AN8nPA/83r+Fjkz+xvgg4TR/E8Cf0LoJ6VnbxEys+8B24BW4BDw18CPmeVZi0L51wijVoeBj7v7CRe7XxIBTkREROR8shSaUEVERETOKwpwIiIiIjGjACciIiISMwpwIiIiIjGjACciIiISMwpwIrIkmNlgtF9rZh85y9/9pWnHD53N7xcROV0KcCKy1KwFTivAmVniJJccF+Dc/drTLJOIyFmlACciS82XgevM7Ckzu9nMEmb2T2a2w8x2mtknIExEbGb3m9l3CRNLY2Y/NrPHzew5M7sxOvdlIBt933eic5O1fRZ997Nm9oyZfbDqu7eb2V1m9oKZfSeaqBMz+7KZPR+V5Svn/G9HRJaE5MkvERGJlVuIVogAiIJYn7tfZWYZ4EEz+5/o2quBy9z9lej4j6JZ0bPADjO7291vMbNPu/uWWf6s9wNbgCsIs63vMLMHove2ApcS1jN8EHiLmT0P/B6wyd3dzBrP+k8vIucF1cCJyFL3O4Q1Bp8iLNnWAmyI3nu0KrwBfMbMngYeJiwsvYETeyvwPXcfd/dDwC+Bq6q+u8vdJ4CnCE27/UAJuMPM3k9YMkdE5LQpwInIUmfAn7n7lmhb5+6TNXBDxy4Ka7u+HXizu19BWFey9hS+ey7V61GOA0l3rxBq/e4G3gfcd1o/iYhIRAFORJaaAaBQdfwz4FNmlgIws4vNLD/L5xqAXncfNrNNwDVV75UnPz/NA8AHo352bcDbgEfnKpiZ1QEN7n4v8DlC86uIyGlTHzgRWWp2ApWoKfRbwFcJzZdPRAMJjhBqv6a7D/ikme0EdhOaUSfdDuw0syfc/aNV538EvBl4GnDgi+5+MAqAsykAPzGzWkLt3c1n9iOKyPnO3H2hyyAiIiIip0FNqCIiIiIxowAnIiIiEjMKcCIiIiIxowAnIiIiEjMKcCIiIiIxowAnIiIiEjMKcCIiIiIxowAnIiIiEjP/D31iCMapHfvSAAAAAElFTkSuQmCC", "text/plain": [ "
" ] @@ -198,9 +194,9 @@ "source": [ "if INTERACTIVE:\n", " # create widget to switch between metrics\n", - " interact(render_metric, metric_name=params['metric'])\n", + " interact(render_metric, metric_name=params[\"metric\"])\n", "else:\n", - " render_metric(params['metric'][0])" + " render_metric(params[\"metric\"][0])" ] }, { @@ -221,12 +217,15 @@ }, "outputs": [], "source": [ - "def render_plot_importance(importance_type, max_features=10,\n", - " ignore_zero=True, precision=3):\n", - " ax = lgb.plot_importance(gbm, importance_type=importance_type,\n", - " max_num_features=max_features,\n", - " ignore_zero=ignore_zero, figsize=(12, 8),\n", - " precision=precision)\n", + "def render_plot_importance(importance_type, max_features=10, ignore_zero=True, precision=3):\n", + " lgb.plot_importance(\n", + " gbm,\n", + " importance_type=importance_type,\n", + " max_num_features=max_features,\n", + " ignore_zero=ignore_zero,\n", + " figsize=(12, 8),\n", + " precision=precision,\n", + " )\n", " plt.show()" ] }, @@ -237,7 +236,7 @@ "outputs": [ { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAtQAAAHwCAYAAACG+PhNAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvIxREBQAAIABJREFUeJzs3XucnHV99//XhyRATIAYk0AkhpiCECAhHCRQAm7EeBNAEUEORg2nUkKVwk0t2N6E1hblECrc9kbKQU2hBQsihoMIP+igpSpyNKgNIKwmHAOCJCHAbvL5/TFX4hA2ZDbXzs4s+3o+HvvYme/1va75zH50eee735mJzESSJEnShtmo2QVIkiRJfZmBWpIkSSrBQC1JkiSVYKCWJEmSSjBQS5IkSSUYqCVJkqQSDNSS9A4QEZdGxFnNrkOS+qPwfagl9WcR0Q5sCaysGf5AZj5d4pptwNWZOaZcdX1TRHwbWJyZ/6fZtUhSb3CFWpLgY5k5tOZrg8N0T4iIgc18/DIiYkCza5Ck3maglqR1iIi9IuK/I+LliHi4WHlefezYiPh1RCyNiCci4s+L8SHAD4D3RsSy4uu9EfHtiPjHmvPbImJxzf32iDgjIn4BLI+IgcV5342IJRHxZESc8ja1rrn+6mtHxF9HxPMR8UxEfCIiDoyIRyPi9xHxNzXn/l1EXB8R3ymezwMRsUvN8QkRUSl+Dr+MiI+v9bjfiIhbI2I5cDwwE/jr4rnfVMw7MyJ+U1z/VxFxaM01jomI/4qIuRHxUvFcZ9QcHx4R34qIp4vjN9YcOzgiHipq+++ImFR3gyWphxioJakLEbE1cAvwj8Bw4K+A70bEyGLK88DBwObAscDXImK3zFwOzACe3oAV76OBg4BhwCrgJuBhYGtgf+DUiPhfdV5rK2DT4tw5wOXAZ4DdgX2BORExvmb+IcB1xXP9d+DGiBgUEYOKOm4HRgFfAP4tIravOffTwDnAZsC/Av8GnF88948Vc35TPO4WwN8DV0fE6JprTAEWAiOA84ErIyKKY1cB7wJ2Kmr4GkBE7AZ8E/hz4D3AvwDzI2KTOn9GktQjDNSSVA2PLxdfq1c/PwPcmpm3ZuaqzLwDuA84ECAzb8nM32TV3VQD574l6/i/mbkoM1cAHwRGZuaXM/ONzHyCaig+qs5rdQDnZGYHcC3VoHpxZi7NzF8CvwRqV3Pvz8zri/n/RDWM71V8DQXOLeq4C7iZavhf7fuZeU/xc3qtq2Iy87rMfLqY8x3gMWDPmim/zczLM3MlMA8YDWxZhO4ZwEmZ+VJmdhQ/b4A/A/4lM3+WmSszcx7welGzJPWaPrtPT5J60Ccy8/9ba2wb4FMR8bGasUHAfwIUWxLOBj5AdXHiXcCCknUsWuvx3xsRL9eMDQB+XOe1XizCKcCK4vtzNcdXUA3Kb3nszFxVbEd57+pjmbmqZu5vqa58d1V3lyLic8D/BsYVQ0OphvzVnq15/FeLxemhVFfMf5+ZL3Vx2W2AWRHxhZqxjWvqlqReYaCWpK4tAq7KzD9b+0CxpeC7wOeors52FCvbq7codPX2Scuphu7VtupiTu15i4AnM3O7DSl+A7xv9Y2I2AgYA6zeqvK+iNioJlSPBR6tOXft5/um+xGxDdXV9f2Bn2Tmyoh4iD/+vN7OImB4RAzLzJe7OHZOZp5Tx3UkqWHc8iFJXbsa+FhE/K+IGBARmxYv9htDdRV0E2AJ0FmsVn+05tzngPdExBY1Yw8BBxYvsNsKOHU9j38v8ErxQsXBRQ07R8QHe+wZvtnuEfHJ4h1GTqW6deKnwM+o/mPgr4s91W3Ax6huI1mX54Da/dlDqIbsJVB9QSewcz1FZeYzVF/keUlEvLuoYb/i8OXASRExJaqGRMRBEbFZnc9ZknqEgVqSupCZi6i+UO9vqAbBRcAXgY0ycylwCvAfwEtUX5Q3v+bc/wGuAZ4o9mW/l+oL6x4G2qnut/7Oeh5/JdXgOhl4EngBuILqi/oa4fvAkVSfz2eBTxb7ld8APk51H/MLwCXA54rnuC5XAjuu3pOemb8CLgR+QjVsTwTu6UZtn6W6J/x/qL4Y9FSAzLyP6j7qfy7qfhw4phvXlaQe4Qe7SFI/FxF/B2ybmZ9pdi2S1Be5Qi1JkiSVYKCWJEmSSnDLhyRJklSCK9SSJElSCQZqSZIkqYQ++cEuw4YNy2233bbZZajG8uXLGTJkSLPLUA170prsS+uxJ63JvrSe/tiT+++//4XMHLm+eX0yUG+55Zbcd999zS5DNSqVCm1tbc0uQzXsSWuyL63HnrQm+9J6+mNPIuK39cxzy4ckSZJUgoFakiRJKsFALUmSJJVgoJYkSZJKMFBLkiRJJRioJUmSpBIM1JIkSVIJBmpJkiSpBAO1JEmSVIKBWpIkSSrBQC1JkiSVYKCWJEmSSjBQS5IkSSUYqCVJkqQSDNSSJElS
CQZqSZIkqQQDtSRJklSCgVqSJEkqwUAtSZIklWCgliRJkkowUEuSJEklGKglSZKkEgzUkiRJUgkGakmSJKkEA7UkSZJUgoFakiRJKsFALUmSJJVgoJYkSZJKMFBLkiRJJRioJUmSpBIM1JIkSVIJBmpJkiSpBAO1JEmSVIKBWpIkST3u5Zdf5vDDD2eHHXZgwoQJ/OQnP+H3v/8906dPZ7vttmP69Om89NJLzS6zR0RmNubCEacAs4EdgAXF8DJgdmY+XMwZBlwB7AwkcFxm/mR91x47ftvc6IiLG1K3NszpEzu5cMHAZpehGvakNdmX1mNPWpN9aT3fPmAIbW1tdc+fNWsW++67LyeccAJvvPEGr776Kl/5ylcYPnw4Z555Jueeey4vvfQS5513XuOKLiki7s/MPdY3r5Er1CcDBwL7AB/KzEnAPwCX1cy5GLgtM3cAdgF+3cB6JEmS1AteeeUVfvSjH3H88ccDsPHGGzNs2DC+//3vM2vWLKAauG+88cZmltljGhKoI+JSYDwwH5iSmavX838KjCnmbA7sB1wJkJlvZObLjahHkiRJveeJJ55g5MiRHHvssey6666ccMIJLF++nOeee47Ro0cDMHr0aJ5//vkmV9ozGrnlox3YIzNfqBn7K2CHzDwhIiZTXa3+FdXV6fuBv8zM5eu43onAiQAjRozcfc5Flzekbm2YLQfDcyuaXYVq2ZPWZF9ajz1pTfal9bx/iwEMHTq0rrkLFy7k5JNP5utf/zo77rgjX//61xkyZAg33HADN99885p5H/vYx7jpppsaVXJp06ZNq2vLR68F6oiYBlwCTM3MFyNiD6or1vtk5s8i4mLglcw8a33Xdg9163GvW+uxJ63JvrQee9Ka7Evr6c4e6meffZa99tqL9vZ2AH784x9z7rnn8vjjj1OpVBg9ejTPPPMMbW1tLFy4sHFFl9QKe6hri5lE9cWHh2Tmi8XwYmBxZv6suH89sFtv1CNJkqTG2WqrrXjf+963Jizfeeed7Ljjjnz84x9n3rx5AMybN49DDjmkmWX2mIb/0y8ixgI3AJ/NzEdXj2fmsxGxKCK2z8yFwP5Ut39IkiSpj/v617/OzJkzeeONNxg/fjzf+ta3WLVqFUcccQRXXnklY8eO5brrrmt2mT2i4Vs+gHOBw4DfFoc6Vy+dF/uorwA2Bp4Ajq15AeM6bb/99tnKfx7ojyqVSrfeSkeNZ09ak31pPfakNdmX1tMfe1Lvlo+GrVBn5rji5gnFV1dzHqIauiVJkqQ+yU9KlCRJkkowUEuSJEklGKglSZKkEgzUkiRJUgkGakmSJKkEA7UkSZJUgoFakiRJKsFALUmSJJVgoJYkSZJKMFBLkiRJJRioJUmSpBIM1JIkSVIJBmpJkiSpBAO1JEmSVIKBWpIkSSrBQC1JkiSVYKCWJEmSSjBQS5IkSSUYqCVJkqQSDNSSJElSCQZqSZIkqQQDtSRJklSCgVqSJEkqwUAtSZIklWCgliRJkkowUEuSJEklGKglSZKkEgzUkiRJUgkGakmSJKkEA7UkSep1K1euZNddd+Xggw8G4M4772S33XZj8uTJTJ06laeeeqrJFUr1G9jIi0fEKcBsYAdgQTG8DJidmQ8Xc04DTgCymHNsZr72dtdd0bGScWfe0rC61X2nT+zkGHvSUuxJa7Ivrcee9Iz2cw/q1vyLL76YCRMm8MorrwAwe/Zsvv/97zNhwgQuueQSrrrqKmbOnNmIUqUe1+gV6pOBA4F9gA9l5iTgH4DLACJia+AUYI/M3BkYABzV4JokSVITLV68mFtuuYUTTjhhzVhErAnXf/jDH3jPe97TrPKkbmvYCnVEXAqMB+YD38zM/y4O/RQYs1YNgyOiA3gX8HSjapIkSc136qmncv7557N06dI1Y1dccQUHHngggwcPZvPNN+eCCy5oYoVS9zRshTozT6Iajqdl5tdqDh0P/KCY8xQwF/gd8Azwh8y8vVE1SZKk5rr55psZNWoUu++++5vGv/a1r3HrrbeyePFijj32WC655JImVSh1X0P3UK8tIqZRDdRTi/vvBg4B3g+8DFwXEZ/JzKu7OPdE4ESAESNGMmdiZ6/VrfXbcnB1H6Jahz1pTfal9diTnlGpVOqad80113D77bdzww038MYbb/Dqq6+y1157sWjRIlasWEGlUmHs2LEsWLCg7muqdyxbtsyerEOvBeqImARcAczIzBeL4Y8AT2bmkmLODcCfAm8J1Jl5GcXe67Hjt80LF/TqvwW0HqdP7MSetBZ70prsS+uxJz2jfWZbXfPa2v44r1KpMHfuXG688Ua22mor3vve9/KBD3yAK6+8knHjxr1prpqvUqnYk3Xold8gETEWuAH4bGY+WnPod8BeEfEuYAWwP3Bfb9QkSZJaw8CBA7n88ss57LDD2GijjXj3u9/NSSed1OyypLr11j/J5wDvAS6JCIDOzNwjM38WEdcDDwCdwIMUq9CSJOmdra2tbc2K56GHHsqhhx665phbC9SXNDRQZ+a44uYJxVdXc84Gzu7OdQcPGsDCbr7fpRqrUqnU/ec+9Q570prsS+uxJ5LK8pMSJUmSpBIM1JIkSVIJBmpJkiSpBAO1JEmSVIKBWpIkSSrBQC1JkiSVYKCWJEmSSjBQS5IkSSUYqCVJkqQSDNSSJElSCQZqSZIkqQQDtSRJklSCgVqSJEkqwUAtSZIklWCgliRJkkowUEuSJEklGKglSZKkEgzUkiRJUgkGakmSJKkEA7UkSZJUgoFakiRJKsFALUmSJJVgoJYkSZJKMFBLkiRJJRioJUmSpBIM1JIkSVIJBmpJkiSpBAO1JEmSVIKBWpIkSSrBQC1JkiSVYKCWJKmbVq5cya677srBBx8MwJNPPsmUKVPYbrvtOPLII3njjTeaXKGk3jSwUReOiFOA2cAOwIJieBkwOzMfLua0A0uBlUBnZu5Rz7VXdKxk3Jm39HjN2nCnT+zkGHvSUuxJa7IvrefbBwzp9jkXX3wxEyZM4JVXXgHgjDPO4LTTTuOoo47ipJNO4sorr2T27Nk9XaqkFtXIFeqTgQOBfYAPZeYk4B+Ay9aaNy0zJ9cbpiVJaqbFixdzyy23cMIJJwCQmdx1110cfvjhAMyaNYsbb7yxmSVK6mUNCdQRcSkwHpgPTMnMl4pDPwXGNOIxJUnqDaeeeirnn38+G21U/U/oiy++yLBhwxg4sPpH3zFjxvDUU081s0RJvawhgTozTwKeprr6/LWaQ8cDP6idCtweEfdHxImNqEWSpJ5y8803M2rUKHbfffc1Y5n5lnkR0ZtlSWqyhu2hXltETKMaqKfWDO+TmU9HxCjgjoj4n8z80TrOPxE4EWDEiJHMmdjZ8JpVvy0HV/eGqnXYk9ZkX1rPsmXLqFQqdc295ppruP3227nhhht44403ePXVVzn66KNZsmQJd955JwMGDOCXv/wlm266ad3XVNe60xf1Dnuybr0SqCNiEnAFMCMzX1w9nplPF9+fj4jvAXsCXQbqzLyMYv/12PHb5oULeu3fAqrD6RM7sSetxZ6
0JvvSer59wBDa2trqmls7r1KpMHfuXG6++WY+9alPsWTJEo466iiuvfZajj322Lqvqa5VKhV/hi3Gnqxbw982LyLGAjcAn83MR2vGh0TEZqtvAx8FHml0PZIk9bTzzjuPf/qnf2LbbbflxRdf5Pjjj292SZJ6UW8sk8wB3gNcUuwpW/32eFsC3yvGBgL/npm31XPBwYMGsPDcgxpUrjZEpVKhfWZbs8tQDXvSmuxL69nQP2G3tbWtWa0bP3489957b88VJalPaVigzsxxxc0Tiq+1jz8B7NKox5ckSZJ6g5+UKEmSJJVgoJYkSZJKMFBLkiRJJRioJUmSpBIM1JIkSVIJBmpJkiSpBAO1JEmSVIKBWpIkSSrBQC1JkiSVYKCWJEmSSjBQS5IkSSUYqCVJkqQSDNSSJElSCQZqSZIkqQQDtSRJklSCgVqSJEkqwUAtSZIklWCgliRJkkowUEuSJEklGKglSZKkEgzUkiRJUgkGakmSJKkEA7UkSZJUgoFakiRJKsFALUmSJJVgoJYkSZJKMFBLkiRJJRioJUmSpBIM1JIkSVIJA5tdgCRJ3fXaa6+x33778frrr9PZ2cnhhx/O3//937PvvvuydOlSAJ5//nn23HNPbrzxxiZXK+mdrmGBOiJOAWYDOwALiuFlwOzMfDgitge+U3PKeGBOZl60vmuv6FjJuDNv6emSVcLpEzs5xp60FHvSmuzLurWfe1DdczfZZBPuuusuhg4dSkdHB1OnTmXGjBn8+Mc/XjPnsMMO45BDDmlEqZL0Jo1coT4ZmAGMBn6dmS9FxAzgMmBKZi4EJgNExADgKeB7DaxHkvQOEREMHToUgI6ODjo6OoiINceXLl3KXXfdxbe+9a1mlSipH2nIHuqIuJTqivN8quH5peLQT4ExXZyyP/CbzPxtI+qRJL3zrFy5ksmTJzNq1CimT5/OlClT1hz73ve+x/7778/mm2/exAol9RcNCdSZeRLwNDAtM79Wc+h44AddnHIUcE0japEkvTMNGDCAhx56iMWLF3PvvffyyCOPrDl2zTXXcPTRRzexOkn9SWRmYy4c0Q7skZkvFPenAZcAUzPzxZp5G1MN3ztl5nNvc70TgRMBRowYufuciy5vSN3aMFsOhudWNLsK1bInrcm+rNvErbfY4HPnzZvHpptuypFHHskf/vAHPve5z3Hdddex8cYbr/fcZcuWrdk+otZhX1pPf+zJtGnT7s/MPdY3r1fe5SMiJgFXADNqw3RhBvDA24VpgMy8jOr+a8aO3zYvXOAblLSS0yd2Yk9aiz1pTfZl3dpnttU9d8mSJQwaNIhhw4axYsUKzjrrLM444wza2tq49NJL+cQnPsFHP/rRuq5VqVRoa6v/sdU77EvrsSfr1vDf6hExFrgB+GxmPtrFlKNxu4ckqRueeeYZZs2axcqVK1m1ahVHHHEEBx98MADXXnstZ555ZpMrlNSf9MYyyRzgPcAlxSuwO1cvnUfEu4DpwJ/3Qh2SpHeISZMm8eCDD3Z5rFKp9G4xkvq9hgXqzBxX3Dyh+OpqzqtUw3a3DB40gIXdeL9SNV6lUunWn2vVePakNdkXSXrn8aPHJUmSpBIM1JIkSVIJBmpJkiSpBAO1JEmSVIKBWpIkSSrBQC1JkiSVYKCWJEmSSjBQS5IkSSUYqCVJkqQSDNSSJElSCQZqSZIkqQQDtSRJklSCgVqSJEkqwUAtSZIklWCgliRJkkowUEuSJEklGKglSZKkEgzUkiRJUgkGakmSJKkEA7UkSZJUgoFakiRJKsFALUmSJJVgoJYkSZJKMFBLkiRJJRioJUmSpBIM1JIkSVIJBmpJkiSpBAO1JEmSVIKBWpIkSSrBQC1JkiSVYKCWpH7itddeY88992SXXXZhp5124uyzzwZg5syZbL/99uy8884cd9xxdHR0NLlSSepbBjby4hFxCjAb2AFYUAwvA2Zn5sMRsSnwI2CTopbrM/Ps9V13RcdKxp15S4Oq1oY4fWInx9iTlmJPWlNP96X93IPqnrvJJptw1113MXToUDo6Opg6dSozZsxg5syZXH311QB8+tOf5oorrmD27Nk9VqMkvdM1NFADJwMzgNHArzPzpYiYAVwGTAFeBz6cmcsiYhDwXxHxg8z8aYPrkqR+JyIYOnQoAB0dHXR0dBARHHjggWvm7LnnnixevLhZJUpSn9SwLR8RcSkwHpgPTMnMl4pDPwXGAGTVsmJ8UPGVjapJkvq7lStXMnnyZEaNGsX06dOZMmXKmmMdHR1cddVVHHDAAU2sUJL6noYF6sw8CXgamJaZX6s5dDzwg9V3ImJARDwEPA/ckZk/a1RNktTfDRgwgIceeojFixdz77338sgjj6w5dvLJJ7Pffvux7777NrFCSep7IrN7C8IR8W7gfZn5izrmtgN7ZOYLxf1pwCXA1Mx8ca25w4DvAV/IzEe6uNaJwIkAI0aM3H3ORZd3q2411paD4bkVza5CtexJa+rpvkzceosNPnfevHlsuummHHnkkcybN4/HHnuML3/5y2y0Uf96vfqyZcvWbIVR67Avrac/9mTatGn3Z+Ye65tX1x7qiKgAHy/mPwQsiYi7M/N/11tQREwCrgBmrB2mATLz5eJxDgDeEqgz8zKqe68ZO37bvHBBo7d/qztOn9iJPWkt9qQ19XRf2me21T13yZIlDBo0iGHDhrFixQrOOusszjjjDB5//HEWLlzInXfeyeDBg3ustr6iUqnQ1tbW7DK0FvvSeuzJutX7W32LzHwlIk4AvpWZZ0fEeleoV4uIscANwGcz89Ga8ZFARxGmBwMfAc7rRv2SpDo988wzzJo1i5UrV7Jq1SqOOOIIDj74YAYOHMg222zD3nvvDcAnP/lJ5syZ0+RqJanvqDdQD4yI0cARwN9uwOPMAd4DXBIRAJ3F8vloYF5EDKC6n/s/MvPmDbi+JGk9Jk2axIMPPviW8c7OziZUI0nvHPUG6i8DPwTuycyfR8R44LH1nZSZ44qbJxRfax//BbBrnTWsMXjQABZ2471X1XiVSqVbf3pW49mT1mRfJOmdp65AnZnXAdfV3H8COKxRRUmSJEl9RV0v5Y6ID0TEnRHxSHF/UkT8n8aWJkmSJLW+et8b6XLgS0AHrNmqcVSjipIkSZL6inoD9bsy8961xnwViyRJkvq9egP1CxHxJxQfCx4RhwPPNKwqSZIkqY+o910+/oLqh6rsEBFPAU8CMxtWlSRJktRHrDdQR8RGVD8+/CMRMQTYKDOXNr40SZIkqfWtd8tHZq4CPl/cXm6YliRJkv6o3j3Ud0TEX0XE+yJi+OqvhlYmSZIk9QH17qE+rvj+FzVjCYzv2XIkSZKkvqXeT0p8f6MLkSRJkvqiugJ1RHyuq/HM/NeeLUeSJEnqW+rd8vHBmtubAvsDDwAGakmSJPVr9W75+ELt/YjYAriqIRVJkiRJfUi97/KxtleB7XqyEEmSJKkvqncP9U0UHztONYTvCFzXqKIkSZKkvqLePdRza253Ar/NzMUNqEeSJEnqU+rd8nFgZt5dfN2TmYsj4ryGVi
ZJkiT1AfUG6uldjM3oyUIkSZKkvuhtt3xExGzgZGB8RPyi5tBmwD2NLEySJEnqC9a3h/rfgR8AXwXOrBlfmpm/b1hVkiRJUh/xtoE6M/8A/AE4GiAiRlH9YJehETE0M3/X+BIlSZKk1lXXHuqI+FhEPAY8CdwNtFNduZYkSZL6tXpflPiPwF7Ao5n5fqofPe4eakmSJPV79Qbqjsx8EdgoIjbKzP8EJjewLkmSJKlPqPeDXV6OiKHAj4F/i4jnqX7AiyRJktSv1btCfQjwKnAqcBvwG+BjjSpKkiRJ6ivqWqHOzOURsQ2wXWbOi4h3AQMaW5okSZLU+up9l48/A64H/qUY2hq4sVFFSZIkSX1FvVs+/gLYB3gFIDMfA0Y1qihJUs977bXX2HPPPdlll13YaaedOPvsswGYOXMm22+/PTvvvDPHHXccHR0dTa5UkvqWel+U+HpmvhERAETEQCA39EEj4hRgNvAAcDlwETAIeCEzP7S+81d0rGTcmbds6MOrAU6f2Mkx9qSl2JPW1NN9aT/3oLrnbrLJJtx1110MHTqUjo4Opk6dyowZM5g5cyZXX301AJ/+9Ke54oormD17do/VKEnvdPUG6rsj4m+AwRExHTgZuKnE454MzABeAv4bOCAzf1d8EqMkqQEigqFDhwLQ0dFBR0cHEcGBBx64Zs6ee+7J4sWLm1WiJPVJ9W75OBNYAiwA/hy4Ffg/G/KAEXEpMB6YT3UryQ2rP8I8M5/fkGtKkuqzcuVKJk+ezKhRo5g+fTpTpkxZc6yjo4OrrrqKAw44oIkVSlLf87aBOiLGAmTmqsy8PDM/lZmHF7c3aMtHZp4EPA1MA0YC746ISkTcHxGf25BrSpLqM2DAAB566CEWL17MvffeyyOPPLLm2Mknn8x+++3Hvvvu28QKJanvibfLxRHxQGbuVtz+bmYe1iMPGtEO7AH8XfF9f2Aw8BPgoMx8tItzTgROBBgxYuTucy66vCdKUQ/ZcjA8t6LZVaiWPWlNPd2XiVtvscHnzps3j0033ZQjjzySefPm8dhjj/HlL3+ZjTaq94+X7wzLli1bsxVGrcO+tJ7+2JNp06bdn5l7rG/e+vZQR83t8eVK6tJiqi9EXA4sj4gfAbsAbwnUmXkZcBnA2PHb5oUL6t3+rd5w+sRO7ElrsSetqaf70j6zre65S5YsYdCgQQwbNowVK1Zw1llnccYZZ/D444+zcOFC7rzzTgYPHtxjtfUVlUqFtra2ZpehtdiX1mNP1m19v9VzHbd7yveBfy7eNWRjYArwtQY8jiT1e8888wyzZs1i5cqVrFq1iiOOOIKDDz6YgQMHss0227D33nsD8MlPfpI5c+Y0uVpJ6jvWF6h3iYhXqK5UDy5uU9zPzNy8zINn5q8j4jbgF8Aq4IrMfGQ9p0mSNsCkSZN48MEH3zLe2dnZhGok6Z3jbQN1Zjbk48Uzc1zN7QuAC7pz/uBBA1jYjfdeVeNVKpVu/elZjWdPWpN9kaR3nv71yhNJkiSphxmoJUmSpBIM1JLtERTUAAAWb0lEQVQkSVIJBmpJkiSpBAO1JEmSVIKBWpIkSSrBQC1JkiSVYKCWJEmSSjBQS5IkSSUYqCVJkqQSDNSSJElSCQZqSZIkqQQDtSRJklSCgVqSJEkqwUAtSZIklWCgliRJkkowUEuSJEklGKglSZKkEgzUkiRJUgkGakmSJKkEA7UkSZJUgoFakiRJKsFALUmSJJVgoJYkSZJKMFBLkiRJJRioJUmSpBIM1JIkSVIJBmpJkiSpBAO1JEmSVIKBWpIkSSrBQC2pT1i0aBHTpk1jwoQJ7LTTTlx88cVvOj537lwighdeeKFJFUqS+quBzXjQiDgFmA08kJkzI+KDwE+BIzPz+vWdv6JjJePOvKXRZaobTp/YyTH2pKX0hZ60n3tQ3XMHDhzIhRdeyG677cbSpUvZfffdmT59OjvuuCOLFi3ijjvuYOzYsQ2sVpKkrjVrhfpk4MAiTA8AzgN+2KRaJPUBo0ePZrfddgNgs802Y8KECTz11FMAnHbaaZx//vlERDNLlCT1U72+Qh0RlwLjgfkR8U0gge8CH+ztWiT1Te3t7Tz44INMmTKF+fPns/XWW7PLLrs0uyxJUj/V64E6M0+KiAOAacAmwL8DH8ZALakOy5Yt47DDDuOiiy5i4MCBnHPOOdx+++3NLkuS1I81ZQ91jYuAMzJz5fr+VBsRJwInAowYMZI5Ezt7oTzVa8vB1T27ah19oSeVSqVb8zs7O/nSl77ElClTGD58ONdeey2PPvoo22+/PQBLlixhp5124hvf+AbDhw9vQMXlLVu2rNvPW41lT1qTfWk99mTdIjN7/0Ej2oE9gJ8Dq5P0COBV4MTMvPHtzh87ftvc6IiL326KetnpEzu5cEGz/32mWn2hJ915UWJmMmvWLIYPH85FF13U5Zxx48Zx3333MWLEiJ4qscdVKhXa2tqaXYZq2JPWZF9aT3/sSUTcn5l7rG9eU982LzPfn5njMnMccD1w8vrCtKT+6Z577uGqq67irrvuYvLkyUyePJlbb7212WVJktT0LR+SVJepU6eyvr+otbe3904xkiTVaEqgLlak1x47pt7zBw8awMJu/KlYjVepVGif2dbsMlTDnkiS1Dv8pERJkiSpBAO1JEmSVIKBWpIkSSrBQC1JkiSVYKCWJEmSSjBQS5IkSSUYqCVJkqQSDNSSJElSCQZqSZIkqQQDtSRJklSCgVqSJEkqwUAtSZIklWCgliRJkkowUEuSJEklGKglSZKkEgzUkiRJUgkGakmSJKkEA7UkSZJUgoFakiRJKsFALUmSJJVgoJYkSZJKMFBLkiRJJRioJUmSpBIM1JIkSVIJBmpJkiSpBAO1JEmSVIKBWpIkSSrBQC1JkiSVYKCWJEmSSjBQS2qIRYsWMW3aNCZMmMBOO+3ExRdfDMAXv/hFdthhByZNmsShhx7Kyy+/3ORKJUkqZ2AzHjQiTgFmA78C3gvsBvxtZs6t5/wVHSsZd+YtDaxQ3XX6xE6OsSctpRE9aT/3oLrnDhw4kAsvvJDddtuNpUuXsvvuuzN9+nSmT5/OV7/6VQYOHMgZZ5zBV7/6Vc4777werVOSpN7UlEANnAzMAJYD2wCfaFIdkhpk9OjRjB49GoDNNtuMCRMm8NRTT/HRj350zZy99tqL66+/vlklSpLUI3p9y0dEXAqMB+YDMzPz50BHb9chqfe0t7fz4IMPMmXKlDeNf/Ob32TGjBlNqkqSpJ7R6yvUmXlSRBwATMvMF3r78SX1rmXLlnHYYYdx0UUXsfnmm68ZP+eccxg4cCAzZ85sYnWSJJUXmdn7DxrRDuyxOlBHxN8By95uD3VEnAicCDBixMjd51x0eS9UqnptORieW9HsKlSrET2ZuPUW3Zrf2dnJl770JT74wQ9yxBFHrBm/7bbbuOmmm7jwwgvZdNNNe7bIFrds2TKGDh3a7DJUw560JvvSevpjT6ZNm3Z/Zu6xvnnN2kPdbZl5GXAZwNjx2+aFC/pM6f3C6RM7sSetpRE9aZ/ZVvfcz
GTWrFnss88+XHTRRWvGb7vtNubPn8/dd9/NyJEje7S+vqBSqdDW1tbsMlTDnrQm+9J67Mm6mYAkNcQ999zDVVddxcSJE5k8eTIAX/nKVzjllFN4/fXXmT59OlB9YeKll17azFIlSSqlqYE6IrYC7gM2B1ZFxKnAjpn5SjPrklTe1KlT6WpL2YEHHtiEaiRJapymBOrMHFdzd0x3zx88aAALu/F+uGq8SqXSre0Aajx7IklS7/CTEiVJkqQSDNSSJElSCQZqSZIkqQQDtSRJklSCgVqSJEkqwUAtSZIklWCgliRJkkowUEuSJEklGKglSZKkEgzUkiRJUgkGakmSJKkEA7UkSZJUgoFakiRJKsFALUmSJJVgoJYkSZJKMFBLkiRJJRioJUmSpBIM1JIkSVIJBmpJkiSpBAO1JEmSVIKBWpIkSSrBQC1JkiSVYKCWJEmSSjBQS5IkSSUYqCVJkqQSDNSSJElSCQZqSZIkqQQDtSRJklSCgVqSJEkqwUAtSZIklWCglrpw3HHHMWrUKHbeeee3HJs7dy4RwQsvvNCEyiRJUqsZ2MiLR8QpwGzgV8B7gd2Av83MuTVzTgNOABJYABybma+93XVXdKxk3Jm3NKxudd/pEzs5psV70n7uQXXPPeaYY/j85z/P5z73uTeNL1q0iDvuuIOxY8f2dHmSJKmPavQK9cnAgVRD9SnA3NqDEbF1Mb5HZu4MDACOanBN0nrtt99+DB8+/C3jp512Gueffz4R0YSqJElSK2pYoI6IS4HxwHxgZmb+HOjoYupAYHBEDATeBTzdqJqkMubPn8/WW2/NLrvs0uxSJElSC2nYlo/MPCkiDgCmZWaXm00z86mImAv8DlgB3J6ZtzeqJmlDvfrqq5xzzjncfrv/85QkSW/W0D3U6xMR7wYOAd4PvAxcFxGfycyru5h7InAiwIgRI5kzsbNXa9Xb23JwdR91K6tUKt2a/+yzz7J8+XIqlQpPPPEEjz76KNtvvz0AS5YsYaedduIb3/hGl1tDWsGyZcu6/ZzVePal9diT1mRfWo89WbemBmrgI8CTmbkEICJuAP4UeEugzszLgMsAxo7fNi9c0OzSVev0iZ20ek/aZ7Z1b357O0OGDKGtrY22tjaOO+64NcfGjRvHfffdx4gRI3q4yp5TqVRoa2trdhlai31pPfakNdmX1mNP1q3Zb5v3O2CviHhXVF/ltT/w6ybXJHH00Uez9957s3DhQsaMGcOVV17Z7JIkSVKL6pUlxYjYCrgP2BxYFRGnAjtm5s8i4nrgAaATeJBiFVpqpmuuueZtj7e3t/dOIZIkqeU1NFBn5riau2PWMeds4OzuXHfwoAEs7MZ7CqvxKpVKt7dUSJIkvRM0e8uHJEmS1KcZqCVJkqQSDNSSJElSCQZqSZIkqQQDtSRJklSCgVqSJEkqwUAtSZIklWCgliRJkkowUEuSJEklGKglSZKkEgzUkiRJUgkGakmSJKkEA7UkSZJUgoFakiRJKsFALUmSJJVgoJYkSZJKMFBLkiRJJRioJUmSpBIM1JIkSVIJBmpJkiSpBAO1JEmSVIKBWpIkSSrBQC1JkiSVYKCWJEmSSjBQS5IkSSUYqCVJkqQSDNSSJElSCQZqSZIkqQQDtSRJklSCgVqSJEkqYWCzC9gQKzpWMu7MW5pdhmqcPrGTY1q8J+3nHlT33OOOO46bb76ZUaNG8cgjj7zp2Ny5c/niF7/IkiVLGDFiRE+XKUmS+piGrlBHxCkR8euI+G5E/CQiXo+Iv1przgERsTAiHo+IMxtZj1SvY445httuu+0t44sWLeKOO+5g7NixTahKkiS1okZv+TgZOBCYDZwCzK09GBEDgP8HzAB2BI6OiB0bXJO0Xvvttx/Dhw9/y/hpp53G+eefT0Q0oSpJktSKGhaoI+JSYDwwH5iZmT8HOtaatifweGY+kZlvANcChzSqJqmM+fPns/XWW7PLLrs0uxRJktRCGraHOjNPiogDgGmZ+cI6pm0NLKq5vxiY0tXEiDgROBFgxIiRzJnY2ZPlqqQtB1f3UbeySqXSrfnPPvssy5cvp1Kp8Nprr3HGGWdwwQUXrLl/zz33sMUWWzSm2B6wbNmybj9nNZ59aT32pDXZl9ZjT9at2S9K7Orv5tnVxMy8DLgMYOz4bfPCBc0uXbVOn9hJq/ekfWZb9+a3tzNkyBDa2tpYsGABL774Ip///OcBeOGFF/jCF77Avffey1ZbbdWAasurVCq0tbU1uwytxb60HnvSmuxL67En69bsBLQYeF/N/THA002qRVqniRMn8vzzz6+5P27cOO677z7f5UOSJDX9fah/DmwXEe+PiI2Bo6juuZaa6uijj2bvvfdm4cKFjBkzhiuvvLLZJUmSpBbVKyvUEbEVcB+wObAqIk4FdszMVyLi88APgQHANzPzl+u73uBBA1jYjfcUVuNVKpVub6loZddcc83bHm9vb++dQiRJUstraKDOzHE1d8esY86twK2NrEOSJElqlGZv+ZAkSZL6NAO1JEmSVIKBWpIkSSrBQC1JkiSVYKCWJEmSSjBQS5IkSSUYqCVJkqQSDNSSJElSCQZqSZIkqQQDtSRJklSCgVqSJEkqwUAtSZIklWCgliRJkkowUEuSJEklGKglSZKkEgzUkiRJUgkGakmSJKkEA7UkSZJUgoFakiRJKsFALUmSJJVgoJYkSZJKMFBLkiRJJRioJUmSpBIM1JIkSVIJBmpJkiSpBAO1JEmSVIKBWpIkSSrBQC1JkiSVYKCWJEmSSjBQS5IkSSUYqCVJkqQSDNSSJElSCQZqSZIkqQQDtSRJklRCZGaza+i2iFgKLGx2HXqTEcALzS5Cb2JPWpN9aT32pDXZl9bTH3uyTWaOXN+kgb1RSQMszMw9ml2E/igi7rMnrcWetCb70nrsSWuyL63HnqybWz4kSZKkEgzUkiRJUgl9NVBf1uwC9Bb2pPXYk9ZkX1qPPWlN9qX12JN16JMvSpQkSZJaRV9doZYkSZJaQp8K1BFxQEQsjIjHI+LMZtfTX0XENyPi+Yh4pGZseETcERGPFd/f3cwa+5uIeF9E/GdE/DoifhkRf1mM25cmiYhNI+LeiHi46MnfF+Pvj4ifFT35TkRs3Oxa+6OIGBARD0bEzcV9+9JEEdEeEQsi4qGIuK8Y8/dXk0XEsIi4PiL+p/jvy972pWt9JlBHxADg/wEzgB2BoyNix+ZW1W99GzhgrbEzgTszczvgzuK+ek8ncHpmTgD2Av6i+P+HfWme14EPZ+YuwGTggIjYCzgP+FrRk5eA45tYY3/2l8Cva+7bl+ablpmTa96Wzd9fzXcxcFtm7gDsQvX/M/alC30mUAN7Ao9n5hOZ+QZwLXBIk2vqlzLzR8Dv1xo+BJhX3J4HfKJXi+rnMvOZzHyguL2U6i+9rbEvTZNVy4q7g4qvBD4MXF+M25MmiIgxwEHAFcX9wL60In9/NVFEbA7sB1wJkJlvZObL2Jcu9aVAvTWwqOb+4mJMrWHLzHwGquEOGNXkevqtiBgH7Ar8DPvSVMW2goeA54E7gN8A
L2dmZzHF32PNcRHw18Cq4v57sC/NlsDtEXF/RJxYjPn7q7nGA0uAbxXbo66IiCHYly71pUAdXYz5FiVSjYgYCnwXODUzX2l2Pf1dZq7MzMnAGKp/ZZvQ1bTerap/i4iDgecz8/7a4S6m2pfetU9m7kZ1W+dfRMR+zS5IDAR2A76RmbsCy3F7xzr1pUC9GHhfzf0xwNNNqkVv9VxEjAYovj/f5Hr6nYgYRDVM/1tm3lAM25cWUPyZtEJ1f/uwiBhYHPL3WO/bB/h4RLRT3Tr4Yaor1valiTLz6eL788D3qP4D1N9fzbUYWJyZPyvuX081YNuXLvSlQP1zYLvildgbA0cB85tck/5oPjCruD0L+H4Ta+l3ij2gVwK/zsx/qjlkX5okIkZGxLDi9mDgI1T3tv8ncHgxzZ70ssz8UmaOycxxVP87cldmzsS+NE1EDImIzVbfBj4KPIK/v5oqM58FFkXE9sXQ/sCvsC9d6lMf7BIRB1JdSRgAfDMzz2lySf1SRFwDtAEjgOeAs4Ebgf8AxgK/Az6VmWu/cFENEhFTgR8DC/jjvtC/obqP2r40QURMovqCnQFUFy/+IzO/HBHjqa6MDgceBD6Tma83r9L+KyLagL/KzIPtS/MUP/vvFXcHAv+emedExHvw91dTRcRkqi/e3Rh4AjiW4vcZ9uVN+lSgliRJklpNX9ryIUmSJLUcA7UkSZJUgoFakiRJKsFALUmSJJVgoJYkSZJKMFBLUh0iYmVEPFTzNW4DrjEsIk7u+erWXP/jEdGrn2QWEZ+IiB178zElqdX4tnmSVIeIWJaZQ0teYxxwc2bu3M3zBmTmyjKP3QjFJwteQfU5Xd/seiSpWVyhlqQNFBEDIuKCiPh5RPwiIv68GB8aEXdGxAMRsSAiDilOORf4k2KF+4KIaIuIm2uu988RcUxxuz0i5kTEfwGfiog/iYjbIuL+iPhxROzQRT3HRMQ/F7e/HRHfiIj/jIgnIuJDEfHNiPh1RHy75pxlEXFhUeudETGyGJ8cET8tntf3IuLdxXglIr4SEXcDZwAfBy4ontOfRMSfFT+PhyPiuxHxrpp6/m9E/HdRz+E1Nfx18XN6OCLOLcbW+3wlqVUMbHYBktRHDI6Ih4rbT2bmocDxwB8y84MRsQlwT0TcDiwCDs3MVyJiBPDTiJgPnAnsnJmTYc0n9b2d1zJzajH3TuCkzHwsIqYAlwAfXs/57y7mfBy4CdgHOAH4eURMzsyHgCHAA5l5ekTMofrJp58H/hX4QmbeHRFfLsZPLa47LDM/VNS1HTUr1BHxcmZeXtz+x+Jn9PXivNHAVGAHqh9ffH1EzAA+AUzJzFcjYngx97INeL6S1BQGakmqz4rVQbjGR4FJNautWwDbAYuBr0TEflQ/Cn5rYMsNeMzvQHXFG/hT4LqIWH1skzrOvykzMyIWAM9l5oLier8ExgEPFfV9p5h/NXBDRGxBNTTfXYzPA65bu6512LkI0sOAocAPa47dmJmrgF9FxOqfx0eAb2XmqwCZ+fsSz1eSmsJALUkbLqiu4v7wTYPVbRsjgd0zsyMi2oFNuzi/kzdvvVt7zvLi+0bAy10E+vV5vfi+qub26vvr+v1fzwtrlr/NsW8Dn8jMh4ufQ1sX9UD1Z7f6+9qPuaHPV5Kawj3UkrThfgjMjohBABHxgYgYQnWl+vkiTE8DtinmLwU2qzn/t8COEbFJsSq8f1cPkpmvAE9GxKeKx4mI2KWHnsNGwOoV9k8D/5WZfwBeioh9i/HPAnd3dTJvfU6bAc8UP5OZdTz+7cBxNXuthzf4+UpSjzNQS9KGuwL4FfBARDwC/AvVld9/A/aIiPuohsr/AcjMF6nus34kIi7IzEXAfwC/KM558G0eayZwfEQ8DPwSOORt5nbHcmCniLif6h7lLxfjs6i+2PAXwOSa8bVdC3wxIh6MiD8BzgJ+BtxB8bzfTmbeRnU/9X3FHvW/Kg416vlKUo/zbfMkqR+LHng7QEnq71yhliRJkkpwhVqSJEkqwRVqSZIkqQQDtSRJklSCgVqSJP3/7daxAAAAAMAgf+tB7C2KgEGoAQBgEGoAABiEGgAAhgA3ESOFhO8zCgAAAABJRU5ErkJggg==\n", + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAtQAAAHwCAYAAACG+PhNAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvIxREBQAAIABJREFUeJzs3XucnHV99//XhyRATIAYk0AkhpiCECAhHCRQAm7EeBNAEUEORg2nUkKVwk0t2N6E1hblECrc9kbKQU2hBQsihoMIP+igpSpyNKgNIKwmHAOCJCHAbvL5/TFX4hA2ZDbXzs4s+3o+HvvYme/1va75zH50eee735mJzESSJEnShtmo2QVIkiRJfZmBWpIkSSrBQC1JkiSVYKCWJEmSSjBQS5IkSSUYqCVJkqQSDNSS9A4QEZdGxFnNrkOS+qPwfagl9WcR0Q5sCaysGf5AZj5d4pptwNWZOaZcdX1TRHwbWJyZ/6fZtUhSb3CFWpLgY5k5tOZrg8N0T4iIgc18/DIiYkCza5Ck3maglqR1iIi9IuK/I+LliHi4WHlefezYiPh1RCyNiCci4s+L8SHAD4D3RsSy4uu9EfHtiPjHmvPbImJxzf32iDgjIn4BLI+IgcV5342IJRHxZESc8ja1rrn+6mtHxF9HxPMR8UxEfCIiDoyIRyPi9xHxNzXn/l1EXB8R3ymezwMRsUvN8QkRUSl+Dr+MiI+v9bjfiIhbI2I5cDwwE/jr4rnfVMw7MyJ+U1z/VxFxaM01jomI/4qIuRHxUvFcZ9QcHx4R34qIp4vjN9YcOzgiHipq+++ImFR3gyWphxioJakLEbE1cAvwj8Bw4K+A70bEyGLK88DBwObAscDXImK3zFwOzACe3oAV76OBg4BhwCrgJuBhYGtgf+DUiPhfdV5rK2DT4tw5wOXAZ4DdgX2BORExvmb+IcB1xXP9d+DGiBgUEYOKOm4HRgFfAP4tIravOffTwDnAZsC/Av8GnF88948Vc35TPO4WwN8DV0fE6JprTAEWAiOA84ErIyKKY1cB7wJ2Kmr4GkBE7AZ8E/hz4D3AvwDzI2KTOn9GktQjDNSSVA2PLxdfq1c/PwPcmpm3ZuaqzLwDuA84ECAzb8nM32TV3VQD574l6/i/mbkoM1cAHwRGZuaXM/ONzHyCaig+qs5rdQDnZGYHcC3VoHpxZi7NzF8CvwRqV3Pvz8zri/n/RDWM71V8DQXOLeq4C7iZavhf7fuZeU/xc3qtq2Iy87rMfLqY8x3gMWDPmim/zczLM3MlMA8YDWxZhO4ZwEmZ+VJmdhQ/b4A/A/4lM3+WmSszcx7welGzJPWaPrtPT5J60Ccy8/9ba2wb4FMR8bGasUHAfwIUWxLOBj5AdXHiXcCCknUsWuvx3xsRL9eMDQB+XOe1XizCKcCK4vtzNcdXUA3Kb3nszFxVbEd57+pjmbmqZu5vqa58d1V3lyLic8D/BsYVQ0OphvzVnq15/FeLxemhVFfMf5+ZL3Vx2W2AWRHxhZqxjWvqlqReYaCWpK4tAq7KzD9b+0CxpeC7wOeors52FCvbq7codPX2Scuphu7VtupiTu15i4AnM3O7DSl+A7xv9Y2I2AgYA6zeqvK+iNioJlSPBR6tOXft5/um+xGxDdXV9f2Bn2Tmyoh4iD/+vN7OImB4RAzLzJe7OHZOZp5Tx3UkqWHc8iFJXbsa+FhE/K+IGBARmxYv9htDdRV0E2AJ0FmsVn+05tzngPdExBY1Yw8BBxYvsNsKOHU9j38v8ErxQsXBRQ07R8QHe+wZvtnuEfHJ4h1GTqW6deKnwM+o/mPgr4s91W3Ax6huI1mX54Da/dlDqIbsJVB9QSewcz1FZeYzVF/keUlEvLuoYb/i8OXASRExJaqGRMRBEbFZnc9ZknqEgVqSupCZi6i+UO9vqAbBRcAXgY0ycylwCvAfwEtUX5Q3v+bc/wGuAZ4o9mW/l+oL6x4G2qnut/7Oeh5/JdXgOhl4EngBuILqi/oa4fvAkVSfz2eBTxb7ld8APk51H/MLwCXA54rnuC5XAjuu3pOemb8CLgR+QjVsTwTu6UZtn6W6J/x/qL4Y9FSAzLyP6j7qfy7qfhw4phvXlaQe4Qe7SFI/FxF/B2ybmZ9pdi2S1Be5Qi1JkiSVYKCWJEmSSnDLhyRJklSCK9SSJElSCQZqSZIkqYQ++cEuw4YNy2233bbZZajG8uXLGTJkSLPLUA170prsS+uxJ63JvrSe/tiT+++//4XMHLm+eX0yUG+55Zbcd999zS5DNSqVCm1tbc0uQzXsSWuyL63HnrQm+9J6+mNPIuK39cxzy4ckSZJUgoFakiRJKsFALUmSJJVgoJYkSZJKMFBLkiRJJRioJUmSpBIM1JIkSVIJBmpJkiSpBAO1JEmSVIKBWpIkSSrBQC1JkiSVYKCWJEmSSjBQS5IkSSUYqCVJkqQSDNSSJElSCQZqSZIkqQQDtSRJklSCgVqSJEkqwUAtSZIklWCgliRJkkowUEuSJEklGKglSZKkEgzUkiRJUgkGakmSJKkEA7UkSZJUgoFakiRJKsFALUmSJJVgoJYkSZJKMFBLkiRJJRioJUmSpBIM1JIkSVIJBmpJkiSpBAO1JEmSVIKBWpIkST3u5Zdf5vDDD2eHHXZgwoQJ/OQnP+H3v/8906dPZ7vttmP69Om89NJLzS6zR0RmNubCEacAs4EdgAXF8DJgdmY+XMwZBlwB7AwkcFxm/mR91x47ftvc6IiLG1K3NszpEzu5cMHAZpehGvakNdmX1mNPWpN9aT3fPmAIbW1tdc+fNWsW++67LyeccAJvvPEGr776Kl/5ylcYPnw4Z555Jueeey4vvfQS5513XuOKLiki7s/MPdY3r5Er1CcDBwL7AB/KzEnAPwCX1cy5GLgtM3cAdgF+3cB6JEmS1AteeeUVfvSjH3H88ccDsPHGGzNs2DC+//3vM2vWLKAauG+88cZmltljGhKoI+JSYDwwH5iSmavX838KjCnmbA7sB1wJkJlvZObLjahHkiRJveeJJ55g5MiRHHvssey6666ccMIJLF++nOeee47Ro0cDMHr0aJ5//vkmV9ozGrnlox3YIzNfqBn7K2CHzDwhIiZTXa3+FdXV6fuBv8zM5eu43onAiQAjRozcfc5Flzekbm2YLQfDcyuaXYVq2ZPWZF9ajz1pTfal9bx/iwEMHTq0rrkLFy7k5JNP5utf/zo77rgjX//61xkyZAg33HADN99885p5H/vYx7jpppsaVXJp06ZNq2vLR68F6oiYBlwCTM3MFyNiD6or1vtk5s8i4mLglcw8a33Xdg9163GvW+uxJ63JvrQee9Ka7Evr6c4e6meffZa99tqL9vZ2AH784x9z7rnn8vjjj1OpVBg9ejTPPPMMbW1tLFy4sHFFl9QKe6hri5lE9cWHh2Tmi8XwYmBxZv6suH89sFtv1CNJkqTG2WqrrXjf+963Jizfeeed7Ljjjnz84x9n3rx5AMybN49DDjmkmWX2mIb/0y8ixgI3AJ/NzEdXj2fmsx
GxKCK2z8yFwP5Ut39IkiSpj/v617/OzJkzeeONNxg/fjzf+ta3WLVqFUcccQRXXnklY8eO5brrrmt2mT2i4Vs+gHOBw4DfFoc6Vy+dF/uorwA2Bp4Ajq15AeM6bb/99tnKfx7ojyqVSrfeSkeNZ09ak31pPfakNdmX1tMfe1Lvlo+GrVBn5rji5gnFV1dzHqIauiVJkqQ+yU9KlCRJkkowUEuSJEklGKglSZKkEgzUkiRJUgkGakmSJKkEA7UkSZJUgoFakiRJKsFALUmSJJVgoJYkSZJKMFBLkiRJJRioJUmSpBIM1JIkSVIJBmpJkiSpBAO1JEmSVIKBWpIkSSrBQC1JkiSVYKCWJEmSSjBQS5IkSSUYqCVJkqQSDNSSJElSCQZqSZIkqQQDtSRJklSCgVqSJEkqwUAtSZIklWCgliRJkkowUEuSJEklGKglSZKkEgzUkiRJUgkGakmSJKkEA7UkSep1K1euZNddd+Xggw8G4M4772S33XZj8uTJTJ06laeeeqrJFUr1G9jIi0fEKcBsYAdgQTG8DJidmQ8Xc04DTgCymHNsZr72dtdd0bGScWfe0rC61X2nT+zkGHvSUuxJa7Ivrcee9Iz2cw/q1vyLL76YCRMm8MorrwAwe/Zsvv/97zNhwgQuueQSrrrqKmbOnNmIUqUe1+gV6pOBA4F9gA9l5iTgH4DLACJia+AUYI/M3BkYABzV4JokSVITLV68mFtuuYUTTjhhzVhErAnXf/jDH3jPe97TrPKkbmvYCnVEXAqMB+YD38zM/y4O/RQYs1YNgyOiA3gX8HSjapIkSc136qmncv7557N06dI1Y1dccQUHHngggwcPZvPNN+eCCy5oYoVS9zRshTozT6Iajqdl5tdqDh0P/KCY8xQwF/gd8Azwh8y8vVE1SZKk5rr55psZNWoUu++++5vGv/a1r3HrrbeyePFijj32WC655JImVSh1X0P3UK8tIqZRDdRTi/vvBg4B3g+8DFwXEZ/JzKu7OPdE4ESAESNGMmdiZ6/VrfXbcnB1H6Jahz1pTfal9diTnlGpVOqad80113D77bdzww038MYbb/Dqq6+y1157sWjRIlasWEGlUmHs2LEsWLCg7muqdyxbtsyerEOvBeqImARcAczIzBeL4Y8AT2bmkmLODcCfAm8J1Jl5GcXe67Hjt80LF/TqvwW0HqdP7MSetBZ70prsS+uxJz2jfWZbXfPa2v44r1KpMHfuXG688Ua22mor3vve9/KBD3yAK6+8knHjxr1prpqvUqnYk3Xold8gETEWuAH4bGY+WnPod8BeEfEuYAWwP3Bfb9QkSZJaw8CBA7n88ss57LDD2GijjXj3u9/NSSed1OyypLr11j/J5wDvAS6JCIDOzNwjM38WEdcDDwCdwIMUq9CSJOmdra2tbc2K56GHHsqhhx665phbC9SXNDRQZ+a44uYJxVdXc84Gzu7OdQcPGsDCbr7fpRqrUqnU/ec+9Q570prsS+uxJ5LK8pMSJUmSpBIM1JIkSVIJBmpJkiSpBAO1JEmSVIKBWpIkSSrBQC1JkiSVYKCWJEmSSjBQS5IkSSUYqCVJkqQSDNSSJElSCQZqSZIkqQQDtSRJklSCgVqSJEkqwUAtSZIklWCgliRJkkowUEuSJEklGKglSZKkEgzUkiRJUgkGakmSJKkEA7UkSZJUgoFakiRJKsFALUmSJJVgoJYkSZJKMFBLkiRJJRioJUmSpBIM1JIkSVIJBmpJkiSpBAO1JEmSVIKBWpIkSSrBQC1JkiSVYKCWJKmbVq5cya677srBBx8MwJNPPsmUKVPYbrvtOPLII3njjTeaXKGk3jSwUReOiFOA2cAOwIJieBkwOzMfLua0A0uBlUBnZu5Rz7VXdKxk3Jm39HjN2nCnT+zkGHvSUuxJa7IvrefbBwzp9jkXX3wxEyZM4JVXXgHgjDPO4LTTTuOoo47ipJNO4sorr2T27Nk9XaqkFtXIFeqTgQOBfYAPZeYk4B+Ay9aaNy0zJ9cbpiVJaqbFixdzyy23cMIJJwCQmdx1110cfvjhAMyaNYsbb7yxmSVK6mUNCdQRcSkwHpgPTMnMl4pDPwXGNOIxJUnqDaeeeirnn38+G21U/U/oiy++yLBhwxg4sPpH3zFjxvDUU081s0RJvawhgTozTwKeprr6/LWaQ8cDP6idCtweEfdHxImNqEWSpJ5y8803M2rUKHbfffc1Y5n5lnkR0ZtlSWqyhu2hXltETKMaqKfWDO+TmU9HxCjgjoj4n8z80TrOPxE4EWDEiJHMmdjZ8JpVvy0HV/eGqnXYk9ZkX1rPsmXLqFQqdc295ppruP3227nhhht44403ePXVVzn66KNZsmQJd955JwMGDOCXv/wlm266ad3XVNe60xf1Dnuybr0SqCNiEnAFMCMzX1w9nplPF9+fj4jvAXsCXQbqzLyMYv/12PHb5oULeu3fAqrD6RM7sSetxZ60JvvSer59wBDa2trqmls7r1KpMHfuXG6++WY+9alPsWTJEo466iiuvfZajj322Lqvqa5VKhV/hi3Gnqxbw982LyLGAjcAn83MR2vGh0TEZqtvAx8FHml0PZIk9bTzzjuPf/qnf2LbbbflxRdf5Pjjj292SZJ6UW8sk8wB3gNcUuwpW/32eFsC3yvGBgL/npm31XPBwYMGsPDcgxpUrjZEpVKhfWZbs8tQDXvSmuxL69nQP2G3tbWtWa0bP3489957b88VJalPaVigzsxxxc0Tiq+1jz8B7NKox5ckSZJ6g5+UKEmSJJVgoJYkSZJKMFBLkiRJJRioJUmSpBIM1JIkSVIJBmpJkiSpBAO1JEmSVIKBWpIkSSrBQC1JkiSVYKCWJEmSSjBQS5IkSSUYqCVJkqQSDNSSJElSCQZqSZIkqQQDtSRJklSCgVqSJEkqwUAtSZIklWCgliRJkkowUEuSJEklGKglSZKkEgzUkiRJUgkGakmSJKkEA7UkSZJUgoFakiRJKsFALUmSJJVgoJYkSZJKMFBLkiRJJRioJUmSpBIM1JIkSVIJA5tdgCRJ3fXaa6+x33778frrr9PZ2cnhhx/O3//937PvvvuydOlSAJ5//nn23HNPbrzxxiZXK+mdrmGBOiJOAWYDOwALiuFlwOzMfDgitge+U3PKeGBOZl60vmuv6FjJuDNv6emSVcLpEzs5xp60FHvSmuzLurWfe1DdczfZZBPuuusuhg4dSkdHB1OnTmXGjBn8+Mc/XjPnsMMO45BDDmlEqZL0Jo1coT4ZmAGMBn6dmS9FxAzgMmBKZi4EJgNExADgKeB7DaxHkvQOEREMHToUgI6ODjo6OoiINceXLl3KXXfdxbe+9a1mlSipH2nIHuqIuJTqivN8quH5peLQT4ExXZyyP/CbzPxtI+qRJL3zrFy5ksmTJzNq1CimT5/OlClT1hz73ve+x/7778/mm2/exAol9RcNCdSZeRLwNDAtM79Wc+h44AddnHIUcE0japEkvTMNGDCAhx56iMWLF3PvvffyyCOPrDl2zTXXcPTRRzexOkn9SWRmYy4c0Q7sk
ZkvFPenAZcAUzPzxZp5G1MN3ztl5nNvc70TgRMBRowYufuciy5vSN3aMFsOhudWNLsK1bInrcm+rNvErbfY4HPnzZvHpptuypFHHskf/vAHPve5z3Hdddex8cYbr/fcZcuWrdk+otZhX1pPf+zJtGnT7s/MPdY3r1fe5SMiJgFXADNqw3RhBvDA24VpgMy8jOr+a8aO3zYvXOAblLSS0yd2Yk9aiz1pTfZl3dpnttU9d8mSJQwaNIhhw4axYsUKzjrrLM444wza2tq49NJL+cQnPsFHP/rRuq5VqVRoa6v/sdU77EvrsSfr1vDf6hExFrgB+GxmPtrFlKNxu4ckqRueeeYZZs2axcqVK1m1ahVHHHEEBx98MADXXnstZ555ZpMrlNSf9MYyyRzgPcAlxSuwO1cvnUfEu4DpwJ/3Qh2SpHeISZMm8eCDD3Z5rFKp9G4xkvq9hgXqzBxX3Dyh+OpqzqtUw3a3DB40gIXdeL9SNV6lUunWn2vVePakNdkXSXrn8aPHJUmSpBIM1JIkSVIJBmpJkiSpBAO1JEmSVIKBWpIkSSrBQC1JkiSVYKCWJEmSSjBQS5IkSSUYqCVJkqQSDNSSJElSCQZqSZIkqQQDtSRJklSCgVqSJEkqwUAtSZIklWCgliRJkkowUEuSJEklGKglSZKkEgzUkiRJUgkGakmSJKkEA7UkSZJUgoFakiRJKsFALUmSJJVgoJYkSZJKMFBLkiRJJRioJUmSpBIM1JIkSVIJBmpJkiSpBAO1JEmSVIKBWpIkSSrBQC1JkiSVYKCWpH7itddeY88992SXXXZhp5124uyzzwZg5syZbL/99uy8884cd9xxdHR0NLlSSepbBjby4hFxCjAb2AFYUAwvA2Zn5sMRsSnwI2CTopbrM/Ps9V13RcdKxp15S4Oq1oY4fWInx9iTlmJPWlNP96X93IPqnrvJJptw1113MXToUDo6Opg6dSozZsxg5syZXH311QB8+tOf5oorrmD27Nk9VqMkvdM1NFADJwMzgNHArzPzpYiYAVwGTAFeBz6cmcsiYhDwXxHxg8z8aYPrkqR+JyIYOnQoAB0dHXR0dBARHHjggWvm7LnnnixevLhZJUpSn9SwLR8RcSkwHpgPTMnMl4pDPwXGAGTVsmJ8UPGVjapJkvq7lStXMnnyZEaNGsX06dOZMmXKmmMdHR1cddVVHHDAAU2sUJL6noYF6sw8CXgamJaZX6s5dDzwg9V3ImJARDwEPA/ckZk/a1RNktTfDRgwgIceeojFixdz77338sgjj6w5dvLJJ7Pffvux7777NrFCSep7IrN7C8IR8W7gfZn5izrmtgN7ZOYLxf1pwCXA1Mx8ca25w4DvAV/IzEe6uNaJwIkAI0aM3H3ORZd3q2411paD4bkVza5CtexJa+rpvkzceosNPnfevHlsuummHHnkkcybN4/HHnuML3/5y2y0Uf96vfqyZcvWbIVR67Avrac/9mTatGn3Z+Ye65tX1x7qiKgAHy/mPwQsiYi7M/N/11tQREwCrgBmrB2mATLz5eJxDgDeEqgz8zKqe68ZO37bvHBBo7d/qztOn9iJPWkt9qQ19XRf2me21T13yZIlDBo0iGHDhrFixQrOOusszjjjDB5//HEWLlzInXfeyeDBg3ustr6iUqnQ1tbW7DK0FvvSeuzJutX7W32LzHwlIk4AvpWZZ0fEeleoV4uIscANwGcz89Ga8ZFARxGmBwMfAc7rRv2SpDo988wzzJo1i5UrV7Jq1SqOOOIIDj74YAYOHMg222zD3nvvDcAnP/lJ5syZ0+RqJanvqDdQD4yI0cARwN9uwOPMAd4DXBIRAJ3F8vloYF5EDKC6n/s/MvPmDbi+JGk9Jk2axIMPPviW8c7OziZUI0nvHPUG6i8DPwTuycyfR8R44LH1nZSZ44qbJxRfax//BbBrnTWsMXjQABZ2471X1XiVSqVbf3pW49mT1mRfJOmdp65AnZnXAdfV3H8COKxRRUmSJEl9RV0v5Y6ID0TEnRHxSHF/UkT8n8aWJkmSJLW+et8b6XLgS0AHrNmqcVSjipIkSZL6inoD9bsy8961xnwViyRJkvq9egP1CxHxJxQfCx4RhwPPNKwqSZIkqY+o910+/oLqh6rsEBFPAU8CMxtWlSRJktRHrDdQR8RGVD8+/CMRMQTYKDOXNr40SZIkqfWtd8tHZq4CPl/cXm6YliRJkv6o3j3Ud0TEX0XE+yJi+OqvhlYmSZIk9QH17qE+rvj+FzVjCYzv2XIkSZKkvqXeT0p8f6MLkSRJkvqiugJ1RHyuq/HM/NeeLUeSJEnqW+rd8vHBmtubAvsDDwAGakmSJPVr9W75+ELt/YjYAriqIRVJkiRJfUi97/KxtleB7XqyEEmSJKkvqncP9U0UHztONYTvCFzXqKIkSZKkvqLePdRza253Ar/NzMUNqEeSJEnqU+rd8nFgZt5dfN2TmYsj4ryGViZJkiT1AfUG6uldjM3oyUIkSZKkvuhtt3xExGzgZGB8RPyi5tBmwD2NLEySJEnqC9a3h/rfgR8AXwXOrBlfmpm/b1hVkiRJUh/xtoE6M/8A/AE4GiAiRlH9YJehETE0M3/X+BIlSZKk1lXXHuqI+FhEPAY8CdwNtFNduZYkSZL6tXpflPiPwF7Ao5n5fqofPe4eakmSJPV79Qbqjsx8EdgoIjbKzP8EJjewLkmSJKlPqPeDXV6OiKHAj4F/i4jnqX7AiyRJktSv1btCfQjwKnAqcBvwG+BjjSpKkiRJ6ivqWqHOzOURsQ2wXWbOi4h3AQMaW5okSZLU+up9l48/A64H/qUY2hq4sVFFSZIkSX1FvVs+/gLYB3gFIDMfA0Y1qihJUs977bXX2HPPPdlll13YaaedOPvsswGYOXMm22+/PTvvvDPHHXccHR0dTa5UkvqWel+U+HpmvhERAETEQCA39EEj4hRgNvAAcDlwETAIeCEzP7S+81d0rGTcmbds6MOrAU6f2Mkx9qSl2JPW1NN9aT/3oLrnbrLJJtx1110MHTqUjo4Opk6dyowZM5g5cyZXX301AJ/+9Ke54oormD17do/VKEnvdPUG6rsj4m+AwRExHTgZuKnE454MzABeAv4bOCAzf1d8EqMkqQEigqFDhwLQ0dFBR0cHEcGBBx64Zs6ee+7J4sWLm1WiJPVJ9W75OBNYAiwA/hy4Ffg/G/KAEXEpMB6YT3UryQ2rP8I8M5/fkGtKkuqzcuVKJk+ezKhRo5g+fTpTpkxZc6yjo4OrrrqKAw44oIkVSlLf87aBOiLGAmTmqsy8PDM/lZmHF7c3aMtHZp4EPA1MA0YC746ISkTcHxGf25BrSpLqM2DAAB566CEWL17MvffeyyOPPLLm2Mknn8x+++3Hvvvu28QKJanvibfLxRHxQGbuVtz+bmYe1iMPGtEO7AH8XfF9f2Aw8BPgoMx8tItzTgROBBgxYuTucy66vCdKUQ/ZcjA8t6LZVaiWPWlNPd2XiVtvscHnzps3j0033ZQjjzySefPm8dhjj/HlL3+ZjTaq94+X7wzLli1bsxVG
rcO+tJ7+2JNp06bdn5l7rG/e+vZQR83t8eVK6tJiqi9EXA4sj4gfAbsAbwnUmXkZcBnA2PHb5oUL6t3+rd5w+sRO7ElrsSetqaf70j6zre65S5YsYdCgQQwbNowVK1Zw1llnccYZZ/D444+zcOFC7rzzTgYPHtxjtfUVlUqFtra2ZpehtdiX1mNP1m19v9VzHbd7yveBfy7eNWRjYArwtQY8jiT1e8888wyzZs1i5cqVrFq1iiOOOIKDDz6YgQMHss0227D33nsD8MlPfpI5c+Y0uVpJ6jvWF6h3iYhXqK5UDy5uU9zPzNy8zINn5q8j4jbgF8Aq4IrMfGQ9p0mSNsCkSZN48MEH3zLe2dnZhGok6Z3jbQN1Zjbk48Uzc1zN7QuAC7pz/uBBA1jYjfdeVeNVKpVu/elZjWdPWpN9kaR3nv71yhNJkiSphxmoJUmSpBIM1JLtERTUAAAWb0lEQVQkSVIJBmpJkiSpBAO1JEmSVIKBWpIkSSrBQC1JkiSVYKCWJEmSSjBQS5IkSSUYqCVJkqQSDNSSJElSCQZqSZIkqQQDtSRJklSCgVqSJEkqwUAtSZIklWCgliRJkkowUEuSJEklGKglSZKkEgzUkiRJUgkGakmSJKkEA7UkSZJUgoFakiRJKsFALUmSJJVgoJYkSZJKMFBLkiRJJRioJUmSpBIM1JIkSVIJBmpJkiSpBAO1JEmSVIKBWpIkSSrBQC2pT1i0aBHTpk1jwoQJ7LTTTlx88cVvOj537lwighdeeKFJFUqS+quBzXjQiDgFmA08kJkzI+KDwE+BIzPz+vWdv6JjJePOvKXRZaobTp/YyTH2pKX0hZ60n3tQ3XMHDhzIhRdeyG677cbSpUvZfffdmT59OjvuuCOLFi3ijjvuYOzYsQ2sVpKkrjVrhfpk4MAiTA8AzgN+2KRaJPUBo0ePZrfddgNgs802Y8KECTz11FMAnHbaaZx//vlERDNLlCT1U72+Qh0RlwLjgfkR8U0gge8CH+ztWiT1Te3t7Tz44INMmTKF+fPns/XWW7PLLrs0uyxJUj/V64E6M0+KiAOAacAmwL8DH8ZALakOy5Yt47DDDuOiiy5i4MCBnHPOOdx+++3NLkuS1I81ZQ91jYuAMzJz5fr+VBsRJwInAowYMZI5Ezt7oTzVa8vB1T27ah19oSeVSqVb8zs7O/nSl77ElClTGD58ONdeey2PPvoo22+/PQBLlixhp5124hvf+AbDhw9vQMXlLVu2rNvPW41lT1qTfWk99mTdIjN7/0Ej2oE9gJ8Dq5P0COBV4MTMvPHtzh87ftvc6IiL326KetnpEzu5cEGz/32mWn2hJ915UWJmMmvWLIYPH85FF13U5Zxx48Zx3333MWLEiJ4qscdVKhXa2tqaXYZq2JPWZF9aT3/sSUTcn5l7rG9eU982LzPfn5njMnMccD1w8vrCtKT+6Z577uGqq67irrvuYvLkyUyePJlbb7212WVJktT0LR+SVJepU6eyvr+otbe3904xkiTVaEqgLlak1x47pt7zBw8awMJu/KlYjVepVGif2dbsMlTDnkiS1Dv8pERJkiSpBAO1JEmSVIKBWpIkSSrBQC1JkiSVYKCWJEmSSjBQS5IkSSUYqCVJkqQSDNSSJElSCQZqSZIkqQQDtSRJklSCgVqSJEkqwUAtSZIklWCgliRJkkowUEuSJEklGKglSZKkEgzUkiRJUgkGakmSJKkEA7UkSZJUgoFakiRJKsFALUmSJJVgoJYkSZJKMFBLkiRJJRioJUmSpBIM1JIkSVIJBmpJkiSpBAO1JEmSVIKBWpIkSSrBQC1JkiSVYKCWJEmSSjBQS2qIRYsWMW3aNCZMmMBOO+3ExRdfDMAXv/hFdthhByZNmsShhx7Kyy+/3ORKJUkqZ2AzHjQiTgFmA78C3gvsBvxtZs6t5/wVHSsZd+YtDaxQ3XX6xE6OsSctpRE9aT/3oLrnDhw4kAsvvJDddtuNpUuXsvvuuzN9+nSmT5/OV7/6VQYOHMgZZ5zBV7/6Vc4777werVOSpN7UlEANnAzMAJYD2wCfaFIdkhpk9OjRjB49GoDNNtuMCRMm8NRTT/HRj350zZy99tqL66+/vlklSpLUI3p9y0dEXAqMB+YDMzPz50BHb9chqfe0t7fz4IMPMmXKlDeNf/Ob32TGjBlNqkqSpJ7R6yvUmXlSRBwATMvMF3r78SX1rmXLlnHYYYdx0UUXsfnmm68ZP+eccxg4cCAzZ85sYnWSJJUXmdn7DxrRDuyxOlBHxN8By95uD3VEnAicCDBixMjd51x0eS9UqnptORieW9HsKlSrET2ZuPUW3Zrf2dnJl770JT74wQ9yxBFHrBm/7bbbuOmmm7jwwgvZdNNNe7bIFrds2TKGDh3a7DJUw560JvvSevpjT6ZNm3Z/Zu6xvnnN2kPdbZl5GXAZwNjx2+aFC/pM6f3C6RM7sSetpRE9aZ/ZVvfczGTWrFnss88+XHTRRWvGb7vtNubPn8/dd9/NyJEje7S+vqBSqdDW1tbsMlTDnrQm+9J67Mm6mYAkNcQ999zDVVddxcSJE5k8eTIAX/nKVzjllFN4/fXXmT59OlB9YeKll17azFIlSSqlqYE6IrYC7gM2B1ZFxKnAjpn5SjPrklTe1KlT6WpL2YEHHtiEaiRJapymBOrMHFdzd0x3zx88aAALu/F+uGq8SqXSre0Aajx7IklS7/CTEiVJkqQSDNSSJElSCQZqSZIkqQQDtSRJklSCgVqSJEkqwUAtSZIklWCgliRJkkowUEuSJEklGKglSZKkEgzUkiRJUgkGakmSJKkEA7UkSZJUgoFakiRJKsFALUmSJJVgoJYkSZJKMFBLkiRJJRioJUmSpBIM1JIkSVIJBmpJkiSpBAO1JEmSVIKBWpIkSSrBQC1JkiSVYKCWJEmSSjBQS5IkSSUYqCVJkqQSDNSSJElSCQZqSZIkqQQDtSRJklSCgVqSJEkqwUAtSZIklWCglrpw3HHHMWrUKHbeeee3HJs7dy4RwQsvvNCEyiRJUqsZ2MiLR8QpwGzgV8B7gd2Av83MuTVzTgNOABJYABybma+93XVXdKxk3Jm3NKxudd/pEzs5psV70n7uQXXPPeaYY/j85z/P5z73uTeNL1q0iDvuuIOxY8f2dHmSJKmPavQK9cnAgVRD9SnA3NqDEbF1Mb5HZu4MDACOanBN0nrtt99+DB8+/C3jp512Gueffz4R0YSqJElSK2pYoI6IS4HxwHxgZmb+HOjoYupAYHBEDATeBTzdqJqkMubPn8/WW2/NLrvs0uxSJElSC2nYlo/MPCkiDgCmZWaXm00z86mImAv8DlgB3J6ZtzeqJmlDvfrqq5xzzjncfrv/85QkSW/W0D3U6xMR7wYOAd4PvAxcFxGfycyru5h7InAiwIgRI5kzsbNXa9Xb23JwdR91K6tUKt2a/+yzz7J8+XIqlQpPPPEEjz76KNtvvz0AS5YsYaedduIb3/hGl1tDWsGyZcu6/ZzVePal9diT1mRfWo89WbemBmrgI8CTmbkEICJuAP4UeEu
gzszLgMsAxo7fNi9c0OzSVev0iZ20ek/aZ7Z1b357O0OGDKGtrY22tjaOO+64NcfGjRvHfffdx4gRI3q4yp5TqVRoa2trdhlai31pPfakNdmX1mNP1q3Zb5v3O2CviHhXVF/ltT/w6ybXJHH00Uez9957s3DhQsaMGcOVV17Z7JIkSVKL6pUlxYjYCrgP2BxYFRGnAjtm5s8i4nrgAaATeJBiFVpqpmuuueZtj7e3t/dOIZIkqeU1NFBn5riau2PWMeds4OzuXHfwoAEs7MZ7CqvxKpVKt7dUSJIkvRM0e8uHJEmS1KcZqCVJkqQSDNSSJElSCQZqSZIkqQQDtSRJklSCgVqSJEkqwUAtSZIklWCgliRJkkowUEuSJEklGKglSZKkEgzUkiRJUgkGakmSJKkEA7UkSZJUgoFakiRJKsFALUmSJJVgoJYkSZJKMFBLkiRJJRioJUmSpBIM1JIkSVIJBmpJkiSpBAO1JEmSVIKBWpIkSSrBQC1JkiSVYKCWJEmSSjBQS5IkSSUYqCVJkqQSDNSSJElSCQZqSZIkqQQDtSRJklSCgVqSJEkqYWCzC9gQKzpWMu7MW5pdhmqcPrGTY1q8J+3nHlT33OOOO46bb76ZUaNG8cgjj7zp2Ny5c/niF7/IkiVLGDFiRE+XKUmS+piGrlBHxCkR8euI+G5E/CQiXo+Iv1przgERsTAiHo+IMxtZj1SvY445httuu+0t44sWLeKOO+5g7NixTahKkiS1okZv+TgZOBCYDZwCzK09GBEDgP8HzAB2BI6OiB0bXJO0Xvvttx/Dhw9/y/hpp53G+eefT0Q0oSpJktSKGhaoI+JSYDwwH5iZmT8HOtaatifweGY+kZlvANcChzSqJqmM+fPns/XWW7PLLrs0uxRJktRCGraHOjNPiogDgGmZ+cI6pm0NLKq5vxiY0tXEiDgROBFgxIiRzJnY2ZPlqqQtB1f3UbeySqXSrfnPPvssy5cvp1Kp8Nprr3HGGWdwwQUXrLl/zz33sMUWWzSm2B6wbNmybj9nNZ59aT32pDXZl9ZjT9at2S9K7Orv5tnVxMy8DLgMYOz4bfPCBc0uXbVOn9hJq/ekfWZb9+a3tzNkyBDa2tpYsGABL774Ip///OcBeOGFF/jCF77Avffey1ZbbdWAasurVCq0tbU1uwytxb60HnvSmuxL67En69bsBLQYeF/N/THA002qRVqniRMn8vzzz6+5P27cOO677z7f5UOSJDX9fah/DmwXEe+PiI2Bo6juuZaa6uijj2bvvfdm4cKFjBkzhiuvvLLZJUmSpBbVKyvUEbEVcB+wObAqIk4FdszMVyLi88APgQHANzPzl+u73uBBA1jYjfcUVuNVKpVub6loZddcc83bHm9vb++dQiRJUstraKDOzHE1d8esY86twK2NrEOSJElqlGZv+ZAkSZL6NAO1JEmSVIKBWpIkSSrBQC1JkiSVYKCWJEmSSjBQS5IkSSUYqCVJkqQSDNSSJElSCQZqSZIkqQQDtSRJklSCgVqSJEkqwUAtSZIklWCgliRJkkowUEuSJEklGKglSZKkEgzUkiRJUgkGakmSJKkEA7UkSZJUgoFakiRJKsFALUmSJJVgoJYkSZJKMFBLkiRJJRioJUmSpBIM1JIkSVIJBmpJkiSpBAO1JEmSVIKBWpIkSSrBQC1JkiSVYKCWJEmSSjBQS5IkSSUYqCVJkqQSDNSSJElSCQZqSZIkqQQDtSRJklRCZGaza+i2iFgKLGx2HXqTEcALzS5Cb2JPWpN9aT32pDXZl9bTH3uyTWaOXN+kgb1RSQMszMw9ml2E/igi7rMnrcWetCb70nrsSWuyL63HnqybWz4kSZKkEgzUkiRJUgl9NVBf1uwC9Bb2pPXYk9ZkX1qPPWlN9qX12JN16JMvSpQkSZJaRV9doZYkSZJaQp8K1BFxQEQsjIjHI+LMZtfTX0XENyPi+Yh4pGZseETcERGPFd/f3cwa+5uIeF9E/GdE/DoifhkRf1mM25cmiYhNI+LeiHi46MnfF+Pvj4ifFT35TkRs3Oxa+6OIGBARD0bEzcV9+9JEEdEeEQsi4qGIuK8Y8/dXk0XEsIi4PiL+p/jvy972pWt9JlBHxADg/wEzgB2BoyNix+ZW1W99GzhgrbEzgTszczvgzuK+ek8ncHpmTgD2Av6i+P+HfWme14EPZ+YuwGTggIjYCzgP+FrRk5eA45tYY3/2l8Cva+7bl+ablpmTa96Wzd9fzXcxcFtm7gDsQvX/M/alC30mUAN7Ao9n5hOZ+QZwLXBIk2vqlzLzR8Dv1xo+BJhX3J4HfKJXi+rnMvOZzHyguL2U6i+9rbEvTZNVy4q7g4qvBD4MXF+M25MmiIgxwEHAFcX9wL60In9/NVFEbA7sB1wJkJlvZObL2Jcu9aVAvTWwqOb+4mJMrWHLzHwGquEOGNXkevqtiBgH7Ar8DPvSVMW2goeA54E7gN8AL2dmZzHF32PNcRHw18Cq4v57sC/NlsDtEXF/RJxYjPn7q7nGA0uAbxXbo66IiCHYly71pUAdXYz5FiVSjYgYCnwXODUzX2l2Pf1dZq7MzMnAGKp/ZZvQ1bTerap/i4iDgecz8/7a4S6m2pfetU9m7kZ1W+dfRMR+zS5IDAR2A76RmbsCy3F7xzr1pUC9GHhfzf0xwNNNqkVv9VxEjAYovj/f5Hr6nYgYRDVM/1tm3lAM25cWUPyZtEJ1f/uwiBhYHPL3WO/bB/h4RLRT3Tr4Yaor1valiTLz6eL788D3qP4D1N9fzbUYWJyZPyvuX081YNuXLvSlQP1zYLvildgbA0cB85tck/5oPjCruD0L+H4Ta+l3ij2gVwK/zsx/qjlkX5okIkZGxLDi9mDgI1T3tv8ncHgxzZ70ssz8UmaOycxxVP87cldmzsS+NE1EDImIzVbfBj4KPIK/v5oqM58FFkXE9sXQ/sCvsC9d6lMf7BIRB1JdSRgAfDMzz2lySf1SRFwDtAEjgOeAs4Ebgf8AxgK/Az6VmWu/cFENEhFTgR8DC/jjvtC/obqP2r40QURMovqCnQFUFy/+IzO/HBHjqa6MDgceBD6Tma83r9L+KyLagL/KzIPtS/MUP/vvFXcHAv+emedExHvw91dTRcRkqi/e3Rh4AjiW4vcZ9uVN+lSgliRJklpNX9ryIUmSJLUcA7UkSZJUgoFakiRJKsFALUmSJJVgoJYkSZJKMFBLUh0iYmVEPFTzNW4DrjEsIk7u+erWXP/jEdGrn2QWEZ+IiB178zElqdX4tnmSVIeIWJaZQ0teYxxwc2bu3M3zBmTmyjKP3QjFJwteQfU5Xd/seiSpWVyhlqQNFBEDIuKCiPh5RPwiIv68GB8aEXdGxAMRsSAiDilOORf4k2KF+4KIaIuIm2uu988RcUxxuz0i5kTEfwGfiog/iYjbIuL+iPhxROzQRT3HRMQ/F7e/HRHfiIj/jIgnIuJDEfHNiPh1RHy75pxlEXFhUeudETGyGJ8cET8tntf3IuLdxXglIr4SEXcDZwAfBy4ontOfRMSfFT+PhyPiuxHxrp
p6/m9E/HdRz+E1Nfx18XN6OCLOLcbW+3wlqVUMbHYBktRHDI6Ih4rbT2bmocDxwB8y84MRsQlwT0TcDiwCDs3MVyJiBPDTiJgPnAnsnJmTYc0n9b2d1zJzajH3TuCkzHwsIqYAlwAfXs/57y7mfBy4CdgHOAH4eURMzsyHgCHAA5l5ekTMofrJp58H/hX4QmbeHRFfLsZPLa47LDM/VNS1HTUr1BHxcmZeXtz+x+Jn9PXivNHAVGAHqh9ffH1EzAA+AUzJzFcjYngx97INeL6S1BQGakmqz4rVQbjGR4FJNautWwDbAYuBr0TEflQ/Cn5rYMsNeMzvQHXFG/hT4LqIWH1skzrOvykzMyIWAM9l5oLier8ExgEPFfV9p5h/NXBDRGxBNTTfXYzPA65bu6512LkI0sOAocAPa47dmJmrgF9FxOqfx0eAb2XmqwCZ+fsSz1eSmsJALUkbLqiu4v7wTYPVbRsjgd0zsyMi2oFNuzi/kzdvvVt7zvLi+0bAy10E+vV5vfi+qub26vvr+v1fzwtrlr/NsW8Dn8jMh4ufQ1sX9UD1Z7f6+9qPuaHPV5Kawj3UkrThfgjMjohBABHxgYgYQnWl+vkiTE8DtinmLwU2qzn/t8COEbFJsSq8f1cPkpmvAE9GxKeKx4mI2KWHnsNGwOoV9k8D/5WZfwBeioh9i/HPAnd3dTJvfU6bAc8UP5OZdTz+7cBxNXuthzf4+UpSjzNQS9KGuwL4FfBARDwC/AvVld9/A/aIiPuohsr/AcjMF6nus34kIi7IzEXAfwC/KM558G0eayZwfEQ8DPwSOORt5nbHcmCniLif6h7lLxfjs6i+2PAXwOSa8bVdC3wxIh6MiD8BzgJ+BtxB8bzfTmbeRnU/9X3FHvW/Kg416vlKUo/zbfMkqR+LHng7QEnq71yhliRJkkpwhVqSJEkqwRVqSZIkqQQDtSRJklSCgVqSJP3/7daxAAAAAMAgf+tB7C2KgEGoAQBgEGoAABiEGgAAhgA3ESOFhO8zCgAAAABJRU5ErkJggg==", "text/plain": [ "
" ] @@ -251,12 +250,14 @@ "source": [ "if INTERACTIVE:\n", " # create widget for interactive feature importance plot\n", - " interact(render_plot_importance,\n", - " importance_type=['split', 'gain'],\n", - " max_features=(1, X_train.shape[-1]),\n", - " precision=(0, 10))\n", + " interact(\n", + " render_plot_importance,\n", + " importance_type=[\"split\", \"gain\"],\n", + " max_features=(1, X_train.shape[-1]),\n", + " precision=(0, 10),\n", + " )\n", "else:\n", - " render_plot_importance(importance_type='split')" + " render_plot_importance(importance_type=\"split\")" ] }, { @@ -273,8 +274,7 @@ "outputs": [], "source": [ "def render_histogram(feature):\n", - " ax = lgb.plot_split_value_histogram(gbm, feature=feature,\n", - " bins='auto', figsize=(10, 5))\n", + " lgb.plot_split_value_histogram(gbm, feature=feature, bins=\"auto\", figsize=(10, 5))\n", " plt.show()" ] }, @@ -285,7 +285,7 @@ "outputs": [ { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAmEAAAFNCAYAAABIc7ibAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvIxREBQAAIABJREFUeJzt3XucZGV95/HPF/CCDoLZ0YkiMom3REFJaO9Ge8QYFJVN4o0QFdfs7MaNt2gi2bjxsmsk0bjekhiiBI2GUVGzCl7AS0MkQDJDlAHxkugoIIKgDDSCiv72jzotPUVfqnu66umu+bxfr35N1alzzvN76qnq/s5zTtVJVSFJkqTR2qt1AZIkSXsiQ5gkSVIDhjBJkqQGDGGSJEkNGMIkSZIaMIRJkiQ1YAjTHi3JVJLf6W4fm+SMUbe7wvt9VZL3LPD4xUkmV7rd1SbJ/0lydZJvr9D+Hpnkq0mmk/znldjnarPY6z/JZJLLRlnTMCTZN8lHk+xM8oHW9WjPZgjTmpfkUUn+uful+t0k5yR58FL3U1XvrarHz9pvJbn3ylbbVlU9oKqmFlonycau7/uMqKwVleQg4KXA/avqZ1dot68B3lZV66rqH3dnR0l2JHncCtW1YvaE13/nqcAG4D9V1dOSHJXkc0muTfLtJH+bZL/ZGyR5XJILktyQ5NIkT29TusaNIUxrWpI7AacBbwV+BjgQeDXwg5Z1aX4jCHcHA9dU1VVL3XCB2g4GLt6tqlbIWg3Hq8jBwFeq6ubu/v7A/wHuDvwicA/g9TMrJ7k/8A/AH3frHgZsG2XBGl+GMK119wWoqlOq6sdVdWNVnVFVFwIkOa6bGXtrN1P2pSRHzLWjbt3PdbfP7hZ/oTsE9Yy+dW/X/c/5kFnL7pLkxiR3TXLnJKcl+U6S73W37zFPu7scQuyfiUqyf5J3JrkiyeXdoba9F3hObpvk3Umu7w4/Tsza909nYZI8JMnWJNcluTLJG7vVZvp+bdf3hyfZK8krknwjyVXd/veftd9nd49dk+R/9bXzqiSnJnlPkuuA47q2z+2ewyuSvC3JbWftr5I8vzsEeH2S/53kXt021yV5/+z1Z233OOBM4O5d7Sd3y5/SPRfXpnco+Bf7npOXJ7kQuKE/5CT5D+DngY92+7zdQmPS1fmZ7rm4Osl7kxzQPfb3wD1n7esPM8dhvgGev72SHJ/kP7p23p/kZ+Z6MSQ5K8lvdrcf1T23T5x5vpJ8vrs90Os/yUu718AVSZ47V5vdelPduJ3TjeEZSdbPevwD6c087UxydpIHzHrs5CR/leTjXfvnJPnZJG9K7/30pSS/NGv9uyf5YHrvt68neeE8Nb0a+BPgGd1+n1dV/1BVn6iq71fV94C/BR45a7NXAH9TVR+vqpur6pqq+o/5+i0thSFMa91XgB8neVeSJyS58xzrPBT4GrAeeCXwofn+YM2oqkd3Nx/UHYJ6X9/jPwA+BBwza/HTgbO6GZi9gL+j97/uewI3Am9bcu963gXcDNwb+CXg8cBC55M9BdgCHAB8ZIF23wy8uaruBNwLeH+3fKbvB3R9Pxc4rvvZRC+QrJvZb3ozBX8FHAvcjd5swYF9bR0NnNrV9F7gx8BL6I3Jw4EjgOf3bXMkcDjwMOAPgRO7Ng4CDmHX5x6AqvoU8ATgW13txyW5L3AK8GLgLsDH6IWg2SHuGOCors839+3zXsA3gSd3+/wBC49JgNdxy8zKQcCrun09q29ff97fh3n0P38vBP4z8Jiune8BfznPtmcBk93tR9N7Lzxm1v2z+jdY4PX/s9wyvs8D/nKe99yM3wKeC9wVuC3wslmPfRy4T/fYBV2/Zns6vQC0nt7M9rndeuvpPRdvBEiyF/BR4AtdXUcAL07ya3P065XAnwLv6/r1zjlqfjS7zno+rGtnexc837PY7w9pUIYwrWlVdR3wKKDo/Q/2O0k+kmTDrNWuAt5UVT/q/ph8md4f3N31D+waBH6rW0b3v+UPdv+7vh54Lbf84RtY148nAC+uqhu6gPd/gWcusNnnqupjVfVj4O+BB82z3o+AeydZX1XTVXXeAvs8FnhjVX2tqqaBPwKe2c0aPRX4aFV9rqp+SG+mof+itOdW1T9W1U+62cptVXVeN7OwA/gbbv38/FlVXVdVFwMXAWd07e+k9wf8lxjMM4DTq+rMqvoR8AZgX+ARs9Z5S1VdWlU3Lrazxcakqv69a+sHVfUdemFhyWPfZ5fnD/hvwB9X1WVdKHwV8NT+WbzOWewaul436/5jmCOELeBHwGu699LHgGngfgus/3dV9ZWu5vfTO5QHQFWdVFXXz6r/QZk1uwp8uHud3AR8GLipqt7dva7fxy3j/2DgLlX1mqr6YVV9jd7vgoXeI3NK8qvAc+i9hmfcA3gW8Jv0QuO+9E5/kHab5xZozauqS+jN0pDkF4D3AG/iloB0ee16pfpv0Js92F2fAfZN8lDg2/T+wHy4q+MO9P4wHwnMzBTsl2Tv7o/IoA4GbgNckWRm2V7ApQtsM/sTgd8Hbp9kn/4ZHnozGa8BvpTk68Crq+q0efZ5d3rP24xv0Pv9saF77Kf1VNX3k1zTt
/0u9XazU28EJoA7dPvqP8/mylm3b5zj/qAn3e9Se1X9JMml7Dpbt9Dz2W/BMUlyV+AtwK8A+3WPfW8J+59Lf30HAx9O8pNZy35Mbzwu71v3XOC+XXg8jN5M6au7Q4MP4ZbDz4O4pu919H16s6Lz6X8trgPoDt2+FngavdnJmX6sB3Z2txcb/5l2D6Z3+PnaWY/vDfzTYp2ZLcnD6P0n6qlV9ZW+tv5uZlmSPwU+tZR9S/NxJkxjpaq+BJxM73DVjAMz668lvcOD31qBtn5C73/3x9CbBTutm/WC3qfz7gc8tDvcN3N4J7faEdxAL4jMmB0uLqV3KGZ9VR3Q/dypqh7Abqqqr1bVMfQOB/0ZcGqSO3LrWSzoPV8Hz7p/T3qH464ErqA3WwD0vgIA+E/9zfXd/2vgS8B9uufnfzL3c7MSdqm9ey0cxK5hZa4+z2exMXldt78Hdn37bXbtW39bu4x/F1Du0rdO/zaXAk+Y1f4BVXX7quoPYFTV9+kF3BcBF3Wzlf8M/D7wH1V19eBdXzG/Re8Q6+PoHd7c2C1fzmvgUuDrfc/FflX1xEF30J1f9hHgv1TVp/sevpClvT6kgRnCtKYl+YXuROF7dPcPoheKZh9auyvwwiS3SfI0eufpfGyA3V9J7/ynhfwDvcNdx3a3Z+xH73/Q13bnj7xygX18Hnh0knt2h2P+aOaBqroCOAP4iyR36k7IvleS3T28RZLfTnKXLkzOzCL8GPgOvZmJ2X0/BXhJkp9Lso5bzqu5md75OU9O8ojuPKtXs/gf0/2A64Dpbvbyd3e3Pwt4P3BUkiOS3IZeQP4BvSCyZAOMyX70DtNdm+RA4A/6dtH/uvoKvdnKo7r6XgHcbpEy3g68NsnB8NMPhRy9wPpnAb/HLYcep/ruz2WQ1/9y7UdvDK6hF0D/dDf29S/Adel9uGLfJHsnOSQDfk1Neh+u+QTwgqr66Byr/B3w3CQ/381wv5zeJ7Kl3WYI01p3Pb0T789PcgO98HURvT+0M86ndy7H1fQOgTy1qvoPl83lVcC70vtE3ZzfC1RV59Obybg7vfOUZryJ3rkjV3c1fWK+RqrqTHrnuFxIb8ai/xf8s+md1PxFeoe1TqV3AvzuOhK4OMk0vZP0n1lVN3UzJ68Fzun6/jDgJHrnl50NfB24CXhBV//F3e0t9GbFrqd3Ht5CXxPyMnqzIdfTO3/nfQusu1uq6sv0ZqPeSm88nkzvxPgf7sZuFxqTVwO/TO+w2un0PsAx2+uAV3TP7cu6c9yeD7yD3uzcDcBiX4r6ZnozN2ckuZ7ea+yhC6x/Fr3gc/Y89+fyKhZ5/e+Gd9M7RHw5vedwofMRF9Qd3n8yvUOtX6c3xu+gN8M2iJfSm3l8Z/eJyekkPz0xv6pO6uo9v6v5B/Q+GCHttux6qow0XpIcB/xOVT2qdS17im6m7Fp6hxq/3roeSVqtnAmTtNuSPDnJHbpzyt4AbAd2tK1KklY3Q5iklXA0vRPgv0Xv0O8zy2l2SVqQhyMlSZIacCZMkiSpAUOYJElSA2viG/PXr19fGzdubF3GqnTDDTdwxzvesXUZGhLHd7w5vuPN8R1/843xtm3brq6q/i9dvpU1EcI2btzI1q1bW5exKk1NTTE5Odm6DA2J4zveHN/x5viOv/nGOMk3br32rXk4UpIkqQFDmCRJUgOGMEmSpAYMYZIkSQ0YwiRJkhowhEmSJDVgCJMkSWrAECZJktSAIUySJKkBQ5gkSVIDhjBJkqQGDGGSJEkNGMIkSZIaMIRJkiQ1YAiTJElqwBAmSZLUgCFMkiSpAUOYJElSA4YwSZKkBoYWwpKclOSqJBfNWnZYkvOSfD7J1iQPGVb7kiRJq9kwZ8JOBo7sW/bnwKur6jDgT7r7kiRJe5yhhbCqOhv4bv9i4E7d7f2Bbw2rfUmSpNVsnxG392Lgk0neQC8APmLE7UuSJK0Kqarh7TzZCJxWVYd0998CnFVVH0zydGBzVT1unm03A5sBNmzYcPiWLVuGVudaNj09zbp161qXsSptv3xn6xKW5dAD9//pbcd3vDm+483xHX/zjfGmTZu2VdXEYtuPOoTtBA6oqkoSYGdV3WmBXQAwMTFRW7duHVqda9nU1BSTk5Oty1iVNh5/eusSlmXHCUf99LbjO94c3/Hm+I6/+cY4yUAhbNRfUfEt4DHd7ccCXx1x+5IkSavC0M4JS3IKMAmsT3IZ8ErgvwJvTrIPcBPd4UZJkqQ9zdBCWFUdM89Dhw+rTUmSpLXCb8yXJElqwBAmSZLUgCFMkiSpAUOYJElSA4YwSZKkBgxhkiRJDRjCJEmSGjCESZIkNWAIkyRJasAQJkmS1IAhTJIkqQFDmCRJUgOGMEmSpAYMYZIkSQ0YwiRJkhowhEmSJDVgCJMkSWrAECZJktTA0EJYkpOSXJXkor7lL0jy5SQXJ/nzYbUvSZK0mg1zJuxk4MjZC5JsAo4GHlhVDwDeMMT2JUmSVq2hhbCqOhv4bt/i3wVOqKofdOtcNaz2JUmSVrNRnxN2X+BXkpyf5KwkDx5x+5IkSatCqmp4O082AqdV1SHd/YuAzwAvAh4MvA/4+ZqjiCSbgc0AGzZsOHzLli1Dq3Mtm56eZt26da3LWJW2X76zdQnLcuiB+//0tuM73hzf8eb4jr/5xnjTpk3bqmpise33GUpV87sM+FAXuv4lyU+A9cB3+lesqhOBEwEmJiZqcnJylHWuGVNTU/jczO24409vXcKy7Dh28qe3Hd/x5viON8d3/O3uGI/6cOQ/Ao8FSHJf4LbA1SOuQZIkqbmhzYQlOQWYBNYnuQx4JXAScFJ3WPKHwHPmOhQpSZI07oYWwqrqmHke+u1htSlJkrRW+I35kiRJDRjCJEmSGjCESZIkNWAIkyRJasAQJkmS1IAhTJIkqQFDmCRJUgOGMEmSpAYMYZIkSQ2M+gLeWiU2rtWLW59wVOsSJElaEc6ESZIkNWAIkyRJasAQJkmS1IAhTJIkqQFDmCRJUgOGMEmSpAYMYZIkSQ0YwiRJkhowhEmSJDUwtBCW5KQkVyW5aI7HXpakkqwfVvuSJEmr2TBnwk4GjuxfmOQg4FeBbw6xbUmSpFVtaCGsqs4GvjvHQ/8X+EOghtW2JEnSajfSc8KSPAW4vKq+MMp2JUmSVptUDW9CKslG4LSqOiTJHYDPAo+vqp1JdgATVXX1PNtuBjYDbNiw4fAtW7YMrc61bHp6mnXr1i15u+2X7xxCNcN36IH7D7zuOPRxueOrtcHxHW+O7/ibb4w3bdq0raomFtt+lCHsUODTwPe7h+8BfAt4SFV9e6H9TExM1NatW4dW51o2NTXF5OTkkrfbePzpK1/MCOw44aiB1x2HPi53fLU2OL7jzfEdf/ONcZKBQtg+wyhqLlW1HbjrzP3FZsIkSZLG2TC/ouIU4FzgfkkuS/K8YbUlSZK01gxtJqyqjlnk8Y3DaluSJGm18xvzJUmSGjCESZIkNWAIkyRJasAQJkmS1IAhTJIkqQFDmCRJUgOGMEmSpAYMYZIkSQ0YwiRJkhowhEmSJDVg
CJMkSWrAECZJktSAIUySJKkBQ5gkSVIDhjBJkqQGDGGSJEkNGMIkSZIaMIRJkiQ1MLQQluSkJFcluWjWstcn+VKSC5N8OMkBw2pfkiRpNRvmTNjJwJF9y84EDqmqBwJfAf5oiO1LkiStWkMLYVV1NvDdvmVnVNXN3d3zgHsMq31JkqTVrOU5Yf8F+HjD9iVJkppJVQ1v58lG4LSqOqRv+R8DE8Bv1DwFJNkMbAbYsGHD4Vu2bBlanWvZ9PQ069atW/J22y/fOYRqhu/QA/cfeN1x6ONyx1drg+M73hzf8TffGG/atGlbVU0stv0+Q6lqAUmeAzwJOGK+AAZQVScCJwJMTEzU5OTkaApcY6ampljOc3Pc8aevfDEjsOPYyYHXHYc+Lnd8tTY4vuPN8R1/uzvGIw1hSY4EXg48pqq+P8q2JUmSVpNhfkXFKcC5wP2SXJbkecDbgP2AM5N8Psnbh9W+JEnSaja0mbCqOmaOxe8cVnuSJElrid+YL0mS1IAhTJIkqQFDmCRJUgOGMEmSpAYMYZIkSQ0YwiRJkhowhEmSJDVgCJMkSWrAECZJktSAIUySJKkBQ5gkSVIDA4WwJI8cZJkkSZIGM+hM2FsHXCZJkqQB7LPQg0keDjwCuEuS35/10J2AvYdZmCRJ0jhbMIQBtwXWdevtN2v5dcBTh1WUJEnSuFswhFXVWcBZSU6uqm+MqCZJkqSxt9hM2IzbJTkR2Dh7m6p67DCKkiRJGneDhrAPAG8H3gH8eHjlSJIk7RkGDWE3V9VfD7USSZKkPcigX1Hx0STPT3K3JD8z87PQBklOSnJVkotmLfuZJGcm+Wr37513q3pJkqQ1atAQ9hzgD4B/BrZ1P1sX2eZk4Mi+ZccDn66q+wCf7u5LkiTtcQY6HFlVP7fUHVfV2Uk29i0+Gpjsbr8LmAJevtR9S5IkrXUDhbAkz55reVW9e4ntbaiqK7ptr0hy1yVuL0mSNBZSVYuvlMy+RNHtgSOAC6pqwS9s7WbCTquqQ7r711bVAbMe/15VzXleWJLNwGaADRs2HL5ly5ZF69wTTU9Ps27duiVvt/3ynUOoZvgOPXD/gdcdhz4ud3y1Nji+483xHX/zjfGmTZu2VdXEYtsPejjyBbPvJ9kf+PtBi5zlyiR362bB7gZctUCbJwInAkxMTNTk5OQymht/U1NTLOe5Oe7401e+mBHYcezkwOuOQx+XO75aGxzf8eb4jr/dHeNBT8zv933gPsvY7iP0TvKn+/f/LbN9SZKkNW3Qc8I+Cswct9wb+EXg/Ytscwq9k/DXJ7kMeCVwAvD+JM8Dvgk8bXllS5IkrW2DflnrG2bdvhn4RlVdttAGVXXMPA8dMWCbkiRJY2ugw5Hdhby/BOwH3Bn44TCLkiRJGncDhbAkTwf+hd7hw6cD5ydZ8JORkiRJmt+ghyP/GHhwVV0FkOQuwKeAU4dVmCRJ0jgb9NORe80EsM41S9hWkiRJfQadCftEkk8Cp3T3nwF8bDglSZIkjb8FQ1iSe9O71NAfJPkN4FFAgHOB946gPkmSpLG02CHFNwHXA1TVh6rq96vqJfRmwd407OIkSZLG1WIhbGNVXdi/sKq2AhuHUpEkSdIeYLEQdvsFHtt3JQuRJEnakywWwv41yX/tX9hddmjbcEqSJEkaf4t9OvLFwIeTHMstoWsCuC3w68MsTJIkaZwtGMKq6krgEUk2AYd0i0+vqs8MvTJJkqQxNtD3hFXVZ4HPDrkWSZKkPYbfei9JktSAIUySJKkBQ5gkSVIDhjBJkqQGDGGSJEkNNAlhSV6S5OIkFyU5JclC38wvSZI0dkYewpIcCLwQmKiqQ4C9gWeOug5JkqSWWh2O3AfYN8k+wB2AbzWqQ5IkqYmRh7Cquhx4A/BN4ApgZ1WdMeo6JEmSWkpVjbbB5M7AB4FnANcCHwBOrar39K23GdgMsGHDhsO3bNky0jrXiunpadatW7fk7bZfvnMI1QzfoQfuP/C649DHxcZ3HPq4J1vu+1drg+M7/uYb402bNm2rqonFtm8Rwp4GHFlVz+vuPxt4WFU9f75tJiYmauvWraMqcU2ZmppicnJyydttPP70lS9mBHaccNTA645DHxcb33Ho455sue9frQ2O7/ibb4yTDBTCWpwT9k3gYUnukCTAEcAlDeqQJElqpsU5YecDpwIXANu7Gk4cdR2SJEkt7dOi0ap6JfDKFm1LkiStBn5jviRJUgOGMEmSpAYMYZIkSQ0YwiRJkhowhEmSJDVgCJMkSWrAECZJktSAIUySJKkBQ5gkSVIDhjBJkqQGDGGSJEkNGMIkSZIaMIRJkiQ1YAiTJElqwBAmSZLUgCFMkiSpAUOYJElSA4YwSZKkBgxhkiRJDTQJYUkOSHJqki8luSTJw1vUIUmS1Mo+jdp9M/CJqnpqktsCd2hUhyRJUhMjD2FJ7gQ8GjgOoKp+CPxw1HVIkiS1lKoabYPJYcCJwBeBBwHbgBdV1Q19620GNgNs2LDh8C1btoy0zrVienqadevWLXm77ZfvHEI1w3fogfsPvO449HGx8R2HPu7Jlvv+1drg+I6/+cZ406ZN26pqYrHtW4SwCeA84JFVdX6SNwPXVdX/mm+biYmJ2rp168hqXEumpqaYnJxc8nYbjz995YsZgR0nHDXwuuPQx8XGdxz6uCdb7vtXa4PjO/7mG+MkA4WwFifmXwZcVlXnd/dPBX65QR2SJEnNjDyEVdW3gUuT3K9bdAS9Q5OSJEl7jFafjnwB8N7uk5FfA57bqA5JkqQmmoSwqvo8sOixUkmSpHHlN+ZLkiQ1YAiTJElqwBAmSZLUgCFMkiSpAUOYJElSA4YwSZKkBgxhkiRJDRjCJEmSGmj1jfmSNJA94SLl9nH18mLzGiZnwiRJkhowhEmSJDVgCJMkSWrAECZJktSAIUySJKkBQ5gkSVIDhjBJkqQGDGGSJEkNGMIkSZIaaBbCkuyd5N+SnNaqBkmSpFZazoS9CLikYfuSJEnNNAlhSe4BHAW8o0X7kiRJrbWaCXsT8IfATxq1L0mS1FSqarQNJk8CnlhVz08yCbysqp40x3qbgc0AGzZsOHzLli0jrXOtmJ6eZt26dUvebvvlO4dQzfAdeuD+A687Dn1cbHzHoY+LGec+zozvOPdxxp7Yxw37wpU3rnRFK28pfdSu5vsdvWnTpm1VNbHY9i1C2OuAZwE3A7cH7gR8qKp+e75tJiYmauvWrSOqcG2ZmppicnJyydttPP70lS9mBHaccNTA645DHxcb33Ho42LGuY8z4zvOfZyxJ/bxpYfezF9s32elS1pxS+mjdjXf7+gkA4WwkR+OrKo/qqp7VNVG4JnAZxYKYJIkSePI7wmTJElqoOk8aVVNAVMta5AkSWrBmTBJkqQGDGGSJEkNGMIkSZIaMIRJkiQ1YAiTJElqwBAmSZLUgCFMkiSpAUOYJElSA4YwSZKkBgxhkiRJDRjCJEmSGjCESZIkNWAIkyRJasAQJkmS1IAhTJIkqQFDmCRJUgOGMEmSpAYMYZIkSQ0YwiRJkhoYeQhLclCSzya
5JMnFSV406hokSZJa26dBmzcDL62qC5LsB2xLcmZVfbFBLZIkSU2MfCasqq6oqgu629cDlwAHjroOSZKkllJV7RpPNgJnA4dU1XV9j20GNgNs2LDh8C1btoy8vrVgenqadevWLXm77ZfvHEI1w3fogfsPvO449HGx8R2HPi5mnPs4M77j3McZe2IfN+wLV9640hWtvKX0Ubua73f0pk2btlXVxGLbNwthSdYBZwGvraoPLbTuxMREbd26dTSFrTFTU1NMTk4uebuNx5++8sWMwI4Tjhp43XHo42LjOw59XMw493FmfMe5jzP2xD6+9NCb+YvtLc76WZql9FG7mu93dJKBQliTT0cmuQ3wQeC9iwUwSZKkcdTi05EB3glcUlVvHHX7kiRJq0GLmbBHAs8CHpvk893PExvUIUmS1MzID1ZX1eeAjLpdSZKk1cRvzJckSWrAECZJktSAIUySJKkBQ5gkSVIDhjBJkqQGDGGSJEkNGMIkSZIaMIRJkiQ1YAiTJElqwBAmSZLUgCFMkiSpAUOYJElSA4YwSZKkBgxhkiRJDRjCJEmSGjCESZIkNWAIkyRJasAQJkmS1ECTEJbkyCRfTvLvSY5vUYMkSVJLIw9hSfYG/hJ4AnB/4Jgk9x91HZIkSS21mAl7CPDvVfW1qvohsAU4ukEdkiRJzbQIYQcCl866f1m3TJIkaY+Rqhptg8nTgF+rqt/p7j8LeEhVvaBvvc3A5u7u/YAvj7TQtWM9cHXrIjQ0ju94c3zHm+M7/uYb44Or6i6LbbzPytezqMuAg2bdvwfwrf6VqupE4MRRFbVWJdlaVROt69BwOL7jzfEdb47v+NvdMW5xOPJfgfsk+bkktwWeCXykQR2SJEnNjHwmrKpuTvJ7wCeBvYGTquriUdchSZLUUovDkVTVx4CPtWh7DHnIdrw5vuPN8R1vju/4260xHvmJ+ZIkSfKyRZIkSU0YwtaAxS7zlOS4JN9J8vnu53da1KnlSXJSkquSXDTP40nylm78L0zyy6OuUcs3wPhOJtk56/37J6OuUcuX5KAkn01ySZKLk7xojnV8D69RA47vst/DTc4J0+BmXebpV+l9vce/JvlIVX2xb9X3VdXvjbxArYSTgbcB757n8ScA9+l+Hgr8dfev1oaTWXh8Af6pqp40mnK0wm4GXlpVFyTZD9iW5My+39G+h9euQcYXlvkediZs9fMyT2Ouqs4GvrvAKkcD766e84ADktxtNNVpdw0wvlrDquqKqrqgu309cAm3vgqM7+E1asDxXTZD2Oo36GWefrOb5j41yUFzPK61y0t9jb+HJ/lCko8neUDrYrQ8STYCvwSc3/eQ7+ExsMD4wjLfw4aw1S9zLOv/SOtHgY1V9UDgU8C7hl6VRmmQ14DCONToAAAFK0lEQVTWrgvoXeLkQcBbgX9sXI+WIck64IPAi6vquv6H59jE9/Aassj4Lvs9bAhb/Ra9zFNVXVNVP+ju/i1w+Ihq02gMdKkvrU1VdV1VTXe3PwbcJsn6xmVpCZLcht4f6PdW1YfmWMX38Bq22PjuznvYELb6LXqZp75zC55C75i1xsdHgGd3n7B6GLCzqq5oXZRWRpKfTZLu9kPo/V6+pm1VGlQ3du8ELqmqN86zmu/hNWqQ8d2d97Cfjlzl5rvMU5LXAFur6iPAC5M8hd6nOL4LHNesYC1ZklOASWB9ksuAVwK3Aaiqt9O7usQTgX8Hvg88t02lWo4BxvepwO8muRm4EXhm+S3aa8kjgWcB25N8vlv2P4F7gu/hMTDI+C77Pew35kuSJDXg4UhJkqQGDGGSJEkNGMIkSZIaMIRJkiQ1YAiTJElqwBAmabcl+XGSz8/62biMfRyQ5PkrX93yJNkx84WLSf65+3djkt9aof1vTHLRSuxL0tpkCJO0Em6sqsNm/exYxj4OAJYcwpLsvYy2lqSqHtHd3AisSAiTJEOYpKFIsneS1yf51+7i8v+tW74uyaeTXJBke5Kju01OAO7VzaS9PslkktNm7e9tSY7rbu9I8idJPgc8Lcm9knwiybYk/5TkF+ao5zGzZur+Lcl+XRtnJ/lwki8meXuSW/1eTDI9q8Zf6fbxkr513pfkibPun5zkN7sZr3/q+ntBkkfQJ8lxSd426/5pSSa7249Pcm637Qe6a9hJGgN+Y76klbDvrG+T/npV/TrwPHqXZ3lwktsB5yQ5A7gU+PWquq473Hdeko8AxwOHVNVhADMhZAE3VdWjunU/Dfz3qvpqkocCfwU8tm/9lwH/o6rO6YLMTd3yhwD3B74BfAL4DeDUedo8HnhZVT1pjse2AM8APtZdYuwI4HfpXbz5V6vqpiT3AU4BJhbpG12/1gOvAB5XVTckeTnw+8BrBtle0upmCJO0Em6cCU+zPB54YJKndvf3B+5D72LGf5rk0cBPgAOBDcto833Qm1kDHgF8oLt8G8Dt5lj/HOCNSd4LfKiqLuvW/5eq+lq3r1OARzF/CFvIx4G3dIHzSODsqroxyf7A25IcBvwYuO8S9vkwegHxnK7W2wLnLqM2SauQIUzSsAR4QVV9cpeFvUOKdwEOr6ofJdkB3H6O7W9m11Mm+te5oft3L+DaOULgLqrqhCSn07uG33lJHjfzUP+qC+1ngf3flGQK+DV6M2KndA+9BLgSeFBX601zbD5fXwOcWVXHLKcmSaub54RJGpZP0ruo7W0Aktw3yR3pzYhd1QWwTcDB3frXA/vN2v4bwP2T3K6bTTpirkaq6jrg60me1rWTJA/qXy/Jvapqe1X9GbAVmDlv7CFJfq47F+wZwOcW6FN/jf220Ls48690/afr7xVV9RN6FwKe64MEO4DDkuyV5CB6h0gBzgMemeTeXR/ukGQpM2mSVjFDmKRheQfwReCC7qsY/obe7Pt7gYkkW4FjgS8BVNU19A67XZTk9VV1KfB+4MJum39boK1jgecl+QJwMXD0HOu8uNv3F4Ab6R0+hN7hvROAi4CvAx9eoJ0LgZuTfKH/xPzOGcCjgU9V1Q+7ZX8FPCfJefQORd4wx3bndG1vB94AXABQVd8BjgNOSXIhvVB2qw8dSFqbUrWsmXdJWvO6k//nO9FekobKmTBJkqQGnAmTJElqwJkwSZKkBgxhkiRJDRjCJEmSGjCESZIkNWAIkyRJasAQJkmS1MD/B7dABDmm/sr4AAAAAElFTkSuQmCC\n", + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAmEAAAFNCAYAAABIc7ibAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvIxREBQAAIABJREFUeJzt3XucZGV95/HPF/CCDoLZ0YkiMom3REFJaO9Ge8QYFJVN4o0QFdfs7MaNt2gi2bjxsmsk0bjekhiiBI2GUVGzCl7AS0MkQDJDlAHxkugoIIKgDDSCiv72jzotPUVfqnu66umu+bxfr35N1alzzvN76qnq/s5zTtVJVSFJkqTR2qt1AZIkSXsiQ5gkSVIDhjBJkqQGDGGSJEkNGMIkSZIaMIRJkiQ1YAjTHi3JVJLf6W4fm+SMUbe7wvt9VZL3LPD4xUkmV7rd1SbJ/0lydZJvr9D+Hpnkq0mmk/znldjnarPY6z/JZJLLRlnTMCTZN8lHk+xM8oHW9WjPZgjTmpfkUUn+uful+t0k5yR58FL3U1XvrarHz9pvJbn3ylbbVlU9oKqmFlonycau7/uMqKwVleQg4KXA/avqZ1dot68B3lZV66rqH3dnR0l2JHncCtW1YvaE13/nqcAG4D9V1dOSHJXkc0muTfLtJH+bZL/ZGyR5XJILktyQ5NIkT29TusaNIUxrWpI7AacBbwV+BjgQeDXwg5Z1aX4jCHcHA9dU1VVL3XCB2g4GLt6tqlbIWg3Hq8jBwFeq6ubu/v7A/wHuDvwicA/g9TMrJ7k/8A/AH3frHgZsG2XBGl+GMK119wWoqlOq6sdVdWNVnVFVFwIkOa6bGXtrN1P2pSRHzLWjbt3PdbfP7hZ/oTsE9Yy+dW/X/c/5kFnL7pLkxiR3TXLnJKcl+U6S73W37zFPu7scQuyfiUqyf5J3JrkiyeXdoba9F3hObpvk3Umu7w4/Tsza909nYZI8JMnWJNcluTLJG7vVZvp+bdf3hyfZK8krknwjyVXd/veftd9nd49dk+R/9bXzqiSnJnlPkuuA47q2z+2ewyuSvC3JbWftr5I8vzsEeH2S/53kXt021yV5/+z1Z233OOBM4O5d7Sd3y5/SPRfXpnco+Bf7npOXJ7kQuKE/5CT5D+DngY92+7zdQmPS1fmZ7rm4Osl7kxzQPfb3wD1n7esPM8dhvgGev72SHJ/kP7p23p/kZ+Z6MSQ5K8lvdrcf1T23T5x5vpJ8vrs90Os/yUu718AVSZ47V5vdelPduJ3TjeEZSdbPevwD6c087UxydpIHzHrs5CR/leTjXfvnJPnZJG9K7/30pSS/NGv9uyf5YHrvt68neeE8Nb0a+BPgGd1+n1dV/1BVn6iq71fV94C/BR45a7NXAH9TVR+vqpur6pqq+o/5+i0thSFMa91XgB8neVeSJyS58xzrPBT4GrAeeCXwofn+YM2oqkd3Nx/UHYJ6X9/jPwA+BBwza/HTgbO6GZi9gL+j97/uewI3Am9bcu963gXcDNwb+CXg8cBC55M9BdgCHAB8ZIF23wy8uaruBNwLeH+3fKbvB3R9Pxc4rvvZRC+QrJvZb3ozBX8FHAvcjd5swYF9bR0NnNrV9F7gx8BL6I3Jw4EjgOf3bXMkcDjwMOAPgRO7Ng4CDmHX5x6AqvoU8ATgW13txyW5L3AK8GLgLsDH6IWg2SHuGOCors839+3zXsA3gSd3+/wBC49JgNdxy8zKQcCrun09q29ff97fh3n0P38vBP4z8Jiune8BfznPtmcBk93tR9N7Lzxm1v2z+jdY4PX/s9wyvs8D/nKe99yM3wKeC9wVuC3wslmPfRy4T/fYBV2/Zns6vQC0nt7M9rndeuvpPRdvBEiyF/BR4AtdXUcAL07ya3P065XAnwLv6/r1zjlqfjS7zno+rGtnexc837PY7w9pUIYwrWlVdR3wKKDo/Q/2O0k+kmTDrNWuAt5UVT/q/ph8md4f3N31D+waBH6rW0b3v+UPdv+7vh54Lbf84RtY148nAC+uqhu6gPd/gWcusNnnqupjVfVj4O+BB82z3o+AeydZX1XTVXXeAvs8FnhjVX2tqqaBPwKe2c0aPRX4aFV9rqp+SG+mof+itOdW1T9W1U+62cptVXVeN7OwA/gbbv38/FlVXVdVFwMXAWd07e+k9wf8lxjMM4DTq+rMqvoR8AZgX+ARs9Z5S1VdWlU3Lrazxcakqv69a+sHVfUdemFhyWPfZ5fnD/hvwB9X1WVdKHwV8NT+WbzOWewaul436/5jmCOELeBHwGu699LHgGngfgus/3dV9ZWu5vfTO5QHQFWdVFXXz6r/QZk1uwp8uHud3AR8GLipqt7dva7fxy3j/2DgLlX1mqr6YVV9jd7vgoXeI3NK8qvAc+i9hmfcA3gW8Jv0QuO+9E5/kHab5xZozauqS+jN0pDkF4D3AG/iloB0ee16pfpv0Js92F2fAfZN8lDg2/T+wHy4q+MO9P4wHwnMzBTsl2Tv7o/IoA4GbgNckWRm2V7ApQtsM/sTgd8Hbp9kn/4ZHnozGa8BvpTk68Crq+q0efZ5d3rP24xv0Pv9saF77Kf1VNX3k1zTt/0u9XazU28EJoA7dPvqP8/mylm3b5zj/qAn3e9Se1X9JMml7Dpbt9Dz2W/BMUlyV+AtwK8A+3WPfW8J+59Lf30HAx9O8pNZy35Mbzwu71v3XOC+XXg8jN5M6au7Q4MP4ZbDz4O4pu919H16s6Lz6X8trgPoDt2+FngavdnJmX6sB3Z2txcb/5l2D6Z3+PnaWY/vDfzTYp2ZLcnD6P0n6qlV9ZW+tv5uZlmSPwU+tZR9S/NxJkxjpaq+BJxM73DVjAMz668lvcOD31qBtn5C73/3x9CbBTutm/WC3qfz7gc8tDvcN3N4J7faEdxAL4jMmB0uLqV3KGZ9VR3Q/dypqh7Abqqqr1bVMfQOB/0ZcGqSO3LrWSzoPV8Hz7p/T3qH464ErqA3WwD0vgIA+E/9zfXd/2vgS8B9uufnfzL3c7MSdqm9ey0cxK5hZa4+z2exMXldt78Hdn37bXbtW39bu4x/F1Du0rdO/zaXAk+Y1f4BVXX7quoPYFTV9+kF3BcBF3Wzlf8M/D7wH1V19eBdXzG/Re8Q6+PoHd7c2C1fzmvgUuDrfc/FflX1xEF30J1f9hHgv1TVp/sevpClvT6kgRnCtKYl+YXuROF7dPcPoheKZh9auyvwwiS3SfI0eufpfGyA3V9J7/ynhfwDvcNdx3a3Z+xH73/Q13bnj7xygX18Hnh0knt2h2P+aOaBqroCOAP4iyR36k7IvleS3T28RZLfTnKXLkzOzCL8GPgOvZmJ2X0/BXhJkp9Lso5bzqu5md75OU9O8ojuPKtXs/gf0/2A64Dpbvbyd3e3Pwt4P3BUkiOS3IZeQP4BvSCyZAOMyX70DtNdm+RA4A/6dtH/uvoKvdnKo7r6XgHcbpEy3g68NsnB8NMPhRy9wPpnAb/HLYcep/ruz2WQ1/9y7UdvDK6hF0D/dDf29S/Adel9uGLfJH
snOSQDfk1Neh+u+QTwgqr66Byr/B3w3CQ/381wv5zeJ7Kl3WYI01p3Pb0T789PcgO98HURvT+0M86ndy7H1fQOgTy1qvoPl83lVcC70vtE3ZzfC1RV59Obybg7vfOUZryJ3rkjV3c1fWK+RqrqTHrnuFxIb8ai/xf8s+md1PxFeoe1TqV3AvzuOhK4OMk0vZP0n1lVN3UzJ68Fzun6/jDgJHrnl50NfB24CXhBV//F3e0t9GbFrqd3Ht5CXxPyMnqzIdfTO3/nfQusu1uq6sv0ZqPeSm88nkzvxPgf7sZuFxqTVwO/TO+w2un0PsAx2+uAV3TP7cu6c9yeD7yD3uzcDcBiX4r6ZnozN2ckuZ7ea+yhC6x/Fr3gc/Y89+fyKhZ5/e+Gd9M7RHw5vedwofMRF9Qd3n8yvUOtX6c3xu+gN8M2iJfSm3l8Z/eJyekkPz0xv6pO6uo9v6v5B/Q+GCHttux6qow0XpIcB/xOVT2qdS17im6m7Fp6hxq/3roeSVqtnAmTtNuSPDnJHbpzyt4AbAd2tK1KklY3Q5iklXA0vRPgv0Xv0O8zy2l2SVqQhyMlSZIacCZMkiSpAUOYJElSA2viG/PXr19fGzdubF3GqnTDDTdwxzvesXUZGhLHd7w5vuPN8R1/843xtm3brq6q/i9dvpU1EcI2btzI1q1bW5exKk1NTTE5Odm6DA2J4zveHN/x5viOv/nGOMk3br32rXk4UpIkqQFDmCRJUgOGMEmSpAYMYZIkSQ0YwiRJkhowhEmSJDVgCJMkSWrAECZJktSAIUySJKkBQ5gkSVIDhjBJkqQGDGGSJEkNGMIkSZIaMIRJkiQ1YAiTJElqwBAmSZLUgCFMkiSpAUOYJElSA4YwSZKkBoYWwpKclOSqJBfNWnZYkvOSfD7J1iQPGVb7kiRJq9kwZ8JOBo7sW/bnwKur6jDgT7r7kiRJe5yhhbCqOhv4bv9i4E7d7f2Bbw2rfUmSpNVsnxG392Lgk0neQC8APmLE7UuSJK0Kqarh7TzZCJxWVYd0998CnFVVH0zydGBzVT1unm03A5sBNmzYcPiWLVuGVudaNj09zbp161qXsSptv3xn6xKW5dAD9//pbcd3vDm+483xHX/zjfGmTZu2VdXEYtuPOoTtBA6oqkoSYGdV3WmBXQAwMTFRW7duHVqda9nU1BSTk5Oty1iVNh5/eusSlmXHCUf99LbjO94c3/Hm+I6/+cY4yUAhbNRfUfEt4DHd7ccCXx1x+5IkSavC0M4JS3IKMAmsT3IZ8ErgvwJvTrIPcBPd4UZJkqQ9zdBCWFUdM89Dhw+rTUmSpLXCb8yXJElqwBAmSZLUgCFMkiSpAUOYJElSA4YwSZKkBgxhkiRJDRjCJEmSGjCESZIkNWAIkyRJasAQJkmS1IAhTJIkqQFDmCRJUgOGMEmSpAYMYZIkSQ0YwiRJkhowhEmSJDVgCJMkSWrAECZJktTA0EJYkpOSXJXkor7lL0jy5SQXJ/nzYbUvSZK0mg1zJuxk4MjZC5JsAo4GHlhVDwDeMMT2JUmSVq2hhbCqOhv4bt/i3wVOqKofdOtcNaz2JUmSVrNRnxN2X+BXkpyf5KwkDx5x+5IkSatCqmp4O082AqdV1SHd/YuAzwAvAh4MvA/4+ZqjiCSbgc0AGzZsOHzLli1Dq3Mtm56eZt26da3LWJW2X76zdQnLcuiB+//0tuM73hzf8eb4jr/5xnjTpk3bqmpise33GUpV87sM+FAXuv4lyU+A9cB3+lesqhOBEwEmJiZqcnJylHWuGVNTU/jczO24409vXcKy7Dh28qe3Hd/x5viON8d3/O3uGI/6cOQ/Ao8FSHJf4LbA1SOuQZIkqbmhzYQlOQWYBNYnuQx4JXAScFJ3WPKHwHPmOhQpSZI07oYWwqrqmHke+u1htSlJkrRW+I35kiRJDRjCJEmSGjCESZIkNWAIkyRJasAQJkmS1IAhTJIkqQFDmCRJUgOGMEmSpAYMYZIkSQ2M+gLeWiU2rtWLW59wVOsSJElaEc6ESZIkNWAIkyRJasAQJkmS1IAhTJIkqQFDmCRJUgOGMEmSpAYMYZIkSQ0YwiRJkhowhEmSJDUwtBCW5KQkVyW5aI7HXpakkqwfVvuSJEmr2TBnwk4GjuxfmOQg4FeBbw6xbUmSpFVtaCGsqs4GvjvHQ/8X+EOghtW2JEnSajfSc8KSPAW4vKq+MMp2JUmSVptUDW9CKslG4LSqOiTJHYDPAo+vqp1JdgATVXX1PNtuBjYDbNiw4fAtW7YMrc61bHp6mnXr1i15u+2X7xxCNcN36IH7D7zuOPRxueOrtcHxHW+O7/ibb4w3bdq0raomFtt+lCHsUODTwPe7h+8BfAt4SFV9e6H9TExM1NatW4dW51o2NTXF5OTkkrfbePzpK1/MCOw44aiB1x2HPi53fLU2OL7jzfEdf/ONcZKBQtg+wyhqLlW1HbjrzP3FZsIkSZLG2TC/ouIU4FzgfkkuS/K8YbUlSZK01gxtJqyqjlnk8Y3DaluSJGm18xvzJUmSGjCESZIkNWAIkyRJasAQJkmS1IAhTJIkqQFDmCRJUgOGMEmSpAYMYZIkSQ0YwiRJkhowhEmSJDVgCJMkSWrAECZJktSAIUySJKkBQ5gkSVIDhjBJkqQGDGGSJEkNGMIkSZIaMIRJkiQ1MLQQluSkJFcluWjWstcn+VKSC5N8OMkBw2pfkiRpNRvmTNjJwJF9y84EDqmqBwJfAf5oiO1LkiStWkMLYVV1NvDdvmVnVNXN3d3zgHsMq31JkqTVrOU5Yf8F+HjD9iVJkppJVQ1v58lG4LSqOqRv+R8DE8Bv1DwFJNkMbAbYsGHD4Vu2bBlanWvZ9PQ069atW/J22y/fOYRqhu/QA/cfeN1x6ONyx1drg+M73hzf8TffGG/atGlbVU0stv0+Q6lqAUmeAzwJOGK+AAZQVScCJwJMTEzU5OTkaApcY6ampljOc3Pc8aevfDEjsOPYyYHXHYc+Lnd8tTY4vuPN8R1/uzvGIw1hSY4EXg48pqq+P8q2JUmSVpNhfkXFKcC5wP2SXJbkecDbgP2AM5N8Psnbh9W+JEnSaja0mbCqOmaOxe8cVnuSJElrid+YL0mS1IAhTJIkqQFDmCRJUgOGMEmSpAYMYZIkSQ0YwiRJkhowhEmSJDVgCJMkSWrAECZJktSAIUySJKkBQ5gkSVIDA4WwJI8cZJkkSZIGM+hM2FsHXCZJkqQB7LPQg0keDjwCuEuS35/10J2AvYdZmCRJ0jhbMIQBtwXWdevtN2v5dcBTh1WUJEnSuFswhFXVWcBZSU6uqm+MqCZJkqSxt9hM2IzbJTkR2Dh7m6p67DCKkiRJGneDhrAPAG8H3gH8eHjlSJIk7RkGDWE3V9VfD7USSZKkPcigX1Hx0STPT3K3JD8z87PQBklOSnJVkotmLfuZJGcm+Wr37513q3pJkqQ1atAQ9hzgD4B/BrZ1P1sX2eZk4Mi+ZccDn66q+wCf7u5LkiTtcQY6HFlVP7fUHVfV2Uk29i0+Gpjsbr8LmAJevtR9S5Ikr
XUDhbAkz55reVW9e4ntbaiqK7ptr0hy1yVuL0mSNBZSVYuvlMy+RNHtgSOAC6pqwS9s7WbCTquqQ7r711bVAbMe/15VzXleWJLNwGaADRs2HL5ly5ZF69wTTU9Ps27duiVvt/3ynUOoZvgOPXD/gdcdhz4ud3y1Nji+483xHX/zjfGmTZu2VdXEYtsPejjyBbPvJ9kf+PtBi5zlyiR362bB7gZctUCbJwInAkxMTNTk5OQymht/U1NTLOe5Oe7401e+mBHYcezkwOuOQx+XO75aGxzf8eb4jr/dHeNBT8zv933gPsvY7iP0TvKn+/f/LbN9SZKkNW3Qc8I+Cswct9wb+EXg/Ytscwq9k/DXJ7kMeCVwAvD+JM8Dvgk8bXllS5IkrW2DflnrG2bdvhn4RlVdttAGVXXMPA8dMWCbkiRJY2ugw5Hdhby/BOwH3Bn44TCLkiRJGncDhbAkTwf+hd7hw6cD5ydZ8JORkiRJmt+ghyP/GHhwVV0FkOQuwKeAU4dVmCRJ0jgb9NORe80EsM41S9hWkiRJfQadCftEkk8Cp3T3nwF8bDglSZIkjb8FQ1iSe9O71NAfJPkN4FFAgHOB946gPkmSpLG02CHFNwHXA1TVh6rq96vqJfRmwd407OIkSZLG1WIhbGNVXdi/sKq2AhuHUpEkSdIeYLEQdvsFHtt3JQuRJEnakywWwv41yX/tX9hddmjbcEqSJEkaf4t9OvLFwIeTHMstoWsCuC3w68MsTJIkaZwtGMKq6krgEUk2AYd0i0+vqs8MvTJJkqQxNtD3hFXVZ4HPDrkWSZKkPYbfei9JktSAIUySJKkBQ5gkSVIDhjBJkqQGDGGSJEkNNAlhSV6S5OIkFyU5JclC38wvSZI0dkYewpIcCLwQmKiqQ4C9gWeOug5JkqSWWh2O3AfYN8k+wB2AbzWqQ5IkqYmRh7Cquhx4A/BN4ApgZ1WdMeo6JEmSWkpVjbbB5M7AB4FnANcCHwBOrar39K23GdgMsGHDhsO3bNky0jrXiunpadatW7fk7bZfvnMI1QzfoQfuP/C649DHxcZ3HPq4J1vu+1drg+M7/uYb402bNm2rqonFtm8Rwp4GHFlVz+vuPxt4WFU9f75tJiYmauvWraMqcU2ZmppicnJyydttPP70lS9mBHaccNTA645DHxcb33Ho455sue9frQ2O7/ibb4yTDBTCWpwT9k3gYUnukCTAEcAlDeqQJElqpsU5YecDpwIXANu7Gk4cdR2SJEkt7dOi0ap6JfDKFm1LkiStBn5jviRJUgOGMEmSpAYMYZIkSQ0YwiRJkhowhEmSJDVgCJMkSWrAECZJktSAIUySJKkBQ5gkSVIDhjBJkqQGDGGSJEkNGMIkSZIaMIRJkiQ1YAiTJElqwBAmSZLUgCFMkiSpAUOYJElSA4YwSZKkBgxhkiRJDTQJYUkOSHJqki8luSTJw1vUIUmS1Mo+jdp9M/CJqnpqktsCd2hUhyRJUhMjD2FJ7gQ8GjgOoKp+CPxw1HVIkiS1lKoabYPJYcCJwBeBBwHbgBdV1Q19620GNgNs2LDh8C1btoy0zrVienqadevWLXm77ZfvHEI1w3fogfsPvO449HGx8R2HPu7Jlvv+1drg+I6/+cZ406ZN26pqYrHtW4SwCeA84JFVdX6SNwPXVdX/mm+biYmJ2rp168hqXEumpqaYnJxc8nYbjz995YsZgR0nHDXwuuPQx8XGdxz6uCdb7vtXa4PjO/7mG+MkA4WwFifmXwZcVlXnd/dPBX65QR2SJEnNjDyEVdW3gUuT3K9bdAS9Q5OSJEl7jFafjnwB8N7uk5FfA57bqA5JkqQmmoSwqvo8sOixUkmSpHHlN+ZLkiQ1YAiTJElqwBAmSZLUgCFMkiSpAUOYJElSA4YwSZKkBgxhkiRJDRjCJEmSGmj1jfmSNJA94SLl9nH18mLzGiZnwiRJkhowhEmSJDVgCJMkSWrAECZJktSAIUySJKkBQ5gkSVIDhjBJkqQGDGGSJEkNGMIkSZIaaBbCkuyd5N+SnNaqBkmSpFZazoS9CLikYfuSJEnNNAlhSe4BHAW8o0X7kiRJrbWaCXsT8IfATxq1L0mS1FSqarQNJk8CnlhVz08yCbysqp40x3qbgc0AGzZsOHzLli0jrXOtmJ6eZt26dUvebvvlO4dQzfAdeuD+A687Dn1cbHzHoY+LGec+zozvOPdxxp7Yxw37wpU3rnRFK28pfdSu5vsdvWnTpm1VNbHY9i1C2OuAZwE3A7cH7gR8qKp+e75tJiYmauvWrSOqcG2ZmppicnJyydttPP70lS9mBHaccNTA645DHxcb33Ho42LGuY8z4zvOfZyxJ/bxpYfezF9s32elS1pxS+mjdjXf7+gkA4WwkR+OrKo/qqp7VNVG4JnAZxYKYJIkSePI7wmTJElqoOk8aVVNAVMta5AkSWrBmTBJkqQGDGGSJEkNGMIkSZIaMIRJkiQ1YAiTJElqwBAmSZLUgCFMkiSpAUOYJElSA4YwSZKkBgxhkiRJDRjCJEmSGjCESZIkNWAIkyRJasAQJkmS1IAhTJIkqQFDmCRJUgOGMEmSpAYMYZIkSQ0YwiRJkhoYeQhLclCSzya5JMnFSV406hokSZJa26dBmzcDL62qC5LsB2xLcmZVfbFBLZIkSU2MfCasqq6oqgu629cDlwAHjroOSZKkllJV7RpPNgJnA4dU1XV9j20GNgNs2LDh8C1btoy8vrVgenqadevWLXm77ZfvHEI1w3fogfsPvO449HGx8R2HPi5mnPs4M77j3McZe2IfN+wLV9640hWtvKX0Ubua73f0pk2btlXVxGLbNwthSdYBZwGvraoPLbTuxMREbd26dTSFrTFTU1NMTk4uebuNx5++8sWMwI4Tjhp43XHo42LjOw59XMw493FmfMe5jzP2xD6+9NCb+YvtLc76WZql9FG7mu93dJKBQliTT0cmuQ3wQeC9iwUwSZKkcdTi05EB3glcUlVvHHX7kiRJq0GLmbBHAs8CHpvk893PExvUIUmS1MzID1ZX1eeAjLpdSZKk1cRvzJckSWrAECZJktSAIUySJKkBQ5gkSVIDhjBJkqQGDGGSJEkNGMIkSZIaMIRJkiQ1YAiTJElqwBAmSZLUgCFMkiSpAUOYJElSA4YwSZKkBgxhkiRJDRjCJEmSGjCESZIkNWAIkyRJasAQJkmS1ECTEJbkyCRfTvLvSY5vUYMkSVJLIw9hSfYG/hJ4AnB/4Jgk9x91HZIkSS21mAl7CPDvVfW1qvohsAU4ukEdkiRJzbQIYQcCl866f1m3TJIkaY+Rqhptg8nTgF+rqt/p7j8LeEhVvaBvvc3A5u7u/YAvj7TQtWM9cHXrIjQ0ju94c3zHm+M7/uYb44Or6i6LbbzPytezqMuAg2bdvwfwrf6VqupE4MRRFbVWJdlaVROt69BwOL7jzfEdb47v+NvdMW5xOPJfgfsk+bkktwWeCXykQR2SJEnNjHwmrKpuTvJ7wCeBvYGTquriUdchSZLUUovDkVTVx4CPtWh7DHnIdrw5vuPN8R1v
ju/4260xHvmJ+ZIkSfKyRZIkSU0YwtaAxS7zlOS4JN9J8vnu53da1KnlSXJSkquSXDTP40nylm78L0zyy6OuUcs3wPhOJtk56/37J6OuUcuX5KAkn01ySZKLk7xojnV8D69RA47vst/DTc4J0+BmXebpV+l9vce/JvlIVX2xb9X3VdXvjbxArYSTgbcB757n8ScA9+l+Hgr8dfev1oaTWXh8Af6pqp40mnK0wm4GXlpVFyTZD9iW5My+39G+h9euQcYXlvkediZs9fMyT2Ouqs4GvrvAKkcD766e84ADktxtNNVpdw0wvlrDquqKqrqgu309cAm3vgqM7+E1asDxXTZD2Oo36GWefrOb5j41yUFzPK61y0t9jb+HJ/lCko8neUDrYrQ8STYCvwSc3/eQ7+ExsMD4wjLfw4aw1S9zLOv/SOtHgY1V9UDgU8C7hl6VRmmQ14DCONToAAAFK0lEQVTWrgvoXeLkQcBbgX9sXI+WIck64IPAi6vquv6H59jE9/Aassj4Lvs9bAhb/Ra9zFNVXVNVP+ju/i1w+Ihq02gMdKkvrU1VdV1VTXe3PwbcJsn6xmVpCZLcht4f6PdW1YfmWMX38Bq22PjuznvYELb6LXqZp75zC55C75i1xsdHgGd3n7B6GLCzqq5oXZRWRpKfTZLu9kPo/V6+pm1VGlQ3du8ELqmqN86zmu/hNWqQ8d2d97Cfjlzl5rvMU5LXAFur6iPAC5M8hd6nOL4LHNesYC1ZklOASWB9ksuAVwK3Aaiqt9O7usQTgX8Hvg88t02lWo4BxvepwO8muRm4EXhm+S3aa8kjgWcB25N8vlv2P4F7gu/hMTDI+C77Pew35kuSJDXg4UhJkqQGDGGSJEkNGMIkSZIaMIRJkiQ1YAiTJElqwBAmabcl+XGSz8/62biMfRyQ5PkrX93yJNkx84WLSf65+3djkt9aof1vTHLRSuxL0tpkCJO0Em6sqsNm/exYxj4OAJYcwpLsvYy2lqSqHtHd3AisSAiTJEOYpKFIsneS1yf51+7i8v+tW74uyaeTXJBke5Kju01OAO7VzaS9PslkktNm7e9tSY7rbu9I8idJPgc8Lcm9knwiybYk/5TkF+ao5zGzZur+Lcl+XRtnJ/lwki8meXuSW/1eTDI9q8Zf6fbxkr513pfkibPun5zkN7sZr3/q+ntBkkfQJ8lxSd426/5pSSa7249Pcm637Qe6a9hJGgN+Y76klbDvrG+T/npV/TrwPHqXZ3lwktsB5yQ5A7gU+PWquq473Hdeko8AxwOHVNVhADMhZAE3VdWjunU/Dfz3qvpqkocCfwU8tm/9lwH/o6rO6YLMTd3yhwD3B74BfAL4DeDUedo8HnhZVT1pjse2AM8APtZdYuwI4HfpXbz5V6vqpiT3AU4BJhbpG12/1gOvAB5XVTckeTnw+8BrBtle0upmCJO0Em6cCU+zPB54YJKndvf3B+5D72LGf5rk0cBPgAOBDcto833Qm1kDHgF8oLt8G8Dt5lj/HOCNSd4LfKiqLuvW/5eq+lq3r1OARzF/CFvIx4G3dIHzSODsqroxyf7A25IcBvwYuO8S9vkwegHxnK7W2wLnLqM2SauQIUzSsAR4QVV9cpeFvUOKdwEOr6ofJdkB3H6O7W9m11Mm+te5oft3L+DaOULgLqrqhCSn07uG33lJHjfzUP+qC+1ngf3flGQK+DV6M2KndA+9BLgSeFBX601zbD5fXwOcWVXHLKcmSaub54RJGpZP0ruo7W0Aktw3yR3pzYhd1QWwTcDB3frXA/vN2v4bwP2T3K6bTTpirkaq6jrg60me1rWTJA/qXy/Jvapqe1X9GbAVmDlv7CFJfq47F+wZwOcW6FN/jf220Ls48690/afr7xVV9RN6FwKe64MEO4DDkuyV5CB6h0gBzgMemeTeXR/ukGQpM2mSVjFDmKRheQfwReCC7qsY/obe7Pt7gYkkW4FjgS8BVNU19A67XZTk9VV1KfB+4MJum39boK1jgecl+QJwMXD0HOu8uNv3F4Ab6R0+hN7hvROAi4CvAx9eoJ0LgZuTfKH/xPzOGcCjgU9V1Q+7ZX8FPCfJefQORd4wx3bndG1vB94AXABQVd8BjgNOSXIhvVB2qw8dSFqbUrWsmXdJWvO6k//nO9FekobKmTBJkqQGnAmTJElqwJkwSZKkBgxhkiRJDRjCJEmSGjCESZIkNWAIkyRJasAQJkmS1MD/B7dABDmm/sr4AAAAAElFTkSuQmCC", "text/plain": [ "
" ] @@ -299,10 +299,9 @@ "source": [ "if INTERACTIVE:\n", " # create widget for interactive split value histogram\n", - " interact(render_histogram,\n", - " feature=gbm.feature_name())\n", + " interact(render_histogram, feature=gbm.feature_name())\n", "else:\n", - " render_histogram(feature='f26')" + " render_histogram(feature=\"f26\")" ] }, { @@ -324,9 +323,8 @@ "outputs": [], "source": [ "def render_tree(tree_index, show_info, precision=3):\n", - " show_info = None if 'None' in show_info else show_info\n", - " return lgb.create_tree_digraph(gbm, tree_index=tree_index,\n", - " show_info=show_info, precision=precision)" + " show_info = None if \"None\" in show_info else show_info\n", + " return lgb.create_tree_digraph(gbm, tree_index=tree_index, show_info=show_info, precision=precision)" ] }, { @@ -338,7 +336,7 @@ "outputs": [ { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAFfgAAAbSCAYAAADbl1DoAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvIxREBQAAIABJREFUeJzs3FuI1VX/x/G1tzOecizTiZDxgHlEs+mmUunGtAkiK7DDEN1kUXcFXYRQURAEFgR1U9BNJDKhdGEWaehNmZITpULBOB3wnI6Sh9Qy5/e/+P/h//A8/r57nrXTPTqv1+2btfZSghyd+VSKokgAAAAAAAAAAAAAAAAAAAAAAAAAAADAf6fa6AcAAAAAAAAAAAAAAAAAAAAAAAAAAADAlcjALwAAAAAAAAAAAAAAAAAAAAAAAAAAAGQw8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDPwCAAAAAAAAAAAAAAAAAAAAAAAAAABABgO/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMHALwAAAAAAAAAAAAAAAAAAAAAAAAAAAGQw8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDPwCAAAAAAAAAAAAAAAAAAAAAAAAAABABgO/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkKGp0Q/4P0WjHwAAAAAAAAAAAAAAAAAAAAAAAAAAAMCQ0Fmjdw30omqdDwEAAAAAAAAAAAAAAAAAAAAAAAAAAIAhycAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDDwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABkM/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAGA78AAAAAAAAAAAAAAAAAAAAAAAAAAACQwcAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDDwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABkM/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAGA78AAAAAAAAAAAAAAAAAAAAAAAAAAACQwcAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDDwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABkM/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAGA78AAAAAAAAAAAAAAAAAAAAAAAAAAACQwcAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDDwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABkM/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAGA78AAAAAAAAAAAAAAAAAAAAAAAAAAACQwcAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDDwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABkM/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAGA78AAAAAAAAAAAAAAAAAAAAAAAAAAACQwcAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDDwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABkM/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAGA78AAAAAAAAAAAAAAAAAAAAAAAAAAACQwcAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDDwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABkM/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAGA78AAAAAAAAAAAAAAAAAAAAAAAAAAACQwcAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZGhq9AMAAAAAAAAAAAAAAACuZKdOnSptZ8+eDc+ePn067CdOnAh7f39/XfefP38+7PWo9bZav7ZLqVqthv3aa6+9TC/5TyNHjgz7qFGjwj58+PCwX3PNNaWt1q979OjRYa/1NgAAAAAAAAAAuBrF340EAAAAAAAAAAAAAAAAAAAAAAAAAAAAXJSBXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMhg4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAAyGPgFAAAAAAAAAAAAAAAAAAAAAAAAAACADAZ+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIOBXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMhg4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAAyNDX6AQAAAAAAAAAAAAAAwP/666+/wn7o0KGw79u3L+xHjhwJe19fX2k7evRoePbYsWN19eiza52vdfepU6fCfubMmbrOAwMzbty4sI8ePbq0jRkzJjw7YcKEsI8fP76uXuv+G2644ZJ99o033hj2tra27PPVajU8CwAAAAAAAABAbb4DAwAAAAAAAAAAAAAAAAAAAAAAAAAAADIY+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAMBn4BAAAAAAAAAAAAAAAAAAAAAAAAAAAgg4FfAAAAAAAAAAAAAAAAAAAAAAAAAAAAyGDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAADIY+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAMBn4BAAAAAAAAAAAAAAAAAAAAAAAAAAAgQ1OjHwAAAAAAAAAAAAAAAP/u3LlzYe/t7c1qKaX0008/hX3v3r1h37dvX9gPHDiQffbw4cNhL4oi7PW67rrrSltra2t4dvz48WGfMGFC2GvdP2fOnOzPHjNmTNhHjx4d9rFjx2bfP2rUqPBsS0tLXZ89bNiwsNf6/JEjR4b9Uor+e0sppUqlcsk++/z5
82E/ffr0JfvsWmp9dq23//nnn2E/c+ZMafv999+zz6aU0tmzZ8Ne6/4//vijtNX6fTl27FjY+/r6wn7o0KGw7969O+xHjx7N/uzo1/1PaG5uLm0TJ04Mz06aNCnsbW1tdfXJkyeHfebMmaVt+vTp4dkpU6aEvanJj9YBAAAAAAAAAP+MaqMfAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFciA78AAAAAAAAAAAAAAAAAAAAAAAAAAACQwcAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDDwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABkM/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAGA78AAAAAAAAAAAAAAAAAAAAAAAAAAACQwcAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZKgURdHoN6SU0qB4BAAAAAAAAAAAAADA1aSvry/sO3fuLG27du0Kz/b09IR9z549Ye/t7Q37vn37wt7f31/aqtVqeLatrS3skyZNqqtH99dzdiC91v2tra1hb2pqCjsA/7xz586F/bfffgt7rf9n7t27t7QdPHjwkt09kPt//fXXsB85ciTskebm5rBPmzYt7NOnTw/7zJkzS9usWbPCs+3t7WGfO3du2MeMGRN2AAAAAAAAAGBAOmv0roFeFH/XIgAAAAAAAAAAAAAAAAAAAAAAAAAAAHBRBn4BAAAAAAAAAAAAAAAAAAAAAAAAAAAgg4FfAAAAAAAAAAAAAAAAAAAAAAAAAAAAyGDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAADIY+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAMBn4BAAAAAAAAAAAAAAAAAAAAAAAAAAAgg4FfAAAAAAAAAAAAAAAAAAAAAAAAAAAAyFApiqLRb0gppUHxCAAAAAAAAAAAAACA/9bPP/8c9u7u7tL23XffhWd37doV9p07d4b9wIEDYY+0tbWFfcaMGZe0T58+PewzZ84sbTfddFN4dsSIEWEHAAaHkydPlrbe3t7wbE9PT9hrnd+zZ0/2/bU++/jx42GvVqthr/XnpPnz54e9vb09q6WU0m233Rb21tbWsAMAAAAAAADAINJZo3cN9KL4X/oBAAAAAAAAAAAAAAAAAAAAAAAAAACAizLwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABkM/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAGA78AAAAAAAAAAAAAAAAAAAAAAAAAAACQwcAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDDwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABkqRVE0+g0ppTQoHgEAAAAAAAAAAAAADE5nz54Ne3d3d2n7+uuvw7Pbt2+vqx8+fDjsw4cPL21z584Nz86fPz/s7e3tYb/55pvDfuutt5a266+/PjwLAECevXv3hn3Xrl1h37lzZ9i///777Pt7e3vDs/39/WGfNm1a2BcuXBj2BQsWlLZFixaFZ+fNmxf2YcOGhR0AAAAAAACAIaezRu8a6EXVOh8CAAAAAAAAAAAAAAAAAAAAAAAAAAAAQ5KBXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMhg4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAAyGPgFAAAAAAAAAAAAAAAAAAAAAAAAAACADAZ+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIOBXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMhg4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAAyVIqiaPQbUkppUDwCAAAAAAAAAAAAALi4CxcuhH3Hjh1h/+KLL8K+cePGsH/zzTdhP3/+fGmbPHlyeHbRokVhX7BgQdgXLlwY9vnz55e25ubm8CwAAFxOp0+fDnt3d3fYt27dGvZt27aFPfpz/9GjR8OzLS0tYb/zzjvD3tHREfa77767tM2ePTs8CwAAAAAAAMCg1Fmjdw30omqdDwEAAAAAAAAAAAAAAAAAAAAAAAAAAIAhycAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDDwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABkM/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAGA78AAAAAAAAAAAAAAAAAAAAAAAAAAACQwcAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDDwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABkqRVE0+g0ppTQoHgEAAAAAAAAAAAAAV7KDBw+G/bPPPgv7pk2bStvmzZvDs8ePHw/77Nmzw75kyZKwL168OOy33357aZs4cWJ4FgAAGPx6enrCvm3btrBv2bIl7NHXQymldPjw4dI2ZcqU8OzSpUvD3tHRUVdvaWkJOwAAAAAAAAAX1Vmjdw30omqdDwEAAAAAAAAAAAAAAAAAAAAAAAAAAIAhycAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDDwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABkM/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAGA78AAAAAAAAAAAAAAAAAAAAAAAAAAACQwcAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDDwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABkqRVE0+g0ppTQoHgEAAAAAAAAAAAAAl9r+/ftL28cffxyeXbduXdi3bt0a9paWlrAvWbKktHV0dIRnly5dGvapU6eGHQAAoJFq/azl7t27S9vGjRvDs5s2bQr7V199FfZaan29tnz58tK2bNmy8OzYsWOz3gQAAAAAAABwBeis0bsGelG1zocAAAAAAAAAAAAAAAAAAAAAAAAAAADAkGTgFwAAAAAAAAAAAAAAAAAAAAAAAAAAADIY+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAMBn4BAAAAAAAAAAAAAAAAAAAAAAAAAAAgg4FfAAAAAAAAAAAAAAAAAAAAAAAAAAAAyGDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAADIY+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAMlaIoGv2GlFIaFI8AAAAAAAAAAAAAgBMnToR99erVYV+zZk3Yt2/fXtrGjRsXnn3ggQfCvnz58rDfddddYW9ubg47AAAA/7yTJ0+GfcOGDWFfu3Zt2D///PPSVutnTDs6OsL++OOPh/3+++8Pu69DAQAAAAAAgAbqrNG7BnpRtc6HAAAAAAAAAAAAAAAAAAAAAAAAAAAAwJBk4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAAyGPgFAAAAAAAAAAAAAAAAAAAAAAAAAACADAZ+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIOBXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMhg4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAAyVIqiaPQbUkppUDwCAAAAAAAAAAAAgCvfjh07wv7ee++FvaurK+yVSiXsjzzySNgffvjh0rZ48eLwbFNTU9gBAADg3506daq0bdiwITxb62vkTz/9NOytra1hf+KJJ0rbU089FZ6dOnVq2AEAAAAAAABq6KzR438w/Rf
VOh8CAAAAAAAAAAAAAAAAAAAAAAAAAAAAQ5KBXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMhg4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAAyGPgFAAAAAAAAAAAAAAAAAAAAAAAAAACADAZ+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIOBXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMhg4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAAyVIqiaPQbUkppUDwCAAAAAAAAAAAAgMujv78/7OvWrQv7qlWrStu3334bnm1vbw/7008/HfbHHnss7C0tLWEHAACAq8X+/fvD/v7772f3Q4cOhWfvvffesK9cuTLsCxYsCDsAAAAAAABw1eus0bsGelG1zocAAAAAAAAAAAAAAAAAAAAAAAAAAADAkGTgFwAAAAAAAAAAAAAAAAAAAAAAAAAAADIY+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAMBn4BAAAAAAAAAAAAAAAAAAAAAAAAAAAgg4FfAAAAAAAAAAAAAAAAAAAAAAAAAAAAyGDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAADIY+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAMlaIoGv2GlFIaFI8AAAAAAAAAAAAAYGD6+/vDvnbt2rC/9tprYf/hhx/C/tBDD5W25557Ljx7xx13hB0AAAC4PC5cuFDaPvnkk/Dsm2++GfatW7eGvaOjI+wvv/xy2BcuXBh2AAAAAAAAYNDrrNG7BnpRtc6HAAAAAAAAAAAAAAAAAAAAAAAAAAAAwJBk4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAAyGPgFAAAAAAAAAAAAAAAAAAAAAAAAAACADAZ+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIOBXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMhg4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAAyGPgFAAAAAAAAAAAAAAAAAAAAAAAAAACADJWiKBr9hpRSGhSPAAAAAAAAAAAAAOD/bd68ubQ9++yz4dkff/wx7I8++mjYX3zxxbDPmTMn7AAAAMDQFv29Rkopvfrqq2H/8ssvw37PPfeUtrfffjs8O2PGjLADAAAAAAAAl0Vnjd410IuqdT4EAAAAAAAAAAAAAAAAAAAAAAAAAAAAhiQDvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDBwC8AAAAAAAAAAAAAAAAAAAAAAAAAAABkMPALAAAAAAAAAAAAAAAAAAAAAAAAAAAAGQz8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQAYDvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDBwC8AAAAAAAAAAAAAAAAAAAAAAAAAAABkqBRF0eg3pJTSoHgEAAAAAAAAAAAAwNWkr68v7M8//3zYP/zww9J23333hWdXrVoV9lmzZoUdAAAAoJE2b94c9ujvVXp6esKzK1euDPsLL7wQ9uHDh4cdAAAAAAAAGJDOGr1roBdV63wIAAAAAAAAAAAAAAAAAAAAAAAAAAAADEkGfgEAAAAAAAAAAAAAAAAAAAAAAAAAACCDgV8AAAAAAAAAAAAAAAAAAAAAAAAAAADIYOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAMhj4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgAwGfgEAAAAAAAAAAAAAAAAAAAAAAAAAACCDgV8AAAAAAAAAAAAAAAAAAAAAAAAAAADIUCmKotFvSCmlQfEIAAAAAAAAAAAAgCvJ+vXrw75ixYqwjxgxIuzvvPNOaXvwwQfDswAAAABXs7///ru0vfXWW+HZV155JexTp04N+0cffRT2efPmhR0AAAAAAABIKaXUWaN3DfSiap0PAQAAAAAAAAAAAAAAAAAAAAAAAAAAgCHJwC8AAAAAAAAAAAAAAAAAAAAAAAAAAABkMPALAAAAAAAAAAAAAAAAAAAAAAAAAAAAGQz8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQAYDvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDBwC8AAAAAAAAAAAAAAAAAAAAAAAAAAABkqBRF0eg3pJTSoHgEAAAAAAAAAAAAwOUWfS/n66+/Hp596aWXwv7kk0+G/Y033gj72LFjww6X05EjR8K+ZcuW0rZmzZrw7Pr167PeBDm2b98e9g8++CDs7777btifeeaZuvott9wSdgAAoH6//PJL2FesWBH27u7usK9evbq0LVu2LDwLAAAAAAAAQ0hnjd410IuqdT4EAAAAAAAAAAAAAAAAAAAAAAAAAAAAhiQDvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDBwC8AAPwPO/cbW2V9NnD8PpV1mwgMUitiLHuhgIOtyMiC4Y9Zm8VNVySLIXZuyZYI0zmcbri4Sd0SmPHFXJbFDYQXupBg1WgMJcYZIGFGt5iMP2MBgWwLJWNYGpkwGEHceV482ROfufu66e9uOaft5/P2m999rnOfntKWkwsAAAAAAAAAAAAAAAAAAAAAAAAggQW/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMCCXwAAAAAAAAAAAAAAAAAAAAAAAAAAAEhgwS8AAAAAAAAAAAAAAAAAAAAAAAAAAAAksOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAElSq1WqtZ8iyLKuLIQAAAAAAAAAAAAAG27lz58L+9a9/Pbc999xz4dnHH3887MuXLw87DCd333132NetW5d87Tr5TDUjyPbt23Nbe3t7ePbw4cNhb2lpCXt3d3fYN23aFPbNmzeHvZZ6enpy24YNG0pde9myZWHv6OgodX0YTEXv887OzuRrP/3002G//fbbk69NvqF8TbMsfl2H82tay/uWZcP73pUR/ZyTZcW/x65du3Ywx4ER6/z582H/9re/Hfbod+RHH300PPvAAw+EHQAAAAAAAEaQog8XxB9OeJ+GkoMAAAAAAAAAAAAAAAAAAAAAAAAAAADAqGTBLwAAAAAAAAAAAAAAAAAAAAAAAAAAACSw4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAASWPALAAAAAAAAAAAAAAAAAAAAAAAAAAAACSz4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgAQW/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEACC34BAAAAAAAAAAAAAAAAAAAAAAAAAAAgQaVardZ6hizLsroYAgAAAAAAAAAAAGCgij6Leccdd4T9lVdeyW0vvPBCeHbRokVhh9GkUqkkn62Tz1Qzgtx99925bd26deHZ0fz12N3dHfZNmzblto0bN5Z67AcffDDsc+bMCfuyZctKPT68X1dXV9jXrFkT9gMHDiQ/9vTp08O+atWqsK9evTr5sUeyWr6mWRa/rvX+mkb3rpb3Lcvie1fr+9bX1xf27du357bOzs7BHuf/Gc0/68DF9MQTT+S2e+65Jzz705/+NOz33ntv0kwAAAAAAABQh4r+kzz+UNv7NJQcBAAAAAAAAAAAAAAAAAAAAAAAAAAAAEYlC34BAAAAAAAAAAAAAA
AAAAAAAAAAAAAggQW/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMCCXwAAAAAAAAAAAAAAAAAAAAAAAAAAAEhgwS8AAAAAAAAAAAAAAAAAAAAAAAAAAAAksOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAEljwCwAAAAAAAAAAAAAAAAAAAAAAAAAAAAkq1Wq11jNkWZbVxRAAAAAAAAAAAAAAA/Xoo4+W6lu3bs1tc+fOTZoJRqNKpZJ8tk4+U80I4uvxv+vt7Q371KlTw/7b3/42t82bNy9ppn/bs2dP2GfPnh323bt357bW1takmRi5yn69FSnzfaTM968si98LWTay3w/R61rL1zTLyr2uQ/2aDuX7YSTftyJdXV3JZ9esWTOIk3zQSP5ZB4aLp556KuzLli0L+69//euwt7W1DXQkAAAAAAAAqJXOgt59oRdqKDkIAAAAAAAAAAAAAAAAAAAAAAAAAAAAjEoW/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEACC34BAAAAAAAAAAAAAAAAAAAAAAAAAAAggQW/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMCCXwAAAAAAAAAAAAAAAAAAAAAAAAAAAEhgwS8AAAAAAAAAAAAAAAAAAAAAAAAAAAAksOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAEoyp9QAAAAAAAAAAAAAA9ezNN98M+49+9KOwd3d3h33u3LkDHQkYZvr6+nLbxo0bw7MrV64Me0dHR9jvu+++sLe1tYU98s4774T92WefDfvy5cuTHzvLsmzVqlW5bcWKFeHZ5ubmsFcqlaSZBkPZx65Wq4M0yeB7/fXXS52fMmXKIE3yQVdeeWWp82+88UZua21tLXXtrq6uUudXr15d6jyDL/p6Ge6KnlvZ90M9G6mv61C/pu7b0CjzvX/NmjWDOAlQj772ta+Fff/+/WG/8847w75v376wf+QjHwk7AAAAAAAADEcNtR4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAhiMLfgEAAAAAAAAAAAAAAAAAAAAAAAAAACCBBb8AAAAAAAAAAAAAAAAAAAAAAAAAAACQwIJfAAAAAAAAAAAAAAAAAAAAAAAAAAAASGDBLwAAAAAAAAAAAAAAAAAAAAAAAAAAACSw4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAASjKn1AAAAAAAAAAAAAAD17P777w/70qVLw75kyZLBHAeoQ319fWG/8847c9uXv/zl8Gy1Wg379u3bw97e3h723bt357bW1tbw7IMPPhj2devWhf2tt94K+9mzZ8M+derU3Nbf3x+eXbulLdp/AAAgAElEQVR2bdiL7nuRSqWSfLbsY9ezHTt2lDrf0tIySJN8UHNzc6nzPT09uW3ZsmWlrs3Is3PnzlqPMGSi90KWjez3w0h9XYf6NXXfAOrPj3/847Bv2bIl7I899ljYH3rooQHPBAAAAAAAAPWuodYDAAAAAAAAAAAAAAAAAAAAAAAAAAAAwHBkwS8AAAAAAAAAAAAAAAAAAAAAAAAAAAAksOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAEljwCwAAAAAAAAAAAAAAAAAAAAAAAAAAAAks+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAEFvwCAAAAAAAAAAAAAAAAAAAAAAAAAABAAgt+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIEGlWq3WeoYsy7K6GAIAAAAAAAAAAAAYffbt2xf2WbNmhX3v3r1hnzlz5oBnAgauUqkkny37meru7u6wd3Z2DtljFym6L6tWrcptq1evDs92dXWFvb+/P+xr164Ne5FavuZF6nm2WipzX7KstvfGa8pgKvteKFLma66eZ6t3Q3nvyt43s6Wp59mGku8DQJH169eH/aGHHgr7X//619zW2NiYNBMAAAAAAAAkyv+A5/+KPyD6Pg0lBwEAAAAAAAAAAAAAAAAAAAAAAAAAAIBRyYJfAAAAAAAAAAAAAAAAAAAAAAAAAAAASGDBLwAAAAAAAAAAAAAAAAAAAAAAAAAAACSw4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAASWPALAAAAAAAAAAAAAAAAAAAAAAAAAAAACSz4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgAQW/AIAAAAAAAAAAAAAAAAAAAAAAAAAAECCSrVarfUMWZZldTEEAAAAAAAAAAAAMPo88sgjYX/yySfDfujQocEcB0hUqVSSz5b9TPXixYvD3tPTU+r6tTLUnzXv7e0N+3PPPRf2lStXJj/2UD+3Wn491rMy9yXLantvvKYMprLvhSJlvubqebZ6N5T3rux9M1uaep5tKPk+ABTp7+8Pe3Nzc9i3bt2a29ra2pJmAgAAAAAAgESdBb37Qi/UUHIQAAAAAAAAAAAAAAAAAAAAAAAAAAAAGJUs+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAEFvwCAAAAAAAAAAAAAAAAAAAAAAAAAABAAgt+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIEFvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDAgl8AAAAAAAAAAAAAAAAAAAAAAAAAAABIYMEvAAAAAAAAAAAAAAAAAAAAAAAAAAAAJBhT6wEAAAAAAAAAAAAAamnPnj1hb21tvUiTAMNVT09P8tlqtTqIk9SXDRs2hL3ovv3kJz8J+8qVKwc8E7XV0dER9jLvpVq76667aj0Cw8hIfi8UPbeRLHruXtP06w/Xezea3wvAyNfU1BT2KVOmhD36W1xbW1vSTAAAAAAAAFBrDbUeAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIYjC34BAAAAAAAAAAAAAAAAAAAAAAAAAAAggQW/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMCCXwAAAAAAAAAAAAAAAAAAAAAAAAAAAEhgwS8AAAAAAAAAAAAAAAAAAAAAAAAAAAAksOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAEljwCwAAAAAAAAAAAAAAAAAAAAAAAAAAAAnG1HoAAAAAAAAAAAAAgFo6c+ZM2CdNmnSRJgFGo4MHD4Z92rRpF2mSgevu7g778uXLw3748OGwt7S0DHgm6ltHR0fYe3p6wt7X15fbmpubk2b6t97e3lLn58yZU+o8/12lUqn1CLmq1Wry2bLvhXpW9NxGsui5e03Trz9c791ofi8AjB8/PuynTp26SJMAAAAAAADAxdNQ6wEAAAAAAAAAAAAAAAAAAAAAAAAAAABgOLLgFwAAAAAAAAAAAAAAAAAAAAAAAAAAABJY8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAJLPgFAAAAAAAAAAAAAAAAAAAAAAAAAACABBb8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQAILfgEAAAAAAAAAAAAAAAAAAAAAAAAAACCBBb8AAAAAAAAAAAAAAAAAAAAAAAAAAACQYEytBwAAAAAAAAAAAACopebm5rAfPXr0Ik0CDFfr168P+/Lly3Pbxo0bw7MrV64M+4QJE8Le19cX9ujxv/vd7
4ZnOzs7w16kpaWl1HmGn5tuuqnU+T//+c+5rejf8yJl/70v+9z476rVaq1HGBKf+cxnaj3CkBnK57Z9+/awt7e3h33btm1hb2trG/BM7zdSX9ehfl7uG8DIc+zYsbBPnjz5Ik0CAAAAAAAAF09DrQcAAAAAAAAAAAAAAAAAAAAAAAAAAACA4ciCXwAAAAAAAAAAAAAAAAAAAAAAAAAAAEhgwS8AAAAAAAAAAAAAAAAAAAAAAAAAAAAksOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAEljwCwAAAAAAAAAAAAAAAAAAAAAAAAAAAAks+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAEY2o9AAAAAAAAAAAAAEAt3XDDDWH/3ve+F/bz58+HfcwYH9eEwdDX11ezazc3N4f91ltvDfvy5ctz25o1a8KzRb2sw4cPJ5/t6OgIe09PT9h7e3vDfvbs2QHPdKHKvuZ79uwZzHH+n4MHD4Z92rRpQ/bYQ62lpSXs69evD/uvfvWr3HbdddclzXQh186y4tmKnlsZXV1dpc6vXr16kCZhsLS2toZ91apVYS/6t6Ho+0gZRbMVPbcy2tvbh/R8tVotdf3ouXtN068fzVfL+5Zl8WxDfd/KKvo5bCiN5J91YLTYt29f2E+cOBH2+fPnD+Y4AAAAjHBnzpwJe39/f3I/efJkqcc+ffp02It+Ry46Hz3+qVOnwrPvvfde2Iue+1Aqmr3oczhlRZ/jGTdu3JA+dpHx48fntksuuSQ8e9lll4V97NixpfrEiRNz26WXXlrq2tHzzrIsa2pqKtWL5gMAGCwNtR4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAhiMLfgEAAAAAAAAAAAAAAAAAAAAAAAAAACCBBb8AAAAAAAAAAAAAAAAAAAAAAAAAAACQwIJfAAAAAAAAAAAAAAAAAAAAAAAAAAAASGDBLwAAAAAAAAAAAAAAAAAAAAAAAAAAACSw4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAASWPALAAAAAAAAAAAAAAAAAAAAAAAAAAAACcbUegAAAAAAAAAAAACAWrrtttvCfv/994d9y5YtYV+yZMmAZwI+6IorrqjZtavVatibm5vDfvjw4dy2YcOG8OyaNWvCftddd4X9+9//fthbWlrCHlm9enXYe3p6wl703FesWBH2VatW5bb+/v7w7NmzZ8NeqVTCPpSmT59e6nzR12s9W7ZsWdijr6mPfexj4dmOjo6w33fffWFva2sLOwymou+vM2fODHuZ7yNPP/102G+//fbka5e1bdu2sLe3t5c6P5Rq+ZpmWfy61vI1vRDRvavlfcuy+r53tfxZpkiZ12U4/5wDI8mTTz4Z9rlz54a96Ps3AADASPX3v/897EeOHAl79H+Ovb29pa599OjRsB8/fjy59/X1hWeL/l/vzJkzYa9nRf9/NXbs2LBfeumluW38+PFJM/3bxIkTS50vI3peWZZlH/7wh4f08aOvqaKv9aF24sSJ5LOnTp0K++nTp0v1ou9h9Sz6mmtqagrPFn0e5PLLLw970fWnTJmS266++urw7NSpU8Ne9HmQol70PQwA+KCGWg8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAw5EFvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDAgl8AAAAAAAAAAAAAAAAAAAAAAAAAAABIYMEvAAAAAAAAAAAAAAAAAAAAAAAAAAAAJLDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAABJY8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAJLPgFAAAAAAAAAAAAAAAAAAAAAAAAAACABJVqtVrrGbIsy+piCAAAAAAAAAAAAID/9PDDD4d98+bNYX/jjTfC3tjYOOCZAAAAAGAo9Pb2hn3mzJlh7+7uDvstt9wy4JkAAIDRo7+/P+wHDhwo1Q8ePJh89tChQ2E/cuRI2E+ePBn2MpqamsLe0tIS9ilTppS6ftSvuOKKIbt2lmXZ5Zdfnnx+3Lhx4dmxY8eW6jBanD59ulQ/depU2Iv+bSjTjx8/Hp596623huyxsyzLjh49mtuK/k5XdO2yir5HRv+2XHvtteHZadOmlerTp08P+4wZM3Jb0b8rAIxKnQU9/s+v92koOQgAAAAAAAAAAAAAAAAAAAAAAAAAAACMShb8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQAILfgEAAAAAAAAAAAAAAAAAAAAAAAAAACCBBb8AAAAAAAAAAAAAAAAAAAAAAAAAAACQwIJfAAAAAAAAAAAAAAAAAAAAAAAAAAAASGDBLwAAAAAAAAAAAAAAAAAAAAAAAAAAACSw4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAASVKrVaq1nyLIsq4shAAAAAAAAAAAAAP7TmTNnwj5z5syw33zzzWH/xS9+MeCZAAAAACDFuXPnwn7jjTeGvampKew9PT0DngkAABhc7777btj37t0b9t27d4d9586dyef3798fnn377bfDXmTcuHFhnzZtWlLLsiy75pprwj516tSwX3311aX6xz/+8dz20Y9+NDwLwMjzz3/+M+yHDx8O+5EjR8Le29ubfP7QoUPh2QMHDoT94MGDYT916lTYI5MmTQr7ddddF/bW1tawf/rTnw777Nmzc9usWbPCs42NjWEHIFlnQe++0As1lBwEAAAAAAAAAAAAAAAAAAAAAAAAAAAARiULfgEAAAAAAAAAAAAAAAAAAAAAAAAAACCBBb8AAAAAAAAAAAAAAAAAAAAAAAAAAACQwIJfAAAAAAAAAAAAAAAAAAAAAAAAAAAASGDBLwAAAAAAAAAAAAAAAAAAAAAAAAAAACSw4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAASWPALAAAAAAAAAAAAAAAAAAAAAAAAAAAACSrVarXWM2RZltXFEAAAAAAAAAAAAAAD9frrr4f9s5/9bNh/+MMf5rYf/OAHSTMBAAAAMHqdP38+t912223h2d///velenNzc9gBAGC0OHToUNhfe+215L5z587w7B//+Mewnzt3Luzjx48P++zZs8N+/fXX57YZM2aEZ6dNmxb26dOnh/2qq64KOwAw/B09ejTsBw4cSGpZlmVvvvlm2Hfv3h32Xbt2hf3kyZO5rbGxMTw7a9assEc/g2VZls2fPz/sCxYsCPu1114bdoBhrLOgd1/ohRpKDgIAAAAAAAAAAAAAAAAAAAAAAAAAAACjkgW/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMCCXwAAAAAAAAAAAAAAAAAAAAAAAAAAAEhgwS8AAAAAAAAAAAAAAAAAAAAAAAAAAAAksOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAEljwCwAAAAAAAAAAAAAAAAAAAAAAAAAAAAkq1Wq11jNkWZbVxRAAAAAAAAAAAAAAg627uzvsX/nKV3LbPffcE5597LHHwj5mzJiwAwAAADD8vP3222FfunRpbtu5c2d4dseOHWH/5Cc/GXYAALiYivbm7NmzJ+yvvvpqUruQfuzYsbCPGzcu7PPmzctt
c+bMCc8W9euvvz7s11xzTdgrlUrYAQBGq6KfT//0pz/ltqK/3e7atSvsRed/97vfhf3kyZNhnzx5cm5bsGBBeHbhwoVhX7RoUdhbW1vD7udToKTOgh5/EPx9GkoOAgAAAAAAAAAAAAAAAAAAAAAAAAAAAKOSBb8AAAAAAAAAAAAAAAAAAAAAAAAAAACQwIJfAAAAAAAAAAAAAAAAAAAAAAAAAAAASGDBLwAAAAAAAAAAAAAAAAAAAAAAAAAAACSw4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAASWPALAAAAAAAAAAAAAAAAAAAAAAAAAAAACSz4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgASVarVa6xmyLMvqYggAAAAAAAAAAACAi+3FF1/MbV/96lfDszfccEPYn3nmmbBPnDgx7AAAAABcfPv37w/74sWLw/7ee+/lts2bN4dnZ82aFXYAABiod955J7dt3bo1PPvSSy+V6seOHQv75MmTc9vChQvDswsWLAh70flPfepTYb/kkkvCDgAAAxH93TjLsmzv3r1hf/XVV5PahfQyP7dnWZZ9/vOfz2233HJLePZzn/tc2CdMmBB2YEToLOjdF3qhhpKDAAAAAAAAAAAAAAAAAAAAAAAAAAAAwKhkwS8AAAAAAAAAAAAAAAAAAAAAAAAAAAAksOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAEljwCwAAAAAAAAAAAAAAAAAAAAAAAAAAAAks+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAEFvwCAAAAAAAAAAAAAAAAAAAAAAAAAABAAgt+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIEGlWq3WeoYsy7K6GAIAAAAAAAAAAACgnvzhD38I+6233hr2os+J/vKXvwz7zTffHHYAAAAAPuhf//pX2B9//PGwd3V1hX327Nlhf/7553NbU1NTeBYAgNHnb3/7W9ifffbZsG/evDnsv/nNb3JbQ0NDePbGG28M+xe+8IVSfcaMGWEHAACG3qFDh8K+ZcuWsL/88su5Lfp9JMuy7Pz582GfP39+2Is+w7l06dKwX3XVVWEHLorOgt59oReK/8oBAAAAAAAAAAAAAAAAAAAAAAAAAAAA/FcW/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEACC34BAAAAAAAAAAAAAAAAAAAAAAAAAAAggQW/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMCCXwAAAAAAAAAAAAAAAAAAAAAAAAAAAEhgwS8AAAAAAAAAAAAAAAAAAAAAAAAAAAAksOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAElSq1WqtZ8iyLKuLIQAAAAAAAAAAAACGk+PHj4d9xYoVYX/mmWfCvnTp0tz2s5/9LDx75ZVXhh0AAABgONu1a1du+8Y3vhGe3bNnT9gfeOCBsD/88MNhb2xsDDsAAMPPiRMnctsLL7wQnt20aVPYd+zYEfYJEyaE/Utf+lLYv/jFL+a29vb28Oxll10WdgAAgMg//vGPsG/bti3sL730Utiff/75sEe/y2VZli1atCi33XHHHeHZot/FJk2aFHbg/3QW9O4LvVBDyUEAAAAAAAAAAAAAAAAAAAAAAAAAAABgVLLgFwAAAAAAAAAAAAAAAAAAAAAAAAAAABJY8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAJLPgFAAAAAAAAAAAAAAAAAAAAAAAAAACABBb8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQAILfgEAAAAAAAAAAAAAAAAAAAAAAAAAACCBBb8AAAAAAAAAAAAAAAAAAAAAAAAAAACQoFKtVms9Q5ZlWV0MAQAAAAAAAAAAADCavPzyy2H/5je/mdtOnDgRnv3Od74T9nvvvTfsEyZMCDsAAABAGX/5y1/C/sgjj4T9qaeeym3z5s0Lzz7xxBNh/8QnPhF2AACGn9deey3sP//5z8P+4osv5rbGxsbw7OLFi8Pe2dkZ9ptuuinsH/rQh8IOAAAwUr377rthf+WVV8Le3d2d26LfA7Msy86dOxf2JUuWhP1b3/pW2BcuXBh2GEHiP4xkWf4b9T80lBwEAAAAAAAAAAAAAAAAAAAAAAAAAAAARiULfgEAAAAAAAAAAAAAAAAAAAAAAAAAACCBBb8AAAAAAAAA/8PO3QfXVdeJHz83DbQUaBRK4uqagtAHH6BRRMJWt9pWKGqLg2YkHRgfhtZ2XLQzyR/FTXVnknV0SB0ZdRpbRgfr0IyFQVtHGgvRKmBFhEYdsEWR1uKUQIGIlFK7ufuXs/78cT6nPSfJvUlfr3/f8znnm/uQe3PvyRcAAAAAAAAAAAAAAAAAAHKwwS8AAAAAAAAAAAAAAAAAAAAAAAAAAADkYINfAAAAAAAAAAAAAAAAAAAAAAAAAAAAyMEGvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJBDqVwuV3oNSZIkVbEIAAAAAAAAAAAAAP7P4cOHU9tNN90Uzt58882Fzv2Zz3wm7KtXr05tdXV1hc4NAAAAVL8//vGPYf/v//7vsH/7298O++tf//qwf/7zn09t1113XThbKpXCDgBAZRw5ciS1bd68OZz92te+FvaHHnoo7M3NzWH/1Kc+ldquvvrqcHbq1KlhBwAAoPq89NJLYb/zzjvD/vWvfz3s999/f9ibmppS23/8x3+Es8uWLQv7aaedFnYYY60Zvfd4D1RTcCEAAAAAAAAAAAAAAAAAAAAAAAAAAABwUrLBLwAAAAAAAAAAAAAAAAAAAAAAAAAAAORgg18AAAAAAAAAAAAAAAAAAAAAAAAAAADIwQa/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkIMNfgEAAAAAAAAAAAAAAAAAAAAAAAAAACAHG/wCAAAAAAAAAAAAAAAAAAAAAAAAAABADjb4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgBxK5XK50mtIkiSpikUAAAAAAAAAAADp1q1bl9ra29tH9dzd3d1hb2try33s6OdKkuI/2759+8Le2NiY2gYHB8PZTZs2hT1r7UuWLAn76tWrU9uCBQvC2aKK3i8bNmxIbVdddVU429DQEPYque4Oqt7Q0FDYb7755rB/5StfCfvw8HBq+9jHPhbOfvKTnwz7G9/4xrADAAAAxyfrs7SdO3emtm984xvh7B133BH26LPXJEmSjo6OsF977bVhr62tDTsAAGPv6NGjYY++R06SJPnCF76Q2p599tlw9iMf+UjYb7jhhrC//e1vDzsAAACMpAcffDDsX/va11Jbb29vOPvqV7867J/97GfDnnWN56mnnhp2OEGtGT1+wP+DmoILAQAAAAAAAAAAAAAAAAAAAAAAAAAAgJOSDX4BAAAAAAAAAAAAAAAAAAAAAAAAAAAgBxv8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQA42+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAcbPALAAAAAAAAAAAAAAAAAAAAAAAAAAAAOdjgFwAAAAAAAAAAAAAAAAAAAAAAAAAAAHKwwS8AAAAAAAAAAAAAAAA
AAAAAAAAAAADkUCqXy5VeQ5IkSVUsAgAAAAAAAAAAyGfXrl1hv+yyy8K+cuXKsK9fv/6E1zRSli5dGvZbbrkl7PX19WEfHBxMbddff304u2zZsrBfc801Ye/v7w/7woULU9vu3bvD2blz54Z93bp1YW9paQl7Y2Nj2IeGhlJbd3d3ONvV1RX2KrnujgnkwIEDqe3LX/5yOPvqV7867GeffXbYzzrrrNx9+vTphY6dtbas59qGDRtytSRJksceeyzs8+fPD/snP/nJsF999dVhnzx5ctgBAACgWjz77LNh//a3vx32b3zjG2H/3e9+l9rmzZsXzq5atSrsH/nIR8JeW1sbdgAAxt7//M//hP073/lO2P/rv/4r7AcPHgx7dO3CmjVrwtmGhoawAwAAwEQRXf+eJEnyxS9+Mew9PT1hz7r+Puvv/2uvvTa1+X6IV9Ca0XuP90A1BRcCAAAAAAAAAAAAAAAAAAAAAAAAAAAAJyUb/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAONvgFAAAAAAAAAAAAAAAAAAAAAAAAAACAHGzwCwAAAAAAAAAAAAAAAAAAAAAAAAAAADnY4BcAAAAAAAAAAAAAAAAAAAAAAAAAAABysMEvAAAAAAAAAAAAAAAAAAAAAAAAAAAA5GCDXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMihVC6XK72GJEmSqlgEAAAAAAAAAAAwOtatWxf29vb2sO/bty/sjY2NJ7ymvxsYGAj7o48+GvZrrrkm97mTJEl6e3tTW2trazg72td/lUql1NbR0RHOdnZ25j52kiTJU089Ffb6+vqwRwYHB8Pe0NAQ9iq57o4J5M4770xtV199dThbW1sb9qzn2rFjx8Jeycf7pEmTwj5t2rTUdtZZZ4Wzp5xyStiHhobCfvDgwbBPmTIl7NFrx4c//OFwdtGiRWE/9dRTww4AAMDE85e//CXsW7duTW1btmwJZ/v6+sKe9TfwtddeG/aVK1emtre85S3hLAAA489vf/vbsH/iE58I+8MPPxz2j3/842Ffu3Zt2F//+teHHQAAACjuwIEDYc+6Dv2b3/xm2OfOnZvavvWtb4WzF154YdiZkOJ/2kiS9H/4+Cc1BRcCAAAAAAAAAAAAAAAAAAAAAAAAAAAAJyUb/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAONvgFAAAAAAAAAAAAAAAAAAAAAAAAAACAHGzwCwAAAAAAAAAAAAAAAAAAAAAAAAAAADnY4BcAAAAAAAAAAAAAAAAAAAAAAAAAAABysMEvAAAAAAAAAAAAAAAAAAAAAAAAAAAA5GCDXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMihVC6XK72GJEmSqlgEAAAAAAAAAAAwOgYGBsLe1NQU9g0bNoR9+fLlJ7ymv1u3bl3YW1pawt7Y2Jj73EmSJEuXLk1t27ZtK3TsSsq6Nm3VqlVh7+npCfvmzZvDfuWVV6a2urq6cBbG2nPPPZfapk+fHs4ODw+P9HIYATU1NWF/5zvfmdruvffecDbrd9hVV10V9g996ENhX7BgQWqbOnVqOAsAAMAre/rpp8N+1113hW5u1c4AACAASURBVP32228P+49+9KOwR3+nLl68OJzN+nw0+nwzSZLk9NNPDzsAAOPPsWPHwv6lL30ptXV2doazb33rW8P+zW9+M+xvfOMbww4AAACMf3v27An7Jz7xidT24IMPhrMdHR1hX7NmTdhPOeWUsFOVWjN67/EeKL56GAAAAAAAAAAAAAAAAAAAAAAAAAAAAHhFNvgFAAAAAAAAAAAAAAAAAAAAAAAAAACAHGzwCwAAAAAAAAAAAAAAAAAAAAAAAAAAADnY4BcAAAAAAAAAAAAAAAAAAAAAAAAAAABysMEvAAAAAAAAAAAAAAAAAAAAAAAAAAAA5GCDXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMjBBr8AAAAAAAAAAAAAAAAAAAAAAAAAAACQQ6lcLld6DUmSJFWxCAAAAAAAAAAAoDJWrVoV9p6enrA///zzuc+9Zs2asK9fvz73sY9HqVTKPVsl13/lsnfv3rC3t7eHfdu2bbnP3d3dHfa2trbcx4aR9ra3vS3sDz/88BitZGKpra0Ne9bv18997nNh/+xnP5v7/AcPHgxn77jjjrBv2bIl7Pfee2/Yo7XNmzcvnL388ssL9aamprAXec0EAAA4evRo2O+7776w79ixI7X96Ec/Cmez/n6fMmVK2K+44oqwt7S0hP0DH/hAajvzzDPDWQAATj7PPvts2D/4wQ+G/cEHH0xtnZ2d4ezq1avDPmnSpLADAAAADA8Pp7abb745nO3o6Ah71nWO3//+98M+ffr0sFMRrRm993gPVFNwIQAAAAAAAAAAAAAAAAAAAAAAAAAAAHBSssEvAAAAAAAAAAAAAAAAAAAAAAAAAAAA5GCDXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMjBBr8AAAAAAAAAAAAAAAAAAAAAAAAAAACQgw1+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIAcb/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAOpXK5XOk1JEmSVMUiAAAAAAAAAACAyhgYGAh7U1NT2Ddv3hz2008/PbWdc8454Wxzc3PYiyqVSrln9+zZE/ZZs2blPna1y3rM9PT05GpJkiTd3d1hb2trCzuMpI6OjrDfdNNNYT969OhILmfcqK2tDft5550X9ttuuy3sb3/72094TdViaGgo7HfffXdq27FjRzjb19cX9ieeeCLs9fX1YZ8/f37Yo9fsrNfziy++OOyTJ08OOwAAcHxeeOGFsP/iF79Ibffff384m9Xvu+++sL/44othjz6jXLRoUTh7xRVXhH3evHlhnzJlStgBAOBE/OEPfwj7lVdeGfbh4eGw/+AHP0htc+bMCWdhrA0ODqa2/v7+cDbrO8WtW7fmWhPksWvXrrDfeuutYc+6lmblypW5+9y5c8NZAACoJlnX5y9ZsiTsWfu7/vCHP0xtM2fODGcZNa0Zvfd4D1RTcCEAAAAAAAAAAAAAAAAAAAAAAAAAAABwUrLBLwAAAAAAAAAAAAAAAAAAAAAAAAAAAORgg18AAAAAAAAAAAAAAAAAAAAAAAAAAADIwQa/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkIMNfgEAAAAAAAAAAAAAAAAAAAAAAAAAACAHG/wCAAAAAAAAAAAAAAAAAAAAAAAAAABADjb4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgBxK5XK50mtIkiSpikUAAAAAAAAAAADVadWqVWHv6ekJ+5IlS1Lb1q1bc61ppGzcuDG1rVixIpzt6OgIe3t7e9jr6urCPjg4mNo2bdoUzra1tYW9VCqF/fnnnw971tojAwMDYW9qagp7lVx3RxU5fPhw2Hfu3Bn2HTt2pLY777wznH3iiSfCPp5NmjQp7MPDw6lt9erV4ewXvvCFsE+ZMiXs5PPYY4+Fva+vL+w//elPw37//fentieffDKcnTx5ctgvvvjisF922WVhf8
c73pHaLrroonB25syZYc96rgAAcPI5cuRI2B955JHUtnv37nD2F7/4RaH+29/+NuzR33pz5swJZ7Pely9YsCDsixYtCntDQ0PYAQCgWkTv+ZMkSd7znveE/fzzzw971nfs06dPDztUk+h6lKxrUbK4toCR1t/fn9oWLlwYzu7bty/sjY2NYe/t7Q37bbfdltoqfW1WdP3TV7/61XC2q6ur0Lk3b94c9muuuabQ8Rl/sp5Lra2tuY/t8VYZ7tN8RvN2S5L4thvPt1tR+/fvD3t07VWSxNdBZr139t6YieTQoUNh/+AHPxj2PXv2pLas5+EFF1wQdnLLeuGJX7j+QU3BhQAAAAAAAAAAAAAAAAAAAAAAAAAAAMBJyQa/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkIMNfgEAAAAAAAAAAAAAAAAAAAAAAAAAACAHG/wCAAAAAAAAAAAAAAAAAAAAAAAAAABADjb4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgBxs8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAA52OAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAciiVy+VKryFJkqQqFgEAAAAAAAAAAFSnXbt2hf2yyy4L+4YNG1Lb8uXLc61ppAwODqa2hoaGMVzJidm3b1/YGxsbw14qlcLe0dER9qz7LTr//v37w9ktW7aEva2tLexURnQ95MDAQDi7Y8eOsPf19YX9vvvuC/uRI0fCfuGFF6a2hQsXhrPr168P+8svvxz2SqqtrQ37a17zmrB/5zvfSW3z58/PtSYmrqzf/VnP45///Odhz3qvEv0eOnr0aDg7derUsL/5zW8Oe1NTU9gvuuii1Bb9fkqSJJk9e3bYs57HAADVbHh4OOzRZyN79uwJZ7P+Ts3qv/71r8Oedf5jx46ltjPPPDOcvfjii8M+b968sGd9jhf1s846K5wFAICTyaFDh1LbW9/61nA2+lw4SZLk9ttvD/uUKVPCDhNF1nUNWapkTx8mkFWrVqW2np6ecHYiPx6ja6+SJEkef/zx1Nbc3Fzo3L29vWFvbW0Ne3d3d9hdozT+rF27NuxdXV1hz/psN5L1/X3W9XidnZ25zz2RuU/zqeTtliTxbVfNt1tRGzduDPuKFSvCnvW6tGjRotR27rnnhrN1dXVhh4kk65rhq6++OrVlXed4//33h33atGlhJ1X8xj1J4jf+/6Cm4EIAAAAAAAAAAAAAAAAAAAAAAAAAAADgpGSDXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMjBBr8AAAAAAAAAAAAAAAAAAAAAAAAAAACQgw1+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIAcb/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAONvgFAAAAAAAAAAAAAAAAAAAAAAAAAACAHGzwCwAAAAAAAAAAAAAAAAAAAAAAAAAAADmUyuVypdeQJElSFYsAAAAAAAAAAADGp6VLl4a9u7s7tc2aNWuklzNi9u/fH/aNGzeGvaurK+wrV64M+4033pjaGhsbw9kspVIp7E899VTYN23aFPb29vbUFj0ekiRJ2trawk4+Bw8eDPvdd98d9u3bt4f9nnvuyX3uc845J+yLFi0K++WXX16ov/a1rw175H3ve1/Y+/r6wj48PJz73DU1NYWO/fGPfzzsX/nKV8I+bdq0sEM1+dvf/pbaHnnkkXD2N7/5TdgHBgZGrQ8ODoazWbKepxdccEHYs96nRfOzZ88OZ88///ywZ73Xec1rXhP2SZMmhR0AJoqXX3457E8++WTYsz77eOyxx3K14+l79+4N+x/+8IewZ/3skfPOOy/sc+fODftFF11UaD7qb3jDG8LZrM90AACAsdHS0pLannjiiXB2586dYZ86dWqeJcGEU/Rv4CrZ04cJpMhjciI/Hnft2hX25ubmMVrJ/8/vkYkn6/v3pqamQscvcp8Xfbzt3r077FmfO49X7tP8otuukrdbkhS77ar5ubB27dqwZ10zXM0/G5xMhoaGUtull14azl544YVh37JlS641kbRm9N7jPVB8ZTUAAAAAAAAAAAAAAAAAAAAAAAAAAADwimzwCwAAAAAAAAAAAAAAAAAAAAAAAAAAADnY4BcAAAAAAAAAAAAAAAAAAAAAAAAAAABysMEvAAAAAAAAAAAAAAAAAAAAAAAAAAAA5GCDXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMjBBr8AAAAAAAAAAAAAAAAAAAAAAAAAAACQgw1+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIfaSi8AAAAAAAAAAAAgy9DQUNhf97rXhX3WrFkjuZwx09jYGPbOzs5CvZLK5XKh+ba2tkL9ZHXkyJGw33fffWHv6+tLbTt27AhnBwYGwn7KKaeEfd68eWH/9Kc/ndquuOKKcLapqSnsNTU1Ya+krJ8t634ZHh4Oe3S/TJs2LZy99dZbw/7+978/7DCRRM+luXPnhrNZ/dprr821puMxODgY9kcffTTsv//978O+d+/eQvPf//73U9tjjz0Wzma9JmaprY0vxW9oaEhtM2bMCGez3ttm9az3kOecc07Yzz777NQ2ffr0cLa+vj73sZMkSc4444ywA4ym5557LuxPP/102A8dOpTannnmmdyzSZIkBw8eDPuBAwfC/qc//Sn37J///OewZ62tqFe96lWp7YILLghnsz4PamlpKTQ/c+bM1DZ79uxwNuvvKQAAgKzveH74wx+mtqzv5aZOnZprTUD1yPoOZ9OmTWFvb28P+5IlS8K+evXqsC9YsCDskaxrhL773e+GfcWKFbnP3dHREfYbbrgh7Fnfk5RKpRNe00gpeu6i1/mMpubm5oqdO+vxmiXrMTea1q5dW2i+mq9LG00PPPBApZcwarJ+tqxrF8Yr9+noHX+8quRzobe3N+xdXV1hv+eee8I+UZ/HMN7U1dWltu9973vh7CWXXBL27du3h33x4sVhp7jqveIcAAAAAAAAAAAAAAAAAAAAAAAAAAAAqpgNfgEAAAAAAAAAAAAAAAAAAAAAAAAAACAHG/wCAAAAAAAAAAAAAAAAAAAAAAAAAABADjb4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgBxs8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAA52OAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAcqit9AIAAAAAAAAAAACyfPe73w17S0vLGK0ERtcjjzwS9r6+vrDv2LEj7Dt37gz74cOHwz5nzpzUtmjRonC2q6sr7PPnzw/7GWecEfaT1Xvf+96wHzt2rNDxly5dmtp6enrC2enTpxc6N1B59fX1hXrW7/bRVC6Xw37gwIGw79+/P+xPPvlk7uP/6U9/yj2bJEnywAMPhH3Lli1hf+aZZ8L+8ssvh300TZkyJexnn312ast63Zk6dWrYTz/99LC/6lWvCvtpp52W+9xZx86anzx5ctizROcvlUqFjp1ltH+2SNZ739F8Lvztb38L+1//+tdCx3/xxRfD/tJLL6W2v/zlL+HsCy+8kPvYSZL9s0Xnz/q5sn6/H
AAAAAAAAAAAAAAAAAAAAAAACQwAa/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkKCpVqtVPUOWZVlDDAEAAAAA1E9/f39uW7ZsWbi2Qe5jAgAAMAJFn/WK9PX1jdIkTBRNTU2l1tfz/kKVs+3atSvs8+fPD/uBAwfC3traOuKZ3jQ4OBj2yy67LOzbtm0Le0dHx4hngvHotddeC3tXV1fYf/zjH4d95cqVYd+wYUNumzFjRrgWAAAAYLJ65plnwr569eqw79mzJ+zf/OY3w7527dqwAwAAAAAAAFmWZVlnQd/+dg/UXHIQAAAAAAAAAAAAAAAAAAAAAAAAAAAAmJRs8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAJbPALAAAAAAAAAAAAAAAAAAAAAAAAAAAACWzwCwAAAAAAAAAAAAAAAAAAAAAAAAAAAAls8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAJbPALAAAAAAAAAAAAAAAAAAAAAAAAAAAACaZWPQAAAAAAAAAAAABpduzYUWp9a2vrKE3y3y666KJS6/v7+8Pe0dFR6vgwXpx44olh/+EPfxj2RYsWhf2LX/xi2KN/yxs2bAjXLl++POxNTU1hBwAAAKjS0aNHw/6Nb3wjt919993h2g984ANhHxwcDPuHPvShsAMAAAAAAABjq7nqAQAAAAAAAAAAAAAAAAAAAAAAAAAAAGA8ssEvAAAAAAAAAAAAAAAAAAAAAAAAAAAAJLDBLwAAAAAAAAAAAAAAAAAAAAAAAAAAACSwwS8AAAAAAAAAAAAAAAAAAAAAAAAAAAAksMEvAAAAAAAAAAAAAAAAAAAAAAAAAAAAJLDBLwAAAAAAAAAAAAAAAAAAAAAAAAAAACSwwS8AAAAAAAAAAAAAAAAAAAAAAAAAAAAkmFr1AAAAAAAAAAAAAKTZtGlT1SPkamlpKbV+YGBglCaBye36668P+zXXXBP2u+66K7d95jOfCdfee++9Yf/qV78a9k9+8pNhb25uDjsAAAAwuR09ejTs999/f9jvvvvusL/yyiu57Z577gnXfv7znw/7lClTwg4AAAAAAAA0Fr9sBgAAAAAAAAAAAAAAAAAAAAAAAAAAgAQ2+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAENvgFAAAAAAAAAAAAAAAAAAAAAAAAAACABDb4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgAQ2+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAENvgFAAAAAAAAAAAAAAAAAAAAAAAAAACABDb4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgARTqx4AAAAAAAAAAACANO3t7WEfGBgI+9DQUNhbW1tHPNNo6erqquzcMJmcfvrpYb/vvvty26233hqu7enpCfuNN94Y9osvvjjs3d3duW3p0qXh2ilTpoQdAAAAqN7LL78c9i1btoT9nnvuCfuRI0fCvmbNmrDfdtttuW3mzJnhWgAAAAAAAGBiaa56AAAAAAAAAAAAAAAAAAAAAAAAAAAAABiPbPALAAAAAAAAAAAAAAAAAAAAAAAAAAAACWzwCwAAAAAAAAAAAAAAAAAAAAAAAAAAAAls8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAJbPALAAAAAAAAAAAAAAAAAAAAAAAAAAAACWzwCwAAAAAAAAAAAAAAAAAAAAAAAAAAAAls8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAJplY9AAAAAAAAAAAAAGluuummsA8MDIT92WefDXtra+uIZ3rT8PBw8tosy7Ibbrih1Hqg/t7//veH/Wc/+1nY//CHP4S9p6cn7B0dHblt1qxZ4dpbbrmlVD/77LPDDgAAAPyvp59+Ouz3339/buvr6wvX/vvf/w77mjVrwn7bbbeFvcz9UQAAAAAAAGByaa56AAAAAAAAAAAAAAAAAAAAAAAAAAAAABiPbPALAAAAAAAAAAAAAAAAAAAAAAAAAAAACWzwCwAAAAAAAAAAAAAAAAAAAAAAAAAAAAls8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAJbPALAAAAAAAAAAAAAAAAAAAAAAAAAAAACWzwCwAAAAAAAAAAAAAAAAAAAAAAAAAAAAls8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAJmmq1WtUzZFmWNcQQAAAAAED99Pf357Zly5aFaxvkPiYAAAAjUPRZr0hfX98oTcJ4MTQ0FPaZM2eWOv7hw4dzW0tLS7h2eHg47NOmTUua6U0HDhzIba2treHaotmWL1+eNNObtm7dmtuKZtu+fXvYd+/eHfaNGzeGHeDZZ5/NbVu2bAnX/uAHPwj7P/7xj7C3t7eH/XOf+1zYFyxYkNtOOOGEcC0AAACMpiNHjoT9F7/4RdiLPoM//vjjYb/oooty26233hquXbFiRdjPOOOMsAMAAAAAAACTXmdBj//HiP/QXHIQAAAAAAAAAAAAAAAAAAAAAAAAAAAAmJRs8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAJbPALAAAAAAAAAAAAAAAAAAAAAAAAAAAACWzwC/A/7Nzfb9V3/cDx9zmUAh3Ir7Ix6GBhDEmYHXMRxo/NLVhKlSV6wUVtdmWiF3qjl/4HxhgvvDMm3tjaZP6Ic4FSWLYowoIw+TE1QlkmjPGjMAcrUOjo+d5/3ef16d6Hrkf6eNw+83p/3pCOnnPavQAAAAAAAAAAAAAAAAAAAAAAAAAAIIMFvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDBgl8AAAAAAAAAAAAAAAAAAAAAAAAAAADIUKnValN9h5RSaohLAAAAAACTp6+vr7D19PSEsw3yOSYAAACfQtl7vTK9vb336Cb8r6hUKlP27LLPHhr5bmUuX74c9j/84Q9h//a3v5397F//+tdh7+rqCvv8+fOznw1QZmxsLOy///3vw/7zn/887K+//nrYo3/jvv71r4ezu3btCvu2bdvC3tzcHHYAAAAaz/Xr18P+yiuvhP3ll18ubHv37g1nq9Vq2Mvex37nO98J+5e//OWwAwAAAAAAAEyi7pLeP9GD4p+sAgAAAAAAAAAAAAAAAAAAAAAAAAAAAJ/Igl8AAAAAAAAAAAAAAAAAAAAAAAAAAADIYMEvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZLDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAADJY8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAZLPgFAAAAAAAAAAAAAAAAAAAAAAAAAACADBb8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQIZKrVab6juklFJDXAIAAAAAmDx9fX2FraenJ5xtkM8xAQAA+BTK3utdunQp7D/+8Y/Dvn79+rBXKpWwAwBMpgsXLoT9t7/9bWF7+eWXw9kDBw6Eff78+WF/8cUXw97V1VXYtm3bFs4uWbIk7AAAAPerd955J+yDg4Nh3717d13z1Wo17Dt27Chsu3btCmd37twZ9nnz5oUdAAAAAAAAoIF1l/T+iR4U/9QWAAAAAAAAAAAAAAAAAAAAAAAAAAAA+E
QW/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAGC34BAAAAAAAAAAAAAAAAAAAAAAAAAAAggwW/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMGCXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMhgwS8AAAAAAAAAAAAAAAAAAAAAAAAAAABksOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAMjRN9QUAAAAAAAAAAJh+3n777bB/8YtfDPvy5cvD3tXVldVSSmn79u1hnzt3btgBAB5++OGwf+9738tqKaV04cKFsP/ud78L+6uvvhr2b33rW4VtdHQ0nC17DdfR0RH2zs7OsD/zzDNhnzVrVtgBAID727Vr18L+xhtvhH1wcDC7Dw0NhbOtra1h37ZtW9h/+ctfhn3nzp1hnzdvXtgBAAAAAAAAqE91qi8AAAAAAAAAAAAAAAAAAAAAAAAAAAAA/4ss+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAMFvwCAAAAAAAAAAAAAAAAAAAAAAAAAABABgt+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIMFvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDBgl8AAAAAAAAAAAAAAAAAAAAAAAAAAADIYMEvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZKjUarWpvkNKKTXEJQAAAACAydPX11fYenp6wtkG+RwTAACAT6HsvV6ZH/zgB2HfvXt3dj98+HA429TUFPatW7eGfefOnWHv6uoK+9q1a8MOADCZRkdHC9uBAwfC2cHBwbDv27cv7MePHw97c3Nz2J9++unCtmnTpnC2rD/zzDNhX758edgBAGC6OHXqVNgPHToU9oMHD2a1lFL6xz/+Efayz37LXvd3dnYWto6OjnA2er+SUkrVajXsAAAAAAAAAEyK7pLeP9GD/NQXAAAAAAAAAAAAAAAAAAAAAAAAAAAAMljwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABks+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAMFvwCAAAAAAAAAAAAAAAAAAAAAAAAAABABgt+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIMFvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDBgl8AAAAAAAAAAAAAAAAAAAAAAAAAAADIUKnValN9h5RSaohLAAAAAACTp6+vr7D19PSEsw3yOSYAAACfQtl7vTK9vb336Cb/bXh4OOyDg4Nh/+Mf/xj2ffv2hf2DDz4I+2OPPVbYurq6wtmvfe1rYX/++efDPnv27LADAEymixcvhv3Pf/5z2A8ePFjYDh06FM6+9dZbYR8bGwv7ihUrwr5hw4bC9uSTT4az7e3tdfVHH3007AAANJ6PP/447KdPnw778ePHs/uxY8fC2aNHj4a97PPXefPmhX3jxo2FbfPmzeHspk2bwr5169awz507N+wAAAAAAAAA3He6S3r/RA+q1nkRAAAAAAAAAAAAAAAAAAAAAAAAAAAAmJYs+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAMFvwCAAAAAAAAAAAAAAAAAAAAAAAAAABABgt+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIMFvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDBgl8AAAAAAAAAAAAAAAAAAAAAAAAAAADIUKnValN9h5RSaohLAAAAAACTp6+vr7D19PSEsw3yOSYAAACfQtl7vTK9vb336Cafvbt374b90KFDYd+9e3dh27NnTzh77NixsLe0tIT9hRdeCPtXv/rVrJZSSo8++mjYAQCm0q1bt8J+5MiRsB88eDDsb731VmErew135syZsJe9/lywYEHYn3zyycL2xBNPhLNr164N++OPP15XX7lyZdhnzJgRdgBgert9+3bYh4aGwn769Omwnzp1qrD961//CmdPnDgR9rfffjvso6OjYW9ubg579Dqvvb09nP3Sl74U9i1btmQ/OyWv8QAAAAAAAAD4THWX9P6JHlSt8yIAAAAAAAAAAAAAAAAAAAAAAAAAAAAwLVnwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABks+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAMFvwCAAAAAAAAAAAAAAAAAAAAAAAAAABABgt+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIMFvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDBgl8AAAAAAAAAAAAAAAAAAAAAAAAAAADIUKnValN9h5RSaohLAAAAAACTp6+vr7D19PSEsw3yOSYAAACfQtl7vTK9vb336CbTy3vvvRf2gYGBsL/66qth379/f2G7ceNGOLt27dqwd3V1hb2zszPszz77bNhbWlrCDgDQqG7evBn2kydPhv3EiRNhP3bsWGH7+9//Hs7+85//DPvly5fDXqa5uTnsq1atKmxr1qwJZ1evXh32Rx55JOxtbW1hX758eWFbsWJFOLt06dKwz5gxI+wA8P/dvn27sJ0/fz6cLev//ve/w/7++++H/ezZs2E/depUYTt9+nRdZ4+Pj4e9Wq2GPXq9UPZZ2JNPPllXb29vD3vZ85uamsIOAAAAAAAAANNEd0nvn+hB8W8ZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJ/Igl8AAAAAAAAAAAAAAAAAAAAAAAAAAADIYMEvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZLDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAADJY8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAZLPgFAAAAAAAAAAAAAAAAAAAAAAAAAACADBb8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQIZKrVab6juklFJDXAIAAAAAmDx9fX2FraenJ5xtkM8xAQAA+BTK3uuV6e3tvUc34V66c+dOYTtw4EA4OzAwUFc/efJk2GfPnh325557rrDt2LEjnO3q6gr72rVrww4AMF1dv3497KdOnQr70NBQ9nzZ2WfOnAn72bNnw37x4sWwj4+Phz3S1NQU9oceeijsK1euDHtra2t2L5tdsmRJXc9evHhxdi87+4EHHgh7S0tL2BcsWBD2SqUSdpguxsbGwj4yMhL26HvHjRs3wtmrV6+G/cqVK3X14eHh7Pl671b2fef8+fNhv3TpUtjr0dzcHPbly5eHfcWKFWF//PHHs9pn0WfNmhV2AAAAAAAAAKDhdZf0/okeVK3zIgAAAAAAAAAAAAAAAAAAAAAAAAAAADAtWfALAAAAAAAAAAAAAAAAAAAAAAAAAAAAGSz4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgAwW/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAGC34BAAAAAAAAAAAAAAAAAAAAAAAAAAAggwW/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMGCXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMhQqdVqU32HlFJqiEsAAAAAAJOnr6+vsPX09ISzu3btutfXAQAAYJK9/PLLYf/mN78Z9t7e3nt51brmlQAAIABJREFUHe4D7733XtgHBgay+/79+8PZa9euh
f3RRx8N+44dO7L7tm3bwtm5c+eGHQCAyfHxxx+H/cKFC4Xt3Llz4WzZa9/z58+H/ezZs2EfHh4O+9WrV7Nn6zk7pZRu3LgR9kY2Z86crJZSSgsWLAh7S0tL2GfNmhX2MgsXLqxrvh5l72lmzpw5ac++detW2EdHRyft2Xfv3g379evX6zr/5s2b2b3s2SMjI2EfGxsLeyObP39+2B988MGwL168uLC1trZmz6aU0sMPPxz2ZcuWhX3lypXZs21tbWFfunRp2AEAAAAAAAAAGlh3Se+f6EHVOi8CAAAAAAAAAAAAAAAAAAAAAAAAAAAA05IFvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDBgl8AAAAAAAAAAAAAAAAAAAAAAAAAAADIYMEvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZLDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAADJY8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAZLPgFAAAAAAAAAAAAAAAAAAAAAAAAAACADJVarTbVd0gppYa4BAAAAAAweS5evFjYvv/974ezd+/evdfXAQAAYIq99NJLYX/xxRc/o5tA+WcPhw8fDvuePXvCPjAwEPajR48WtqampnB269atYd++fXtdff369YWtUqmEswAA8GndunUr7FevXs1qKaU0MjJS17P/85//hP3mzZvZZ1+7di3sN27cCPudO3fCXvae5/r162GfTGV/9vHx8Ul7dnNzc9gfeOCBSXt2mYULF9Y1P2fOnLC3tLQUtvnz54ezZX8v0dkppTRv3rzsXvbs1tbWsC9evDjsM2fODDsAAAAAAAAAAPed7pLeP9GDqnVeBAAAAAAAAAAAAAAAAAAAAAAAAAAAAKYlC34BAAAAAAAAAAAAAAAAAAAAAAAAAAAggwW/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMGCXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMhgwS8AAAAAAAAAAAAAAAAAAAAAAAAAAABksOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAMljwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABkqtVptqu+QUkoNcQkAAAAAAAAAAIDP2vDwcGEbHBwMZ/fs2RP2svno2SmltHTp0sK2Y8eOcLasd3R0hH3RokVhBwAAAAAAAAAAAAAAqEN3Se+f6EHVOi8CAAAAAAAAAAAAAAAAAAAAAAAAAAAA05IFvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDBgl8AAAAAAAAAAAAAAAAAAAAAAAAAAADIYMEvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZLDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAADJY8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAZKrVabarvkFJKDXEJAAAAAAAAAACA+8n4+HjYjx49GvaBgYHCNjg4GM6++eabYS/73bUNGzaEvaurK+w7duwobE8//XQ4W61Www4AAAAAAAAAAAAAAPzP6y7p/RM9yP+FAAAAAAAAAAAAAAAAAAAAAAAAAAAAABks+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAMFvwCAAAAAAAAAAAAAAAAAAAAAAAAAABABgt+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIMFvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDBgl8AAAAAAAAAAAAAAAAAAAAAAAAAAADIYMEvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZKjUarWpvkNKKTXEJQAAAAAAAAAAALg3rl27Fvb9+/eHfWBgoK7+3nvvFbbW1tZwdtu2bWHv7OwMe0dHR9jb2trCDgAAAAAAAAAAAAAATLrukt4/0YOqdV4EAAAAAAAAAAAAAAAAAAAAAAAAAAAApiULfgEAAAAAAAAAAAAAAAAAAAAAAAAAACCDBb8AAAAAAAAAAAAAAAAAAAAAAAAAAACQwYJfAAAAAAAAAAAAAAAAAAAAAAAAAAAAyGDBLwAAAAAAAAAAAAAAAAAAAAAAAAAAAGSw4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAAyWPALAAAAAAAAAAAAAAAAAAAAAAAAAAAAGSq1Wm2q75BSSg1xCQAAAAAAAAAAAO4PJ0+eLGwDAwPh7P79+8P+pz/9Keyjo6NhX7duXWHbvn17OFvWn3vuubC3tLSEHQCAPB999FFhu3XrVjg7MjIS9mvXroV9fHy8rvPHxsbCXo+yu5X92SZTtVoN+/z58z+jm/y32bNnh33OnDlhb25uDvsDDzxQ2Mr+3GXvKcruBgAAAAAAAAAADaS7pPdP9KD4t5EAAAAAAAAAAAAAAAAAAAAAAAAAAACAT2TBLwAAAAAAAAAAAAAAAAAAAAAAAAAAAGSw4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAAyWPALAAAAAAAAAAAAAAAAAAAAAAAAAAAAGSz4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgAwW/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAGC34BAAAAAAAAAAAAAAAAAAAAAAAAAAAgQ6VWq031HVJKqSEuAQAAAAAAAAAAAGVu3rwZ9gMHDoR9YGCgsO3fvz+cPXnyZNhnz54d9q1bt4Z9+/bt2b29vT2crVQqYSfP7du3w172NbFx48awv/LKK4XtwQcfDGcByHPnzp2wX7hwIeznzp0L++XLl8N+5cqVwjY8PBzOXr16ta4ePbtsvuzsjz76KOxlr/HK5oGJWbhwYdhbWloK29y5c8PZ1tbWsC9evLiuXnZ+9Pq43mcvXbo07G1tbdnz1Wo1nAUAAAAAAAAAuI91l/T+iR7kNzAAAAAAAAAAAAAAAAAAAAAAAAAAAAAggwW/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMGCXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMhgwS8AAAAAAAAAAAAAAAAAAAAAAAAAAABksOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAMljwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABks+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAMlVqtNtV3SCmlhrgEAAAAAAAAAAAANLLz58+Hfd++fWHfu3dv2F977bWwDw8PF7alS5eGsx0dHWHv7OwM+1e+8pWwP/TQQ2G/X73xxhthf+GFF+o6f9GiRYXtF7/4RTj7jW98o65nA4yOjoZ9aGgoq6WU0pkzZ8J+9uzZsJ87dy7s0ffsstmLFy+GfbL/H4AFCxYUtiVLloSzixcvDntra+ukzZfNzp07N+wtLS1h/9znPpd9/pw5c8LZefPm1fXsGTNmhL3s+bNnzw77ZIq+3lJKqVKpTNqzx8bGwj4yMjJpzy5T9uyyu9++fTvsN2/eLGwffvhh9mxKKd26dSvsZeffuHGjsJX9vVy9ejXsV65cmdT56P1S2Wz0574XZs6cWdiWLVsWzj7yyCNhb2trq6uvWLEi7GvWrClsq1evDmdXrlwZ9qamprADAAAAAAAAAPe97pLeP9GDqnVeBAAAAAAAAAAAAAAAAAAAAAAA
AAAAAKYlC34BAAAAAAAAAAAAAAAAAAAAAAAAAAAggwW/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMGCXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMhgwS8AAAAAAAAAAAAAAAAAAAAAAAAAAABksOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAMlRqtdpU3yGllBriEgAAAAAAAAAAADCdjY+Ph/2tt94qbPv27Qtn9+7dG/ZDhw6FfWxsLOzr168vbB0dHeFsZ2dn2Lds2RL2WbNmhX0y/fCHPwz7T37yk7DfuXMn7NVqtbCVfb289NJLYf/Zz34W9vnz54cdmJgrV66E/fjx44XtxIkT4eypU6fCfvr06bAPDQ2F/dy5c2GP/h2K/v1KKaW2trawP/LII3X16Px6ZifSy85fsmRJ2JuamsIOwL03Ojoa9kuXLoW97Hvm2bNnC9v7778/aWdP5Px333037JcvXw57ZObMmWFftWpV2FevXh32NWvWFLbPf/7z4Wz0HjallNatWxf2uXPnhh0AAAAAAAAAmJDukt4/0YPi31oEAAAAAAAAAAAAAAAAAAAAAAAAAAAAPpEFvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDBgl8AAAAAAAAAAAAAAAAAAAAAAAAAAADIYMEvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZLDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAADJY8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAZLPgFAAAAAAAAAAAAAAAAAAAAAAAAAACADJVarTbVd0gppYa4BAAAAAAAAAAAADA1RkZGwv7666+HfXBwMKullNKpU6fC3tLSEvbnn38+7B0dHWHfsWNHYVu7dm04+9RTT4X92LFjYZ9MM2fODPvixYvD/qtf/Srs27Zt+9R3gsnyzjvvhP3IkSOF7W9/+1s4e+LEibAfP3487OfPnw97pK2tLeyPP/74pPbVq1eHfc2aNYXtscceC2dnzZoVdgCgMVy/fr2wDQ0NhbNl7/XK5k+fPp19ftmzP/jgg7BXq9Wwl71Oam9vD/v69euzWkopbdiwIexLliwJOwAAAAAAAAA0kO6S3j/Rg+Kf9AMAAAAAAAAAAAAAAAAAAAAAAAAAAACfyIJfAAAAAAAAAAAAAAAAAAAAAAAAAAAAyGDBLwAAAAAAAAAAAAAAAAAAAAAAAAAAAGSw4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAAyWPALAAAAAAAAAAAAAAAAAAAAAAAAAAAAGSz4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgAwW/AIAAAAAAAAAAAAAAAAAAAAAAAAAAECGSq1Wm+o7pJRSQ1wCAAAAAAAAAAAAmH7efffdsA8ODtbVX3vttbB/+OGHhW3ZsmXh7IULF8LeIL8n+olmzJgR9vHx8bB/97vfLWw/+tGPwtmWlpaw05hu3boV9iNHjhS2gwcPhrNvvvlmXf3ixYthb25uLmzr1q0LZ9vb28O+fv36sH/hC18I+1NPPVXYFi1aFM4CAJDn7NmzYT9x4kTYjx8/HvZjx45lnz80NBTOlr1XW7VqVdg3b94c9k2bNhW2LVu2hLNPPPFE2MvehwIAAAAAAAAw7XSX9P6JHlSt8yIAAAAAAAAAAAAAAAAAAAAAAAAAAAAwLVnwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABks+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAMFvwCAAAAAAAAAAAAAAAAAAAAAAAAAABABgt+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIMFvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDBgl8AAAAAAAAAAAAAAAAAAAAAAAAAAADIUKnValN9h5RSaohLAAAAAAAAAAAAANxrd+/eDfvhw4cL209/+tNw9je/+U3YG+T3RCdFU1NTYVuxYkU429fXF/aNGzdm3el+V/a1/Ne//jXs+/btC/vevXvDHv23klJKY2Njha3sa2LLli1h37RpU9g3b94c9vb29sI2c+bMcBYAAD5LIyMjYT9y5EjY//KXv4T90KFDYY9e9w8PD4ez8+bNC/uzzz4b9s7OzrBv3769sK1duzacBQAAAAAAAKAhdZf0/okeVK3zIgAAAAAAAAAAAAAAAAAAAAAAAAAAADAtWfALAAAAAAAAAAAAAADwf+zcb2hdd/3A8XOzbArSVjsNUrDrEzNlg0wY2in4INmo/9JO2KTXIUyXxA7RbTTq5hKppLoxOxUVGqfgpDLv2rEHSXVbaSKowyH+aSh22j2QFt1mKEocyJy1xwc/fvzmz53PTb7n3pyb3Nfr6ZvvuZ97TpI23d0HAAAAAAAAAAAAEljwCwAAAAAAAAAAAAAAAAAAAAAAAAAAAAks+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAEFvwCAAAAAAAAAAAAAAAAAAAAAAAAAABAAgt+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIEFvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJCglud51TNkWZZ1xBAAAAAAAAAAAAAAnWRsbCzsDz74YNj/+c9/tnCataO3tzfsFy5cCPudd94Z9n379oX94osvDns7Pfvss2H/0Y9+FPZjx44Vtrm5ufDsX/7yl7C/5S1vCfu1114b9sHBwbC/4x3vKGxbtmwJzwIAAJ3v9OnTYf/5z38e9vn5+bBHvw9lWZY9//zzhe2yyy4Lz1533XVh37FjR6m+YcOGsAMAAAAAAADwiupNemO5F+opOQgAAAAAAAAAAAAAAAAAAAAAAAAAAAB0JQt+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIEFvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDAgl8AAAAAAAAAAAAAAAAAAAAAAAAAAABIYMEvAAAAAAAAAAAAAAAAAAAAAAAAAAAAJLDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAABJY8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAJanmeVz1DlmVZRwwBAAAAAAAAAAAA0Em2bNkS9ueee26VJukuF110Udi3bt0a9kceeSTsfX19he3RRx8tde0nn3wy7Bs2bAj7tddeW9h27NgRnr3uuuvCvm3btrADAABUqdn/a3ny5MnC9sQTT4Rnjx07Fvaf/exnYW+m2e9rN9xwQ2HbuXNneHbjxo1JMwEAAAAAAACsAfUmvbHcC/WUHAQAAAAAAAAAAAAAAAAAAAAAAAAAAAC6kgW/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMCCXwAAAAAAAAAAAAAAAAAAAAAAAAAAAEhgwS8AAAAAAAAAAAAAAAAAAAAAAAAAAAAksOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAEljwCwAAAAAAAAAAAAAAAAAAAAAAAAAAAAlqeZ5XPUOWZVlHDAEAAAAAAAAAAACw2p555pnC1t/fv4qTdJaenp6w9/b2FrZarRaebfb52ZdeeinsZUXv7XWve1149vrrrw/7DTfcEPahoaGwX3zxxWEHAACg9f72t7+F/ejRo2E
/cuRI2B9//PHC1ux35B07doT9Ix/5SNh37doVdr+HAgAAAAAAABWqN+mN5V4o/uQzAAAAAAAAAAAAAAAAAAAAAAAAAAAA8Ios+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAEFvwCAAAAAAAAAAAAAAAAAAAAAAAAAABAAgt+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIEFvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDAgl8AAAAAAAAAAAAAAAAAAAAAAAAAAABIYMEvAAAAAAAAAAAAAAAAAAAAAAAAAAAAJKjleV71DFmWZR0xBAAAAAAAAAAAAMBq++EPf1jYPvCBD6ziJP/tNa95TWF77WtfG5699NJLw97X11eqb968ubD94x//CM+ePHky7L/5zW/CXqvVwr5r166wf+xjHytsg4OD4dne3t6wAwAAwP/3wgsvFLajR4+GZxuNRtijf9fIsix7wxveEPbod+TR0dHw7LZt28IOAAAAAAAA0ES9SY//g+nL9JQcBAAAAAAAAAAAAAAAAAAAAAAAAAAAALqSBb8AAAAAAAAAAAAAAAAAAAAAAAAAAACQwIJfAAAAAAAAAAAAAAAAAAAAAAAAAAAASGDBLwAAAAAAAAAAAAAAAAAAAAAAAAAAACSw4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAASWPALAAAAAAAAAAAAAAAAAAAAAAAAAAAACSz4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgAS1PM+rniHLsqwjhgAAAAAAAAAAAABYbefPny9sP/7xj8Ozb3rTm8K+efPmsF966aVhv+iii8JexoULF8L+yCOPhP2+++4rbL/61a/Cs1dddVXYP/7xj4f9pptuCvuGDRvCDgAAAOvFH//4x7B/5zvfSe7PPfdcePb9739/2O+6666wX3PNNWEHAAAAAAAA1r16k95Y7oV6Sg4CAAAAAAAAAAAAAAAAAAAAAAAAAAAAXcmCXwAAAAAAAAAAAAAAAAAAAAAAAAAAAEhgwS8AAAAAAAAAAAAAAAAAAAAAAAAAAAAksOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAEljwCwAAAAAAAAAAAAAAAAAAAAAAAAAAAAks+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAEFvwCAAAAAAAAAAAAAAAAAAAAAAAAAABAglqe51XPkGVZ1hFDAAAAAAAAAAAAALA8Fy5cCPuRI0fCvn///rCfOnUq7DfeeGNhu/3228Oz27dvDzsAAACwOv71r38VttnZ2fDsgQMHwv7kk0+GfceOHWH//Oc/H/Z3vvOdYQcAAAAAAAA6Xr1Jbyz3Qj0lBwEAAAAAAAAAAAAAAAAAAAAAAAAAAICuZMEvAAAAAAAAAAAAAAAAAAAAAAAAAAAAJLDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAABJY8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAJLPgFAAAAAAAAAAAAAAAAAAAAAAAAAACABBb8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQAILfgEAAAAAAAAAAAAAAAAAAAAAAAAAACBBLc/zqmfIsizriCEAAAAAAAAAAAAA+D9zc3OF7bbbbgvPPv3002HfvXt32CcmJsL+1re+NewAAABAd4v+XSPLsuwLX/hC2H/605+G/T3veU9h+/rXvx6effOb3xx2AAAAAAAAYFXUm/TGci/UU3IQAAAAAAAAAAAAAAAAAAAAAAAAAAAA6EoW/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEACC34BAAAAAAAAAAAAAAAAAAAAAAAAAAAggQW/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMCCXwAAAAAAAAAAAAAAAAAAAAAAAAAAAEhgwS8AAAAAAAAAAAAAAAAAAAAAAAAAAAAkqOV5XvUMWZZlHTEEAAAAAAAAAAAAwHpy7ty5sO/duzfshw4dKmzDw8Ph2fvuuy/sl19+edgBAAAAqjQ3Nxf26N9VTp8+HZ696667wv7Zz3427JdccknYAQAAAAAAgGWpN+mN5V6op+QgAAAAAAAAAAAAAAAAAAAAAAAAAAAA0JUs+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAEFvwCAAAAAAAAAAAAAAAAAAAAAAAAAABAAgt+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIEFvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDAgl8AAAAAAAAAAAAAAAAAAAAAAAAAAABIYMEvAAAAAAAAAAAAAAAAAAAAAAAAAAAAJKjleV71DFmWZR0xBAAAAAAAAAAAAMBaMjMzE/Zbbrkl7K961avC/o1vfKOwffCDHwzPAgAAAKxn58+fL2xf/epXw7P79u0L+7Zt28L+8MMPh/3KK68MOwAAAAAAAJBlWZbVm/TGci/UU3IQAAAAAAAAAAAAAAAAAAAAAAAAAAAA6EoW/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEACC34BAAAAAAAAAAAAAAAAAAAAAAAAAAAggQW/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMCCXwAAAAAAAAAAAAAAAAAAAAAAAAAAAEhgwS8AAAAAAAAAAAAAAAAAAAAAAAAAAAAksOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAEtTyPK96hizLso4YAgAAAAAAAAAAAGC1RZ/lvOeee8Kzk5OTYR8ZGQn7l7/85bBv3Lgx7LCaFhcXwz4/P1/YHnroofDszMxM0kyQ4qmnngr79773vbBPT0+Hfc+ePaX6wMBA2AEAgPL+8Ic/hP2WW24J+y9/+cuwf//73y9sO3fuDM8CAAAAAABAF6k36Y3lXqin5CAAAAAAAAAAAAAAAAAAAAAAAAAAAADQlSz4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgAQW/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEACC34BAAAAAAAAAAAAAAAAAAAAAAAAAAAggQW/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMCCXwAAAAAAAAAAAAAAAAAAAAAAAAAAAEhgwS8AAAAAAAAAAAAAAAAAAAAAAAAAAAAkqOV5XvUMWZZlHTEEAAAAAAAAAAAAQKu99NJLYf/oRz9a2I4cORKe/eY3vxn2sbGxsMNacuutt4Z9eno6+dod8plq1pH5+fnCNjQ0FJ49c+ZM2Ldu3Rr2RqMR9oceeijsMzMzYW+ns2fPhv2ee+4pbM1+BuzZsyfsN954Y9gHBwfDDqup2fd5vV5PvvYPfvCDsO/evTv52hRr5zPNsvi5ruVnWuV9y7LOvndLS0thP3z4cGEr+3vkWr5v0EnOnz8f9ttuuy3s0d+P77333vDspz/96bADAAAAAADAOtLswwXxhxNepqfkIAAAAAAAAAAAAAAAAAAAAAAAAAAAANCVLPgFAAAAAAAAAAAAAAAAAAAAAAAAAACABBb8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQAILfgEAAAAAAAAAAAAAAAAAAAAAAAAAACCBBb8AAAAAAAAAAAAAAAAAAAAAAAAAAACQwI
JfAAAAAAAAAAAAAAAAAAAAAAAAAAAASGDBLwAAAAAAAAAAAAAAAAAAAAAAAAAAACSo5Xle9QxZlmUdMQQAAAAAAAAAAADASjX7LOZNN90U9mPHjhW2Rx99NDz77ne/O+zQTWq1WvLZDvlMNevIrbfeWtimp6fDs+v563FpaSnsP/nJT8I+PDycfO3HHnss7PV6PewzMzNhj2aDlZqcnAz7/v37w/773/8++bXOgl0nAAAgAElEQVQvv/zysE9MTIR9amoq+bXXsyqfaZbFz7XTn2l076q8b1kW37t237fFxcWwj4yMhH12draV46zIgQMHwr53795VmgTWt29961uF7ROf+ER49itf+UrYP/WpTyXNBAAAAAAAAB0o/uBYljWWe6GekoMAAAAAAAAAAAAAAAAAAAAAAAAAAABAV7LgFwAAAAAAAAAAAAAAAAAAAAAAAAAAABJY8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAJLPgFAAAAAAAAAAAAAAAAAAAAAAAAAACABBb8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQAILfgEAAAAAAAAAAAAAAAAAAAAAAAAAACCBBb8AAAAAAAAAAAAAAAAAAAAAAAAAAACQoJbnedUzZFmWdcQQAAAAAAAAAAAAACt17733lurHjx8vbFdffXXSTNCNarVa8tkO+Uw164ivx1c2Ozsb9uHh4VWa5L+VeWZZtr6fG623sLAQ9quuuqrU9ct8PZb9Xjhx4kTYBwYGSl2/k0XPtcpnmmXlnmu7n2k7vx/W8327//77w/62t70t7IODg4Wt3T+jmvFnKrTfgw8+GPbR0dGwP/HEE2GPfsYAAAAAAABAh6k36Y3lXqin5CAAAAAAAAAAAAAAAAAAAAAAAAAAAADQlSz4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgAQW/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEACC34BAAAAAAAAAAAAAAAAAAAAAAAAAAAggQW/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMCCXwAAAAAAAAAAAAAAAAAAAAAAAAAAAEjQW/UAAAAAAAAAAAAAAJ3sd7/7Xdj37dsX9kajEfarr756pSMBa8zi4mJhO3ToUHh2fHw87MPDw2G//fbbwz44OBj2yNLSUtgPHz4c9rGxseTXzrIsm5iYKGyf/OQnw7N9fX1hr9VqSTO1QtnXzvO8RZO0XrOv1062Z8+eyl57cnKy1PmpqakWTUKr/OIXv6h6hLZp9t4GBgZWaZLVt16fa7ufqfuWZu/evaXOR9r9fTo3N9fW6wPN3XzzzWF/+umnwz4yMhL2U6dOhf3Vr3512AEAAAAAAGAt6ql6AAAAAAAAAAAAAAAAAAAAAAAAAAAAAFiLLPgFAAAAAAAAAAAAAAAAAAAAAAAAAACABBb8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQAILfgEAAAAAAAAAAAAAAAAAAAAAAAAAACCBBb8AAAAAAAAAAAAAAAAAAAAAAAAAAACQwIJfAAAAAAAAAAAAAAAAAAAAAAAAAAAASGDBLwAAAAAAAAAAAAAAAAAAAAAAAAAAACTorXoAAAAAAAAAAAAAgE52xx13hP1DH/pQ2K+//vpWjgN0oMXFxbCPjIwUtg9/+MPh2TzPwz4/Px/2oaGhsJ84caKwDQwMhGfvvPPOsE9PT4f9z3/+c9hffPHFsF922WWF7dy5c+HZgwcPhr3ZfW+mVqslny372ryypaWlUuff9773tWgSyLJf//rXVY/QNrOzs2EfHR1dpUlW33p9ru1+pu5bNaI/Fx977LHw7PDwcNgPHDgQ9v7+/rAD1fviF78Y9qNHj4b9/vvvD/vdd9+94pkAAAAAAACg0/VUPQAAAAAAAAAAAAAAAAAAAAAAAAAAAACsRRb8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQAILfgEAAAAAAAAAAAAAAAAAAAAAAAAAACCBBb8AAAAAAAAAAAAAAAAAAAAAAAAAAACQwIJfAAAAAAAAAAAAAAAAAAAAAAAAAAAASGDBLwAAAAAAAAAAAAAAAAAAAAAAAAAAACSw4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAAS1PI8r3qGLMuyjhgCAAAAAAAAAAAA6D6nTp0K+5VXXhn2kydPhv2KK65Y8UzAytVqteSzZT9T3Wg0wl6v19v22s00uy8TExOFbWpqKjw7OTkZ9nPnzoX94MGDYW+mymfeTCfP1q3m5+fD/rWvfS3shw4dCvumTZtWPBPdq8zPiOUo83Okk2frdO28d2Xvm9nSdPJsZbXzve3Zsyfsd9xxR9j7+/tbOQ7QBg888EDY77777rD/6U9/KmyXXHJJ0kwAAAAAAACQqPgDnv8j/oDoy/SUHAQAAAAAAAAAAAAAAAAAAAAAAAAAAAC6kgW/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMCCXwAAAAAAAAAAAAAAAAAAAAAAAAAAAEhgwS8AAAAAAAAAAAAAAAAAAAAAAAAAAAAksOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAEljwCwAAAAAAAAAAAAAAAAAAAAAAAAAAAAks+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAEtTzPq54hy7KsI4YAAAAAAAAAAAAAus+XvvSlsH/3u98N+zPPPNPKcYBEtVot+WzZz1Tv3Lkz7LOzs6WuX5V2f9b87NmzYT9y5EjYx8fHk1+73e+tyq9HXlmz79PPfe5zYd++fXsrx6HLlfkZsRxlfo508mydrp33rux9M1uaTp6trKWlpcJ2+PDh8OzY2Firx/kPJ06cKGwDAwNtfW1gec6dOxf2vr6+sB8/frywDQ4OJs0EAAAAAAAAiepNemO5F+opOQgAAAAAAAAAAAAAAAAAAAAAAAAAAAB0JQt+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIEFvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDAgl8AAAAAAAAAAAAAAAAAAAAAAAAAAABIYMEvAAAAAAAAAAAAAAAAAAAAAAAAAAAAJLDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAABJY8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAJeqseAAAAAAAAAAAAAKBKCwsLYR8YGFilSYC1anZ2NvlsnuctnKSzfPvb3w57s/t24MCBsI+Pj694JtavRqMR9uHh4bBv3769leNAqNnXY5k/V6rW7L2tZ9F790zTr79W712nfy9s2rSpsI2OjoZnN2zYEPZ6vZ400/+anJwsbDMzM6WuDbTG61//+rBv2bIl7NG/xQ0ODibNBAAAAAAAAFXrqXoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAWIss+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAEFvwCAAAAAAAAAAAAAAAAAAAAAAAAAABAAgt+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIEFvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDAgl8AAAAAAAAAAAAAAAAAAAAAAAAAAABI0Fv1AAAAAAAAAAAAAABV+
vvf/x72zZs3r9IkQDc6ffp02Pv7+1dpkpVrNBphHxsbC/uZM2fCvnXr1hXPxPq2sLBQ2H7729+GZ6emplo9DqugVqtVPUKhPM+Tzw4PD4d9dnY2+dpVa/be1rPovXum6ddfq/duPX8vvPe97616BKDDbdy4MewvvPDCKk0CAAAAAAAAq6en6gEAAAAAAAAAAAAAAAAAAAAAAAAAAABgLbLgFwAAAAAAAAAAAAAAAAAAAAAAAAAAABJY8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAJLPgFAAAAAAAAAAAAAAAAAAAAAAAAAACABBb8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQAILfgEAAAAAAAAAAAAAAAAAAAAAAAAAACCBBb8AAAAAAAAAAAAAAAAAAAAAAAAAAACQoLfqAQAAAAAAAAAAAACq1NfXF/Znn312lSYB1qoHHngg7GNjY4Xt0KFD4dnx8fGwb9q0KeyLi4thj15/79694dl6vR72ZrZu3VrqPOtPs6/X48ePF7apqalWj/MfFhYWwj49PV3YDh482Opxukae51WP0BZvf/vbqx6hbdr53ubn58M+NDQU9rm5ubAPDg6ueKaXW6/Ptd3vy31LMzk5GfYrrrgi7Lt3705+7WZ//yxrdHS0rdcH2u/5558P+xvf+MZVmgQAAAAAAABWT0/VAwAAAAAAAAAAAAAAAAAAAAAAAAAAAMBaZMEvAAAAAAAAAAAAAAAAAAAAAAAAAAAAJLDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAABJY8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAJLPgFAAAAAAAAAAAAAAAAAAAAAAAAAACABBb8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQAILfgEAAAAAAAAAAAAAAAAAAAAAAAAAACBBb9UDAAAAAAAAAAAAAFTpmmuuCftnPvOZsJ8/fz7svb0+rgmtsLi4WNm1+/r6wr5r166wj42NFbb9+/eHZ5v1ss6cOZN8dnh4OOyzs7NhP3v2bNhffPHFFc+0XGWf+cLCQivH+Q+nT58Oe39/f9teu92a3feRkZGwR19T4+PjSTO1yszMTNuuPTk5Wer81NRUiyahVQYGBsI+MTER9mZ/NjT7OVJGs9mavbcyhoaG2no+z/NS14/eu2eafv1ovirvW5bFs7X7vjX7u0iZv0Pu3r077E899VTytbOs+ddcs79jAtU7depU2P/617+G/V3velcrx/k3O3cTYnXd93H8d8ZBJ58rGy1p4iptKs0EIywXkklFhUqpOaDQzgrsCVoUposeNhUoVGQtKiGaMBdpUGoFilq2qLTowSnMR3yONM3M/N+Le3Ff1H2+Z/ydpnPGeb22b35/v2pFXHl9AAAAAAAAoC401PoAAAAAAAAAAAAAAAAAAAAAAAAAAAAA6I4M/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAGA78AAAAAAAAAAAAAAAAAAAAAAAAAAACQwcAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDDwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABkM/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAGA78AAAAAAAAAAAAAAAAAAAAAAAAAAACQobHWBwAAAAAAAAAAAADU0vTp08P+8MMPh/29994L+7Rp0874JuDvhg4dWrNvF0UR9ubm5rBv3769bHv11VfDt0899VTY77333rA/9thjYW9paQl75Mknnwz7ypUrw17p5z5v3rywz58/v2w7ePBg+PbEiRNhL5VKYe9Kra2tVb2v9NdrLS1cuDDslf6aqWfV/r7Bf6v0z9dRo0aFvZq/Ht96662wz5o1K/vb1froo4/CftNNN1X1vivV8vc0pfj3tZa/p50R/drV8tctpdr+2j333HNhHz58eNjb2tqyWkrxv4OlVPnvtUmTJoUdqH+vvfZa2K+99tqwV/rnNwAAAAAAAHRHDbU+AAAAAAAAAAAAAAAAAAAAAAAAAAAAALojA78AAAAAAAAAAAAAAAAAAAAAAAAAAACQwcAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDDwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABkM/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAGA78AAAAAAAAAAAAAAAAAAAAAAAAAAACQwcAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZCgVRVHrG1JKqS6OAAAAAAAAAAAAAPirBQsWhH3FihVh/+yzz8Leu3fvM74JAAAAALrCjh07wj5q1Kiwt7e3h/32228/45sAAAAAAACgi7RV6PF//PovDVUeAgAAAAAAAAAAAAAAAAAAAAAAAAAAAD2SgV8AAAAAAAAAAAAAAAAAAAAAAAAAAADIYOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAMhj4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgAwGfgEAAAAAAAAAAAAAAAAAAAAAAAAAACCDgV8AAAAAAAAAAAAAAAAAAAAAAAAAAADIYOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAMpSKoqj1DSmlVBdHAAAAAAAAAAAAAPzV8ePHwz5q1Kiw33bbbWF/8cUXz/gmAAAAAMhx8uTJsE+cODHsQ4YMCfvKlSvP+CYAAAAAAACokbYKvb2zH2qo8hAAAAAAAAAAAAAAAAAAAAAAAAAAAADokQz8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQAYDvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDBwC8AAAAAAAAAAAAAAAAAAAAAAAAAAABkMPALAAAAAAAAAAAAAAAAAAAAAAAAAAAAGQz8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQIbGWh8AAAAAAAAAAAAAUM/69u0b9jfffDPsN954Y9iHDx9etj3++OPhWwAAAAD4q1OnTpVtM2fODN/u2rUr7O+++27WTQAAAAAAAHA2a6j1AQAAAAAAAAAAAAAAAAAAAAAAAAAAANAdGfgFAAAAAAAAAAAAAAAAAAAAAAAAAACADAZ+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIOBXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMhg4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAAyGPgFAAAAAAAAAAAAAAAAAAAAAAAAAACADAZ+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIENjrQ8AAAAAAAAAAAAA6M5uuOGGsL/xxhthnz17dtm2b9++8O3zzz8f9sZGf1QUAAAA4Gxz+PDhsM+cObNs+/zzz8O3a9euDXtzc3PYAQAAAAAAoCdqqPUBAAAAAAAAAAAAAAAAAAAAAAAAAAAA0B0Z+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAMBn4BAAAAAAAAAAAAAAAAAAAAAAAAAAAgg4FfAAAAAAAAAAAAAAAAAAAAAAAAAAAAyGDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAADIY+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAMBn4BAAAAAAAAAAAAAAAAAAAAAAAAAAAgQ2Ot
DwAAAAAAAAAAAAA4m82aNSvsTU1NZducOXPCt99++23Y33777bCfe+65YQcAAADg31fpf/OZMmVK2P/888+ybd26deHb0aNHhx0AAAAAAAD4u4ZaHwAAAAAAAAAAAAAAAAAAAAAAAAAAAADdkYFfAAAAAAAAAAAAAAAAAAAAAAAAAAAAyGDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAADIY+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAMBn4BAAAAAAAAAAAAAAAAAAAAAAAAAAAgg4FfAAAAAAAAAAAAAAAAAAAAAAAAAAAAyGDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAADKUiqKo9Q0ppVQXRwAAAAAAAAAAAADUky1btoR96tSpYa/050RfeumlsN92221hBwAAAODvTp8+HfYXXngh7E888UTYx44dG/bly5eXbUOGDAnfAgAAAAAAQA/SVqG3d/ZDDVUeAgAAAAAAAAAAAAAAAAAAAAAAAAAAAD2SgV8AAAAAAAAAAAAAAAAAAAAAAAAAAADIYOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAMhj4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgAwGfgEAAAAAAAAAAAAAAAAAAAAAAAAAACCDgV8AAAAAAAAAAAAAAAAAAAAAAAAAAADIYOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAMpSKoqj1DSmlVBdHAAAAAAAAAAAAAHQnBw4cCPu8efPC/vbbb4d95syZZduiRYvCtxdeeGHYAQAAALqzL774omybO3du+Hbz5s1hf/TRR8O+YMGCsPfu3TvsAAAAAAAAQEoppbYKvb2zH2qo8hAAAAAAAAAAAAAAAAAAAAAAAAAAAADokQz8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQAYDvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDBwC8AAAAAAAAAAAAAAAAAAAAAAAAAAABkMPALAAAAAAAAAAAAAAAAAAAAAAAAAAAAGQz8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQIZSURS1viGllOriCAAAAAAAAAAAAICe5IMPPgj7/fffX7b9/PPP4dtHHnkk7A888EDYBw0aFHYAAACAamzbti3szzzzTNhff/31sm38+PHh2yVLloT9qquuCjsAAAAAAADwj2ir0Ns7+6GGKg8BAAAAAAAAAAAAAAAAAAAAAAAAAACAHsnALwAAAAAAAAAAAAAAAAAAAAAAAAAAAGQw8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDPwCAAAAAAAAAAAAAAAAAAAAAAAAAABABgO/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMHALwAAAAAAAAAAAAAAAAAAAAAAAAAAAGQw8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAZSkVR1PqGlFKqiyMAAAAAAAAAAAAA+D/Hjx8v25599tnw7eLFi6v6sR988MGwP/TQQ2XboEGDqvqxAQAAgPq3bdu2sD/99NNhX7p0adgvvvjisC9cuLBsmzNnTvi2VCqFHQAAAAAAAPhXtFXo7Z39UEOVhwAAAAAAAAAAAAAAAAAAAAAAAAAAAECPZOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAMhj4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgAwGfgEAAAAAAAAAAAAAAAAAAAAAAAAAACCDgV8AAAAAAAAAAAAAAAAAAAAAAAAAAADIYOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAMhj4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgAyloihqfUNKKdXFEQAAAAAAAAAAAAD8M3755ZewL168OOyLFi0K++nTp8u2e+65J3w7d+7csF955ZVhBwAAADqn0v+Hde3atWXbkiVLwrfLly8Pe0tLS9jnz58f9tmzZ4e9sbEx7AAAAAAAAEDda6vQ2zv7oYYqDwEAAAAAAAAAAAAAAAAAAAAAAAAAAIAeycAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDDwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABkM/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAGA78AAAAAAAAAAAAAAAAAAAAAAAAAAACQwcAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDDwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABlKRVHU+oaUUqqLIwAAAAAAAAAAAACoD0eOHAn7K6+8ktVSSqmjoyPsEydODPvcuXPDfuedd4a9T58+YQcAAIB6cfjw4bAvXbo07EuWLAn7d999V7ZNmDAhfHvfffeF/e677w57Y2Nj2AEAAAAAAICzXluF3t7ZDzVUeQgAAAAAAAAAAAAAAAAAAAAAAAAAAAD0SAZ+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIOBXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMhg4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAAyGPgFAAAAAAAAAAAAAAAAAAAAAAAAAACADAZ+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIOBXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMhQKoqi1jeklFJdHAEAAAAAAAAAAABA91fpz8d+/PHHYX/55ZfD/u6774a9f//+YZ82bVrZNn369PDt5MmTw967d++wAwAAcPY5cuRI2FesWFG2LVu2LHy7atWqsDc1NYV99uzZYb/33nvLttGjR4dvAQAAAAAAAKrUVqG3d/ZDDVUeAgAAAAAAAAAAAAAAAAAAAAAAAAAAAD2SgV8AAAAAAAAAAAAAAAAAAAAAAAAAAADIYOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAMhj4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgAwGfgEAAAAAAAAAAAAAAAAAAAAAAAAAACCDgV8AAAAAAAAAAAAAAAAAAAAAAAAAAADIYOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAMpSKoqj1DSmlVBdHAAAAAAAAAAAAAEAl+/btC/uyZcuy+/r168O3gwYNCvvUqVPDftddd4V90qRJZVvfvn3DtwAAAPz/Dhw4EPb3338/7O+8807YV69eHfaGhoay7dZbbw3fzpgxI+xTpkwJe79+/cIOAAAAAAAAUENtFXp7Zz9U/r/KAgAAAAAAAAAAAAAAAAAAAAAAAAAAAGUZ+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAMBn4BAAAAAAAAAAAAAAAAAAAAAAAAAAAgg4FfAAAAAAAAAAAAAAAAAAAAAAAAAAAAyGDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAADIY+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAMpaIoan1DSinVxREAAAAAAAAAAAAAUEt79+4N+/Lly8O+bNmysK9fvz7sjY2NZduECRPCtzfffHNVfezYsWEvlUphBwAAiJw8eTLsGzZsCPuaNWvKttWrV4dvv/jii7A3NTWF/ZZbbgn7jBkzwn7HHXeUbQMGDAjfAgAAAAAAAJzF2ir09s5+qKHKQwAAAAAAAAAAAAA
AAAAAAAAAAAAAAKBHMvALAAAAAAAAAAAAAAAAAAAAAAAAAAAAGQz8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQAYDvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDBwC8AAAAAAAAAAAAAAAAAAAAAAAAAAABkMPALAAAAAAAAAAAAAAAAAAAAAAAAAAAAGQz8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQIZSURS1viGllOriCAAAAAAAAAAAAAA4m/3yyy9h//DDD8u2NWvWhG9XrVoV9p9++inszc3NYZ84cWLYx48fn9VSSmncuHFh79OnT9gBAIDOOXr0aNg3bdpUtm3cuDF8W6lv2LAh7MeOHQv72LFjy7bJkyeHb2+55ZawT5gwIexNTU1hBwAAAAAAACBLW4Xe3tkPNVR5CAAAAAAAAAAAAAAAAAAAAAAAAAAAAPRIBn4BAAAAAAAAAAAAAAAAAAAAAAAAAAAgg4FfAAAAAAAAAAAAAAAAAAAAAJktQvsAAA8qSURBVAAAAAAAyGDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAADIY+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAMBn4BAAAAAAAAAAAAAAAAAAAAAAAAAAAgg4FfAAAAAAAAAAAAAAAAAAAAAAAAAAAAyFAqiqLWN6SUUl0cAQAAAAAAAAAAAAB0jY6OjrCvWrUq7OvWrQv7xo0by7bdu3eHb/v06RP2cePGhf36668P+3XXXVe2jRkzJnw7cuTIsPfq1SvsAAD0PCdOnAj7N998U7Z9+eWX4dtNmzZV1b/++uuwnz59umy74oorwreV/r180qRJYZ88eXLYhw4dGnYAAAAAAAAAup22Cr29sx9qqPIQAAAAAAAAAAAAAAAAAAAAAAAAAAAA6JEM/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAGA78AAAAAAAAAAAAAAAAAAAAAAAAAAACQwcAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDDwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABkM/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAGA78AAAAAAAAAAAAAAAAAAAAAAAAAAACQoVQURa1vSCmlujgCAAAAAAAAAAAAADj77NixI+wbNmwI+yeffBL2Tz/9NOybN28u206ePBm+7du3b9hHjRoV9rFjx4Z9zJgxZdvVV18dvm1tbQ37sGHDwg4AUM9Onz4d9u3bt5dt33//ffg2+vfDzvQtW7aEvdKPf+rUqbJtwIAB4dtx48aFfcKECWG//vrrs/t5550XvgUAAAAAAACAM9RWobd39kMNVR4CAAAAAAAAAAAAAAAAAAAAAAAAAAAAPZKBXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMhg4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAAyGPgFAAAAAAAAAAAAAAAAAAAAAAAAAACADAZ+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIOBXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMhg4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAAylIqiqPUNKaVUF0cAAAAAAAAAAAAAAPzT/vjjj7Ltm2++Cd9+9dVXYd+8eXOX9f3794dvKxk4cGDYR4wYEfbLL788+31ra2v49rLLLgt7S0tL2IcNGxb2Xr16hR0Azha///572Hfv3h32HTt2hL2joyOrdaZv3bo17D/++GPYK/3cI//5z3/Cfs0114R9zJgxVb2P+qWXXhq+LZVKYQcAAAAAAACAbqStQm/v7IcaqjwEAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiQDvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDBwC8AAAAAAAAAAAAAAAAAAAAAAAAAAABkMPALAAAAAAAAAAAAAAAAAAAAAAAAAAAAGQz8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQAYDvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJChVBRFrW9IKaW6OAIAAAAAAAAAAAAAgP+1f//+sH/77bdh/+GHH8K+devWqt53dHRktZRSOnHiRNgraWxsDPvQoUPLtksuuSR8O3z48Kp6S0tL2C+44IKwn3/++WXbkCFDwrfNzc3Z304ppf79+4cdoCv9/PPPYT9w4EDYDx06VLYdPHgw+21KKe3duzfsu3btCvvOnTuz3+7ZsyfslW6r1uDBg8u2ESNGhG8vv/zysI8cObLL3re2toZvBw4cGHYAAAAAAAAA4F/RVqG3d/ZDDVUeAgAAAAAAAAAAAAAAAAAAAAAAAAAAAD2SgV8AAAAAAAAAAAAAAAAAAAAAAAAAAADIYOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAMhj4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgAwGfgEAAAAAAAAAAAAAAAAAAAAAAAAAACCDgV8AAAAAAAAAAAAAAAAAAAAAAAAAAADIYOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAMpSKoqj1DSmlVBdHAAAAAAAAAAAAAADQ/VX6c/K7du0K+44dO8K+e/fu7O/v3Lkz+21KKe3Zsyfs27dvD/vBgwfD/vvvv4e9KzU1NYX9/PPPL9uGDBkSvu3bt2/Y+/XrF/bBgweH/Zxzzsn+sSt9u9L7Pn36hL2S6McvlUpVfbuSrv65RY4fPx72rvx74Y8//gj7r7/+WtX3jx07FvbffvutbDty5Ej49ujRo9nfTqnyzy368Sv9vCr98+3QoUNhP3XqVNi7UqW/Fy688MKwX3TRRWG/5JJLyrbhw4eHbyv1lpaWLn3f3NwcdgAAAAAAAACAKrRV6O2d/VBDlYcAAAAAAAAAAAAAAAAAAAAAAAAAAABAj2TgFwAAAAAAAAAAAAAAAAAAAAAAAAAAADIY+AUAAAAA4H/auYNUNYIoDKP15IEoNg7UBbisbCOrym6yEBUVUUHEfpMMk+rwP6UVz5lequoiPZQPAAAAAAAAAAAAAAAAAAAAgIDALwAAAAAAAAAAAAAAAAAAAAAAAAAAAAQEfgEAAAAAAAAAAAAAAAAAAAAAAAAAACAg8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAABgV8AAAAAAAAAAAAAAAAAAAAAAAAAAAAIfLRt2/cOpZTyFEsAAAAAAAAAAAAAAMA7OxwO/5yt1+vq2dVqVZ13nd9sNvH5rrPH47E6P51O1fl2u63Oz+dzNCullN1uV5137X65XKrz2+1Wne/3++r8kWrfWymlXK/Xh709HA6r8/F4/LC3B4NBdT6dTr91f9fuo9EofnsymcR3l1JK0zTxvOvt2WxWnc/n82/NF4tF/H7X3V2/GwAAAAAAAAAAD/GjY/7rfy+q/yMIAAAAAAAAAAAAAAAAAAAAAAAAAAAA+CuBXwAAAAAAAAAAAAAAAAAAAAAAAAAAAAgI/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEBA4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAACAr8AAAAAAAAAAAAAAAAAAAAAAAAAAAAQEPgFAAAAAAAAAAAAAAAAAAAAAAAAAA
CAgMAvAAAAAAAAAAAAAAAAAAAAAAAAAAAABD77XgAAAAAAAAAAAAAAAHgOTdNEs1JKWS6X914HAAAAAAAAAAAAnt6g7wUAAAAAAAAAAAAAAAAAAAAAAAAAAADgFQn8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQEDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAAAICvwAAAAAAAAAAAAAAAAAAAAAAAAAAABAQ+AUAAAAAAAAAAAAAAAAAAAAAAAAAAICAwC8AAAAAAAAAAAAAAAAAAAAAAAAAAAAEBH4BAAAAAAAAAAAAAAAAAAAAAAAAAAAgIPALAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYFfAAAAAAAAAAAAAAAAAAAAAAAAAAAACAj8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQEDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAAAICvwAAAAAAAAAAAAAAAAAAAAAAAAAAABAQ+AUAAAAAAAAAAAAAAAAAAAAAAAAAAICAwC8AAAAAAAAAAAAAAAAAAAAAAAAAAAAEBH4BAAAAAAAAAAAAAAAAAAAAAAAAAAAgIPALAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYFfAAAAAAAAAAAAAAAAAAAAAAAAAAAACAj8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQEDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAAAICvwAAAAAAAAAAAAAAAAAAAAAAAAAAABAQ+AUAAAAAAAAAAAAAAAAAAAAAAAAAAICAwC8AAAAAAAAAAAAAAAAAAAAAAAAAAAAEBH4BAAAAAAAAAAAAAAAAAAAAAAAAAAAgIPALAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYFfAAAAAAAAAAAAAAAAAAAAAAAAAAAACAj8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQEDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAAAICvwAAAAAAAAAAAAAAAAAAAAAAAAAAABAQ+AUAAAAAAAAAAAAAAAAAAAAAAAAAAICAwC8AAAAAAAAAAAAAAAAAAAAAAAAAAAAEPvte4I+ffS8AAAAAAAAAAAAAAAAAAAAAAAAAAADAW/h9r4sG97oIAAAAAAAAAAAAAAAAAAAAAAAAAAAA3onALwAAAAAAAAAAAAAAAAAAAAAAAAAAAAQEfgEAAAAAAAAAAAAAAAAAAAAAAAAAACAg8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAABgV8AAAAAAAAAAAAAAAAAAAAAAAAAAAAICPwCAAAAAAAAAAAAAAAAAAAAAAAAAABAQOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAAh9t2/a9AwAAAAAAAAAAAAAAAAAAAAAAAAAAALycQd8LAAAAAAAAAAAAAAAAAAAAAAAAAAAAwCsS+AUAAAAAAAAAAAAAAAAAAAAAAAAAAICAwC8AAAAAAAAAAAAAAAAAAAAAAAAAAAAEBH4BAAAAAAAAAAAAAAAAAAAAAAAAAAAgIPALAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYFfAAAAAAAAAAAAAAAAAAAAAAAAAAAACAj8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQEDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAAAICvwAAAAAAAAAAAAAAAAAAAAAAAAAAABAQ+AUAAAAAAAAAAAAAAAAAAAAAAAAAAICAwC8AAAAAAAAAAAAAAAAAAAAAAAAAAAAEBH4BAAAAAAAAAAAAAAAAAAAAAAAAAAAgIPALAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYFfAAAAAAAAAAAAAAAAAAAAAAAAAAAACAj8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQEDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAAAICvwAAAAAAAAAAAAAAAAAAAAAAAAAAABAQ+AUAAAAAAAAAAAAAAAAAAAAAAAAAAICAwC8AAAAAAAAAAAAAAAAAAAAAAAAAAAAEBH4BAAAAAAAAAAAAAAAAAAAAAAAAAAAgIPALAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYFfAAAAAAAAAAAAAAAAAAAAAAAAAAAACAj8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQOALQ8hYvvw7l/QAAAAASUVORK5CYII=\n", + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAFfgAAAbSCAYAAADbl1DoAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvIxREBQAAIABJREFUeJzs3FuI1VX/x/G1tzOecizTiZDxgHlEs+mmUunGtAkiK7DDEN1kUXcFXYRQURAEFgR1U9BNJDKhdGEWaehNmZITpULBOB3wnI6Sh9Qy5/e/+P/h//A8/r57nrXTPTqv1+2btfZSghyd+VSKokgAAAAAAAAAAAAAAAAAAAAAAAAAAADAf6fa6AcAAAAAAAAAAAAAAAAAAAAAAAAAAADAlcjALwAAAAAAAAAAAAAAAAAAAAAAAAAAAGQw8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDPwCAAAAAAAAAAAAAAAAAAAAAAAAAABABgO/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMHALwAAAAAAAAAAAAAAAAAAAAAAAAAAAGQw8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDPwCAAAAAAAAAAAAAAAAAAAAAAAAAABABgO/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkKGp0Q/4P0WjHwAAAAAAAAAAAAAAAAAAAAAAAAAAAMCQ0Fmjdw30omqdDwEAAAAAAAAAAAAAAAAAAAAAAAAAAIAhycAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDDwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABkM/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAGA78AAAAAAAAAAAAAAAAAAAAAAAAAAACQwcAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDDwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABkM/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAGA78AAAAAAAAAAAAAAAAAAAAAAAAAAACQwcAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDDwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABkM/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAGA78AAAAAAAAAAAAAAAAAAAAAAAAAAACQwcAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDDwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABkM/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAGA78AAAAAAAAAAAAAAAAAAAAAAAAAAACQwcAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDDwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABkM/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAGA78AAAAAAAAAAAAAAAAAAAAAAAAAAACQwcAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDDwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABkM/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAGA78AAAAAAAAAAAAAAAAAAAAAAAAAAACQwcAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDDwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABkM/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAGA78AAAAAAAAAAAAAAAAAAAAAAAAAAACQwcAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZGhq9AMAAAAAAAAAAAAAAACuZKdOnSptZ8+eDc+ePn067CdOnAh7f39/XfefP38+7PWo9bZav7ZLqVqthv3aa6+9TC/5TyNHjgz7qFGjwj58+PCwX3PNNaWt1q979OjRYa/1NgAAAAAAAAAAuBrF340EAAAAAAAAAAAAAAAAAAAAAAAAAAAAXJSBXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMhg4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAAyGPgFAAAAAAAAAAAAAAAAAAAAAAAAAACADAZ+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIOBXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMhg4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAAyNDX6AQAAAAAAAAAAAAAAwP/666+/wn7o0KGw79u3L+xHjhwJe19fX2k7evRoePbYsWN19eiza52vdfepU6fCfubMmbrOAwMzbty4sI8ePbq0jRkzJjw7YcKEsI8fP76uXuv+G2644ZJ99o033hj2tra27PPVajU8CwAAAAAAAABAbb4DAwAAAAAAAAAAAAAAAAAAAAAAAAAAADIY+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAMBn4BAAAAAAAAAAAAAAAAAAAAAAAAAAAgg4FfAAAAAAAAAAAAAAAAAAAAAAAAAAAAyGDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAADIY+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAMBn4BAAAAAAAAAAAAAAAAAAAAAAAAAAAgQ1OjHwAAAAAAAAAAAAAAAP/u3LlzYe/t7c1qKaX0008/hX3v3r1h37dvX9gPHDiQffbw4cNhL4oi7PW67rrrSltra2t4dvz48WGfMGFC2GvdP2fOnOzPHjNmTNhHjx4d9rFjx2bfP2rUqPBsS0tLXZ89bNiwsNf6/JEjR4b9Uor+e0sppUqlcsk++/z582E/ffr0JfvsWmp9dq23//nnn2E/c+ZMafv999+zz6aU0tmzZ8Ne6/4//vijtNX6fTl27FjY+/r6wn7o0KGw7969O+xHjx7N/uzo1/1PaG5uLm0TJ04Mz06aNCnsbW1tdfXJkyeHfebMmaVt+vTp4dkpU6aEvanJj9YBAAAAAAAAAP+MaqMfAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFciA78AAAAAAAAAAAAAAAAAAAAAAAAAAACQwcAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDDwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABkM/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAGA78AAAAAAAAAAAAAAAAAAAAAAAAAAACQwcAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZKgURdHoN6SU0qB4BAAAAAAAAAAAAADA1aSvry/sO3fuLG27du0Kz/b09IR9z549Ye/t7Q37vn37wt7f31/aqtVqeLatrS3skyZNqqtH99dzdiC91v2tra1hb2pqCjsA/7xz586F/bfffgt7rf9n7t27t7QdPHjwkt09kPt//fXXsB85ciTskebm5rBPmzYt7NOnTw/7zJkzS9usWbPCs+3t7WGfO3du2MeMGRN2AAAAAAAAAGBAOmv0roFeFH/XIgAAAAAAAAAAAAAAAAAAAAAAAAAAAHBRBn4BAAAAAAAAAAAAAAAAAAAAAAAAAAAgg4FfAAAAAAAAAAAAAAAAAAAAAAAAAAAAyGDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAADIY+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAMBn4BAAAAAAAAAAAAAAAAAAAAAAAAAAAgg4FfAA
AAAAAAAAAAAAAAAAAAAAAAAAAAyFApiqLRb0gppUHxCAAAAAAAAAAAAACA/9bPP/8c9u7u7tL23XffhWd37doV9p07d4b9wIEDYY+0tbWFfcaMGZe0T58+PewzZ84sbTfddFN4dsSIEWEHAAaHkydPlrbe3t7wbE9PT9hrnd+zZ0/2/bU++/jx42GvVqthr/XnpPnz54e9vb09q6WU0m233Rb21tbWsAMAAAAAAADAINJZo3cN9KL4X/oBAAAAAAAAAAAAAAAAAAAAAAAAAACAizLwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABkM/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAGA78AAAAAAAAAAAAAAAAAAAAAAAAAAACQwcAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDDwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABkqRVE0+g0ppTQoHgEAAAAAAAAAAAAADE5nz54Ne3d3d2n7+uuvw7Pbt2+vqx8+fDjsw4cPL21z584Nz86fPz/s7e3tYb/55pvDfuutt5a266+/PjwLAECevXv3hn3Xrl1h37lzZ9i///777Pt7e3vDs/39/WGfNm1a2BcuXBj2BQsWlLZFixaFZ+fNmxf2YcOGhR0AAAAAAACAIaezRu8a6EXVOh8CAAAAAAAAAAAAAAAAAAAAAAAAAAAAQ5KBXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMhg4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAAyGPgFAAAAAAAAAAAAAAAAAAAAAAAAAACADAZ+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIOBXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMhg4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAAyVIqiaPQbUkppUDwCAAAAAAAAAAAAALi4CxcuhH3Hjh1h/+KLL8K+cePGsH/zzTdhP3/+fGmbPHlyeHbRokVhX7BgQdgXLlwY9vnz55e25ubm8CwAAFxOp0+fDnt3d3fYt27dGvZt27aFPfpz/9GjR8OzLS0tYb/zzjvD3tHREfa77767tM2ePTs8CwAAAAAAAMCg1Fmjdw30omqdDwEAAAAAAAAAAAAAAAAAAAAAAAAAAIAhycAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDDwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABkM/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAGA78AAAAAAAAAAAAAAAAAAAAAAAAAAACQwcAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDDwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABkqRVE0+g0ppTQoHgEAAAAAAAAAAAAAV7KDBw+G/bPPPgv7pk2bStvmzZvDs8ePHw/77Nmzw75kyZKwL168OOy33357aZs4cWJ4FgAAGPx6enrCvm3btrBv2bIl7NHXQymldPjw4dI2ZcqU8OzSpUvD3tHRUVdvaWkJOwAAAAAAAAAX1Vmjdw30omqdDwEAAAAAAAAAAAAAAAAAAAAAAAAAAIAhycAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDDwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABkM/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAGA78AAAAAAAAAAAAAAAAAAAAAAAAAAACQwcAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDDwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABkqRVE0+g0ppTQoHgEAAAAAAAAAAAAAl9r+/ftL28cffxyeXbduXdi3bt0a9paWlrAvWbKktHV0dIRnly5dGvapU6eGHQAAoJFq/azl7t27S9vGjRvDs5s2bQr7V199FfZaan29tnz58tK2bNmy8OzYsWOz3gQAAAAAAABwBeis0bsGelG1zocAAAAAAAAAAAAAAAAAAAAAAAAAAADAkGTgFwAAAAAAAAAAAAAAAAAAAAAAAAAAADIY+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAMBn4BAAAAAAAAAAAAAAAAAAAAAAAAAAAgg4FfAAAAAAAAAAAAAAAAAAAAAAAAAAAAyGDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAADIY+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAMlaIoGv2GlFIaFI8AAAAAAAAAAAAAgBMnToR99erVYV+zZk3Yt2/fXtrGjRsXnn3ggQfCvnz58rDfddddYW9ubg47AAAA/7yTJ0+GfcOGDWFfu3Zt2D///PPSVutnTDs6OsL++OOPh/3+++8Pu69DAQAAAAAAgAbqrNG7BnpRtc6HAAAAAAAAAAAAAAAAAAAAAAAAAAAAwJBk4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAAyGPgFAAAAAAAAAAAAAAAAAAAAAAAAAACADAZ+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIOBXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMhg4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAAyVIqiaPQbUkppUDwCAAAAAAAAAAAAgCvfjh07wv7ee++FvaurK+yVSiXsjzzySNgffvjh0rZ48eLwbFNTU9gBAADg3506daq0bdiwITxb62vkTz/9NOytra1hf+KJJ0rbU089FZ6dOnVq2AEAAAAAAABq6KzR438w/RfVOh8CAAAAAAAAAAAAAAAAAAAAAAAAAAAAQ5KBXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMhg4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAAyGPgFAAAAAAAAAAAAAAAAAAAAAAAAAACADAZ+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIOBXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMhg4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAAyVIqiaPQbUkppUDwCAAAAAAAAAAAAgMujv78/7OvWrQv7qlWrStu3334bnm1vbw/7008/HfbHHnss7C0tLWEHAACAq8X+/fvD/v7772f3Q4cOhWfvvffesK9cuTLsCxYsCDsAAAAAAABw1eus0bsGelG1zocAAAAAAAAAAAAAAAAAAAAAAAAAAADAkGTgFwAAAAAAAAAAAAAAAAAAAAAAAAAAADIY+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAMBn4BAAAAAAAAAAAAAAAAAAAAAAAAAAAgg4FfAAAAAAAAAAAAAAAAAAAAAAAAAAAAyGDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAADIY+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAMlaIoGv2GlFIaFI8AAAAAAAAAAAAAYGD6+/vDvnbt2rC/9tprYf/hhx/C/tBDD5W25557Ljx7xx13hB0AAAC4PC5cuFDaPvnkk/Dsm2++GfatW7eGvaOjI+wvv/xy2BcuXBh2AAAAAAAAYNDrrNG7BnpRtc6HAAAAAAAAAAAAAAAAAAAAAAAAAAAAwJBk4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAAyGPgFAAAAAAAAAAAAAAAAAAAAAAAAA
ACADAZ+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIOBXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMhg4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAAyGPgFAAAAAAAAAAAAAAAAAAAAAAAAAACADJWiKBr9hpRSGhSPAAAAAAAAAAAAAOD/bd68ubQ9++yz4dkff/wx7I8++mjYX3zxxbDPmTMn7AAAAMDQFv29Rkopvfrqq2H/8ssvw37PPfeUtrfffjs8O2PGjLADAAAAAAAAl0Vnjd410IuqdT4EAAAAAAAAAAAAAAAAAAAAAAAAAAAAhiQDvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDBwC8AAAAAAAAAAAAAAAAAAAAAAAAAAABkMPALAAAAAAAAAAAAAAAAAAAAAAAAAAAAGQz8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQAYDvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDBwC8AAAAAAAAAAAAAAAAAAAAAAAAAAABkqBRF0eg3pJTSoHgEAAAAAAAAAAAAwNWkr68v7M8//3zYP/zww9J23333hWdXrVoV9lmzZoUdAAAAoJE2b94c9ujvVXp6esKzK1euDPsLL7wQ9uHDh4cdAAAAAAAAGJDOGr1roBdV63wIAAAAAAAAAAAAAAAAAAAAAAAAAAAADEkGfgEAAAAAAAAAAAAAAAAAAAAAAAAAACCDgV8AAAAAAAAAAAAAAAAAAAAAAAAAAADIYOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAMhj4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgAwGfgEAAAAAAAAAAAAAAAAAAAAAAAAAACCDgV8AAAAAAAAAAAAAAAAAAAAAAAAAAADIUCmKotFvSCmlQfEIAAAAAAAAAAAAgCvJ+vXrw75ixYqwjxgxIuzvvPNOaXvwwQfDswAAAABXs7///ru0vfXWW+HZV155JexTp04N+0cffRT2efPmhR0AAAAAAABIKaXUWaN3DfSiap0PAQAAAAAAAAAAAAAAAAAAAAAAAAAAgCHJwC8AAAAAAAAAAAAAAAAAAAAAAAAAAABkMPALAAAAAAAAAAAAAAAAAAAAAAAAAAAAGQz8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQAYDvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDBwC8AAAAAAAAAAAAAAAAAAAAAAAAAAABkqBRF0eg3pJTSoHgEAAAAAAAAAAAAwOUWfS/n66+/Hp596aWXwv7kk0+G/Y033gj72LFjww6X05EjR8K+ZcuW0rZmzZrw7Pr167PeBDm2b98e9g8++CDs7777btifeeaZuvott9wSdgAAoH6//PJL2FesWBH27u7usK9evbq0LVu2LDwLAAAAAAAAQ0hnjd410IuqdT4EAAAAAAAAAAAAAAAAAAAAAAAAAAAAhiQDvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDBwC8AAPwPO/cbW2V9NnD8PpV1mwgMUitiLHuhgIOtyMiC4Y9Zm8VNVySLIXZuyZYI0zmcbri4Sd0SmPHFXJbFDYQXupBg1WgMJcYZIGFGt5iMP2MBgWwLJWNYGpkwGEHceV482ROfufu66e9uOaft5/P2m999rnOfntKWkwsAAAAAAAAAAAAAAAAAAAAAAAAggQW/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMCCXwAAAAAAAAAAAAAAAAAAAAAAAAAAAEhgwS8AAAAAAAAAAAAAAAAAAAAAAAAAAAAksOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAElSq1WqtZ8iyLKuLIQAAAAAAAAAAAAAG27lz58L+9a9/Pbc999xz4dnHH3887MuXLw87DCd333132NetW5d87Tr5TDUjyPbt23Nbe3t7ePbw4cNhb2lpCXt3d3fYN23aFPbNmzeHvZZ6enpy24YNG0pde9myZWHv6OgodX0YTEXv887OzuRrP/3002G//fbbk69NvqF8TbMsfl2H82tay/uWZcP73pUR/ZyTZcW/x65du3Ywx4ER6/z582H/9re/Hfbod+RHH300PPvAAw+EHQAAAAAAAEaQog8XxB9OeJ+GkoMAAAAAAAAAAAAAAAAAAAAAAAAAAADAqGTBLwAAAAAAAAAAAAAAAAAAAAAAAAAAACSw4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAASWPALAAAAAAAAAAAAAAAAAAAAAAAAAAAACSz4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgAQW/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEACC34BAAAAAAAAAAAAAAAAAAAAAAAAAAAgQaVardZ6hizLsroYAgAAAAAAAAAAAGCgij6Leccdd4T9lVdeyW0vvPBCeHbRokVhh9GkUqkkn62Tz1Qzgtx99925bd26deHZ0fz12N3dHfZNmzblto0bN5Z67AcffDDsc+bMCfuyZctKPT68X1dXV9jXrFkT9gMHDiQ/9vTp08O+atWqsK9evTr5sUeyWr6mWRa/rvX+mkb3rpb3Lcvie1fr+9bX1xf27du357bOzs7BHuf/Gc0/68DF9MQTT+S2e+65Jzz705/+NOz33ntv0kwAAAAAAABQh4r+kzz+UNv7NJQcBAAAAAAAAAAAAAAAAAAAAAAAAAAAAEYlC34BAAAAAAAAAAAAAAAAAAAAAAAAAAAggQW/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMCCXwAAAAAAAAAAAAAAAAAAAAAAAAAAAEhgwS8AAAAAAAAAAAAAAAAAAAAAAAAAAAAksOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAEljwCwAAAAAAAAAAAAAAAAAAAAAAAAAAAAkq1Wq11jNkWZbVxRAAAAAAAAAAAAAAA/Xoo4+W6lu3bs1tc+fOTZoJRqNKpZJ8tk4+U80I4uvxv+vt7Q371KlTw/7b3/42t82bNy9ppn/bs2dP2GfPnh323bt357bW1takmRi5yn69FSnzfaTM968si98LWTay3w/R61rL1zTLyr2uQ/2aDuX7YSTftyJdXV3JZ9esWTOIk3zQSP5ZB4aLp556KuzLli0L+69//euwt7W1DXQkAAAAAAAAqJXOgt59oRdqKDkIAAAAAAAAAAAAAAAAAAAAAAAAAAAAjEoW/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEACC34BAAAAAAAAAAAAAAAAAAAAAAAAAAAggQW/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMCCXwAAAAAAAAAAAAAAAAAAAAAAAAAAAEhgwS8AAAAAAAAAAAAAAAAAAAAAAAAAAAAksOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAEoyp9QAAAAAAAAAAAAAA9ezNN98M+49+9KOwd3d3h33u3LkDHQkYZvr6+nLbxo0bw7MrV64Me0dHR9jvu+++sLe1tYU98s4774T92WefDfvy5cuTHzvLsmzVqlW5bcWKFeHZ5ubmsFcqlaSZBkPZx65Wq4M0yeB7/fXXS52fMmXKIE3yQVdeeWWp82+88UZu
a21tLXXtrq6uUudXr15d6jyDL/p6Ge6KnlvZ90M9G6mv61C/pu7b0CjzvX/NmjWDOAlQj772ta+Fff/+/WG/8847w75v376wf+QjHwk7AAAAAAAADEcNtR4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAhiMLfgEAAAAAAAAAAAAAAAAAAAAAAAAAACCBBb8AAAAAAAAAAAAAAAAAAAAAAAAAAACQwIJfAAAAAAAAAAAAAAAAAAAAAAAAAAAASGDBLwAAAAAAAAAAAAAAAAAAAAAAAAAAACSw4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAASjKn1AAAAAAAAAAAAAAD17P777w/70qVLw75kyZLBHAeoQ319fWG/8847c9uXv/zl8Gy1Wg379u3bw97e3h723bt357bW1tbw7IMPPhj2devWhf2tt94K+9mzZ8M+derU3Nbf3x+eXbulLdp/AAAgAElEQVR2bdiL7nuRSqWSfLbsY9ezHTt2lDrf0tIySJN8UHNzc6nzPT09uW3ZsmWlrs3Is3PnzlqPMGSi90KWjez3w0h9XYf6NXXfAOrPj3/847Bv2bIl7I899ljYH3rooQHPBAAAAAAAAPWuodYDAAAAAAAAAAAAAAAAAAAAAAAAAAAAwHBkwS8AAAAAAAAAAAAAAAAAAAAAAAAAAAAksOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAEljwCwAAAAAAAAAAAAAAAAAAAAAAAAAAAAks+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAEFvwCAAAAAAAAAAAAAAAAAAAAAAAAAABAAgt+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIEGlWq3WeoYsy7K6GAIAAAAAAAAAAAAYffbt2xf2WbNmhX3v3r1hnzlz5oBnAgauUqkkny37meru7u6wd3Z2DtljFym6L6tWrcptq1evDs92dXWFvb+/P+xr164Ne5FavuZF6nm2WipzX7KstvfGa8pgKvteKFLma66eZ6t3Q3nvyt43s6Wp59mGku8DQJH169eH/aGHHgr7X//619zW2NiYNBMAAAAAAAAkyv+A5/+KPyD6Pg0lBwEAAAAAAAAAAAAAAAAAAAAAAAAAAIBRyYJfAAAAAAAAAAAAAAAAAAAAAAAAAAAASGDBLwAAAAAAAAAAAAAAAAAAAAAAAAAAACSw4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAASWPALAAAAAAAAAAAAAAAAAAAAAAAAAAAACSz4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgAQW/AIAAAAAAAAAAAAAAAAAAAAAAAAAAECCSrVarfUMWZZldTEEAAAAAAAAAAAAMPo88sgjYX/yySfDfujQocEcB0hUqVSSz5b9TPXixYvD3tPTU+r6tTLUnzXv7e0N+3PPPRf2lStXJj/2UD+3Wn491rMy9yXLantvvKYMprLvhSJlvubqebZ6N5T3rux9M1uaep5tKPk+ABTp7+8Pe3Nzc9i3bt2a29ra2pJmAgAAAAAAgESdBb37Qi/UUHIQAAAAAAAAAAAAAAAAAAAAAAAAAAAAGJUs+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAEFvwCAAAAAAAAAAAAAAAAAAAAAAAAAABAAgt+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIEFvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDAgl8AAAAAAAAAAAAAAAAAAAAAAAAAAABIYMEvAAAAAAAAAAAAAAAAAAAAAAAAAAAAJBhT6wEAAAAAAAAAAAAAamnPnj1hb21tvUiTAMNVT09P8tlqtTqIk9SXDRs2hL3ovv3kJz8J+8qVKwc8E7XV0dER9jLvpVq76667aj0Cw8hIfi8UPbeRLHruXtP06w/Xezea3wvAyNfU1BT2KVOmhD36W1xbW1vSTAAAAAAAAFBrDbUeAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIYjC34BAAAAAAAAAAAAAAAAAAAAAAAAAAAggQW/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMCCXwAAAAAAAAAAAAAAAAAAAAAAAAAAAEhgwS8AAAAAAAAAAAAAAAAAAAAAAAAAAAAksOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAEljwCwAAAAAAAAAAAAAAAAAAAAAAAAAAAAnG1HoAAAAAAAAAAAAAgFo6c+ZM2CdNmnSRJgFGo4MHD4Z92rRpF2mSgevu7g778uXLw3748OGwt7S0DHgm6ltHR0fYe3p6wt7X15fbmpubk2b6t97e3lLn58yZU+o8/12lUqn1CLmq1Wry2bLvhXpW9NxGsui5e03Trz9c791ofi8AjB8/PuynTp26SJMAAAAAAADAxdNQ6wEAAAAAAAAAAAAAAAAAAAAAAAAAAABgOLLgFwAAAAAAAAAAAAAAAAAAAAAAAAAAABJY8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAJLPgFAAAAAAAAAAAAAAAAAAAAAAAAAACABBb8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQAILfgEAAAAAAAAAAAAAAAAAAAAAAAAAACCBBb8AAAAAAAAAAAAAAAAAAAAAAAAAAACQYEytBwAAAAAAAAAAAACopebm5rAfPXr0Ik0CDFfr168P+/Lly3Pbxo0bw7MrV64M+4QJE8Le19cX9ujxv/vd74ZnOzs7w16kpaWl1HmGn5tuuqnU+T//+c+5rejf8yJl/70v+9z476rVaq1HGBKf+cxnaj3CkBnK57Z9+/awt7e3h33btm1hb2trG/BM7zdSX9ehfl7uG8DIc+zYsbBPnjz5Ik0CAAAAAAAAF09DrQcAAAAAAAAAAAAAAAAAAAAAAAAAAACA4ciCXwAAAAAAAAAAAAAAAAAAAAAAAAAAAEhgwS8AAAAAAAAAAAAAAAAAAAAAAAAAAAAksOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAEljwCwAAAAAAAAAAAAAAAAAAAAAAAAAAAAks+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAEY2o9AAAAAAAAAAAAAEAt3XDDDWH/3ve+F/bz58+HfcwYH9eEwdDX11ezazc3N4f91ltvDfvy5ctz25o1a8KzRb2sw4cPJ5/t6OgIe09PT9h7e3vDfvbs2QHPdKHKvuZ79uwZzHH+n4MHD4Z92rRpQ/bYQ62lpSXs69evD/uvfvWr3HbdddclzXQh186y4tmKnlsZXV1dpc6vXr16kCZhsLS2toZ91apVYS/6t6Ho+0gZRbMVPbcy2tvbh/R8tVotdf3ouXtN068fzVfL+5Zl8WxDfd/KKvo5bCiN5J91YLTYt29f2E+cOBH2+fPnD+Y4AAAAjHBnzpwJe39/f3I/efJkqcc+ffp02It+Ry46Hz3+qVOnwrPvvfde2Iue+1Aqmr3oczhlRZ/jGTdu3JA+dpHx48fntksuuSQ8e9lll4V97NixpfrEiRNz26WXXlrq2tHzzrIsa2pqKtWL5gMAGCwNtR4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAhiMLfgEAAAAAAAA
AAAAAAAAAAAAAAAAAACCBBb8AAAAAAAAAAAAAAAAAAAAAAAAAAACQwIJfAAAAAAAAAAAAAAAAAAAAAAAAAAAASGDBLwAAAAAAAAAAAAAAAAAAAAAAAAAAACSw4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAASWPALAAAAAAAAAAAAAAAAAAAAAAAAAAAACcbUegAAAAAAAAAAAACAWrrtttvCfv/994d9y5YtYV+yZMmAZwI+6IorrqjZtavVatibm5vDfvjw4dy2YcOG8OyaNWvCftddd4X9+9//fthbWlrCHlm9enXYe3p6wl703FesWBH2VatW5bb+/v7w7NmzZ8NeqVTCPpSmT59e6nzR12s9W7ZsWdijr6mPfexj4dmOjo6w33fffWFva2sLOwymou+vM2fODHuZ7yNPP/102G+//fbka5e1bdu2sLe3t5c6P5Rq+ZpmWfy61vI1vRDRvavlfcuy+r53tfxZpkiZ12U4/5wDI8mTTz4Z9rlz54a96Ps3AADASPX3v/897EeOHAl79H+Ovb29pa599OjRsB8/fjy59/X1hWeL/l/vzJkzYa9nRf9/NXbs2LBfeumluW38+PFJM/3bxIkTS50vI3peWZZlH/7wh4f08aOvqaKv9aF24sSJ5LOnTp0K++nTp0v1ou9h9Sz6mmtqagrPFn0e5PLLLw970fWnTJmS266++urw7NSpU8Ne9HmQol70PQwA+KCGWg8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAw5EFvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDAgl8AAAAAAAAAAAAAAAAAAAAAAAAAAABIYMEvAAAAAAAAAAAAAAAAAAAAAAAAAAAAJLDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAABJY8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAJLPgFAAAAAAAAAAAAAAAAAAAAAAAAAACABJVqtVrrGbIsy+piCAAAAAAAAAAAAID/9PDDD4d98+bNYX/jjTfC3tjYOOCZAAAAAGAo9Pb2hn3mzJlh7+7uDvstt9wy4JkAAIDRo7+/P+wHDhwo1Q8ePJh89tChQ2E/cuRI2E+ePBn2MpqamsLe0tIS9ilTppS6ftSvuOKKIbt2lmXZ5Zdfnnx+3Lhx4dmxY8eW6jBanD59ulQ/depU2Iv+bSjTjx8/Hp596623huyxsyzLjh49mtuK/k5XdO2yir5HRv+2XHvtteHZadOmlerTp08P+4wZM3Jb0b8rAIxKnQU9/s+v92koOQgAAAAAAAAAAAAAAAAAAAAAAAAAAACMShb8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQAILfgEAAAAAAAAAAAAAAAAAAAAAAAAAACCBBb8AAAAAAAAAAAAAAAAAAAAAAAAAAACQwIJfAAAAAAAAAAAAAAAAAAAAAAAAAAAASGDBLwAAAAAAAAAAAAAAAAAAAAAAAAAAACSw4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAASVKrVaq1nyLIsq4shAAAAAAAAAAAAAP7TmTNnwj5z5syw33zzzWH/xS9+MeCZAAAAACDFuXPnwn7jjTeGvampKew9PT0DngkAABhc7777btj37t0b9t27d4d9586dyef3798fnn377bfDXmTcuHFhnzZtWlLLsiy75pprwj516tSwX3311aX6xz/+8dz20Y9+NDwLwMjzz3/+M+yHDx8O+5EjR8Le29ubfP7QoUPh2QMHDoT94MGDYT916lTYI5MmTQr7ddddF/bW1tawf/rTnw777Nmzc9usWbPCs42NjWEHIFlnQe++0As1lBwEAAAAAAAAAAAAAAAAAAAAAAAAAAAARiULfgEAAAAAAAAAAAAAAAAAAAAAAAAAACCBBb8AAAAAAAAAAAAAAAAAAAAAAAAAAACQwIJfAAAAAAAAAAAAAAAAAAAAAAAAAAAASGDBLwAAAAAAAAAAAAAAAAAAAAAAAAAAACSw4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAASWPALAAAAAAAAAAAAAAAAAAAAAAAAAAAACSrVarXWM2RZltXFEAAAAAAAAAAAAAAD9frrr4f9s5/9bNh/+MMf5rYf/OAHSTMBAAAAMHqdP38+t912223h2d///velenNzc9gBAGC0OHToUNhfe+215L5z587w7B//+Mewnzt3Luzjx48P++zZs8N+/fXX57YZM2aEZ6dNmxb26dOnh/2qq64KOwAw/B09ejTsBw4cSGpZlmVvvvlm2Hfv3h32Xbt2hf3kyZO5rbGxMTw7a9assEc/g2VZls2fPz/sCxYsCPu1114bdoBhrLOgd1/ohRpKDgIAAAAAAAAAAAAAAAAAAAAAAAAAAACjkgW/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMCCXwAAAAAAAAAAAAAAAAAAAAAAAAAAAEhgwS8AAAAAAAAAAAAAAAAAAAAAAAAAAAAksOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAEljwCwAAAAAAAAAAAAAAAAAAAAAAAAAAAAkq1Wq11jNkWZbVxRAAAAAAAAAAAAAAg627uzvsX/nKV3LbPffcE5597LHHwj5mzJiwAwAAADD8vP3222FfunRpbtu5c2d4dseOHWH/5Cc/GXYAALiYivbm7NmzJ+yvvvpqUruQfuzYsbCPGzcu7PPmzcttc+bMCc8W9euvvz7s11xzTdgrlUrYAQBGq6KfT//0pz/ltqK/3e7atSvsRed/97vfhf3kyZNhnzx5cm5bsGBBeHbhwoVhX7RoUdhbW1vD7udToKTOgh5/EPx9GkoOAgAAAAAAAAAAAAAAAAAAAAAAAAAAAKOSBb8AAAAAAAAAAAAAAAAAAAAAAAAAAACQwIJfAAAAAAAAAAAAAAAAAAAAAAAAAAAASGDBLwAAAAAAAAAAAAAAAAAAAAAAAAAAACSw4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAASWPALAAAAAAAAAAAAAAAAAAAAAAAAAAAACSz4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgASVarVa6xmyLMvqYggAAAAAAAAAAACAi+3FF1/MbV/96lfDszfccEPYn3nmmbBPnDgx7AAAAABcfPv37w/74sWLw/7ee+/lts2bN4dnZ82aFXYAABiod955J7dt3bo1PPvSSy+V6seOHQv75MmTc9vChQvDswsWLAh70flPfepTYb/kkkvCDgAAAxH93TjLsmzv3r1hf/XVV5PahfQyP7dnWZZ9/vOfz2233HJLePZzn/tc2CdMmBB2YEToLOjdF3qhhpKDAAAAAAAAAAAAAAAAAAAAAAAAAAAAwKhkwS8AAAAAAAAAAAAAAAAAAAAAAAAAAAAksOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAEljwCwAAAAAAAAAAAAAAAAAAAAAAAAAAAAks+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAEFvwCAAAAAAAAAAAAAAAAAAAAAAAAAABAAgt+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIEGlWq3WeoYsy7K6GAIAAA
7dq1CztAK5Z180B888F/KC1wIQAAAAAAAAAAAAAAAAAAAAAAAAAAALBHssEvAAAAAAAAAAAAAAAAAAAAAAAAAAAA5GCDXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMjBBr8AAAAAAAAAAAAAAAAAAAAAAAAAAACQgw1+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIAcb/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAONvgFAAAAAAAAAAAAAAAAAAAAAAAAAACAHEoaGxuLvYYkSZKiLeL9998P+/z588O+ffv2plwOAABAk2rXrl1qGzZsWDhbVlbW1MsBYA9XV1eX2mpqasLZFnIes9UpKSkp9hJCLfl5bc7Hrpj/7qzPgAsWLCjo+L/97W/DPmjQoNS2adOmcPaAAw4Ie21tbdinTZsW9j1V1uP++OOPh/2hhx4K+6c+9anUduqpp4aznTt3Djs7N2vWrLCPGTOmoOMX8hpW6GvrzJkzwz569OiCjt+SbdiwIbWVl5cXdOzrrrsu7B/96EfDftJJJ4W9R48eu7wmkmTcuHGprbnf05rz80IhnxWSpHk/L/is0DyyvutlmTNnThOthLYi63XgW9/6Vtj9t87u9IlPfCLsjz32WGo7/PDDw9lLL7007NXV1WE/+uijw87OzZs3L+xZ30lWrlwZ9sMOO2xXlwTsRCHnH5r73G30/X727Nnh7IQJE8I+dOjQsF9xxRVhz/o+Fsn6jHbPPfeEvdDzVdG5ja985SvhbNeuXcPe0q+zRFryNZjmVOhz1pIft+uvv76g+YkTJzbRSmgqzue3TdHzWsznNEkKe14LfU7XrVsX9r322ivsWe/ZhWir9wY0t0Ift0KvFwDNL+t+kAsvvDDsTz/9dNizzoECAADFt3HjxrA///zzYV+9enWuliRJsmbNmrC/8MILBfV33nkn7JHS0tKwZ93X2/3/sXP/sXXV9ePHT7shP6Vjg84wLKAyQlBHYiJD+SGbbIGsmw4hLWObP9joEjUxG8qPVWPaRCEjJqJubPMntpvMGFlFEg0z4w9W/IMwEg3uD3Qg6Mqm6wJsoON+/viEfP185bwOfZ/entv28fj3mfc5r3tvdzn33Mv73e8O+6xZs5LXn3POOeHaotlmzJgR9jPPPDO5n3XWWeHa008/PewA9XTkyJGwv/TSS2E/ePBgqX7o0KHcFv3WJMuy7Pnnny/VX3jhhbD/9a9/Dfvf//733PbGG2+Ea4ucdtppYX/f+94X9tmzZ9dl7dvpF154YdinT58edgAqEf8AP8u2v90DxZ8aAQAAAAAAAAAAAAAAAAAAAAAAAAAAgLdkg18AAAAAAAAAAAAAAAAAAAAAAAAAAABIYINfAAAAAAAAAAAAAAAAAAAAAAAAAAAASGCDXwAAAAAAAAAAAAAAAAAAAAAAAAAAAEhgg18AAAAAAAAAAAAAAAAAAAAAAAAAAABIYINfAAAAAAAAAAAAAAAAAAAAAAAAAAAASGCDXwAAAAAAAAAAAAAAAAAAAAAAAAAAAEgwteoBqjZ1avwULF26dIwmAQAAAAAYXbVaLexNTU2Vnp+x19PTE/aBgYFSx58/f37YN2/enNuefPLJcG17e3vYv/71r4d9Invuuedy2+OPPx6u3b17d9hXrlwZ9o0bN4adsVf0b2k8K3qPWrVq1RhNMvaeeOKJuh27t7e3bsfOsizbuXNnbit6b5/I9u3bF/arrrpqjCb5b/W8XihzrZBlrheALHvwwQfDfsMNN4zRJFDstNNOS1775z//OexF13Bf+9rXwn7JJZeEffny5WHv6OjIbWeffXa4tpG98cYbYf/KV74S9jvvvDPs55133khHAsaZoaGhsN9yyy257aabbgrXFt1v37VrV9iLPo899dRTuW3OnDnh2ttvvz3smzZtCvuBAwfCfuzYsbCfe+65ue3gwYPh2qL7m2W/5yjzPYzvWGDicz9/Ypqor2vZ17StrW00x2koRfe1G9nw8HBue+SRR8K1RffjN2zYEPbZs2eHHajeddddF/aFCxeGvehe2bZt20Y8EwAAMLamT58e9ssuu6xUr9Lf/va33LZ///5w7Ysvvhj2559/vlR/4YUXwh7dg3zooYfCtWW/l6und7zjHWGfMWNG2M8888ywn3LKKbntne98Z7j29NNPD/vJJ58c9lNPPTXs06ZNSz5+0bmLFD22KVOmlDp+5KSTTgp72cdWJPp7P3r0aF3Pffz48bAfOXIk+dhFsxf1w4cPh/2VV15JPnbR43r55ZeTz51lxb8POHToUG57/fXXw7X1Fv17mDlzZrj2nHPOCfu73/3usF9xxRWl1kfnnzVrVri26Puj8fxbRAAmt+aqBwAAAAAAAAAAAAAAAAAAAAAAAAAAAIDxyAa/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMAGvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDABr8AAAAAAAAAAAAAAAAAAAAAAAAAAACQwAa/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMAGvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDABr8AAAAAAAAAAAAAAAAAAAAAAAAAAACQoKlWq1U9Q5ZlWUMMAQAAAADUT39/f25btmxZuLZB7mNOOE1NTXU9/nh+3er53DTy87Jv376wr1u3LuwDAwOjOc7/sWfPnrDPnTu3bududNHf64YNG8K1a9euHe1xqFgjv7c38myNbs2aNblt06ZNYzjJ6Hr00UfDPm/evDGaZOxt2bIl7AsXLsxtbW1toz3OiJS5XqjntUKWuV5oREWf9Yr09fWN0iSMle7u7rD39vaWOv769evD3tPTU+r4MJoWLVoU9ocffniMJvlvRdfmU6ZMCfvx48dz2+WXXx6uXbFiRdiXLl0a9unTp4e9jEceeSTsS5YsCfuBAwfCfsYZZ4x4JmDkytx/KHtvYfv27WHv7Oys27mLFD0v0XVW0TVW0TXgwYMHw75x48awF6nyNS/SyLNNVGXvQXreGUuNfM+8kWdrdI38PXMjz1ZPQ0NDYZ85c2bY29vbw/7AAw/ktpaWlnBt1er5N9HV1RX2L33pS2GfPXv2aI4D1MFvf/vbsF933XVhf/HFF3PbWWedlTQTAAAA5bzyyithP3ToUNhfeumlpPZ2jl30nWPR+uixvfzyy+Ha4eHhsB89ejTsr776atgPHz6cvP7YsWPh2iL//Oc/S60vo+jv7fXXX6/r+U844YTcdtppp9X13EXK/MbopJNOCvvJJ59c6tynnHJK8rGL7pmfeuqpYS96XWbMmBH2M888M3lt0f2q6Nhvpxc9dgBgTOT/wPN/xT8Q/Q/NJQcBAAAAAAAAAAAAAAAAAAAAAAAAAACASckGvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDABr8AAAAAAAAAAAAAAAAAAAAAAAAAAACQwAa/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMAGvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDABr8AAAAAAAAAAAAAAAAAAAAAAAAAAACQwAa/AAAAAAAAAAA
AAAAAAAAAAAAAAAAAkKCpVqtVPUOWZVlDDAEAAAAA1E9/f39uW7ZsWbi2Qe5jTjhNTU11Pf54ft3q+dw08vMyPDwc9q1bt4b98OHDYe/t7R3xTG/X/v37w97W1la3c1ftueeey22PP/54uHb37t1hX7lyZdjnzp0bdsZeI7+3N/Jsk1n0HpJlWbZ3796wb9myJewDAwO5raurK1y7cePGsI9na9asCXsjP/Yy1wtVXitkWXy9MJGvFapU9FmvSF9f3yhNwlgp+u/C6tWrw7558+awr1q1asQzQVUWLVoU9ocffniMJhlbzc3NYS/6XFC0fsGCBWG/+eabc9vixYvDtXfddVfYBwcHw75nz56wA2OjzP2HsvcWit5nos/Ijaze91yK7k3s2LEj7OvWrUs+d70fW5V/j5NV2XuQnnfGUiPfM2/k2RpdI3/P3Miz1dP27dvD3tnZGfannnoq7HPmzBnxTI0iut//4IMPhmuL7vOVFT3v4/k5h4nktddeC3tLS0vYf/KTn+S2G2+8MWkmAAAAAAAASBT/eCDL4h8f/If41/AAAAAAAAAAAAAAAAAAAAAAAAAAAADAW7LBLwAAAAAAAAAAAAAAAAAAAAAAAAAAACSwwS8AAAAAAAAAAAAAAAAAAAAAAAAAAAAksMEvAAAAAAAAAAAAAAAAAAAAAAAAAAAAJLDBLwAAAAAAAAAAAAAAAAAAAAAAAAAAACSwwS8AAAAAAAAAAAAAAAAAAAAAAAAAAAAkaKrValXPkGVZ1hBDAAAAAAD109/fn9uWLVsWrm2Q+5gTTlNTU12PP55ft3o+N1U+L/v27Qv7unXrwn7nnXeGfe7cuWHfvn17buvs7AzXFlm/fn3Ye3p6Sh1/ohoeHg77Y489FvZf//rXYb/uuuty25VXXhmubWlpCTtvbfHixWEfGBgodfwy72Fl31vb29vDvnPnzlLHpz66u7tzW29vb7h2PF9LDA4Ohv3VV18N+7x580ZznBGp5/VCmWuFLKvv9YJrhfoo+qz39NNPh/2iiy4azXEAxtSf/vSnsBe9B/LWpkyZEvbjx48nH/td73pX2Iuu0fr6+pLPDYyeMvcfyn4OrfLcjWzLli1hL7pftWHDhrBfeOGFI57pTfV+3v1NjL2y9yA974wl9/Mnpuh1rfI1zbJyr2sjv6Z79+4N+yWXXBL2p556Kuxz5swZ8UyTQb3v50d/cxP5PQQmkgsuuCDsn/3sZ3PbHXfcMdrjAAAAAAAAQKToS+74S/L/0FxyEAAAAAAAAAAAAAAAAAAAAAAAAAAAAJiUbPALAAAAAAAAAAAAAAAAAAAAAAAAAAAACWzwCwAAAAAAAAAAAAAAAAAAAAAAAAAAAAls8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAJbPALAAAAAAAAAAAAAAAAAAAAAAAAAAAACWzwCwAAAAAAAAAAAAAAAAAAAAAAAAAAAAls8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAJplY9AAAAAAAAMDa+9a1vhX1gYCDsO3fuLHX+jo6O3NbZ2Vnq2L29vWHv6ekpdfyJqqWlJezt7e2l+uDgYG67/fbbw7VXXXVV2D/ykY+Eva2tLewTVdFrUvTvvJEVPTYa07p163Lb3r17x3CSsfXwww+HPXpeqlbl9UJ0rZBl9b1ecK0AADQ3N1c9AjCJ7du3L+yzZ88eo0lGbvv27WFfvXp12Pfv3x/2yXqfD/h/mpqaqh4hV61WS17rfv7EFD12r2m66PuE7u7ucO2BAwfC3tramjTTZHfttddWPQIAAAAAAAAANBy/SAcAAAAAAAAAAAAAAAAAAAAAAAAAAIAENvgFAAAAAAAAAAAAAAAAAAAAAAAAAACABDb4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgAQ2+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAENvgFAAAAAAAAAAAAAAAAAAAAAAAAAACABDb4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgAQ2+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAEU6seAAAAAAAAGBubNm2qeoS66erqqnoE3sLcuXOTWpZl2d69e8O+Y8eOsK9duzbsE9WHP/zhqkeom3o/tl27duW2+fPnh2sfffTRsM+bNy9ppjfde++9uW337t3h2jvvvDPsRf8Wy2ppaclt7e3tdT13PQ0PD4d92rRpYY+el6q5XmAsffCDHwx7X1/fGE0CMPoWLVoU9qeffnqMJhlbzc3NYW9qaiq1fsGCBWG/+eabc9vixYvDtXfddVfYBwcHww6wefPmsK9evTq3PfDAA+HadevWhb3oc+bQ0FDYo/MX3Wfr7OwMe5G2trZS64GJr1arVT1CXQmnHh4AACAASURBVLifnya6l59l1d/Pn6iva70fV9H3gj//+c9z29atW8O1ra2tSTO9XdH3BQ8++GC4dtWqVaXO3d3dHfaLL7447B0dHcnnrvf3HGWfG6D+XnvttbA///zzYX/ve987muMAAAAAAABAQ4h/DQ8AAAAAAAAAAAAAAAAAAAAAAAAAAAC8JRv8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQAIb/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEACG/wCAAAAAAAAAAAAAAAAAAAAAAAAAABAAhv8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQAIb/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEACG/wCAAAAAAAAAAAAAAAAAAAAAAAAAABAgqlVDwAAAAAAAGPtueeeq+zc+/btC/vs2bPrdu6dO3eGffHixWHftWtX2OfNmxf2wcHBsJfR1dVVt2NTjTlz5pTqk1XR87J+/fqw9/b2hr3oPayMotnq/ZrPnz+/bmtrtVrysbMsy3bv3p3bBgYGwrVFffPmzWFftWpV2ItE51+yZEmpY1fpscceC/vHP/7xMZpk9NXzeqHKa4Usc70AAG9qamoK+5QpU8J+/Pjx3PbRj340XLtixYqwL126NOzTp08PexkLFiwI+3e/+92wHzp0KOwzZswY8UzAfxsaGqrs2K2trWEv+py7evXq3FZ0T6aol7V///7kte3t7WEvujdRdL/+2LFjI57p7Sr7mu/du3c0x/k/qvwuodENDw9XduyWlpa6nbtId3d3qfU9PT2jNAmjxf38NGXu5b+d9WXv50ePfTK/pkXfM5d5Xet9nVTGnj176nr8omuRMs9NR0dH2Mvezy/6myu6xgSqV/R9adF/U6+++urRHAcAAAAAAAAaQnPVAwAAAAAAAAAAAAAAAAAAAAAAAAAAAMB4ZINfAAAAAAAAAAAAAAAAAAAAAAAAAAAASGCDXwAAAAAAAAAAAAAAAAAAAAAAAAAAAEhgg18AAAAAAAAAAAAAAAAAAAAAAAAAAABIYINfAAAAAAAAAAAAAAAAAAAAAAAAAAAASGCDXwAAAAAAAAAAAAAAAAAAAAAAAAAAAEhgg18AAAAAAAAAAAAAAAAAAAAAAAAAAABIMLXqAQAAAAAAqI+mpqaGPn+tVqvs3FW68MILS60v87y1t7
eH/dFHHw37jh07wj5//vwRz/SmDRs2hL1o9tmzZyefGyaTnp6esF988cVhL/Metm3btrB3dHQkH3s0RO+BRe9vRe+fZW3dujW3PfTQQ+Ha1atXl+pPPvlk2K+66qqwX3vttbmtpaUlXNvIfv/734e96L9bjaye1wtlrhWyzPUCALzphBNOCPu//vWvsM+ZMyfsy5cvD3t07X722WeHaxvZwoULw37eeeeF/fvf/37Yv/zlL490JOAtzJw5s7JjF92bbW1tDfv+/ftz25YtW8K1vb29Ye/q6gr7HXfcEfa2trawR4ruNw0MDIS96LF/4QtfCPv69etz28GDB8O1x44dC3uV3zVU+V1C1ap83qdNm1Zq/Xh+3mk87ue/taL7k1Xfz49U+ZpmWfy61vs1LXtveLyaO3duXY9fdM981qxZYe/s7ExqWRZfg2VZ8b+1efPmhR1ofPfdd1/Yr7/++rCfddZZozkOAAAAAAAANITmqgcAAAAAAAAAAAAAAAAAAAAAAAAAAACA8cgGvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDABr8AAAAAAAAAAAAAAAAAAAAAAAAAAACQwAa/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMAGvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDABr8AAAAAAAAAAAAAAAAAAAAAAAAAAACQoKlWq1U9Q5ZlWUMMAQAAAADUT39/f25btmxZuLZB7mMCAAAwAkWf9Yr09fWN0iQAY+9jH/tY2Hfv3p3bzj///HDtpz/96bB3dnaG/YILLgg7b+2Xv/xl2ItelyeffDLs73nPe0Y6EgAAAADUxa9+9auw33DDDWH/4x//GPaie6AAAAAAAAAwhuIf4GfZ9rd7oOaSgwAAAAAAAAAAAAAAAAAAAAAAAAAAAMCkZINfAAAAAAAAAAAAAAAAAAAAAAAAAAAASGCDXwAAAAAAAAAAAAAAAAAAAAAAAAAAAEhgg18AAAAAAAAAAAAAAAAAAAAAAAAAAABIYINfAAAAAAAAAAAAAAAAAAAAAAAAAAAASGCDXwAAAAAAAAAAAAAAAAAAAAAAAAAAAEhgg18AAAAAAAAAAAAAAAAAAAAAAAAAAABIMLXqAQAAAAAAAEjX1NRU9QgNqVarVT0CAAAwif30pz8N+1/+8pfcdvnll4/yNIyGT3ziE2G/8sorw/6pT30q7L/73e9yW0tLS7gWAAAAAEbimWeeCfuKFSvC3t3dHfbzzz9/xDMBAAAAAADAeNdc9QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwHtngFwAAAAAAAAAAAAAAAAAAAAAAAAAAABLY4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAAS2OAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAEtjgFwAAAAAAAAAAAAAAAAAAAAAAAAAAABLY4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAAS2OAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAEkytegAAAAAAAADS1Wq1qkcAAADg/3POOeeU6ow/P/rRj8J+6aWXhn3JkiW57aGHHgrXtrS0hB0AAACAyWffvn257ZprrgnXXnHFFWG//fbbk2YCAAAAAACAiay56gEAAAAAAAAAAAAAAAAAAAAAAAAAAABgPLLBLwAAAAAAAAAAAAAAAAAAAAAAAAAAACSwwS8AAAAAAAAAAAAAAAAAAAAAAAAAAAAksMEvAAAAAAAAAAAAAAAAAAAAAAAAAAAAJLDBLwAAAAAAAAAAAAAAAAAAAAAAAAAAACSwwS8AAAAAAAAAAAAAAAAAAAAAAAAAAAAksMEvAAAAAAAAAAAAAAAAAAAAAAAAAAAAJJha9QAAAAAAAAAAAABDQ0Nh37VrV9j7+/vDvnPnztw2MDAQrl28eHHY29vbw/6d73wn7G1tbWEvY3h4OOyPPPJI2Ds7O5PPvXnz5rAvWbIk7K2trcnnhrE2ffr0sP/mN78J+9VXX53b5s6dG66N3t+yLMsuuOCCsAMAAAAw/hTdb+ro6Mhtc+bMCddu27Yt7M3NzWEHAAAAAACAyci3aAAAAAAAAAAAAAAAAAAAAAAAAAAAAJDABr8AAAAAAAAAAAAAAAAAAAAAAAAAAACQwAa/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMAGvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDABr8AAAAAAAAAAAAAAAAAAAAAAAAAAACQwAa/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMAGvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJBgatUDAAAAAAAAAAAA3HLLLWEfGBgodfzBwcHc1t7eHq7dv39/2M8999ywz5o1K+wbN24MexnLly8Pe9Fjr9VquW1oaChcW/Y1feCBB8Le0tISdmgk559/ftifeOKJ3LZ06dJw7aWXXhr2bdu2hX3hwoVhBwAAAGD0RfdesyzLvv3tb4d97dq1YV+5cmVu+973vheuPfHEE8MOAAAAAAAA/LfmqgcAAAAAAAAAAAAAAAAAAAAAAAAAAACA8cgGvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDABr8AAAAAAAAAAAAAAAAAAAAAAAAAAACQwAa/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMAGvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDABr8AAAAAAAAAAAAAAAAAAAAAAAAAAACQwAa/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkKCpVqtVPUOWZVlDDAEAAAAA1E9/f39uW7ZsWbi2Qe5jAgAAMAJFn/WK9PX1jdIkTBRNTU2l1tfz/kKVs+3atSvs8+fPD/uBAwfC3traOuKZ3jQ4OBj2yy67LOzbtm0Le0dHx4hngvHotddeC3tXV1fYf/zjH4d95cqVYd+wYUNumzFjRrgWAAAAYLJ65plnwr569eqw79mzJ+zf/OY3w7527dqwAwAAAAAAAFmWZVlnQd/+dg/UXHIQAAAAAAAAAAAAAAAAAAAAAAAAAAAAmJRs8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAJbPALAAAAAAAAAAAAAAAAAAAAAAAAAAAACWzwCwAAAAAAAAAAAAAAAAAAAAAAAAAAAAls8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAJbPALAAAAAAAAAAAAAAAAAAAAAAAAAAAACaZWPQAAAAAAAAAAAABpduzYUWp9a2vrKE3y3y666KJS6/v7+8Pe0dFR6vgwXpx44olh/+EPfxj2RYsWhf2LX/xi2KN/yxs2bAjXLl++POxNTU1hBwAAAKjS0aNHw/6Nb3wjt919993h2g984ANhHxwcDPuHPvShsAMAAAAAAABjq7nqAQAAAAAAAAAAAAAAAAAAAAAAAAAAAGA8ssEvAAAAAAAAAAAAAAAAAAAAAAAAAAAAJLDBLwAAAAAAAAAAAAAAAAAAAAAAAAAAACSwwS8AAAAAAAAAAAAAAAAAAAAAAAAAAAAksMEvAAAAAAAAAAAAAAAAAAAAAAAAAAAAJLDBLwAAAAAAAAAAAAAAAAAAA
AAAAAAAACSwwS8AAAAAAAAAAAAAAAAAAAAAAAAAAAAkmFr1AAAAAAAAAAAAAKTZtGlT1SPkamlpKbV+YGBglCaBye36668P+zXXXBP2u+66K7d95jOfCdfee++9Yf/qV78a9k9+8pNhb25uDjsAAAAwuR09ejTs999/f9jvvvvusL/yyiu57Z577gnXfv7znw/7lClTwg4AAAAAAAA0Fr9sBgAAAAAAAAAAAAAAAAAAAAAAAAAAgAQ2+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAENvgFAAAAAAAAAAAAAAAAAAAAAAAAAACABDb4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgAQ2+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAENvgFAAAAAAAAAAAAAAAAAAAAAAAAAACABDb4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgARTqx4AAAAAAAAAAACANO3t7WEfGBgI+9DQUNhbW1tHPNNo6erqquzcMJmcfvrpYb/vvvty26233hqu7enpCfuNN94Y9osvvjjs3d3duW3p0qXh2ilTpoQdAAAAqN7LL78c9i1btoT9nnvuCfuRI0fCvmbNmrDfdtttuW3mzJnhWgAAAAAAAGBiaa56AAAAAAAAAAAAAAAAAAAAAAAAAAAAABiPbPALAAAAAAAAAAAAAAAAAAAAAAAAAAAACWzwCwAAAAAAAAAAAAAAAAAAAAAAAAAAAAls8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAJbPALAAAAAAAAAAAAAAAAAAAAAAAAAAAACWzwCwAAAAAAAAAAAAAAAAAAAAAAAAAAAAls8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAJplY9AAAAAAAAAAAAAGluuummsA8MDIT92WefDXtra+uIZ3rT8PBw8tosy7Ibbrih1Hqg/t7//veH/Wc/+1nY//CHP4S9p6cn7B0dHblt1qxZ4dpbbrmlVD/77LPDDgAAAPyvp59+Ouz3339/buvr6wvX/vvf/w77mjVrwn7bbbeFvcz9UQAAAAAAAGByaa56AAAAAAAAAAAAAAAAAAAAAAAAAAAAABiPbPALAAAAAAAAAAAAAAAAAAAAAAAAAAAACWzwCwAAAAAAAAAAAAAAAAAAAAAAAAAAAAls8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAJbPALAAAAAAAAAAAAAAAAAAAAAAAAAAAACWzwCwAAAAAAAAAAAAAAAAAAAAAAAAAAAAls8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAJmmq1WtUzZFmWNcQQAAAAAED99Pf357Zly5aFaxvkPiYAAAAjUPRZr0hfX98oTcJ4MTQ0FPaZM2eWOv7hw4dzW0tLS7h2eHg47NOmTUua6U0HDhzIba2treHaotmWL1+eNNObtm7dmtuKZtu+fXvYd+/eHfaNGzeGHeDZZ5/NbVu2bAnX/uAHPwj7P/7xj7C3t7eH/XOf+1zYFyxYkNtOOOGEcC0AAACMpiNHjoT9F7/4RdiLPoM//vjjYb/oooty26233hquXbFiRdjPOOOMsAMAAAAAAACTXmdBj//HiP/QXHIQAAAAAAAAAAAAAAAAAAAAAAAAAAAAmJRs8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAJbPALAAAAAAAAAAAAAAAAAAAAAAAAAAAACWzwC/A/7Nzfb9V3/cDx9zmUAh3Ir7Ix6GBhDEmYHXMRxo/NLVhKlSV6wUVtdmWiF3qjl/4HxhgvvDMm3tjaZP6Ic4FSWLYowoIw+TE1QlkmjPGjMAcrUOjo+d5/3ef16d6Hrkf6eNw+83p/3pCOnnPavQAAAAAAAAAAAAAAAAAAAAAAAAAAIIMFvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDBgl8AAAAAAAAAAAAAAAAAAAAAAAAAAADIUKnValN9h5RSaohLAAAAAACTp6+vr7D19PSEsw3yOSYAAACfQtl7vTK9vb336Cb8r6hUKlP27LLPHhr5bmUuX74c9j/84Q9h//a3v5397F//+tdh7+rqCvv8+fOznw1QZmxsLOy///3vw/7zn/887K+//nrYo3/jvv71r4ezu3btCvu2bdvC3tzcHHYAAAAaz/Xr18P+yiuvhP3ll18ubHv37g1nq9Vq2Mvex37nO98J+5e//OWwAwAAAAAAAEyi7pLeP9GD4p+sAgAAAAAAAAAAAAAAAAAAAAAAAAAAAJ/Igl8AAAAAAAAAAAAAAAAAAAAAAAAAAADIYMEvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZLDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAADJY8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAZLPgFAAAAAAAAAAAAAAAAAAAAAAAAAACADBb8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQIZKrVab6juklFJDXAIAAAAAmDx9fX2FraenJ5xtkM8xAQAA+BTK3utdunQp7D/+8Y/Dvn79+rBXKpWwAwBMpgsXLoT9t7/9bWF7+eWXw9kDBw6Eff78+WF/8cUXw97V1VXYtm3bFs4uWbIk7AAAAPerd955J+yDg4Nh3717d13z1Wo17Dt27Chsu3btCmd37twZ9nnz5oUdAAAAAAAAoIF1l/T+iR4U/9QWAAAAAAAAAAAAAAAAAAAAAAAAAAAA+EQW/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAGC34BAAAAAAAAAAAAAAAAAAAAAAAAAAAggwW/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMGCXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMhgwS8AAAAAAAAAAAAAAAAAAAAAAAAAAABksOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAMjRN9QUAAAAAAAAAAJh+3n777bB/8YtfDPvy5cvD3tXVldVSSmn79u1hnzt3btgBAB5++OGwf+9738tqKaV04cKFsP/ud78L+6uvvhr2b33rW4VtdHQ0nC17DdfR0RH2zs7OsD/zzDNhnzVrVtgBAID727Vr18L+xhtvhH1wcDC7Dw0NhbOtra1h37ZtW9h/+ctfhn3nzp1hnzdvXtgBAAAAAAAAqE91qi8AAAAAAAAAAAAAAAAAAAAAAAAAAAAA/4ss+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAMFvwCAAAAAAAAAAAAAAAAAAAAAAAAAABABgt+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIMFvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDBgl8AAAAAAAAAAAAAAAAAAAAAAAAAAADIYMEvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZKjUarWpvkNKKTXEJQAAAACAydPX11fYenp6wtkG+RwTAACAT6HsvV6ZH/zgB2HfvXt3dj98+HA429TUFPatW7eGfefOnWHv6uoK+9q1a8MOADCZRkdHC9uBAwfC2cHBwbDv27cv7MePHw97c3Nz2J9++unCtmnTpnC2rD/zzDNhX758edgBAGC6OHXqVNgPHToU9oMHD2a1lFL6xz/+
Efayz37LXvd3dnYWto6OjnA2er+SUkrVajXsAAAAAAAAAEyK7pLeP9GD/NQXAAAAAAAAAAAAAAAAAAAAAAAAAAAAMljwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABks+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAMFvwCAAAAAAAAAAAAAAAAAAAAAAAAAABABgt+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIMFvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDBgl8AAAAAAAAAAAAAAAAAAAAAAAAAAADIUKnValN9h5RSaohLAAAAAACTp6+vr7D19PSEsw3yOSYAAACfQtl7vTK9vb336Cb/bXh4OOyDg4Nh/+Mf/xj2ffv2hf2DDz4I+2OPPVbYurq6wtmvfe1rYX/++efDPnv27LADAEymixcvhv3Pf/5z2A8ePFjYDh06FM6+9dZbYR8bGwv7ihUrwr5hw4bC9uSTT4az7e3tdfVHH3007AAANJ6PP/447KdPnw778ePHs/uxY8fC2aNHj4a97PPXefPmhX3jxo2FbfPmzeHspk2bwr5169awz507N+wAAAAAAAAA3He6S3r/RA+q1nkRAAAAAAAAAAAAAAAAAAAAAAAAAAAAmJYs+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAMFvwCAAAAAAAAAAAAAAAAAAAAAAAAAABABgt+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIMFvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDBgl8AAAAAAAAAAAAAAAAAAAAAAAAAAADIUKnValN9h5RSaohLAAAAAACTp6+vr7D19PSEsw3yOSYAAACfQtl7vTK9vb336Cafvbt374b90KFDYd+9e3dh27NnTzh77NixsLe0tIT9hRdeCPtXv/rVrJZSSo8++mjYAQCm0q1bt8J+5MiRsB88eDDsb731VmErew135syZsJe9/lywYEHYn3zyycL2xBNPhLNr164N++OPP15XX7lyZdhnzJgRdgBgert9+3bYh4aGwn769Omwnzp1qrD961//CmdPnDgR9rfffjvso6OjYW9ubg579Dqvvb09nP3Sl74U9i1btmQ/OyWv8QAAAAAAAAD4THWX9P6JHlSt8yIAAAAAAAAAAAAAAAAAAAAAAAAAAAAwLVnwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABks+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAMFvwCAAAAAAAAAAAAAAAAAAAAAAAAAABABgt+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIMFvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDBgl8AAAAAAAAAAAAAAAAAAAAAAAAAAADIUKnValN9h5RSaohLAAAAAACTp6+vr7D19PSEsw3yOSYAAACfQtl7vTK9vb336CbTy3vvvRf2gYGBsL/66qth379/f2G7ceNGOLt27dqwd3V1hb2zszPszz77bNhbWlrCDgDQqG7evBn2kydPhv3EiRNhP3bsWGH7+9//Hs7+85//DPvly5fDXqa5uTnsq1atKmxr1qwJZ1evXh32Rx55JOxtbW1hX758eWFbsWJFOLt06dKwz5gxI+wA8P/dvn27sJ0/fz6cLev//ve/w/7++++H/ezZs2E/depUYTt9+nRdZ4+Pj4e9Wq2GPXq9UPZZ2JNPPllXb29vD3vZ85uamsIOAAAAAAAAANNEd0nvn+hB8W8ZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJ/Igl8AAAAAAAAAAAAAAAAAAAAAAAAAAADIYMEvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZLDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAADJY8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAZLPgFAAAAAAAAAAAAAAAAAAAAAAAAAACADBb8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQIZKrVab6juklFJDXAIAAAAAmDx9fX2FraenJ5xtkM8xAQAA+BTK3uuV6e3tvUc34V66c+dOYTtw4EA4OzAwUFc/efJk2GfPnh325557rrDt2LEjnO3q6gr72rVrww4AMF1dv3497KdOnQr70NBQ9nzZ2WfOnAn72bNnw37x4sWwj4+Phz3S1NQU9oceeijsK1euDHtra2t2L5tdsmRJXc9evHhxdi87+4EHHgh7S0tL2BcsWBD2SqUSdpguxsbGwj4yMhL26HvHjRs3wtmrV6+G/cqVK3X14eHh7Pl671b2fef8+fNhv3TpUtjr0dzcHPbly5eHfcWKFWF//PHHs9pn0WfNmhV2AAAAAAAAAKDhdZf0/okeVK3zIgAAAAAAAAAAAAAAAAAAAAAAAAAAADAtWfALAAAAAAAAAAAAAAAAAAAAAAAAAAAAGSz4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgAwW/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAGC34BAAAAAAAAAAAAAAAAAAAAAAAAAAAggwW/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMGCXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMhQqdVqU32HlFJqiEsAAAAAAJOnr6+vsPX09ISzu3btutfXAQAAYJK9/PLLYf/mN78Z9t7e3nt51brmlQAAIABJREFUHe4D7733XtgHBgay+/79+8PZa9euhf3RRx8N+44dO7L7tm3bwtm5c+eGHQCAyfHxxx+H/cKFC4Xt3Llz4WzZa9/z58+H/ezZs2EfHh4O+9WrV7Nn6zk7pZRu3LgR9kY2Z86crJZSSgsWLAh7S0tL2GfNmhX2MgsXLqxrvh5l72lmzpw5ac++detW2EdHRyft2Xfv3g379evX6zr/5s2b2b3s2SMjI2EfGxsLeyObP39+2B988MGwL168uLC1trZmz6aU0sMPPxz2ZcuWhX3lypXZs21tbWFfunRp2AEAAAAAAAAAGlh3Se+f6EHVOi8CAAAAAAAAAAAAAAAAAAAAAAAAAAAA05IFvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDBgl8AAAAAAAAAAAAAAAAAAAAAAAAAAADIYMEvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZLDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAADJY8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAZLPgFAAAAAAAAAAAAAAAAAAAAAAAAAACADJVarTbVd0gppYa4BAAAAAAweS5evFjYvv/974ezd+/evdfXAQAAYIq99NJLYX/xxRc/o5tA+WcPhw8fDvuePXvCPjAwEPajR48WtqampnB269atYd++fXtdff369YWtUqmEswAA8GndunUr7FevXs1qKaU0MjJS17P/85//hP3mzZvZZ1+7di3sN27cCPudO3fCXvae5/r162GfTGV/9vHx8Ul7dnNzc9gfeOCBSXt2mYULF9Y1P2fOnLC3tLQUtvnz54ezZX8v0dkppTRv3rzsXvbs1tbWsC9evDjsM2fODDsAAAAAAAAAAPed7pLeP9GDqnVeBAAAAAAAAAAAAAAAAAA
AAAAAAAAAAKYlC34BAAAAAAAAAAAAAAAAAAAAAAAAAAAggwW/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMGCXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMhgwS8AAAAAAAAAAAAAAAAAAAAAAAAAAABksOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAMljwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABkqtVptqu+QUkoNcQkAAAAAAAAAAIDP2vDwcGEbHBwMZ/fs2RP2svno2SmltHTp0sK2Y8eOcLasd3R0hH3RokVhBwAAAAAAAAAAAAAAqEN3Se+f6EHVOi8CAAAAAAAAAAAAAAAAAAAAAAAAAAAA05IFvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDBgl8AAAAAAAAAAAAAAAAAAAAAAAAAAADIYMEvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZLDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAADJY8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAZKrVabarvkFJKDXEJAAAAAAAAAACA+8n4+HjYjx49GvaBgYHCNjg4GM6++eabYS/73bUNGzaEvaurK+w7duwobE8//XQ4W61Www4AAAAAAAAAAAAAAPzP6y7p/RM9yP+FAAAAAAAAAAAAAAAAAAAAAAAAAAAAABks+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAMFvwCAAAAAAAAAAAAAAAAAAAAAAAAAABABgt+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIMFvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDBgl8AAAAAAAAAAAAAAAAAAAAAAAAAAADIYMEvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZKjUarWpvkNKKTXEJQAAAAAAAAAAALg3rl27Fvb9+/eHfWBgoK7+3nvvFbbW1tZwdtu2bWHv7OwMe0dHR9jb2trCDgAAAAAAAAAAAAAATLrukt4/0YOqdV4EAAAAAAAAAAAAAAAAAAAAAAAAAAAApiULfgEAAAAAAAAAAAAAAAAAAAAAAAAAACCDBb8AAAAAAAAAAAAAAAAAAAAAAAAAAACQwYJfAAAAAAAAAAAAAAAAAAAAAAAAAAAAyGDBLwAAAAAAAAAAAAAAAAAAAAAAAAAAAGSw4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAAyWPALAAAAAAAAAAAAAAAAAAAAAAAAAAAAGSq1Wm2q75BSSg1xCQAAAAAAAAAAAO4PJ0+eLGwDAwPh7P79+8P+pz/9Keyjo6NhX7duXWHbvn17OFvWn3vuubC3tLSEHQCAPB999FFhu3XrVjg7MjIS9mvXroV9fHy8rvPHxsbCXo+yu5X92SZTtVoN+/z58z+jm/y32bNnh33OnDlhb25uDvsDDzxQ2Mr+3GXvKcruBgAAAAAAAAAADaS7pPdP9KD4t5EAAAAAAAAAAAAAAAAAAAAAAAAAAACAT2TBLwAAAAAAAAAAAAAAAAAAAAAAAAAAAGSw4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAAyWPALAAAAAAAAAAAAAAAAAAAAAAAAAAAAGSz4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgAwW/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAGC34BAAAAAAAAAAAAAAAAAAAAAAAAAAAgQ6VWq031HVJKqSEuAQAAAAAAAAAAAGVu3rwZ9gMHDoR9YGCgsO3fvz+cPXnyZNhnz54d9q1bt4Z9+/bt2b29vT2crVQqYSfP7du3w172NbFx48awv/LKK4XtwQcfDGcByHPnzp2wX7hwIeznzp0L++XLl8N+5cqVwjY8PBzOXr16ta4ePbtsvuzsjz76KOxlr/HK5oGJWbhwYdhbWloK29y5c8PZ1tbWsC9evLiuXnZ+9Pq43mcvXbo07G1tbdnz1Wo1nAUAAAAAAAAAuI91l/T+iR7kNzAAAAAAAAAAAAAAAAAAAAAAAAAAAAAggwW/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMGCXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMhgwS8AAAAAAAAAAAAAAAAAAAAAAAAAAABksOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAMljwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABks+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAMlVqtNtV3SCmlhrgEAAAAAAAAAAAANLLz58+Hfd++fWHfu3dv2F977bWwDw8PF7alS5eGsx0dHWHv7OwM+1e+8pWwP/TQQ2G/X73xxhthf+GFF+o6f9GiRYXtF7/4RTj7jW98o65nA4yOjoZ9aGgoq6WU0pkzZ8J+9uzZsJ87dy7s0ffsstmLFy+GfbL/H4AFCxYUtiVLloSzixcvDntra+ukzZfNzp07N+wtLS1h/9znPpd9/pw5c8LZefPm1fXsGTNmhL3s+bNnzw77ZIq+3lJKqVKpTNqzx8bGwj4yMjJpzy5T9uyyu9++fTvsN2/eLGwffvhh9mxKKd26dSvsZeffuHGjsJX9vVy9ejXsV65cmdT56P1S2Wz0574XZs6cWdiWLVsWzj7yyCNhb2trq6uvWLEi7GvWrClsq1evDmdXrlwZ9qamprADAAAAAAAAAPe97pLeP9GDqnVeBAAAAAAAAAAAAAAAAAAAAAAAAAAAAKYlC34BAAAAAAAAAAAAAAAAAAAAAAAAAAAggwW/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMGCXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMhgwS8AAAAAAAAAAAAAAAAAAAAAAAAAAABksOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAMlRqtdpU3yGllBriEgAAAAAAAAAAADCdjY+Ph/2tt94qbPv27Qtn9+7dG/ZDhw6FfWxsLOzr168vbB0dHeFsZ2dn2Lds2RL2WbNmhX0y/fCHPwz7T37yk7DfuXMn7NVqtbCVfb289NJLYf/Zz34W9vnz54cdmJgrV66E/fjx44XtxIkT4eypU6fCfvr06bAPDQ2F/dy5c2GP/h2K/v1KKaW2trawP/LII3X16Px6ZifSy85fsmRJ2JuamsIOwL03Ojoa9kuXLoW97Hvm2bNnC9v7778/aWdP5Px333037JcvXw57ZObMmWFftWpV2FevXh32NWvWFLbPf/7z4Wz0HjallNatWxf2uXPnhh0AAAAAAAAAmJDukt4/0YPi31oEAAAAAAAAAAAAAAAAAAAAAAAAAAAAPpEFvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDBgl8AAAAAAAAAAAAAAAAAAAAAAAAAAADIYMEvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZLDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAADJY8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAZLPgFAAAAAAAAAAAAAAAAAAAAAAAAAACADJVarTbVd0gppYa4BAAAAAAAAAAAADA1RkZGwv7666+HfXBwMKullNKpU6fC3tLSEvbnn38+7B0dHWHfsW
NHYVu7dm04+9RTT4X92LFjYZ9MM2fODPvixYvD/qtf/Srs27Zt+9R3gsnyzjvvhP3IkSOF7W9/+1s4e+LEibAfP3487OfPnw97pK2tLeyPP/74pPbVq1eHfc2aNYXtscceC2dnzZoVdgCgMVy/fr2wDQ0NhbNl7/XK5k+fPp19ftmzP/jgg7BXq9Wwl71Oam9vD/v69euzWkopbdiwIexLliwJOwAAAAAAAAA0kO6S3j/Rg+Kf9AMAAAAAAAAAAAAAAAAAAAAAAAAAAACfyIJfAAAAAAAAAAAAAAAAAAAAAAAAAAAAyGDBLwAAAAAAAAAAAAAAAAAAAAAAAAAAAGSw4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAAyWPALAAAAAAAAAAAAAAAAAAAAAAAAAAAAGSz4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgAwW/AIAAAAAAAAAAAAAAAAAAAAAAAAAAECGSq1Wm+o7pJRSQ1wCAAAAAAAAAAAAmH7efffdsA8ODtbVX3vttbB/+OGHhW3ZsmXh7IULF8LeIL8n+olmzJgR9vHx8bB/97vfLWw/+tGPwtmWlpaw05hu3boV9iNHjhS2gwcPhrNvvvlmXf3ixYthb25uLmzr1q0LZ9vb28O+fv36sH/hC18I+1NPPVXYFi1aFM4CAJDn7NmzYT9x4kTYjx8/HvZjx45lnz80NBTOlr1XW7VqVdg3b94c9k2bNhW2LVu2hLNPPPFE2MvehwIAAAAAAAAw7XSX9P6JHlSt8yIAAAAAAAAAAAAAAAAAAAAAAAAAAAAwLVnwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABks+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAMFvwCAAAAAAAAAAAAAAAAAAAAAAAAAABABgt+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIMFvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDBgl8AAAAAAAAAAAAAAAAAAAAAAAAAAADIUKnValN9h5RSaohLAAAAAAAAAAAAANxrd+/eDfvhw4cL209/+tNw9je/+U3YG+T3RCdFU1NTYVuxYkU429fXF/aNGzdm3el+V/a1/Ne//jXs+/btC/vevXvDHv23klJKY2Njha3sa2LLli1h37RpU9g3b94c9vb29sI2c+bMcBYAAD5LIyMjYT9y5EjY//KXv4T90KFDYY9e9w8PD4ez8+bNC/uzzz4b9s7OzrBv3769sK1duzacBQAAAAAAAKAhdZf0/okeVK3zIgAAAAAAAAAAAAAAAAAAAAAAAAAAADAtWfALAAAAAAAAAAAAAADwf+zcb2hdd/3A8XOzbArSVjsNUrDrEzNlg0wY2in4INmo/9JO2KTXIUyXxA7RbTTq5hKppLoxOxUVGqfgpDLv2rEHSXVbaSKowyH+aSh22j2QFt1mKEocyJy1xwc/fvzmz53PTb7n3pyb3Nfr6ZvvuZ97TpI23d0HAAAAAAAAAAAAEljwCwAAAAAAAAAAAAAAAAAAAAAAAAAAAAks+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAEFvwCAAAAAAAAAAAAAAAAAAAAAAAAAABAAgt+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIEFvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJCglud51TNkWZZ1xBAAAAAAAAAAAAAAnWRsbCzsDz74YNj/+c9/tnCataO3tzfsFy5cCPudd94Z9n379oX94osvDns7Pfvss2H/0Y9+FPZjx44Vtrm5ufDsX/7yl7C/5S1vCfu1114b9sHBwbC/4x3vKGxbtmwJzwIAAJ3v9OnTYf/5z38e9vn5+bBHvw9lWZY9//zzhe2yyy4Lz1533XVh37FjR6m+YcOGsAMAAAAAAADwiupNemO5F+opOQgAAAAAAAAAAAAAAAAAAAAAAAAAAAB0JQt+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIEFvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDAgl8AAAAAAAAAAAAAAAAAAAAAAAAAAABIYMEvAAAAAAAAAAAAAAAAAAAAAAAAAAAAJLDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAABJY8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAJanmeVz1DlmVZRwwBAAAAAAAAAAAA0Em2bNkS9ueee26VJukuF110Udi3bt0a9kceeSTsfX19he3RRx8tde0nn3wy7Bs2bAj7tddeW9h27NgRnr3uuuvCvm3btrADAABUqdn/a3ny5MnC9sQTT4Rnjx07Fvaf/exnYW+m2e9rN9xwQ2HbuXNneHbjxo1JMwEAAAAAAACsAfUmvbHcC/WUHAQAAAAAAAAAAAAAAAAAAAAAAAAAAAC6kgW/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMCCXwAAAAAAAAAAAAAAAAAAAAAAAAAAAEhgwS8AAAAAAAAAAAAAAAAAAAAAAAAAAAAksOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAEljwCwAAAAAAAAAAAAAAAAAAAAAAAAAAAAlqeZ5XPUOWZVlHDAEAAAAAAAAAAACw2p555pnC1t/fv4qTdJaenp6w9/b2FrZarRaebfb52ZdeeinsZUXv7XWve1149vrrrw/7DTfcEPahoaGwX3zxxWEHAACg9f72t7+F/ejRo2E/cuRI2B9//PHC1ux35B07doT9Ix/5SNh37doVdr+HAgAAAAAAABWqN+mN5V4o/uQzAAAAAAAAAAAAAAAAAAAAAAAAAAAA8Ios+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAEFvwCAAAAAAAAAAAAAAAAAAAAAAAAAABAAgt+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIEFvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDAgl8AAAAAAAAAAAAAAAAAAAAAAAAAAABIYMEvAAAAAAAAAAAAAAAAAAAAAAAAAAAAJKjleV71DFmWZR0xBAAAAAAAAAAAAMBq++EPf1jYPvCBD6ziJP/tNa95TWF77WtfG5699NJLw97X11eqb968ubD94x//CM+ePHky7L/5zW/CXqvVwr5r166wf+xjHytsg4OD4dne3t6wAwAAwP/3wgsvFLajR4+GZxuNRtijf9fIsix7wxveEPbod+TR0dHw7LZt28IOAAAAAAAA0ES9SY//g+nL9JQcBAAAAAAAAAAAAAAAAAAAAAAAAAAAALqSBb8AAAAAAAAAAAAAAAAAAAAAAAAAAACQwIJfAAAAAAAAAAAAAAAAAAAAAAAAAAAASGDBLwAAAAAAAAAAAAAAAAAAAAAAAAAAACSw4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAASWPALAAAAAAAAAAAAAAAAAAAAAAAAAAAACSz4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgAS1PM+rniHLsqwjhgAAAAAAAAAAAABYbefPny9sP/7xj8Ozb3rTm8K+efPmsF966aVhv+iii8JexoULF8L+yCOPhP2+++4rbL/61a/Cs1dddVXYP/7xj4f9p
ptuCvuGDRvCDgAAAOvFH//4x7B/5zvfSe7PPfdcePb9739/2O+6666wX3PNNWEHAAAAAAAA1r16k95Y7oV6Sg4CAAAAAAAAAAAAAAAAAAAAAAAAAAAAXcmCXwAAAAAAAAAAAAAAAAAAAAAAAAAAAEhgwS8AAAAAAAAAAAAAAAAAAAAAAAAAAAAksOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAEljwCwAAAAAAAAAAAAAAAAAAAAAAAAAAAAks+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAEFvwCAAAAAAAAAAAAAAAAAAAAAAAAAABAglqe51XPkGVZ1hFDAAAAAAAAAAAAALA8Fy5cCPuRI0fCvn///rCfOnUq7DfeeGNhu/3228Oz27dvDzsAAACwOv71r38VttnZ2fDsgQMHwv7kk0+GfceOHWH//Oc/H/Z3vvOdYQcAAAAAAAA6Xr1Jbyz3Qj0lBwEAAAAAAAAAAAAAAAAAAAAAAAAAAICuZMEvAAAAAAAAAAAAAAAAAAAAAAAAAAAAJLDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAABJY8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAJLPgFAAAAAAAAAAAAAAAAAAAAAAAAAACABBb8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQAILfgEAAAAAAAAAAAAAAAAAAAAAAAAAACBBLc/zqmfIsizriCEAAAAAAAAAAAAA+D9zc3OF7bbbbgvPPv3002HfvXt32CcmJsL+1re+NewAAABAd4v+XSPLsuwLX/hC2H/605+G/T3veU9h+/rXvx6effOb3xx2AAAAAAAAYFXUm/TGci/UU3IQAAAAAAAAAAAAAAAAAAAAAAAAAAAA6EoW/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEACC34BAAAAAAAAAAAAAAAAAAAAAAAAAAAggQW/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMCCXwAAAAAAAAAAAAAAAAAAAAAAAAAAAEhgwS8AAAAAAAAAAAAAAAAAAAAAAAAAAAAkqOV5XvUMWZZlHTEEAAAAAAAAAAAAwHpy7ty5sO/duzfshw4dKmzDw8Ph2fvuuy/sl19+edgBAAAAqjQ3Nxf26N9VTp8+HZ696667wv7Zz3427JdccknYAQAAAAAAgGWpN+mN5V6op+QgAAAAAAAAAAAAAAAAAAAAAAAAAAAA0JUs+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAEFvwCAAAAAAAAAAAAAAAAAAAAAAAAAABAAgt+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIEFvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDAgl8AAAAAAAAAAAAAAAAAAAAAAAAAAABIYMEvAAAAAAAAAAAAAAAAAAAAAAAAAAAAJKjleV71DFmWZR0xBAAAAAAAAAAAAMBaMjMzE/Zbbrkl7K961avC/o1vfKOwffCDHwzPAgAAAKxn58+fL2xf/epXw7P79u0L+7Zt28L+8MMPh/3KK68MOwAAAAAAAJBlWZbVm/TGci/UU3IQAAAAAAAAAAAAAAAAAAAAAAAAAAAA6EoW/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEACC34BAAAAAAAAAAAAAAAAAAAAAAAAAAAggQW/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMCCXwAAAAAAAAAAAAAAAAAAAAAAAAAAAEhgwS8AAAAAAAAAAAAAAAAAAAAAAAAAAAAksOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAEtTyPK96hizLso4YAgAAAAAAAAAAAGC1RZ/lvOeee8Kzk5OTYR8ZGQn7l7/85bBv3Lgx7LCaFhcXwz4/P1/YHnroofDszMxM0kyQ4qmnngr79773vbBPT0+Hfc+ePaX6wMBA2AEAgPL+8Ic/hP2WW24J+y9/+cuwf//73y9sO3fuDM8CAAAAAABAF6k36Y3lXqin5CAAAAAAAAAAAAAAAAAAAAAAAAAAAADQlSz4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgAQW/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEACC34BAAAAAAAAAAAAAAAAAAAAAAAAAAAggQW/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMCCXwAAAAAAAAAAAAAAAAAAAAAAAAAAAEhgwS8AAAAAAAAAAAAAAAAAAAAAAAAAAAAkqOV5XvUMWZZlHTEEAAAAAAAAAAAAQKu99NJLYf/oRz9a2I4cORKe/eY3vxn2sbGxsMNacuutt4Z9eno6+dod8plq1pH5+fnCNjQ0FJ49c+ZM2Ldu3Rr2RqMR9oceeijsMzMzYW+ns2fPhv2ee+4pbM1+BuzZsyfsN954Y9gHBwfDDqup2fd5vV5PvvYPfvCDsO/evTv52hRr5zPNsvi5ruVnWuV9y7LOvndLS0thP3z4cGEr+3vkWr5v0EnOnz8f9ttuuy3s0d+P77333vDspz/96bADAAAAAADAOtLswwXxhxNepqfkIAAAAAAAAAAAAAAAAAAAAAAAAAAAANCVLPgFAAAAAAAAAAAAAAAAAAAAAAAAAACABBb8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQAILfgEAAAAAAAAAAAAAAAAAAAAAAAAAACCBBb8AAAAAAAAAAAAAAAAAAAAAAAAAAACQwIJfAAAAAAAAAAAAAAAAAAAAAAAAAAAASGDBLwAAAAAAAAAAAAAAAAAAAAAAAAAAACSo5Xle9QxZlmUdMQQAAAAAAAAAAADASjX7LOZNN90U9mPHjhW2Rx99NDz77ne/O+zQTWq1WvLZDvlMNevIrbfeWtimp6fDs+v563FpaSnsP/nJT8I+PDycfO3HHnss7PV6PewzMzNhj2aDlZqcnAz7/v37w/773/8++bXOgl0nAAAgAElEQVQvv/zysE9MTIR9amoq+bXXsyqfaZbFz7XTn2l076q8b1kW37t237fFxcWwj4yMhH12draV46zIgQMHwr53795VmgTWt29961uF7ROf+ER49itf+UrYP/WpTyXNBAAAAAAAAB0o/uBYljWWe6GekoMAAAAAAAAAAAAAAAAAAAAAAAAAAABAV7LgFwAAAAAAAAAAAAAAAAAAAAAAAAAAABJY8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAJLPgFAAAAAAAAAAAAAAAAAAAAAAAAAACABBb8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQAILfgEAAAAAAAAAAAAAAAAAAAAAAAAAACCBBb8AAAAAAAAAAAAAAAAAAAAAAAAAAACQoJbnedUzZFmWdcQQAAAAAAAAAAAAACt17733lurHjx8vbFdffXXSTNCNarVa8tkO+Uw164ivx1c2Ozsb9uHh4VWa5L+VeWZZtr6fG623sLAQ9quuuqrU9ct8PZb9Xjhx4kTYBwYGSl2/k0XPtcpnmmXlnmu7n2k7vx/W8327//77w/62t70t7IODg4Wt3T+jmvFnKrTfgw8+GPbR0dGwP/HEE2GPfsYAAAAAAABAh6k36Y3lXqin
5CAAAAAAAAAAAAAAAAAAAAAAAAAAAADQlSz4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgAQW/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEACC34BAAAAAAAAAAAAAAAAAAAAAAAAAAAggQW/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMCCXwAAAAAAAAAAAAAAAAAAAAAAAAAAAEjQW/UAAAAAAAAAAAAAAJ3sd7/7Xdj37dsX9kajEfarr756pSMBa8zi4mJhO3ToUHh2fHw87MPDw2G//fbbwz44OBj2yNLSUtgPHz4c9rGxseTXzrIsm5iYKGyf/OQnw7N9fX1hr9VqSTO1QtnXzvO8RZO0XrOv1062Z8+eyl57cnKy1PmpqakWTUKr/OIXv6h6hLZp9t4GBgZWaZLVt16fa7ufqfuWZu/evaXOR9r9fTo3N9fW6wPN3XzzzWF/+umnwz4yMhL2U6dOhf3Vr3512AEAAAAAAGAt6ql6AAAAAAAAAAAAAAAAAAAAAAAAAAAAAFiLLPgFAAAAAAAAAAAAAAAAAAAAAAAAAACABBb8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQAILfgEAAAAAAAAAAAAAAAAAAAAAAAAAACCBBb8AAAAAAAAAAAAAAAAAAAAAAAAAAACQwIJfAAAAAAAAAAAAAAAAAAAAAAAAAAAASGDBLwAAAAAAAAAAAAAAAAAAAAAAAAAAACTorXoAAAAAAAAAAAAAgE52xx13hP1DH/pQ2K+//vpWjgN0oMXFxbCPjIwUtg9/+MPh2TzPwz4/Px/2oaGhsJ84caKwDQwMhGfvvPPOsE9PT4f9z3/+c9hffPHFsF922WWF7dy5c+HZgwcPhr3ZfW+mVqslny372ryypaWlUuff9773tWgSyLJf//rXVY/QNrOzs2EfHR1dpUlW33p9ru1+pu5bNaI/Fx977LHw7PDwcNgPHDgQ9v7+/rAD1fviF78Y9qNHj4b9/vvvD/vdd9+94pkAAAAAAACg0/VUPQAAAAAAAAAAAAAAAAAAAAAAAAAAAACsRRb8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQAILfgEAAAAAAAAAAAAAAAAAAAAAAAAAACCBBb8AAAAAAAAAAAAAAAAAAAAAAAAAAACQwIJfAAAAAAAAAAAAAAAAAAAAAAAAAAAASGDBLwAAAAAAAAAAAAAAAAAAAAAAAAAAACSw4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAAS1PI8r3qGLMuyjhgCAAAAAAAAAAAA6D6nTp0K+5VXXhn2kydPhv2KK65Y8UzAytVqteSzZT9T3Wg0wl6v19v22s00uy8TExOFbWpqKjw7OTkZ9nPnzoX94MGDYW+mymfeTCfP1q3m5+fD/rWvfS3shw4dCvumTZtWPBPdq8zPiOUo83Okk2frdO28d2Xvm9nSdPJsZbXzve3Zsyfsd9xxR9j7+/tbOQ7QBg888EDY77777rD/6U9/KmyXXHJJ0kwAAAAAAACQqPgDnv8j/oDoy/SUHAQAAAAAAAAAAAAAAAAAAAAAAAAAAAC6kgW/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMCCXwAAAAAAAAAAAAAAAAAAAAAAAAAAAEhgwS8AAAAAAAAAAAAAAAAAAAAAAAAAAAAksOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAEljwCwAAAAAAAAAAAAAAAAAAAAAAAAAAAAks+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAEtTzPq54hy7KsI4YAAAAAAAAAAAAAus+XvvSlsH/3u98N+zPPPNPKcYBEtVot+WzZz1Tv3Lkz7LOzs6WuX5V2f9b87NmzYT9y5EjYx8fHk1+73e+tyq9HXlmz79PPfe5zYd++fXsrx6HLlfkZsRxlfo508mydrp33rux9M1uaTp6trKWlpcJ2+PDh8OzY2Firx/kPJ06cKGwDAwNtfW1gec6dOxf2vr6+sB8/frywDQ4OJs0EAAAAAAAAiepNemO5F+opOQgAAAAAAAAAAAAAAAAAAAAAAAAAAAB0JQt+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIEFvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDAgl8AAAAAAAAAAAAAAAAAAAAAAAAAAABIYMEvAAAAAAAAAAAAAAAAAAAAAAAAAAAAJLDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAABJY8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAJeqseAAAAAAAAAAAAAKBKCwsLYR8YGFilSYC1anZ2NvlsnuctnKSzfPvb3w57s/t24MCBsI+Pj694JtavRqMR9uHh4bBv3769leNAqNnXY5k/V6rW7L2tZ9F790zTr79W712nfy9s2rSpsI2OjoZnN2zYEPZ6vZ400/+anJwsbDMzM6WuDbTG61//+rBv2bIl7NG/xQ0ODibNBAAAAAAAAFXrqXoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAWIss+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAEFvwCAAAAAAAAAAAAAAAAAAAAAAAAAABAAgt+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIEFvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDAgl8AAAAAAAAAAAAAAAAAAAAAAAAAAABI0Fv1AAAAAAAAAAAAAABV+vvf/x72zZs3r9IkQDc6ffp02Pv7+1dpkpVrNBphHxsbC/uZM2fCvnXr1hXPxPq2sLBQ2H7729+GZ6emplo9DqugVqtVPUKhPM+Tzw4PD4d9dnY2+dpVa/be1rPovXum6ddfq/duPX8vvPe97616BKDDbdy4MewvvPDCKk0CAAAAAAAAq6en6gEAAAAAAAAAAAAAAAAAAAAAAAAAAABgLbLgFwAAAAAAAAAAAAAAAAAAAAAAAAAAABJY8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAJLPgFAAAAAAAAAAAAAAAAAAAAAAAAAACABBb8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQAILfgEAAAAAAAAAAAAAAAAAAAAAAAAAACCBBb8AAAAAAAAAAAAAAAAAAAAAAAAAAACQoLfqAQAAAAAAAAAAAACq1NfXF/Znn312lSYB1qoHHngg7GNjY4Xt0KFD4dnx8fGwb9q0KeyLi4thj15/79694dl6vR72ZrZu3VrqPOtPs6/X48ePF7apqalWj/MfFhYWwj49PV3YDh482Opxukae51WP0BZvf/vbqx6hbdr53ubn58M+NDQU9rm5ubAPDg6ueKaXW6/Ptd3vy31LMzk5GfYrrrgi7Lt3705+7WZ//yxrdHS0rdcH2u/5558P+xvf+MZVmgQAAAAAAABWT0/VAwAAAAAAAAAAAAAAAAAAAAAAAAAAAMBaZMEvAAAAAAAAAAAAAAAAAAAAAAAAAAAAJLDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAABJY8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAJLPgFAAAAAAAAAAAAAAAAAAAAAAAAAACABBb8AgAAAAAAAAA
AAAAAAAAAAAAAAAAAQAILfgEAAAAAAAAAAAAAAAAAAAAAAAAAACBBb9UDAAAAAAAAAAAAAFTpmmuuCftnPvOZsJ8/fz7svb0+rgmtsLi4WNm1+/r6wr5r166wj42NFbb9+/eHZ5v1ss6cOZN8dnh4OOyzs7NhP3v2bNhffPHFFc+0XGWf+cLCQivH+Q+nT58Oe39/f9teu92a3feRkZGwR19T4+PjSTO1yszMTNuuPTk5Wer81NRUiyahVQYGBsI+MTER9mZ/NjT7OVJGs9mavbcyhoaG2no+z/NS14/eu2eafv1ovirvW5bFs7X7vjX7u0iZv0Pu3r077E899VTytbOs+ddcs79jAtU7depU2P/617+G/V3velcrx/k3O3cTYnXd93H8d8ZBJ58rGy1p4iptKs0EIywXkklFhUqpOaDQzgrsCVoUposeNhUoVGQtKiGaMBdpUGoFilq2qLTowSnMR3yONM3M/N+Le3Ff1H2+Z/ydpnPGeb22b35/v2pFXHl9AAAAAAAAoC401PoAAAAAAAAAAAAAAAAAAAAAAAAAAAAA6I4M/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAGA78AAAAAAAAAAAAAAAAAAAAAAAAAAACQwcAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDDwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABkM/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAGA78AAAAAAAAAAAAAAAAAAAAAAAAAAACQobHWBwAAAAAAAAAAAADU0vTp08P+8MMPh/29994L+7Rp0874JuDvhg4dWrNvF0UR9ubm5rBv3769bHv11VfDt0899VTY77333rA/9thjYW9paQl75Mknnwz7ypUrw17p5z5v3rywz58/v2w7ePBg+PbEiRNhL5VKYe9Kra2tVb2v9NdrLS1cuDDslf6aqWfV/r7Bf6v0z9dRo0aFvZq/Ht96662wz5o1K/vb1froo4/CftNNN1X1vivV8vc0pfj3tZa/p50R/drV8tctpdr+2j333HNhHz58eNjb2tqyWkrxv4OlVPnvtUmTJoUdqH+vvfZa2K+99tqwV/rnNwAAAAAAAHRHDbU+AAAAAAAAAAAAAAAAAAAAAAAAAAAAALojA78AAAAAAAAAAAAAAAAAAAAAAAAAAACQwcAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDDwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABkM/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAGA78AAAAAAAAAAAAAAAAAAAAAAAAAAACQwcAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZCgVRVHrG1JKqS6OAAAAAAAAAAAAAPirBQsWhH3FihVh/+yzz8Leu3fvM74JAAAAALrCjh07wj5q1Kiwt7e3h/32228/45sAAAAAAACgi7RV6PF//PovDVUeAgAAAAAAAAAAAAAAAAAAAAAAAAAAAD2SgV8AAAAAAAAAAAAAAAAAAAAAAAAAAADIYOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAMhj4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgAwGfgEAAAAAAAAAAAAAAAAAAAAAAAAAACCDgV8AAAAAAAAAAAAAAAAAAAAAAAAAAADIYOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAMpSKoqj1DSmlVBdHAAAAAAAAAAAAAPzV8ePHwz5q1Kiw33bbbWF/8cUXz/gmAAAAAMhx8uTJsE+cODHsQ4YMCfvKlSvP+CYAAAAAAACokbYKvb2zH2qo8hAAAAAAAAAAAAAAAAAAAAAAAAAAAADokQz8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQAYDvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDBwC8AAAAAAAAAAAAAAAAAAAAAAAAAAABkMPALAAAAAAAAAAAAAAAAAAAAAAAAAAAAGQz8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQIbGWh8AAAAAAAAAAAAAUM/69u0b9jfffDPsN954Y9iHDx9etj3++OPhWwAAAAD4q1OnTpVtM2fODN/u2rUr7O+++27WTQAAAAAAAHA2a6j1AQAAAAAAAAAAAAAAAAAAAAAAAAAAANAdGfgFAAAAAAAAAAAAAAAAAAAAAAAAAACADAZ+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIOBXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMhg4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAAyGPgFAAAAAAAAAAAAAAAAAAAAAAAAAACADAZ+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIENjrQ8AAAAAAAAAAAAA6M5uuOGGsL/xxhthnz17dtm2b9++8O3zzz8f9sZGf1QUAAAA4Gxz+PDhsM+cObNs+/zzz8O3a9euDXtzc3PYAQAAAAAAoCdqqPUBAAAAAAAAAAAAAAAAAAAAAAAAAAAA0B0Z+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAMBn4BAAAAAAAAAAAAAAAAAAAAAAAAAAAgg4FfAAAAAAAAAAAAAAAAAAAAAAAAAAAAyGDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAADIY+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAMBn4BAAAAAAAAAAAAAAAAAAAAAAAAAAAgQ2OtDwAAAAAAAAAAAAA4m82aNSvsTU1NZducOXPCt99++23Y33777bCfe+65YQcAAADg31fpf/OZMmVK2P/888+ybd26deHb0aNHhx0AAAAAAAD4u4ZaHwAAAAAAAAAAAAAAAAAAAAAAAAAAAADdkYFfAAAAAAAAAAAAAAAAAAAAAAAAAAAAyGDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAADIY+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAMBn4BAAAAAAAAAAAAAAAAAAAAAAAAAAAgg4FfAAAAAAAAAAAAAAAAAAAAAAAAAAAAyGDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAADKUiqKo9Q0ppVQXRwAAAAAAAAAAAADUky1btoR96tSpYa/050RfeumlsN92221hBwAAAODvTp8+HfYXXngh7E888UTYx44dG/bly5eXbUOGDAnfAgAAAAAAQA/SVqG3d/ZDDVUeAgAAAAAAAAAAAAAAAAAAAAAAAAAAAD2SgV8AAAAAAAAAAAAAAAAAAAAAAAAAAADIYOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAMhj4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgAwGfgEAAAAAAAAAAAAAAAAAAAAAAAAAACCDgV8AAAAAAAAAAAAAAAAAAAAAAAAAAADIYOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAMpSKoqj1DSmlVBdHAAAAAAAAAAAAAHQnBw4cCPu8efPC/vbbb4d95syZZduiRYvCtxdeeGHYAQAAALqzL774omybO3du+Hbz5s1hf/TRR8O+YMGCsPfu3TvsAAAAAAAAQEoppbYKvb2zH2qo8hAAAAAAAAAAAAAAAA
AAAAAAAAAAAADokQz8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQAYDvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDBwC8AAAAAAAAAAAAAAAAAAAAAAAAAAABkMPALAAAAAAAAAAAAAAAAAAAAAAAAAAAAGQz8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQIZSURS1viGllOriCAAAAAAAAAAAAICe5IMPPgj7/fffX7b9/PPP4dtHHnkk7A888EDYBw0aFHYAAACAamzbti3szzzzTNhff/31sm38+PHh2yVLloT9qquuCjsAAAAAAADwj2ir0Ns7+6GGKg8BAAAAAAAAAAAAAAAAAAAAAAAAAACAHsnALwAAAAAAAAAAAAAAAAAAAAAAAAAAAGQw8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDPwCAAAAAAAAAAAAAAAAAAAAAAAAAABABgO/AAAAAAAAAAAAAAAAAAAAAAAAAAAAkMHALwAAAAAAAAAAAAAAAAAAAAAAAAAAAGQw8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAAZSkVR1PqGlFKqiyMAAAAAAAAAAAAA+D/Hjx8v25599tnw7eLFi6v6sR988MGwP/TQQ2XboEGDqvqxAQAAgPq3bdu2sD/99NNhX7p0adgvvvjisC9cuLBsmzNnTvi2VCqFHQAAAAAAAPhXtFXo7Z39UEOVhwAAAAAAAAAAAAAAAAAAAAAAAAAAAECPZOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAMhj4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgAwGfgEAAAAAAAAAAAAAAAAAAAAAAAAAACCDgV8AAAAAAAAAAAAAAAAAAAAAAAAAAADIYOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAMhj4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgAyloihqfUNKKdXFEQAAAAAAAAAAAAD8M3755ZewL168OOyLFi0K++nTp8u2e+65J3w7d+7csF955ZVhBwAAADqn0v+Hde3atWXbkiVLwrfLly8Pe0tLS9jnz58f9tmzZ4e9sbEx7AAAAAAAAEDda6vQ2zv7oYYqDwEAAAAAAAAAAAAAAAAAAAAAAAAAAIAeycAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDDwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABkM/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAGA78AAAAAAAAAAAAAAAAAAAAAAAAAAACQwcAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDDwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABlKRVHU+oaUUqqLIwAAAAAAAAAAAACoD0eOHAn7K6+8ktVSSqmjoyPsEydODPvcuXPDfuedd4a9T58+YQcAAIB6cfjw4bAvXbo07EuWLAn7d999V7ZNmDAhfHvfffeF/e677w57Y2Nj2AEAAAAAAICzXluF3t7ZDzVUeQgAAAAAAAAAAAAAAAAAAAAAAAAAAAD0SAZ+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIOBXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMhg4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAAyGPgFAAAAAAAAAAAAAAAAAAAAAAAAAACADAZ+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIOBXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMhQKoqi1jeklFJdHAEAAAAAAAAAAABA91fpz8d+/PHHYX/55ZfD/u6774a9f//+YZ82bVrZNn369PDt5MmTw967d++wAwAAcPY5cuRI2FesWFG2LVu2LHy7atWqsDc1NYV99uzZYb/33nvLttGjR4dvAQAAAAAAAKrUVqG3d/ZDDVUeAgAAAAAAAAAAAAAAAAAAAAAAAAAAAD2SgV8AAAAAAAAAAAAAAAAAAAAAAAAAAADIYOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAMhj4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgAwGfgEAAAAAAAAAAAAAAAAAAAAAAAAAACCDgV8AAAAAAAAAAAAAAAAAAAAAAAAAAADIYOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAMpSKoqj1DSmlVBdHAAAAAAAAAAAAAEAl+/btC/uyZcuy+/r168O3gwYNCvvUqVPDftddd4V90qRJZVvfvn3DtwAAAPz/Dhw4EPb3338/7O+8807YV69eHfaGhoay7dZbbw3fzpgxI+xTpkwJe79+/cIOAAAAAAAAUENtFXp7Zz9U/r/KAgAAAAAAAAAAAAAAAAAAAAAAAAAAAGUZ+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAMBn4BAAAAAAAAAAAAAAAAAAAAAAAAAAAgg4FfAAAAAAAAAAAAAAAAAAAAAAAAAAAAyGDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAADIY+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAMpaIoan1DSinVxREAAAAAAAAAAAAAUEt79+4N+/Lly8O+bNmysK9fvz7sjY2NZduECRPCtzfffHNVfezYsWEvlUphBwAAiJw8eTLsGzZsCPuaNWvKttWrV4dvv/jii7A3NTWF/ZZbbgn7jBkzwn7HHXeUbQMGDAjfAgAAAAAAAJzF2ir09s5+qKHKQwAAAAAAAAAAAAAAAAAAAAAAAAAAAKBHMvALAAAAAAAAAAAAAAAAAAAAAAAAAAAAGQz8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQAYDvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDBwC8AAAAAAAAAAAAAAAAAAAAAAAAAAABkMPALAAAAAAAAAAAAAAAAAAAAAAAAAAAAGQz8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQIZSURS1viGllOriCAAAAAAAAAAAAAA4m/3yyy9h//DDD8u2NWvWhG9XrVoV9p9++inszc3NYZ84cWLYx48fn9VSSmncuHFh79OnT9gBAIDOOXr0aNg3bdpUtm3cuDF8W6lv2LAh7MeOHQv72LFjy7bJkyeHb2+55ZawT5gwIexNTU1hBwAAAAAAACBLW4Xe3tkPNVR5CAAAAAAAAAAAAAAAAAAAAAAAAAAAAPRIBn4BAAAAAAAAAAAAAAAAAAAAAAAAAAAgg4FfAAAAAAAAAAAAAAAAAAAAAJktQvsAAA8qSURBVAAAAAAAyGDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAADIY+AUAAAAAAAAAAAAAAAAAAAAAAAAAAIAMBn4BAAAAAAAAAAAAAAAAAAAAAAAAAAAgg4FfAAAAAAAAAAAAAAAAAAAAAAAAAAAAyFAqiqLWN6SUUl0cAQAAAAAAAAAAAAB0jY6OjrCvWrUq7OvWrQv7xo0by7bdu3eHb/v06RP2cePGhf36668P+3XXXVe2jRkzJnw7cuTIsPfq1SvsAAD0PCdOnAj7N998U7Z9+eWX4dtNmzZV1b/++uuwnz59umy74oorwreV/r180qRJYZ88eXLYhw4dGnYAAAAAAAAAup22Cr29s
x9qqPIQAAAAAAAAAAAAAAAAAAAAAAAAAAAA6JEM/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAGA78AAAAAAAAAAAAAAAAAAAAAAAAAAACQwcAvAAAAAAAAAAAAAAAAAAAAAAAAAAAAZDDwCwAAAAAAAAAAAAAAAAAAAAAAAAAAABkM/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEAGA78AAAAAAAAAAAAAAAAAAAAAAAAAAACQoVQURa1vSCmlujgCAAAAAAAAAAAAADj77NixI+wbNmwI+yeffBL2Tz/9NOybN28u206ePBm+7du3b9hHjRoV9rFjx4Z9zJgxZdvVV18dvm1tbQ37sGHDwg4AUM9Onz4d9u3bt5dt33//ffg2+vfDzvQtW7aEvdKPf+rUqbJtwIAB4dtx48aFfcKECWG//vrrs/t5550XvgUAAAAAAACAM9RWobd39kMNVR4CAAAAAAAAAAAAAAAAAAAAAAAAAAAAPZKBXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMhg4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAAyGPgFAAAAAAAAAAAAAAAAAAAAAAAAAACADAZ+AQAAAAAAAAAAAAAAAAAAAAAAAAAAIIOBXwAAAAAAAAAAAAAAAAAAAAAAAAAAAMhg4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAAylIqiqPUNKaVUF0cAAAAAAAAAAAAAAPzT/vjjj7Ltm2++Cd9+9dVXYd+8eXOX9f3794dvKxk4cGDYR4wYEfbLL788+31ra2v49rLLLgt7S0tL2IcNGxb2Xr16hR0Azha///572Hfv3h32HTt2hL2joyOrdaZv3bo17D/++GPYK/3cI//5z3/Cfs0114R9zJgxVb2P+qWXXhq+LZVKYQcAAAAAAACAbqStQm/v7IcaqjwEAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiQDvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJDBwC8AAAAAAAAAAAAAAAAAAAAAAAAAAABkMPALAAAAAAAAAAAAAAAAAAAAAAAAAAAAGQz8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQAYDvwAAAAAAAAAAAAAAAAAAAAAAAAAAAJChVBRFrW9IKaW6OAIAAAAAAAAAAAAAgP+1f//+sH/77bdh/+GHH8K+devWqt53dHRktZRSOnHiRNgraWxsDPvQoUPLtksuuSR8O3z48Kp6S0tL2C+44IKwn3/++WXbkCFDwrfNzc3Z304ppf79+4cdoCv9/PPPYT9w4EDYDx06VLYdPHgw+21KKe3duzfsu3btCvvOnTuz3+7ZsyfslW6r1uDBg8u2ESNGhG8vv/zysI8cObLL3re2toZvBw4cGHYAAAAAAAAA4F/RVqG3d/ZDDVUeAgAAAAAAAAAAAAAAAAAAAAAAAAAAAD2SgV8AAAAAAAAAAAAAAAAAAAAAAAAAAADIYOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAMhj4BQAAAAAAAAAAAAAAAAAAAAAAAAAAgAwGfgEAAAAAAAAAAAAAAAAAAAAAAAAAACCDgV8AAAAAAAAAAAAAAAAAAAAAAAAAAADIYOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAMpSKoqj1DSmlVBdHAAAAAAAAAAAAAADQ/VX6c/K7du0K+44dO8K+e/fu7O/v3Lkz+21KKe3Zsyfs27dvD/vBgwfD/vvvv4e9KzU1NYX9/PPPL9uGDBkSvu3bt2/Y+/XrF/bBgweH/Zxzzsn+sSt9u9L7Pn36hL2S6McvlUpVfbuSrv65RY4fPx72rvx74Y8//gj7r7/+WtX3jx07FvbffvutbDty5Ej49ujRo9nfTqnyzy368Sv9vCr98+3QoUNhP3XqVNi7UqW/Fy688MKwX3TRRWG/5JJLyrbhw4eHbyv1lpaWLn3f3NwcdgAAAAAAAACAKrRV6O2d/VBDlYcAAAAAAAAAAAAAAAAAAAAAAAAAAABAj2TgFwAAAAAAAAAAAAAAAAAAAAAAAAAAADIY+AUAAAAA4H/auYNUNYIoDKP15IEoNg7UBbisbCOrym6yEBUVUUHEfpMMk+rwP6UVz5lequoiPZQPAAAAAAAAAAAAAAAAAAAAgIDALwAAAAAAAAAAAAAAAAAAAAAAAAAAAAQEfgEAAAAAAAAAAAAAAAAAAAAAAAAAACAg8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAABgV8AAAAAAAAAAAAAAAAAAAAAAAAAAAAIfLRt2/cOpZTyFEsAAAAAAAAAAAAAAMA7OxwO/5yt1+vq2dVqVZ13nd9sNvH5rrPH47E6P51O1fl2u63Oz+dzNCullN1uV5137X65XKrz2+1Wne/3++r8kWrfWymlXK/Xh709HA6r8/F4/LC3B4NBdT6dTr91f9fuo9EofnsymcR3l1JK0zTxvOvt2WxWnc/n82/NF4tF/H7X3V2/GwAAAAAAAAAAD/GjY/7rfy+q/yMIAAAAAAAAAAAAAAAAAAAAAAAAAAAA+CuBXwAAAAAAAAAAAAAAAAAAAAAAAAAAAAgI/AIAAAAAAAAAAAAAAAAAAAAAAAAAAEBA4BcAAAAAAAAAAAAAAAAAAAAAAAAAAAACAr8AAAAAAAAAAAAAAAAAAAAAAAAAAAAQEPgFAAAAAAAAAAAAAAAAAAAAAAAAAACAgMAvAAAAAAAAAAAAAAAAAAAAAAAAAAAABD77XgAAAAAAAAAAAAAAAHgOTdNEs1JKWS6X914HAAAAAAAAAAAAnt6g7wUAAAAAAAAAAAAAAAAAAAAAAAAAAADgFQn8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQEDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAAAICvwAAAAAAAAAAAAAAAAAAAAAAAAAAABAQ+AUAAAAAAAAAAAAAAAAAAAAAAAAAAICAwC8AAAAAAAAAAAAAAAAAAAAAAAAAAAAEBH4BAAAAAAAAAAAAAAAAAAAAAAAAAAAgIPALAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYFfAAAAAAAAAAAAAAAAAAAAAAAAAAAACAj8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQEDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAAAICvwAAAAAAAAAAAAAAAAAAAAAAAAAAABAQ+AUAAAAAAAAAAAAAAAAAAAAAAAAAAICAwC8AAAAAAAAAAAAAAAAAAAAAAAAAAAAEBH4BAAAAAAAAAAAAAAAAAAAAAAAAAAAgIPALAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYFfAAAAAAAAAAAAAAAAAAAAAAAAAAAACAj8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQEDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAAAICvwAAAAAAAAAAAAAAAAAAAAAAAAAAABAQ+AUAAAAAAAAAAAAAAAAAAAAAAAAAAICAwC8AAAAAAAAAAAAAAAAAAAAAAAAAAAAEBH4BAAAAAAAAAAAAAAAAAAAAAAAAAAAgIPALAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAYFfAAAAAAAAAAAAAAAAAAAAAAAAAAAACAj8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQEDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAAAICvwAAAAAAAAAAAAAAAAAAAAAAAAAAABAQ+AUAAAAAAAAAAAAAAAAAAAAAAAAAAICAwC8AAAAAAAAAAAAAAAAAAAAAAAAAAAAEPvte4I+ffS8AAAAAAAAAAAAAAAAAAAAAAAAAAADAW/h9r4sG97oIAAAAAAAAAAAAAAAAAAAAAAAAAAAA3onALwAAAAAAAAAAAAAAAAAAAAAAAAAAAAQEfgEAAAAAAAAAAAAAAAAAAAAAAAAAACAg8AsAAAAAAAAAAAAAAAAAAAAAAAAAAAABgV8AAAAAAAAAAAAAAAAAAAAAAAAAAAAICPwCAAAAAAAAAAAAAAAAAAAAAAAAAABAQOAXAAAAAAAAAAAAAAAAAAAAAAAAAAAAAh9t2/a9AwAAAAAAAAAAAAAAAAAAAAAAAAAAALycQd8LAAAAAAAAAAAAAAAAAAAAAAAAAAAAwCsS+AUAAAAAAAAAAAAAAAAAAAAAAAAAAICAwC8AAAAAAAAAAAAAAAAAAAAAAAAAAAAEBH4BAAAAAAAAAAAAAAAAAAAAAAAAAAAgIPALAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYFfAAAAAAAAAAAAAAAAAAAAAAAAAAAACAj8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQEDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAAAICvwAAAAAAAAAAAAAAAAAAAAAAAAAAABAQ+AUAAAAAAAAAAAAAAAAAAAAAAAAAAICAwC8AAAAAAAAAAAAAAAAAAAAAAAAAAAAEBH4BAAAAAAAAAAAAAAAAAAAAAAAAAAAgIPALAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYFfAAAAAAAAAAAAAAAAAAAAAAAAAAAACAj8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQEDgFwAAAAAAAAAAAAAAAAAAAAAAAAAAAAICvwAAAAAAAAAAAAAAAAAAAAAAAAAAABAQ+AUAAAAAAAAAAAAAAAAAAAAAAAAAAICAwC8AAAAAAAAAAAAAAAAAAAAAAAAAAAAEBH4BAAAAAAAAAAAAAAAAAAAAAAAAAAAgIPALAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYFfAAAAAAAAAAAAAAAAAAAAAAAAAAAACAj8AgAAAAAAAAAAAAAAAAAAAAAAAAAAQOALQ8hYvvw7l/QAAAAASUVORK5CYII=", "text/plain": [ "
" ] @@ -352,22 +350,27 @@ "source": [ "if INTERACTIVE:\n", " # create widget to switch between trees and control info in nodes\n", - " interact(render_tree,\n", - " tree_index=(0, gbm.num_trees() - 1),\n", - " show_info=SelectMultiple( # allow multiple values to be selected\n", - " options=['None',\n", - " 'split_gain',\n", - " 'internal_value',\n", - " 'internal_count',\n", - " 'internal_weight',\n", - " 'leaf_count',\n", - " 'leaf_weight',\n", - " 'data_percentage'],\n", - " value=['None']),\n", - " precision=(0, 10))\n", + " interact(\n", + " render_tree,\n", + " tree_index=(0, gbm.num_trees() - 1),\n", + " show_info=SelectMultiple( # allow multiple values to be selected\n", + " options=[\n", + " \"None\",\n", + " \"split_gain\",\n", + " \"internal_value\",\n", + " \"internal_count\",\n", + " \"internal_weight\",\n", + " \"leaf_count\",\n", + " \"leaf_weight\",\n", + " \"data_percentage\",\n", + " ],\n", + " value=[\"None\"],\n", + " ),\n", + " precision=(0, 10),\n", + " )\n", " tree = None\n", "else:\n", - " tree = render_tree(53, ['None'])\n", + " tree = render_tree(53, [\"None\"])\n", "tree" ] } @@ -389,7 +392,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.1" + "version": "3.11.7" }, "varInspector": { "cols": { diff --git a/examples/python-guide/plot_example.py b/examples/python-guide/plot_example.py index d85fcaa41..efbb971d5 100644 --- a/examples/python-guide/plot_example.py +++ b/examples/python-guide/plot_example.py @@ -8,13 +8,13 @@ import lightgbm as lgb if lgb.compat.MATPLOTLIB_INSTALLED: import matplotlib.pyplot as plt else: - raise ImportError('You need to install matplotlib and restart your session for plot_example.py.') + raise ImportError("You need to install matplotlib and restart your session for plot_example.py.") -print('Loading data...') +print("Loading data...") # load or create your dataset -regression_example_dir = Path(__file__).absolute().parents[1] / 'regression' -df_train = pd.read_csv(str(regression_example_dir / 'regression.train'), header=None, sep='\t') -df_test = pd.read_csv(str(regression_example_dir / 'regression.test'), header=None, sep='\t') +regression_example_dir = Path(__file__).absolute().parents[1] / "regression" +df_train = pd.read_csv(str(regression_example_dir / "regression.train"), header=None, sep="\t") +df_test = pd.read_csv(str(regression_example_dir / "regression.test"), header=None, sep="\t") y_train = df_train[0] y_test = df_test[0] @@ -26,45 +26,38 @@ lgb_train = lgb.Dataset(X_train, y_train) lgb_test = lgb.Dataset(X_test, y_test, reference=lgb_train) # specify your configurations as a dict -params = { - 'num_leaves': 5, - 'metric': ('l1', 'l2'), - 'verbose': 0 -} +params = {"num_leaves": 5, "metric": ("l1", "l2"), "verbose": 0} evals_result = {} # to record eval results for plotting -print('Starting training...') +print("Starting training...") # train gbm = lgb.train( params, lgb_train, num_boost_round=100, valid_sets=[lgb_train, lgb_test], - feature_name=[f'f{i + 1}' for i in range(X_train.shape[-1])], + feature_name=[f"f{i + 1}" for i in range(X_train.shape[-1])], categorical_feature=[21], - callbacks=[ - lgb.log_evaluation(10), - lgb.record_evaluation(evals_result) - ] + callbacks=[lgb.log_evaluation(10), lgb.record_evaluation(evals_result)], ) -print('Plotting metrics recorded during training...') -ax = lgb.plot_metric(evals_result, metric='l1') +print("Plotting metrics recorded during training...") +ax = lgb.plot_metric(evals_result, metric="l1") plt.show() -print('Plotting 
feature importances...') +print("Plotting feature importances...") ax = lgb.plot_importance(gbm, max_num_features=10) plt.show() -print('Plotting split value histogram...') -ax = lgb.plot_split_value_histogram(gbm, feature='f26', bins='auto') +print("Plotting split value histogram...") +ax = lgb.plot_split_value_histogram(gbm, feature="f26", bins="auto") plt.show() -print('Plotting 54th tree...') # one tree use categorical feature to split -ax = lgb.plot_tree(gbm, tree_index=53, figsize=(15, 15), show_info=['split_gain']) +print("Plotting 54th tree...") # one tree use categorical feature to split +ax = lgb.plot_tree(gbm, tree_index=53, figsize=(15, 15), show_info=["split_gain"]) plt.show() -print('Plotting 54th tree with graphviz...') -graph = lgb.create_tree_digraph(gbm, tree_index=53, name='Tree54') +print("Plotting 54th tree with graphviz...") +graph = lgb.create_tree_digraph(gbm, tree_index=53, name="Tree54") graph.render(view=True) diff --git a/examples/python-guide/simple_example.py b/examples/python-guide/simple_example.py index 79c4f7093..2b4173cf1 100644 --- a/examples/python-guide/simple_example.py +++ b/examples/python-guide/simple_example.py @@ -6,11 +6,11 @@ from sklearn.metrics import mean_squared_error import lightgbm as lgb -print('Loading data...') +print("Loading data...") # load or create your dataset -regression_example_dir = Path(__file__).absolute().parents[1] / 'regression' -df_train = pd.read_csv(str(regression_example_dir / 'regression.train'), header=None, sep='\t') -df_test = pd.read_csv(str(regression_example_dir / 'regression.test'), header=None, sep='\t') +regression_example_dir = Path(__file__).absolute().parents[1] / "regression" +df_train = pd.read_csv(str(regression_example_dir / "regression.train"), header=None, sep="\t") +df_test = pd.read_csv(str(regression_example_dir / "regression.test"), header=None, sep="\t") y_train = df_train[0] y_test = df_test[0] @@ -23,32 +23,30 @@ lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train) # specify your configurations as a dict params = { - 'boosting_type': 'gbdt', - 'objective': 'regression', - 'metric': {'l2', 'l1'}, - 'num_leaves': 31, - 'learning_rate': 0.05, - 'feature_fraction': 0.9, - 'bagging_fraction': 0.8, - 'bagging_freq': 5, - 'verbose': 0 + "boosting_type": "gbdt", + "objective": "regression", + "metric": {"l2", "l1"}, + "num_leaves": 31, + "learning_rate": 0.05, + "feature_fraction": 0.9, + "bagging_fraction": 0.8, + "bagging_freq": 5, + "verbose": 0, } -print('Starting training...') +print("Starting training...") # train -gbm = lgb.train(params, - lgb_train, - num_boost_round=20, - valid_sets=lgb_eval, - callbacks=[lgb.early_stopping(stopping_rounds=5)]) +gbm = lgb.train( + params, lgb_train, num_boost_round=20, valid_sets=lgb_eval, callbacks=[lgb.early_stopping(stopping_rounds=5)] +) -print('Saving model...') +print("Saving model...") # save model to file -gbm.save_model('model.txt') +gbm.save_model("model.txt") -print('Starting predicting...') +print("Starting predicting...") # predict y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration) # eval rmse_test = mean_squared_error(y_test, y_pred) ** 0.5 -print(f'The RMSE of prediction is: {rmse_test}') +print(f"The RMSE of prediction is: {rmse_test}") diff --git a/examples/python-guide/sklearn_example.py b/examples/python-guide/sklearn_example.py index 2f58ec284..67d1193be 100644 --- a/examples/python-guide/sklearn_example.py +++ b/examples/python-guide/sklearn_example.py @@ -8,85 +8,71 @@ from sklearn.model_selection import GridSearchCV 
import lightgbm as lgb -print('Loading data...') +print("Loading data...") # load or create your dataset -regression_example_dir = Path(__file__).absolute().parents[1] / 'regression' -df_train = pd.read_csv(str(regression_example_dir / 'regression.train'), header=None, sep='\t') -df_test = pd.read_csv(str(regression_example_dir / 'regression.test'), header=None, sep='\t') +regression_example_dir = Path(__file__).absolute().parents[1] / "regression" +df_train = pd.read_csv(str(regression_example_dir / "regression.train"), header=None, sep="\t") +df_test = pd.read_csv(str(regression_example_dir / "regression.test"), header=None, sep="\t") y_train = df_train[0] y_test = df_test[0] X_train = df_train.drop(0, axis=1) X_test = df_test.drop(0, axis=1) -print('Starting training...') +print("Starting training...") # train -gbm = lgb.LGBMRegressor(num_leaves=31, - learning_rate=0.05, - n_estimators=20) -gbm.fit(X_train, y_train, - eval_set=[(X_test, y_test)], - eval_metric='l1', - callbacks=[lgb.early_stopping(5)]) +gbm = lgb.LGBMRegressor(num_leaves=31, learning_rate=0.05, n_estimators=20) +gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], eval_metric="l1", callbacks=[lgb.early_stopping(5)]) -print('Starting predicting...') +print("Starting predicting...") # predict y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration_) # eval rmse_test = mean_squared_error(y_test, y_pred) ** 0.5 -print(f'The RMSE of prediction is: {rmse_test}') +print(f"The RMSE of prediction is: {rmse_test}") # feature importances -print(f'Feature importances: {list(gbm.feature_importances_)}') +print(f"Feature importances: {list(gbm.feature_importances_)}") # self-defined eval metric # f(y_true: array, y_pred: array) -> name: str, eval_result: float, is_higher_better: bool # Root Mean Squared Logarithmic Error (RMSLE) def rmsle(y_true, y_pred): - return 'RMSLE', np.sqrt(np.mean(np.power(np.log1p(y_pred) - np.log1p(y_true), 2))), False + return "RMSLE", np.sqrt(np.mean(np.power(np.log1p(y_pred) - np.log1p(y_true), 2))), False -print('Starting training with custom eval function...') +print("Starting training with custom eval function...") # train -gbm.fit(X_train, y_train, - eval_set=[(X_test, y_test)], - eval_metric=rmsle, - callbacks=[lgb.early_stopping(5)]) +gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], eval_metric=rmsle, callbacks=[lgb.early_stopping(5)]) # another self-defined eval metric # f(y_true: array, y_pred: array) -> name: str, eval_result: float, is_higher_better: bool # Relative Absolute Error (RAE) def rae(y_true, y_pred): - return 'RAE', np.sum(np.abs(y_pred - y_true)) / np.sum(np.abs(np.mean(y_true) - y_true)), False + return "RAE", np.sum(np.abs(y_pred - y_true)) / np.sum(np.abs(np.mean(y_true) - y_true)), False -print('Starting training with multiple custom eval functions...') +print("Starting training with multiple custom eval functions...") # train -gbm.fit(X_train, y_train, - eval_set=[(X_test, y_test)], - eval_metric=[rmsle, rae], - callbacks=[lgb.early_stopping(5)]) +gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], eval_metric=[rmsle, rae], callbacks=[lgb.early_stopping(5)]) -print('Starting predicting...') +print("Starting predicting...") # predict y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration_) # eval rmsle_test = rmsle(y_test, y_pred)[1] rae_test = rae(y_test, y_pred)[1] -print(f'The RMSLE of prediction is: {rmsle_test}') -print(f'The RAE of prediction is: {rae_test}') +print(f"The RMSLE of prediction is: {rmsle_test}") +print(f"The RAE of prediction is: 
{rae_test}") # other scikit-learn modules estimator = lgb.LGBMRegressor(num_leaves=31) -param_grid = { - 'learning_rate': [0.01, 0.1, 1], - 'n_estimators': [20, 40] -} +param_grid = {"learning_rate": [0.01, 0.1, 1], "n_estimators": [20, 40]} gbm = GridSearchCV(estimator, param_grid, cv=3) gbm.fit(X_train, y_train) -print(f'Best parameters found by grid search are: {gbm.best_params_}') +print(f"Best parameters found by grid search are: {gbm.best_params_}") diff --git a/python-package/lightgbm/basic.py b/python-package/lightgbm/basic.py index 0846b6b04..93862f983 100644 --- a/python-package/lightgbm/basic.py +++ b/python-package/lightgbm/basic.py @@ -18,9 +18,23 @@ from typing import TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Optional, import numpy as np import scipy.sparse -from .compat import (PANDAS_INSTALLED, PYARROW_INSTALLED, arrow_cffi, arrow_is_floating, arrow_is_integer, concat, - dt_DataTable, pa_Array, pa_chunked_array, pa_ChunkedArray, pa_compute, pa_Table, - pd_CategoricalDtype, pd_DataFrame, pd_Series) +from .compat import ( + PANDAS_INSTALLED, + PYARROW_INSTALLED, + arrow_cffi, + arrow_is_floating, + arrow_is_integer, + concat, + dt_DataTable, + pa_Array, + pa_chunked_array, + pa_ChunkedArray, + pa_compute, + pa_Table, + pd_CategoricalDtype, + pd_DataFrame, + pd_Series, +) from .libpath import find_lib_path if TYPE_CHECKING: diff --git a/python-package/lightgbm/callback.py b/python-package/lightgbm/callback.py index b68bb63c7..0a4fa65a5 100644 --- a/python-package/lightgbm/callback.py +++ b/python-package/lightgbm/callback.py @@ -5,8 +5,14 @@ from dataclasses import dataclass from functools import partial from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union -from .basic import (Booster, _ConfigAliases, _LGBM_BoosterEvalMethodResultType, - _LGBM_BoosterEvalMethodResultWithStandardDeviationType, _log_info, _log_warning) +from .basic import ( + Booster, + _ConfigAliases, + _LGBM_BoosterEvalMethodResultType, + _LGBM_BoosterEvalMethodResultWithStandardDeviationType, + _log_info, + _log_warning, +) if TYPE_CHECKING: from .engine import CVBooster diff --git a/python-package/lightgbm/dask.py b/python-package/lightgbm/dask.py index 88e4779ee..ee8bf58ce 100644 --- a/python-package/lightgbm/dask.py +++ b/python-package/lightgbm/dask.py @@ -19,12 +19,36 @@ import numpy as np import scipy.sparse as ss from .basic import LightGBMError, _choose_param_value, _ConfigAliases, _log_info, _log_warning -from .compat import (DASK_INSTALLED, PANDAS_INSTALLED, SKLEARN_INSTALLED, Client, Future, LGBMNotFittedError, concat, - dask_Array, dask_array_from_delayed, dask_bag_from_delayed, dask_DataFrame, dask_Series, - default_client, delayed, pd_DataFrame, pd_Series, wait) -from .sklearn import (LGBMClassifier, LGBMModel, LGBMRanker, LGBMRegressor, _LGBM_ScikitCustomObjectiveFunction, - _LGBM_ScikitEvalMetricType, _lgbmmodel_doc_custom_eval_note, _lgbmmodel_doc_fit, - _lgbmmodel_doc_predict) +from .compat import ( + DASK_INSTALLED, + PANDAS_INSTALLED, + SKLEARN_INSTALLED, + Client, + Future, + LGBMNotFittedError, + concat, + dask_Array, + dask_array_from_delayed, + dask_bag_from_delayed, + dask_DataFrame, + dask_Series, + default_client, + delayed, + pd_DataFrame, + pd_Series, + wait, +) +from .sklearn import ( + LGBMClassifier, + LGBMModel, + LGBMRanker, + LGBMRegressor, + _LGBM_ScikitCustomObjectiveFunction, + _LGBM_ScikitEvalMetricType, + _lgbmmodel_doc_custom_eval_note, + _lgbmmodel_doc_fit, + _lgbmmodel_doc_predict, +) __all__ = [ 'DaskLGBMClassifier', diff --git 
a/python-package/lightgbm/engine.py b/python-package/lightgbm/engine.py index 822aa3b35..e1779f072 100644 --- a/python-package/lightgbm/engine.py +++ b/python-package/lightgbm/engine.py @@ -10,10 +10,21 @@ from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union import numpy as np from . import callback -from .basic import (Booster, Dataset, LightGBMError, _choose_param_value, _ConfigAliases, _InnerPredictor, - _LGBM_BoosterEvalMethodResultType, _LGBM_BoosterEvalMethodResultWithStandardDeviationType, - _LGBM_CategoricalFeatureConfiguration, _LGBM_CustomObjectiveFunction, _LGBM_EvalFunctionResultType, - _LGBM_FeatureNameConfiguration, _log_warning) +from .basic import ( + Booster, + Dataset, + LightGBMError, + _choose_param_value, + _ConfigAliases, + _InnerPredictor, + _LGBM_BoosterEvalMethodResultType, + _LGBM_BoosterEvalMethodResultWithStandardDeviationType, + _LGBM_CategoricalFeatureConfiguration, + _LGBM_CustomObjectiveFunction, + _LGBM_EvalFunctionResultType, + _LGBM_FeatureNameConfiguration, + _log_warning, +) from .compat import SKLEARN_INSTALLED, _LGBMBaseCrossValidator, _LGBMGroupKFold, _LGBMStratifiedKFold __all__ = [ diff --git a/python-package/lightgbm/sklearn.py b/python-package/lightgbm/sklearn.py index 120a66671..9eb2219c8 100644 --- a/python-package/lightgbm/sklearn.py +++ b/python-package/lightgbm/sklearn.py @@ -8,14 +8,41 @@ from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import scipy.sparse -from .basic import (Booster, Dataset, LightGBMError, _choose_param_value, _ConfigAliases, _LGBM_BoosterBestScoreType, - _LGBM_CategoricalFeatureConfiguration, _LGBM_EvalFunctionResultType, _LGBM_FeatureNameConfiguration, - _LGBM_GroupType, _LGBM_InitScoreType, _LGBM_LabelType, _LGBM_WeightType, _log_warning) +from .basic import ( + Booster, + Dataset, + LightGBMError, + _choose_param_value, + _ConfigAliases, + _LGBM_BoosterBestScoreType, + _LGBM_CategoricalFeatureConfiguration, + _LGBM_EvalFunctionResultType, + _LGBM_FeatureNameConfiguration, + _LGBM_GroupType, + _LGBM_InitScoreType, + _LGBM_LabelType, + _LGBM_WeightType, + _log_warning, +) from .callback import _EvalResultDict, record_evaluation -from .compat import (SKLEARN_INSTALLED, LGBMNotFittedError, _LGBMAssertAllFinite, _LGBMCheckArray, - _LGBMCheckClassificationTargets, _LGBMCheckSampleWeight, _LGBMCheckXY, _LGBMClassifierBase, - _LGBMComputeSampleWeight, _LGBMCpuCount, _LGBMLabelEncoder, _LGBMModelBase, _LGBMRegressorBase, - dt_DataTable, np_random_Generator, pd_DataFrame) +from .compat import ( + SKLEARN_INSTALLED, + LGBMNotFittedError, + _LGBMAssertAllFinite, + _LGBMCheckArray, + _LGBMCheckClassificationTargets, + _LGBMCheckSampleWeight, + _LGBMCheckXY, + _LGBMClassifierBase, + _LGBMComputeSampleWeight, + _LGBMCpuCount, + _LGBMLabelEncoder, + _LGBMModelBase, + _LGBMRegressorBase, + dt_DataTable, + np_random_Generator, + pd_DataFrame, +) from .engine import train __all__ = [ diff --git a/python-package/pyproject.toml b/python-package/pyproject.toml index cb0c276fa..648d400a2 100644 --- a/python-package/pyproject.toml +++ b/python-package/pyproject.toml @@ -81,10 +81,14 @@ minimum-version = "0.4.4" # end:build-system [tool.isort] +include_trailing_comma = true line_length = 120 +# "vertical hanging indent", to match what ruff-format does +# ref: https://pycqa.github.io/isort/docs/configuration/multi_line_output_modes.html#3-vertical-hanging-indent +multi_line_output = 3 skip_glob = [ "*/external_libs/*", - "*/lightgbm-python/*" + "*/lightgbm-python/*", ] 
[tool.mypy] @@ -108,14 +112,13 @@ docstring-code-format = false exclude = [ "build/*.py", "compile/*.py", - "examples/*.py", "external_libs/*.py", "lightgbm-python/*.py", "python-package/*.py", - "tests/*.py" ] indent-style = "space" quote-style = "double" +skip-magic-trailing-comma = false [tool.ruff.lint] ignore = [ diff --git a/tests/c_api_test/test_.py b/tests/c_api_test/test_.py index 6cfec1c44..0abd40ece 100644 --- a/tests/c_api_test/test_.py +++ b/tests/c_api_test/test_.py @@ -10,7 +10,7 @@ try: from lightgbm.basic import _LIB as LIB except ModuleNotFoundError: print("Could not import lightgbm Python package, looking for lib_lightgbm at the repo root") - if system() in ('Windows', 'Microsoft'): + if system() in ("Windows", "Microsoft"): lib_file = Path(__file__).absolute().parents[2] / "Release" / "lib_lightgbm.dll" else: lib_file = Path(__file__).absolute().parents[2] / "lib_lightgbm.so" @@ -25,7 +25,7 @@ dtype_int64 = 3 def c_str(string): - return ctypes.c_char_p(string.encode('utf-8')) + return ctypes.c_char_p(string.encode("utf-8")) def load_from_file(filename, reference): @@ -33,17 +33,13 @@ def load_from_file(filename, reference): if reference is not None: ref = reference handle = ctypes.c_void_p() - LIB.LGBM_DatasetCreateFromFile( - c_str(str(filename)), - c_str('max_bin=15'), - ref, - ctypes.byref(handle)) + LIB.LGBM_DatasetCreateFromFile(c_str(str(filename)), c_str("max_bin=15"), ref, ctypes.byref(handle)) print(LIB.LGBM_GetLastError()) num_data = ctypes.c_int(0) LIB.LGBM_DatasetGetNumData(handle, ctypes.byref(num_data)) num_feature = ctypes.c_int(0) LIB.LGBM_DatasetGetNumFeature(handle, ctypes.byref(num_feature)) - print(f'#data: {num_data.value} #feature: {num_feature.value}') + print(f"#data: {num_data.value} #feature: {num_feature.value}") return handle @@ -69,20 +65,22 @@ def load_from_csr(filename, reference): ctypes.c_int64(len(csr.indptr)), ctypes.c_int64(len(csr.data)), ctypes.c_int64(csr.shape[1]), - c_str('max_bin=15'), + c_str("max_bin=15"), ref, - ctypes.byref(handle)) + ctypes.byref(handle), + ) num_data = ctypes.c_int(0) LIB.LGBM_DatasetGetNumData(handle, ctypes.byref(num_data)) num_feature = ctypes.c_int(0) LIB.LGBM_DatasetGetNumFeature(handle, ctypes.byref(num_feature)) LIB.LGBM_DatasetSetField( handle, - c_str('label'), + c_str("label"), label.ctypes.data_as(ctypes.POINTER(ctypes.c_float)), ctypes.c_int(len(label)), - ctypes.c_int(dtype_float32)) - print(f'#data: {num_data.value} #feature: {num_feature.value}') + ctypes.c_int(dtype_float32), + ) + print(f"#data: {num_data.value} #feature: {num_feature.value}") return handle @@ -104,20 +102,22 @@ def load_from_csc(filename, reference): ctypes.c_int64(len(csc.indptr)), ctypes.c_int64(len(csc.data)), ctypes.c_int64(csc.shape[0]), - c_str('max_bin=15'), + c_str("max_bin=15"), ref, - ctypes.byref(handle)) + ctypes.byref(handle), + ) num_data = ctypes.c_int(0) LIB.LGBM_DatasetGetNumData(handle, ctypes.byref(num_data)) num_feature = ctypes.c_int(0) LIB.LGBM_DatasetGetNumFeature(handle, ctypes.byref(num_feature)) LIB.LGBM_DatasetSetField( handle, - c_str('label'), + c_str("label"), label.ctypes.data_as(ctypes.POINTER(ctypes.c_float)), ctypes.c_int(len(label)), - ctypes.c_int(dtype_float32)) - print(f'#data: {num_data.value} #feature: {num_feature.value}') + ctypes.c_int(dtype_float32), + ) + print(f"#data: {num_data.value} #feature: {num_feature.value}") return handle @@ -137,20 +137,22 @@ def load_from_mat(filename, reference): ctypes.c_int32(mat.shape[0]), ctypes.c_int32(mat.shape[1]), ctypes.c_int(1), - 
c_str('max_bin=15'), + c_str("max_bin=15"), ref, - ctypes.byref(handle)) + ctypes.byref(handle), + ) num_data = ctypes.c_int(0) LIB.LGBM_DatasetGetNumData(handle, ctypes.byref(num_data)) num_feature = ctypes.c_int(0) LIB.LGBM_DatasetGetNumFeature(handle, ctypes.byref(num_feature)) LIB.LGBM_DatasetSetField( handle, - c_str('label'), + c_str("label"), label.ctypes.data_as(ctypes.POINTER(ctypes.c_float)), ctypes.c_int(len(label)), - ctypes.c_int(dtype_float32)) - print(f'#data: {num_data.value} #feature: {num_feature.value}') + ctypes.c_int(dtype_float32), + ) + print(f"#data: {num_data.value} #feature: {num_feature.value}") return handle @@ -159,29 +161,26 @@ def free_dataset(handle): def test_dataset(): - binary_example_dir = Path(__file__).absolute().parents[2] / 'examples' / 'binary_classification' - train = load_from_file(binary_example_dir / 'binary.train', None) - test = load_from_mat(binary_example_dir / 'binary.test', train) + binary_example_dir = Path(__file__).absolute().parents[2] / "examples" / "binary_classification" + train = load_from_file(binary_example_dir / "binary.train", None) + test = load_from_mat(binary_example_dir / "binary.test", train) free_dataset(test) - test = load_from_csr(binary_example_dir / 'binary.test', train) + test = load_from_csr(binary_example_dir / "binary.test", train) free_dataset(test) - test = load_from_csc(binary_example_dir / 'binary.test', train) + test = load_from_csc(binary_example_dir / "binary.test", train) free_dataset(test) - save_to_binary(train, 'train.binary.bin') + save_to_binary(train, "train.binary.bin") free_dataset(train) - train = load_from_file('train.binary.bin', None) + train = load_from_file("train.binary.bin", None) free_dataset(train) def test_booster(): - binary_example_dir = Path(__file__).absolute().parents[2] / 'examples' / 'binary_classification' - train = load_from_mat(binary_example_dir / 'binary.train', None) - test = load_from_mat(binary_example_dir / 'binary.test', train) + binary_example_dir = Path(__file__).absolute().parents[2] / "examples" / "binary_classification" + train = load_from_mat(binary_example_dir / "binary.train", None) + test = load_from_mat(binary_example_dir / "binary.test", train) booster = ctypes.c_void_p() - LIB.LGBM_BoosterCreate( - train, - c_str("app=binary metric=auc num_leaves=31 verbose=0"), - ctypes.byref(booster)) + LIB.LGBM_BoosterCreate(train, c_str("app=binary metric=auc num_leaves=31 verbose=0"), ctypes.byref(booster)) LIB.LGBM_BoosterAddValidData(booster, test) is_finished = ctypes.c_int(0) for i in range(1, 51): @@ -189,28 +188,18 @@ def test_booster(): result = np.array([0.0], dtype=np.float64) out_len = ctypes.c_int(0) LIB.LGBM_BoosterGetEval( - booster, - ctypes.c_int(0), - ctypes.byref(out_len), - result.ctypes.data_as(ctypes.POINTER(ctypes.c_double))) + booster, ctypes.c_int(0), ctypes.byref(out_len), result.ctypes.data_as(ctypes.POINTER(ctypes.c_double)) + ) if i % 10 == 0: - print(f'{i} iteration test AUC {result[0]:.6f}') - LIB.LGBM_BoosterSaveModel( - booster, - ctypes.c_int(0), - ctypes.c_int(-1), - ctypes.c_int(0), - c_str('model.txt')) + print(f"{i} iteration test AUC {result[0]:.6f}") + LIB.LGBM_BoosterSaveModel(booster, ctypes.c_int(0), ctypes.c_int(-1), ctypes.c_int(0), c_str("model.txt")) LIB.LGBM_BoosterFree(booster) free_dataset(train) free_dataset(test) booster2 = ctypes.c_void_p() num_total_model = ctypes.c_int(0) - LIB.LGBM_BoosterCreateFromModelfile( - c_str('model.txt'), - ctypes.byref(num_total_model), - ctypes.byref(booster2)) - data = 
np.loadtxt(str(binary_example_dir / 'binary.test'), dtype=np.float64) + LIB.LGBM_BoosterCreateFromModelfile(c_str("model.txt"), ctypes.byref(num_total_model), ctypes.byref(booster2)) + data = np.loadtxt(str(binary_example_dir / "binary.test"), dtype=np.float64) mat = data[:, 1:] preb = np.empty(mat.shape[0], dtype=np.float64) num_preb = ctypes.c_int64(0) @@ -225,58 +214,51 @@ def test_booster(): ctypes.c_int(1), ctypes.c_int(0), ctypes.c_int(25), - c_str(''), + c_str(""), ctypes.byref(num_preb), - preb.ctypes.data_as(ctypes.POINTER(ctypes.c_double))) + preb.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), + ) LIB.LGBM_BoosterPredictForFile( booster2, - c_str(str(binary_example_dir / 'binary.test')), + c_str(str(binary_example_dir / "binary.test")), ctypes.c_int(0), ctypes.c_int(0), ctypes.c_int(0), ctypes.c_int(25), - c_str(''), - c_str('preb.txt')) + c_str(""), + c_str("preb.txt"), + ) LIB.LGBM_BoosterPredictForFile( booster2, - c_str(str(binary_example_dir / 'binary.test')), + c_str(str(binary_example_dir / "binary.test")), ctypes.c_int(0), ctypes.c_int(0), ctypes.c_int(10), ctypes.c_int(25), - c_str(''), - c_str('preb.txt')) + c_str(""), + c_str("preb.txt"), + ) LIB.LGBM_BoosterFree(booster2) def test_max_thread_control(): # at initialization, should be -1 num_threads = ctypes.c_int(0) - ret = LIB.LGBM_GetMaxThreads( - ctypes.byref(num_threads) - ) + ret = LIB.LGBM_GetMaxThreads(ctypes.byref(num_threads)) assert ret == 0 assert num_threads.value == -1 # updating that value through the C API should work - ret = LIB.LGBM_SetMaxThreads( - ctypes.c_int(6) - ) + ret = LIB.LGBM_SetMaxThreads(ctypes.c_int(6)) assert ret == 0 - ret = LIB.LGBM_GetMaxThreads( - ctypes.byref(num_threads) - ) + ret = LIB.LGBM_GetMaxThreads(ctypes.byref(num_threads)) assert ret == 0 assert num_threads.value == 6 # resetting to any negative number should set it to -1 - ret = LIB.LGBM_SetMaxThreads( - ctypes.c_int(-123) - ) + ret = LIB.LGBM_SetMaxThreads(ctypes.c_int(-123)) assert ret == 0 - ret = LIB.LGBM_GetMaxThreads( - ctypes.byref(num_threads) - ) + ret = LIB.LGBM_GetMaxThreads(ctypes.byref(num_threads)) assert ret == 0 assert num_threads.value == -1 diff --git a/tests/cpp_tests/test.py b/tests/cpp_tests/test.py index d1132064e..b9a49e071 100644 --- a/tests/cpp_tests/test.py +++ b/tests/cpp_tests/test.py @@ -3,5 +3,5 @@ from pathlib import Path import numpy as np -preds = [np.loadtxt(str(name)) for name in Path(__file__).absolute().parent.glob('*.pred')] +preds = [np.loadtxt(str(name)) for name in Path(__file__).absolute().parent.glob("*.pred")] np.testing.assert_allclose(preds[0], preds[1]) diff --git a/tests/distributed/_test_distributed.py b/tests/distributed/_test_distributed.py index e37dafee6..9615966ab 100644 --- a/tests/distributed/_test_distributed.py +++ b/tests/distributed/_test_distributed.py @@ -14,16 +14,16 @@ from sklearn.metrics import accuracy_score TESTS_DIR = Path(__file__).absolute().parent -@pytest.fixture(scope='module') +@pytest.fixture(scope="module") def executable(pytestconfig) -> str: """Returns the path to the lightgbm executable.""" - return pytestconfig.getoption('execfile') + return pytestconfig.getoption("execfile") def _find_random_open_port() -> int: """Find a random open port on localhost.""" with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: - s.bind(('', 0)) + s.bind(("", 0)) port = s.getsockname()[1] return port # noqa: RET504 @@ -34,7 +34,7 @@ def _generate_n_ports(n: int) -> Generator[int, None, None]: def _write_dict(d: Dict, file: io.TextIOWrapper) -> None: for 
k, v in d.items(): - file.write(f'{k} = {v}\n') + file.write(f"{k} = {v}\n") def create_data(task: str, n_samples: int = 1_000) -> np.ndarray: @@ -42,10 +42,10 @@ def create_data(task: str, n_samples: int = 1_000) -> np.ndarray: The data is returned as a numpy array with the label as the first column. """ - if task == 'binary-classification': + if task == "binary-classification": centers = [[-4, -4], [4, 4]] X, y = make_blobs(n_samples, centers=centers, random_state=42) - elif task == 'regression': + elif task == "regression": X, y = make_regression(n_samples, n_features=4, n_informative=2, random_state=42) return np.hstack([y.reshape(-1, 1), X]) @@ -54,22 +54,22 @@ class DistributedMockup: """Simulate distributed training.""" default_train_config = { - 'task': 'train', - 'pre_partition': True, - 'machine_list_file': TESTS_DIR / 'mlist.txt', - 'tree_learner': 'data', - 'force_row_wise': True, - 'verbose': 0, - 'num_boost_round': 20, - 'num_leaves': 15, - 'num_threads': 2, + "task": "train", + "pre_partition": True, + "machine_list_file": TESTS_DIR / "mlist.txt", + "tree_learner": "data", + "force_row_wise": True, + "verbose": 0, + "num_boost_round": 20, + "num_leaves": 15, + "num_threads": 2, } default_predict_config = { - 'task': 'predict', - 'data': TESTS_DIR / 'train.txt', - 'input_model': TESTS_DIR / 'model0.txt', - 'output_result': TESTS_DIR / 'predictions.txt', + "task": "predict", + "data": TESTS_DIR / "train.txt", + "input_model": TESTS_DIR / "model0.txt", + "output_result": TESTS_DIR / "predictions.txt", } def __init__(self, executable: str): @@ -77,8 +77,8 @@ class DistributedMockup: def worker_train(self, i: int) -> subprocess.CompletedProcess: """Start the training process on the `i`-th worker.""" - config_path = TESTS_DIR / f'train{i}.conf' - cmd = [self.executable, f'config={config_path}'] + config_path = TESTS_DIR / f"train{i}.conf" + cmd = [self.executable, f"config={config_path}"] return subprocess.run(cmd) def _set_ports(self) -> None: @@ -92,18 +92,18 @@ class DistributedMockup: ports.update(candidates) i += 1 if i == max_tries: - raise RuntimeError('Unable to find non-colliding ports.') + raise RuntimeError("Unable to find non-colliding ports.") self.listen_ports = list(ports) - with open(TESTS_DIR / 'mlist.txt', 'wt') as f: + with open(TESTS_DIR / "mlist.txt", "wt") as f: for port in self.listen_ports: - f.write(f'127.0.0.1 {port}\n') + f.write(f"127.0.0.1 {port}\n") def _write_data(self, partitions: List[np.ndarray]) -> None: """Write all training data as train.txt and each training partition as train{i}.txt.""" all_data = np.vstack(partitions) - np.savetxt(str(TESTS_DIR / 'train.txt'), all_data, delimiter=',') + np.savetxt(str(TESTS_DIR / "train.txt"), all_data, delimiter=",") for i, partition in enumerate(partitions): - np.savetxt(str(TESTS_DIR / f'train{i}.txt'), partition, delimiter=',') + np.savetxt(str(TESTS_DIR / f"train{i}.txt"), partition, delimiter=",") def fit(self, partitions: List[np.ndarray], train_config: Dict) -> None: """Run the distributed training process on a single machine. 
@@ -118,7 +118,7 @@ class DistributedMockup: """ self.train_config = copy.deepcopy(self.default_train_config) self.train_config.update(train_config) - self.n_workers = self.train_config['num_machines'] + self.n_workers = self.train_config["num_machines"] self._set_ports() self._write_data(partitions) self.label_ = np.hstack([partition[:, 0] for partition in partitions]) @@ -131,7 +131,7 @@ class DistributedMockup: results = [f.result() for f in futures] for result in results: if result.returncode != 0: - raise RuntimeError('Error in training') + raise RuntimeError("Error in training") def predict(self, predict_config: Dict[str, Any]) -> np.ndarray: """Compute the predictions using the model created in the fit step. @@ -141,14 +141,14 @@ class DistributedMockup: """ self.predict_config = copy.deepcopy(self.default_predict_config) self.predict_config.update(predict_config) - config_path = TESTS_DIR / 'predict.conf' - with open(config_path, 'wt') as file: + config_path = TESTS_DIR / "predict.conf" + with open(config_path, "wt") as file: _write_dict(self.predict_config, file) - cmd = [self.executable, f'config={config_path}'] + cmd = [self.executable, f"config={config_path}"] result = subprocess.run(cmd) if result.returncode != 0: - raise RuntimeError('Error in prediction') - return np.loadtxt(str(TESTS_DIR / 'predictions.txt')) + raise RuntimeError("Error in prediction") + return np.loadtxt(str(TESTS_DIR / "predictions.txt")) def write_train_config(self, i: int) -> None: """Create a file train{i}.conf with the required configuration to train. @@ -156,41 +156,41 @@ class DistributedMockup: Each worker gets a different port and piece of the data, the rest are the model parameters contained in `self.config`. """ - with open(TESTS_DIR / f'train{i}.conf', 'wt') as file: - output_model = TESTS_DIR / f'model{i}.txt' - data = TESTS_DIR / f'train{i}.txt' - file.write(f'output_model = {output_model}\n') - file.write(f'local_listen_port = {self.listen_ports[i]}\n') - file.write(f'data = {data}\n') + with open(TESTS_DIR / f"train{i}.conf", "wt") as file: + output_model = TESTS_DIR / f"model{i}.txt" + data = TESTS_DIR / f"train{i}.txt" + file.write(f"output_model = {output_model}\n") + file.write(f"local_listen_port = {self.listen_ports[i]}\n") + file.write(f"data = {data}\n") _write_dict(self.train_config, file) def test_classifier(executable): """Test the classification task.""" num_machines = 2 - data = create_data(task='binary-classification') + data = create_data(task="binary-classification") partitions = np.array_split(data, num_machines) train_params = { - 'objective': 'binary', - 'num_machines': num_machines, + "objective": "binary", + "num_machines": num_machines, } clf = DistributedMockup(executable) clf.fit(partitions, train_params) y_probas = clf.predict(predict_config={}) y_pred = y_probas > 0.5 - assert accuracy_score(clf.label_, y_pred) == 1. + assert accuracy_score(clf.label_, y_pred) == 1.0 def test_regressor(executable): """Test the regression task.""" num_machines = 2 - data = create_data(task='regression') + data = create_data(task="regression") partitions = np.array_split(data, num_machines) train_params = { - 'objective': 'regression', - 'num_machines': num_machines, + "objective": "regression", + "num_machines": num_machines, } reg = DistributedMockup(executable) reg.fit(partitions, train_params) y_pred = reg.predict(predict_config={}) - np.testing.assert_allclose(y_pred, reg.label_, rtol=0.2, atol=50.) 
+ np.testing.assert_allclose(y_pred, reg.label_, rtol=0.2, atol=50.0) diff --git a/tests/distributed/conftest.py b/tests/distributed/conftest.py index 9df13e820..ef62f3f97 100644 --- a/tests/distributed/conftest.py +++ b/tests/distributed/conftest.py @@ -1,7 +1,7 @@ from pathlib import Path -default_exec_file = Path(__file__).absolute().parents[2] / 'lightgbm' +default_exec_file = Path(__file__).absolute().parents[2] / "lightgbm" def pytest_addoption(parser): - parser.addoption('--execfile', action='store', default=str(default_exec_file)) + parser.addoption("--execfile", action="store", default=str(default_exec_file)) diff --git a/tests/python_package_test/test_arrow.py b/tests/python_package_test/test_arrow.py index 593c03d8c..b8b90e1d0 100644 --- a/tests/python_package_test/test_arrow.py +++ b/tests/python_package_test/test_arrow.py @@ -71,9 +71,7 @@ def generate_random_arrow_table( values: Optional[np.ndarray] = None, ) -> pa.Table: columns = [ - generate_random_arrow_array( - num_datapoints, seed + i, generate_nulls=generate_nulls, values=values - ) + generate_random_arrow_array(num_datapoints, seed + i, generate_nulls=generate_nulls, values=values) for i in range(num_columns) ] names = [f"col_{i}" for i in range(num_columns)] @@ -156,9 +154,7 @@ def test_dataset_construct_fields_fuzzy(): arrow_weights = generate_random_arrow_array(1000, 42, generate_nulls=False) arrow_groups = pa.chunked_array([[300, 400, 50], [250]], type=pa.int32()) - arrow_dataset = lgb.Dataset( - arrow_table, label=arrow_labels, weight=arrow_weights, group=arrow_groups - ) + arrow_dataset = lgb.Dataset(arrow_table, label=arrow_labels, weight=arrow_weights, group=arrow_groups) arrow_dataset.construct() pandas_dataset = lgb.Dataset( @@ -171,9 +167,7 @@ def test_dataset_construct_fields_fuzzy(): # Check for equality for field in ("label", "weight", "group"): - np_assert_array_equal( - arrow_dataset.get_field(field), pandas_dataset.get_field(field), strict=True - ) + np_assert_array_equal(arrow_dataset.get_field(field), pandas_dataset.get_field(field), strict=True) np_assert_array_equal(arrow_dataset.get_label(), pandas_dataset.get_label(), strict=True) np_assert_array_equal(arrow_dataset.get_weight(), pandas_dataset.get_weight(), strict=True) @@ -269,9 +263,7 @@ def test_dataset_construct_groups(array_type, group_data, arrow_type): ], ) @pytest.mark.parametrize("arrow_type", _INTEGER_TYPES + _FLOAT_TYPES) -def test_dataset_construct_init_scores_array( - array_type: Any, init_score_data: Any, arrow_type: Any -): +def test_dataset_construct_init_scores_array(array_type: Any, init_score_data: Any, arrow_type: Any): data = generate_dummy_arrow_table() init_scores = array_type(init_score_data, type=arrow_type) dataset = lgb.Dataset(data, init_score=init_scores, params=dummy_dataset_params()) @@ -320,9 +312,7 @@ def assert_equal_predict_arrow_pandas(booster: lgb.Booster, data: pa.Table): np_assert_array_equal(p_pred_contrib_arrow, p_pred_contrib_pandas, strict=True) p_first_iter_arrow = booster.predict(data, start_iteration=0, num_iteration=1, raw_score=True) - p_first_iter_pandas = booster.predict( - data.to_pandas(), start_iteration=0, num_iteration=1, raw_score=True - ) + p_first_iter_pandas = booster.predict(data.to_pandas(), start_iteration=0, num_iteration=1, raw_score=True) np_assert_array_equal(p_first_iter_arrow, p_first_iter_pandas, strict=True) diff --git a/tests/python_package_test/test_basic.py b/tests/python_package_test/test_basic.py index b8ef43e41..7177623be 100644 --- 
a/tests/python_package_test/test_basic.py +++ b/tests/python_package_test/test_basic.py @@ -19,8 +19,9 @@ from .utils import dummy_obj, load_breast_cancer, mse_obj, np_assert_array_equal def test_basic(tmp_path): - X_train, X_test, y_train, y_test = train_test_split(*load_breast_cancer(return_X_y=True), - test_size=0.1, random_state=2) + X_train, X_test, y_train, y_test = train_test_split( + *load_breast_cancer(return_X_y=True), test_size=0.1, random_state=2 + ) feature_names = [f"Column_{i}" for i in range(X_train.shape[1])] feature_names[1] = "a" * 1000 # set one name to a value longer than default buffer size train_data = lgb.Dataset(X_train, label=y_train, feature_name=feature_names) @@ -34,7 +35,7 @@ def test_basic(tmp_path): "verbose": -1, "num_threads": 1, "max_bin": 255, - "gpu_use_dp": True + "gpu_use_dp": True, } bst = lgb.Booster(params, train_data) bst.add_valid(valid_data, "valid_1") @@ -49,7 +50,7 @@ def test_basic(tmp_path): assert bst.current_iteration() == 20 assert bst.num_trees() == 20 assert bst.num_model_per_iteration() == 1 - if getenv('TASK', '') != 'cuda': + if getenv("TASK", "") != "cuda": assert bst.lower_bound() == pytest.approx(-2.9040190126976606) assert bst.upper_bound() == pytest.approx(3.3182142872462883) @@ -79,20 +80,19 @@ def test_basic(tmp_path): # test that shape is checked during prediction bad_X_test = X_test[:, 1:] bad_shape_error_msg = "The number of features in data*" - np.testing.assert_raises_regex(lgb.basic.LightGBMError, bad_shape_error_msg, - bst.predict, bad_X_test) - np.testing.assert_raises_regex(lgb.basic.LightGBMError, bad_shape_error_msg, - bst.predict, sparse.csr_matrix(bad_X_test)) - np.testing.assert_raises_regex(lgb.basic.LightGBMError, bad_shape_error_msg, - bst.predict, sparse.csc_matrix(bad_X_test)) + np.testing.assert_raises_regex(lgb.basic.LightGBMError, bad_shape_error_msg, bst.predict, bad_X_test) + np.testing.assert_raises_regex( + lgb.basic.LightGBMError, bad_shape_error_msg, bst.predict, sparse.csr_matrix(bad_X_test) + ) + np.testing.assert_raises_regex( + lgb.basic.LightGBMError, bad_shape_error_msg, bst.predict, sparse.csc_matrix(bad_X_test) + ) with open(tname, "w+b") as f: dump_svmlight_file(bad_X_test, y_test, f) - np.testing.assert_raises_regex(lgb.basic.LightGBMError, bad_shape_error_msg, - bst.predict, tname) + np.testing.assert_raises_regex(lgb.basic.LightGBMError, bad_shape_error_msg, bst.predict, tname) with open(tname, "w+b") as f: dump_svmlight_file(X_test, y_test, f, zero_based=False) - np.testing.assert_raises_regex(lgb.basic.LightGBMError, bad_shape_error_msg, - bst.predict, tname) + np.testing.assert_raises_regex(lgb.basic.LightGBMError, bad_shape_error_msg, bst.predict, tname) class NumpySequence(lgb.Sequence): @@ -108,7 +108,7 @@ class NumpySequence(lgb.Sequence): elif isinstance(idx, slice): if not (idx.step is None or idx.step == 1): raise NotImplementedError("No need to implement, caller will not set step by now") - return self.ndarray[idx.start:idx.stop] + return self.ndarray[idx.start : idx.stop] elif isinstance(idx, list): return self.ndarray[idx] else: @@ -132,12 +132,12 @@ def _create_sequence_from_ndarray(data, num_seq, batch_size): return seqs -@pytest.mark.parametrize('sample_count', [11, 100, None]) -@pytest.mark.parametrize('batch_size', [3, None]) -@pytest.mark.parametrize('include_0_and_nan', [False, True]) -@pytest.mark.parametrize('num_seq', [1, 3]) +@pytest.mark.parametrize("sample_count", [11, 100, None]) +@pytest.mark.parametrize("batch_size", [3, None]) 
+@pytest.mark.parametrize("include_0_and_nan", [False, True]) +@pytest.mark.parametrize("num_seq", [1, 3]) def test_sequence(tmpdir, sample_count, batch_size, include_0_and_nan, num_seq): - params = {'bin_construct_sample_cnt': sample_count} + params = {"bin_construct_sample_cnt": sample_count} nrow = 50 half_nrow = nrow // 2 @@ -159,8 +159,8 @@ def test_sequence(tmpdir, sample_count, batch_size, include_0_and_nan, num_seq): X = data[:, :-1] Y = data[:, -1] - npy_bin_fname = tmpdir / 'data_from_npy.bin' - seq_bin_fname = tmpdir / 'data_from_seq.bin' + npy_bin_fname = tmpdir / "data_from_npy.bin" + seq_bin_fname = tmpdir / "data_from_seq.bin" # Create dataset from numpy array directly. ds = lgb.Dataset(X, label=Y, params=params) @@ -181,9 +181,9 @@ def test_sequence(tmpdir, sample_count, batch_size, include_0_and_nan, num_seq): valid_X = valid_data[:, :-1] valid_Y = valid_data[:, -1] - valid_npy_bin_fname = tmpdir / 'valid_data_from_npy.bin' - valid_seq_bin_fname = tmpdir / 'valid_data_from_seq.bin' - valid_seq2_bin_fname = tmpdir / 'valid_data_from_seq2.bin' + valid_npy_bin_fname = tmpdir / "valid_data_from_npy.bin" + valid_seq_bin_fname = tmpdir / "valid_data_from_seq.bin" + valid_seq2_bin_fname = tmpdir / "valid_data_from_seq2.bin" valid_ds = lgb.Dataset(valid_X, label=valid_Y, params=params, reference=ds) valid_ds.save_binary(valid_npy_bin_fname) @@ -200,7 +200,7 @@ def test_sequence(tmpdir, sample_count, batch_size, include_0_and_nan, num_seq): assert filecmp.cmp(valid_npy_bin_fname, valid_seq2_bin_fname) -@pytest.mark.parametrize('num_seq', [1, 2]) +@pytest.mark.parametrize("num_seq", [1, 2]) def test_sequence_get_data(num_seq): nrow = 20 ncol = 11 @@ -218,12 +218,13 @@ def test_sequence_get_data(num_seq): def test_chunked_dataset(): - X_train, X_test, y_train, y_test = train_test_split(*load_breast_cancer(return_X_y=True), test_size=0.1, - random_state=2) + X_train, X_test, y_train, y_test = train_test_split( + *load_breast_cancer(return_X_y=True), test_size=0.1, random_state=2 + ) chunk_size = X_train.shape[0] // 10 + 1 - X_train = [X_train[i * chunk_size:(i + 1) * chunk_size, :] for i in range(X_train.shape[0] // chunk_size + 1)] - X_test = [X_test[i * chunk_size:(i + 1) * chunk_size, :] for i in range(X_test.shape[0] // chunk_size + 1)] + X_train = [X_train[i * chunk_size : (i + 1) * chunk_size, :] for i in range(X_train.shape[0] // chunk_size + 1)] + X_test = [X_test[i * chunk_size : (i + 1) * chunk_size, :] for i in range(X_test.shape[0] // chunk_size + 1)] train_data = lgb.Dataset(X_train, label=y_train, params={"bin_construct_sample_cnt": 100}) valid_data = train_data.create_valid(X_test, label=y_test, params={"bin_construct_sample_cnt": 100}) @@ -232,12 +233,13 @@ def test_chunked_dataset(): def test_chunked_dataset_linear(): - X_train, X_test, y_train, y_test = train_test_split(*load_breast_cancer(return_X_y=True), test_size=0.1, - random_state=2) + X_train, X_test, y_train, y_test = train_test_split( + *load_breast_cancer(return_X_y=True), test_size=0.1, random_state=2 + ) chunk_size = X_train.shape[0] // 10 + 1 - X_train = [X_train[i * chunk_size:(i + 1) * chunk_size, :] for i in range(X_train.shape[0] // chunk_size + 1)] - X_test = [X_test[i * chunk_size:(i + 1) * chunk_size, :] for i in range(X_test.shape[0] // chunk_size + 1)] - params = {"bin_construct_sample_cnt": 100, 'linear_tree': True} + X_train = [X_train[i * chunk_size : (i + 1) * chunk_size, :] for i in range(X_train.shape[0] // chunk_size + 1)] + X_test = [X_test[i * chunk_size : (i + 1) * chunk_size, :] for i 
in range(X_test.shape[0] // chunk_size + 1)] + params = {"bin_construct_sample_cnt": 100, "linear_tree": True} train_data = lgb.Dataset(X_train, label=y_train, params=params) valid_data = train_data.create_valid(X_test, label=y_test, params=params) train_data.construct() @@ -246,16 +248,16 @@ def test_chunked_dataset_linear(): def test_save_dataset_subset_and_load_from_file(tmp_path): data = np.random.rand(100, 2) - params = {'max_bin': 50, 'min_data_in_bin': 10} + params = {"max_bin": 50, "min_data_in_bin": 10} ds = lgb.Dataset(data, params=params) - ds.subset([1, 2, 3, 5, 8]).save_binary(tmp_path / 'subset.bin') - lgb.Dataset(tmp_path / 'subset.bin', params=params).construct() + ds.subset([1, 2, 3, 5, 8]).save_binary(tmp_path / "subset.bin") + lgb.Dataset(tmp_path / "subset.bin", params=params).construct() def test_subset_group(): - rank_example_dir = Path(__file__).absolute().parents[2] / 'examples' / 'lambdarank' - X_train, y_train = load_svmlight_file(str(rank_example_dir / 'rank.train')) - q_train = np.loadtxt(str(rank_example_dir / 'rank.train.query')) + rank_example_dir = Path(__file__).absolute().parents[2] / "examples" / "lambdarank" + X_train, y_train = load_svmlight_file(str(rank_example_dir / "rank.train")) + q_train = np.loadtxt(str(rank_example_dir / "rank.train.query")) lgb_train = lgb.Dataset(X_train, y_train, group=q_train) assert len(lgb_train.get_group()) == 201 subset = lgb_train.subset(list(range(10))).construct() @@ -294,7 +296,7 @@ def test_add_features_throws_if_datasets_unconstructed(): def test_add_features_equal_data_on_alternating_used_unused(tmp_path): X = np.random.random((100, 5)) X[:, [1, 3]] = 0 - names = [f'col_{i}' for i in range(5)] + names = [f"col_{i}" for i in range(5)] for j in range(1, 5): d1 = lgb.Dataset(X[:, :j], feature_name=names[:j]).construct() d2 = lgb.Dataset(X[:, j:], feature_name=names[j:]).construct() @@ -304,9 +306,9 @@ def test_add_features_equal_data_on_alternating_used_unused(tmp_path): d = lgb.Dataset(X, feature_name=names).construct() dname = tmp_path / "d.txt" d._dump_text(dname) - with open(d1name, 'rt') as d1f: + with open(d1name, "rt") as d1f: d1txt = d1f.read() - with open(dname, 'rt') as df: + with open(dname, "rt") as df: dtxt = df.read() assert dtxt == d1txt @@ -314,7 +316,7 @@ def test_add_features_equal_data_on_alternating_used_unused(tmp_path): def test_add_features_same_booster_behaviour(tmp_path): X = np.random.random((100, 5)) X[:, [1, 3]] = 0 - names = [f'col_{i}' for i in range(5)] + names = [f"col_{i}" for i in range(5)] for j in range(1, 5): d1 = lgb.Dataset(X[:, :j], feature_name=names[:j]).construct() d2 = lgb.Dataset(X[:, j:], feature_name=names[j:]).construct() @@ -332,9 +334,9 @@ def test_add_features_same_booster_behaviour(tmp_path): d1name = tmp_path / "d1.txt" b1.save_model(d1name) b.save_model(dname) - with open(dname, 'rt') as df: + with open(dname, "rt") as df: dtxt = df.read() - with open(d1name, 'rt') as d1f: + with open(d1name, "rt") as d1f: d1txt = d1f.read() assert dtxt == d1txt @@ -345,11 +347,12 @@ def test_add_features_from_different_sources(): n_col = 5 X = np.random.random((n_row, n_col)) xxs = [X, sparse.csr_matrix(X), pd.DataFrame(X)] - names = [f'col_{i}' for i in range(n_col)] + names = [f"col_{i}" for i in range(n_col)] seq = _create_sequence_from_ndarray(X, 1, 30) seq_ds = lgb.Dataset(seq, feature_name=names, free_raw_data=False).construct() - npy_list_ds = lgb.Dataset([X[:n_row // 2, :], X[n_row // 2:, :]], - feature_name=names, free_raw_data=False).construct() + npy_list_ds = 
lgb.Dataset( + [X[: n_row // 2, :], X[n_row // 2 :, :]], feature_name=names, free_raw_data=False + ).construct() immergeable_dds = [seq_ds, npy_list_ds] for x_1 in xxs: # test that method works even with free_raw_data=True @@ -373,20 +376,19 @@ def test_add_features_from_different_sources(): d1.add_features_from(d2) assert isinstance(d1.get_data(), original_type) assert d1.get_data().shape == (n_row, n_col * idx) - res_feature_names += [f'D{idx}_{name}' for name in names] + res_feature_names += [f"D{idx}_{name}" for name in names] assert d1.feature_name == res_feature_names def test_add_features_does_not_fail_if_initial_dataset_has_zero_informative_features(capsys): - arr_a = np.zeros((100, 1), dtype=np.float32) arr_b = np.random.normal(size=(100, 5)) dataset_a = lgb.Dataset(arr_a).construct() expected_msg = ( - '[LightGBM] [Warning] There are no meaningful features which satisfy ' - 'the provided configuration. Decreasing Dataset parameters min_data_in_bin ' - 'or min_data_in_leaf and re-constructing Dataset might resolve this warning.\n' + "[LightGBM] [Warning] There are no meaningful features which satisfy " + "the provided configuration. Decreasing Dataset parameters min_data_in_bin " + "or min_data_in_leaf and re-constructing Dataset might resolve this warning.\n" ) log_lines = capsys.readouterr().out assert expected_msg in log_lines @@ -404,7 +406,7 @@ def test_cegb_affects_behavior(tmp_path): X = np.random.random((100, 5)) X[:, [1, 3]] = 0 y = np.random.random(100) - names = [f'col_{i}' for i in range(5)] + names = [f"col_{i}" for i in range(5)] ds = lgb.Dataset(X, feature_name=names).construct() ds.set_label(y) base = lgb.Booster(train_set=ds) @@ -412,19 +414,21 @@ def test_cegb_affects_behavior(tmp_path): base.update() basename = tmp_path / "basename.txt" base.save_model(basename) - with open(basename, 'rt') as f: + with open(basename, "rt") as f: basetxt = f.read() # Set extremely harsh penalties, so CEGB will block most splits. 
- cases = [{'cegb_penalty_feature_coupled': [50, 100, 10, 25, 30]}, - {'cegb_penalty_feature_lazy': [1, 2, 3, 4, 5]}, - {'cegb_penalty_split': 1}] + cases = [ + {"cegb_penalty_feature_coupled": [50, 100, 10, 25, 30]}, + {"cegb_penalty_feature_lazy": [1, 2, 3, 4, 5]}, + {"cegb_penalty_split": 1}, + ] for case in cases: booster = lgb.Booster(train_set=ds, params=case) for _ in range(10): booster.update() casename = tmp_path / "casename.txt" booster.save_model(casename) - with open(casename, 'rt') as f: + with open(casename, "rt") as f: casetxt = f.read() assert basetxt != casetxt @@ -433,17 +437,22 @@ def test_cegb_scaling_equalities(tmp_path): X = np.random.random((100, 5)) X[:, [1, 3]] = 0 y = np.random.random(100) - names = [f'col_{i}' for i in range(5)] + names = [f"col_{i}" for i in range(5)] ds = lgb.Dataset(X, feature_name=names).construct() ds.set_label(y) # Compare pairs of penalties, to ensure scaling works as intended - pairs = [({'cegb_penalty_feature_coupled': [1, 2, 1, 2, 1]}, - {'cegb_penalty_feature_coupled': [0.5, 1, 0.5, 1, 0.5], 'cegb_tradeoff': 2}), - ({'cegb_penalty_feature_lazy': [0.01, 0.02, 0.03, 0.04, 0.05]}, - {'cegb_penalty_feature_lazy': [0.005, 0.01, 0.015, 0.02, 0.025], 'cegb_tradeoff': 2}), - ({'cegb_penalty_split': 1}, - {'cegb_penalty_split': 2, 'cegb_tradeoff': 0.5})] - for (p1, p2) in pairs: + pairs = [ + ( + {"cegb_penalty_feature_coupled": [1, 2, 1, 2, 1]}, + {"cegb_penalty_feature_coupled": [0.5, 1, 0.5, 1, 0.5], "cegb_tradeoff": 2}, + ), + ( + {"cegb_penalty_feature_lazy": [0.01, 0.02, 0.03, 0.04, 0.05]}, + {"cegb_penalty_feature_lazy": [0.005, 0.01, 0.015, 0.02, 0.025], "cegb_tradeoff": 2}, + ), + ({"cegb_penalty_split": 1}, {"cegb_penalty_split": 2, "cegb_tradeoff": 0.5}), + ] + for p1, p2 in pairs: booster1 = lgb.Booster(train_set=ds, params=p1) booster2 = lgb.Booster(train_set=ds, params=p2) for _ in range(10): @@ -453,32 +462,30 @@ def test_cegb_scaling_equalities(tmp_path): # Reset booster1's parameters to p2, so the parameter section of the file matches. 
booster1.reset_parameter(p2) booster1.save_model(p1name) - with open(p1name, 'rt') as f: + with open(p1name, "rt") as f: p1txt = f.read() p2name = tmp_path / "p2.txt" booster2.save_model(p2name) - with open(p2name, 'rt') as f: + with open(p2name, "rt") as f: p2txt = f.read() assert p1txt == p2txt def test_consistent_state_for_dataset_fields(): - def check_asserts(data): np.testing.assert_allclose(data.label, data.get_label()) - np.testing.assert_allclose(data.label, data.get_field('label')) + np.testing.assert_allclose(data.label, data.get_field("label")) assert not np.isnan(data.label[0]) assert not np.isinf(data.label[1]) np.testing.assert_allclose(data.weight, data.get_weight()) - np.testing.assert_allclose(data.weight, data.get_field('weight')) + np.testing.assert_allclose(data.weight, data.get_field("weight")) assert not np.isnan(data.weight[0]) assert not np.isinf(data.weight[1]) np.testing.assert_allclose(data.init_score, data.get_init_score()) - np.testing.assert_allclose(data.init_score, data.get_field('init_score')) + np.testing.assert_allclose(data.init_score, data.get_field("init_score")) assert not np.isnan(data.init_score[0]) assert not np.isinf(data.init_score[1]) - assert np.all(np.isclose([data.label[0], data.weight[0], data.init_score[0]], - data.label[0])) + assert np.all(np.isclose([data.label[0], data.weight[0], data.init_score[0]], data.label[0])) assert data.label[1] == pytest.approx(data.weight[1]) assert data.feature_name == data.get_feature_name() @@ -486,10 +493,8 @@ def test_consistent_state_for_dataset_fields(): sequence = np.ones(y.shape[0]) sequence[0] = np.nan sequence[1] = np.inf - feature_names = [f'f{i}'for i in range(X.shape[1])] - lgb_data = lgb.Dataset(X, sequence, - weight=sequence, init_score=sequence, - feature_name=feature_names).construct() + feature_names = [f"f{i}" for i in range(X.shape[1])] + lgb_data = lgb.Dataset(X, sequence, weight=sequence, init_score=sequence, feature_name=feature_names).construct() check_asserts(lgb_data) lgb_data = lgb.Dataset(X, y).construct() lgb_data.set_label(sequence) @@ -500,20 +505,15 @@ def test_consistent_state_for_dataset_fields(): def test_dataset_construction_overwrites_user_provided_metadata_fields(): - X = np.array([[1.0, 2.0], [3.0, 4.0]]) position = np.array([0.0, 1.0], dtype=np.float32) - if getenv('TASK', '') == 'cuda': + if getenv("TASK", "") == "cuda": position = None dtrain = lgb.Dataset( X, - params={ - "min_data_in_bin": 1, - "min_data_in_leaf": 1, - "verbosity": -1 - }, + params={"min_data_in_bin": 1, "min_data_in_leaf": 1, "verbosity": -1}, group=[1, 1], init_score=[0.312, 0.708], label=[1, 2], @@ -528,17 +528,9 @@ def test_dataset_construction_overwrites_user_provided_metadata_fields(): assert dtrain.get_init_score() == [0.312, 0.708] assert dtrain.label == [1, 2] assert dtrain.get_label() == [1, 2] - if getenv('TASK', '') != 'cuda': - np_assert_array_equal( - dtrain.position, - np.array([0.0, 1.0], dtype=np.float32), - strict=True - ) - np_assert_array_equal( - dtrain.get_position(), - np.array([0.0, 1.0], dtype=np.float32), - strict=True - ) + if getenv("TASK", "") != "cuda": + np_assert_array_equal(dtrain.position, np.array([0.0, 1.0], dtype=np.float32), strict=True) + np_assert_array_equal(dtrain.get_position(), np.array([0.0, 1.0], dtype=np.float32), strict=True) assert dtrain.weight == [0.5, 1.5] assert dtrain.get_weight() == [0.5, 1.5] @@ -554,13 +546,11 @@ def test_dataset_construction_overwrites_user_provided_metadata_fields(): np_assert_array_equal(dtrain.group, expected_group, 
strict=True) np_assert_array_equal(dtrain.get_group(), expected_group, strict=True) # get_field("group") returns a numpy array with boundaries, instead of size - np_assert_array_equal( - dtrain.get_field("group"), - np.array([0, 1, 2], dtype=np.int32), - strict=True - ) + np_assert_array_equal(dtrain.get_field("group"), np.array([0, 1, 2], dtype=np.int32), strict=True) - expected_init_score = np.array([0.312, 0.708],) + expected_init_score = np.array( + [0.312, 0.708], + ) np_assert_array_equal(dtrain.init_score, expected_init_score, strict=True) np_assert_array_equal(dtrain.get_init_score(), expected_init_score, strict=True) np_assert_array_equal(dtrain.get_field("init_score"), expected_init_score, strict=True) @@ -570,16 +560,12 @@ def test_dataset_construction_overwrites_user_provided_metadata_fields(): np_assert_array_equal(dtrain.get_label(), expected_label, strict=True) np_assert_array_equal(dtrain.get_field("label"), expected_label, strict=True) - if getenv('TASK', '') != 'cuda': + if getenv("TASK", "") != "cuda": expected_position = np.array([0.0, 1.0], dtype=np.float32) np_assert_array_equal(dtrain.position, expected_position, strict=True) np_assert_array_equal(dtrain.get_position(), expected_position, strict=True) # NOTE: "position" is converted to int32 on the C++ side - np_assert_array_equal( - dtrain.get_field("position"), - np.array([0.0, 1.0], dtype=np.int32), - strict=True - ) + np_assert_array_equal(dtrain.get_field("position"), np.array([0.0, 1.0], dtype=np.int32), strict=True) expected_weight = np.array([0.5, 1.5], dtype=np.float32) np_assert_array_equal(dtrain.weight, expected_weight, strict=True) @@ -588,7 +574,6 @@ def test_dataset_construction_overwrites_user_provided_metadata_fields(): def test_choose_param_value(): - original_params = { "local_listen_port": 1234, "port": 2222, @@ -599,30 +584,20 @@ def test_choose_param_value(): # should resolve duplicate aliases, and prefer the main parameter params = lgb.basic._choose_param_value( - main_param_name="local_listen_port", - params=original_params, - default_value=5555 + main_param_name="local_listen_port", params=original_params, default_value=5555 ) assert params["local_listen_port"] == 1234 assert "port" not in params # should choose the highest priority alias and set that value on main param # if only aliases are used - params = lgb.basic._choose_param_value( - main_param_name="num_iterations", - params=params, - default_value=17 - ) + params = lgb.basic._choose_param_value(main_param_name="num_iterations", params=params, default_value=17) assert params["num_iterations"] == 13 assert "num_trees" not in params assert "n_iter" not in params # should use the default if main param and aliases are missing - params = lgb.basic._choose_param_value( - main_param_name="learning_rate", - params=params, - default_value=0.789 - ) + params = lgb.basic._choose_param_value(main_param_name="learning_rate", params=params, default_value=0.789) assert params["learning_rate"] == 0.789 # all changes should be made on copies and not modify the original @@ -637,37 +612,23 @@ def test_choose_param_value(): def test_choose_param_value_preserves_nones(): - # preserves None found for main param and still removes aliases params = lgb.basic._choose_param_value( main_param_name="num_threads", - params={ - "num_threads": None, - "n_jobs": 4, - "objective": "regression" - }, - default_value=2 + params={"num_threads": None, "n_jobs": 4, "objective": "regression"}, + default_value=2, ) assert params == {"num_threads": None, "objective": 
"regression"} # correctly chooses value when only an alias is provided params = lgb.basic._choose_param_value( - main_param_name="num_threads", - params={ - "n_jobs": None, - "objective": "regression" - }, - default_value=2 + main_param_name="num_threads", params={"n_jobs": None, "objective": "regression"}, default_value=2 ) assert params == {"num_threads": None, "objective": "regression"} # adds None if that's given as the default and param not found params = lgb.basic._choose_param_value( - main_param_name="min_data_in_leaf", - params={ - "objective": "regression" - }, - default_value=None + main_param_name="min_data_in_leaf", params={"objective": "regression"}, default_value=None ) assert params == {"objective": "regression", "min_data_in_leaf": None} @@ -676,51 +637,39 @@ def test_choose_param_value_preserves_nones(): def test_choose_param_value_objective(objective_alias): # If callable is found in objective params = {objective_alias: dummy_obj} - params = lgb.basic._choose_param_value( - main_param_name="objective", - params=params, - default_value=None - ) - assert params['objective'] == dummy_obj + params = lgb.basic._choose_param_value(main_param_name="objective", params=params, default_value=None) + assert params["objective"] == dummy_obj # Value in params should be preferred to the default_value passed from keyword arguments params = {objective_alias: dummy_obj} - params = lgb.basic._choose_param_value( - main_param_name="objective", - params=params, - default_value=mse_obj - ) - assert params['objective'] == dummy_obj + params = lgb.basic._choose_param_value(main_param_name="objective", params=params, default_value=mse_obj) + assert params["objective"] == dummy_obj # None of objective or its aliases in params, but default_value is callable. 
params = {} - params = lgb.basic._choose_param_value( - main_param_name="objective", - params=params, - default_value=mse_obj - ) - assert params['objective'] == mse_obj + params = lgb.basic._choose_param_value(main_param_name="objective", params=params, default_value=mse_obj) + assert params["objective"] == mse_obj -@pytest.mark.parametrize('collection', ['1d_np', '2d_np', 'pd_float', 'pd_str', '1d_list', '2d_list']) -@pytest.mark.parametrize('dtype', [np.float32, np.float64]) +@pytest.mark.parametrize("collection", ["1d_np", "2d_np", "pd_float", "pd_str", "1d_list", "2d_list"]) +@pytest.mark.parametrize("dtype", [np.float32, np.float64]) def test_list_to_1d_numpy(collection, dtype): collection2y = { - '1d_np': np.random.rand(10), - '2d_np': np.random.rand(10, 1), - 'pd_float': np.random.rand(10), - 'pd_str': ['a', 'b'], - '1d_list': [1] * 10, - '2d_list': [[1], [2]], + "1d_np": np.random.rand(10), + "2d_np": np.random.rand(10, 1), + "pd_float": np.random.rand(10), + "pd_str": ["a", "b"], + "1d_list": [1] * 10, + "2d_list": [[1], [2]], } y = collection2y[collection] - if collection.startswith('pd'): + if collection.startswith("pd"): if not PANDAS_INSTALLED: - pytest.skip('pandas is not installed') + pytest.skip("pandas is not installed") else: y = pd_Series(y) if isinstance(y, np.ndarray) and len(y.shape) == 2: - with pytest.warns(UserWarning, match='column-vector'): + with pytest.warns(UserWarning, match="column-vector"): lgb.basic._list_to_1d_numpy(y, dtype=np.float32, name="list") return elif isinstance(y, list) and isinstance(y[0], list): @@ -736,30 +685,31 @@ def test_list_to_1d_numpy(collection, dtype): assert result.dtype == dtype -@pytest.mark.parametrize('init_score_type', ['array', 'dataframe', 'list']) +@pytest.mark.parametrize("init_score_type", ["array", "dataframe", "list"]) def test_init_score_for_multiclass_classification(init_score_type): init_score = [[i * 10 + j for j in range(3)] for i in range(10)] - if init_score_type == 'array': + if init_score_type == "array": init_score = np.array(init_score) - elif init_score_type == 'dataframe': + elif init_score_type == "dataframe": if not PANDAS_INSTALLED: - pytest.skip('Pandas is not installed.') + pytest.skip("Pandas is not installed.") init_score = pd_DataFrame(init_score) data = np.random.rand(10, 2) ds = lgb.Dataset(data, init_score=init_score).construct() - np.testing.assert_equal(ds.get_field('init_score'), init_score) + np.testing.assert_equal(ds.get_field("init_score"), init_score) np.testing.assert_equal(ds.init_score, init_score) def test_smoke_custom_parser(tmp_path): - data_path = Path(__file__).absolute().parents[2] / 'examples' / 'binary_classification' / 'binary.train' - parser_config_file = tmp_path / 'parser.ini' - with open(parser_config_file, 'w') as fout: + data_path = Path(__file__).absolute().parents[2] / "examples" / "binary_classification" / "binary.train" + parser_config_file = tmp_path / "parser.ini" + with open(parser_config_file, "w") as fout: fout.write('{"className": "dummy", "id": "1"}') data = lgb.Dataset(data_path, params={"parser_config_file": parser_config_file}) - with pytest.raises(lgb.basic.LightGBMError, - match="Cannot find parser class 'dummy', please register first or check config format"): + with pytest.raises( + lgb.basic.LightGBMError, match="Cannot find parser class 'dummy', please register first or check config format" + ): data.construct() @@ -770,9 +720,13 @@ def test_param_aliases(): assert all(isinstance(i, list) for i in aliases.values()) assert all(len(i) >= 1 for i in 
aliases.values()) assert all(k in v for k, v in aliases.items()) - assert lgb.basic._ConfigAliases.get('config', 'task') == {'config', 'config_file', 'task', 'task_type'} - assert lgb.basic._ConfigAliases.get_sorted('min_data_in_leaf') == [ - 'min_data_in_leaf', 'min_data', 'min_samples_leaf', 'min_child_samples', 'min_data_per_leaf' + assert lgb.basic._ConfigAliases.get("config", "task") == {"config", "config_file", "task", "task_type"} + assert lgb.basic._ConfigAliases.get_sorted("min_data_in_leaf") == [ + "min_data_in_leaf", + "min_data", + "min_samples_leaf", + "min_child_samples", + "min_data_per_leaf", ] @@ -793,10 +747,10 @@ def test_custom_objective_safety(): y_multiclass = np.arange(nrows) % nclass ds_binary = lgb.Dataset(X, y_binary).construct() ds_multiclass = lgb.Dataset(X, y_multiclass).construct() - bad_bst_binary = lgb.Booster({'objective': "none"}, ds_binary) - good_bst_binary = lgb.Booster({'objective': "none"}, ds_binary) - bad_bst_multi = lgb.Booster({'objective': "none", "num_class": nclass}, ds_multiclass) - good_bst_multi = lgb.Booster({'objective': "none", "num_class": nclass}, ds_multiclass) + bad_bst_binary = lgb.Booster({"objective": "none"}, ds_binary) + good_bst_binary = lgb.Booster({"objective": "none"}, ds_binary) + bad_bst_multi = lgb.Booster({"objective": "none", "num_class": nclass}, ds_multiclass) + good_bst_multi = lgb.Booster({"objective": "none", "num_class": nclass}, ds_multiclass) good_bst_binary.update(fobj=_good_gradients) with pytest.raises(ValueError, match=re.escape("number of models per one iteration (1)")): bad_bst_binary.update(fobj=_bad_gradients) @@ -805,33 +759,30 @@ def test_custom_objective_safety(): bad_bst_multi.update(fobj=_bad_gradients) -@pytest.mark.parametrize('dtype', [np.float32, np.float64]) -@pytest.mark.parametrize('feature_name', [['x1', 'x2'], 'auto']) +@pytest.mark.parametrize("dtype", [np.float32, np.float64]) +@pytest.mark.parametrize("feature_name", [["x1", "x2"], "auto"]) def test_no_copy_when_single_float_dtype_dataframe(dtype, feature_name): - pd = pytest.importorskip('pandas') + pd = pytest.importorskip("pandas") X = np.random.rand(10, 2).astype(dtype) df = pd.DataFrame(X) built_data = lgb.basic._data_from_pandas( - data=df, - feature_name=feature_name, - categorical_feature="auto", - pandas_categorical=None + data=df, feature_name=feature_name, categorical_feature="auto", pandas_categorical=None )[0] assert built_data.dtype == dtype assert np.shares_memory(X, built_data) -@pytest.mark.parametrize('feature_name', [['x1'], [42], 'auto']) -@pytest.mark.parametrize('categories', ['seen', 'unseen']) +@pytest.mark.parametrize("feature_name", [["x1"], [42], "auto"]) +@pytest.mark.parametrize("categories", ["seen", "unseen"]) def test_categorical_code_conversion_doesnt_modify_original_data(feature_name, categories): - pd = pytest.importorskip('pandas') - X = np.random.choice(['a', 'b'], 100).reshape(-1, 1) - column_name = 'a' if feature_name == 'auto' else feature_name[0] - df = pd.DataFrame(X.copy(), columns=[column_name], dtype='category') - if categories == 'seen': - pandas_categorical = [['a', 'b']] + pd = pytest.importorskip("pandas") + X = np.random.choice(["a", "b"], 100).reshape(-1, 1) + column_name = "a" if feature_name == "auto" else feature_name[0] + df = pd.DataFrame(X.copy(), columns=[column_name], dtype="category") + if categories == "seen": + pandas_categorical = [["a", "b"]] else: - pandas_categorical = [['a']] + pandas_categorical = [["a"]] data = lgb.basic._data_from_pandas( data=df, 
feature_name=feature_name, @@ -841,31 +792,33 @@ def test_categorical_code_conversion_doesnt_modify_original_data(feature_name, c # check that the original data wasn't modified np.testing.assert_equal(df[column_name], X[:, 0]) # check that the built data has the codes - if categories == 'seen': + if categories == "seen": # if all categories were seen during training we just take the codes codes = df[column_name].cat.codes else: # if we only saw 'a' during training we just replace its code # and leave the rest as nan - a_code = df[column_name].cat.categories.get_loc('a') - codes = np.where(df[column_name] == 'a', a_code, np.nan) + a_code = df[column_name].cat.categories.get_loc("a") + codes = np.where(df[column_name] == "a", a_code, np.nan) np.testing.assert_equal(codes, data[:, 0]) -@pytest.mark.parametrize('min_data_in_bin', [2, 10]) +@pytest.mark.parametrize("min_data_in_bin", [2, 10]) def test_feature_num_bin(min_data_in_bin): - X = np.vstack([ - np.random.rand(100), - np.array([1, 2] * 50), - np.array([0, 1, 2] * 33 + [0]), - np.array([1, 2] * 49 + 2 * [np.nan]), - np.zeros(100), - np.random.choice([0, 1], 100), - ]).T + X = np.vstack( + [ + np.random.rand(100), + np.array([1, 2] * 50), + np.array([0, 1, 2] * 33 + [0]), + np.array([1, 2] * 49 + 2 * [np.nan]), + np.zeros(100), + np.random.choice([0, 1], 100), + ] + ).T n_continuous = X.shape[1] - 1 - feature_name = [f'x{i}' for i in range(n_continuous)] + ['cat1'] + feature_name = [f"x{i}" for i in range(n_continuous)] + ["cat1"] ds_kwargs = { - "params": {'min_data_in_bin': min_data_in_bin}, + "params": {"min_data_in_bin": min_data_in_bin}, "categorical_feature": [n_continuous], # last feature } ds = lgb.Dataset(X, feature_name=feature_name, **ds_kwargs).construct() @@ -884,7 +837,7 @@ def test_feature_num_bin(min_data_in_bin): assert bins_by_name == expected_num_bins # test using default feature names ds_no_names = lgb.Dataset(X, **ds_kwargs).construct() - default_names = [f'Column_{i}' for i in range(X.shape[1])] + default_names = [f"Column_{i}" for i in range(X.shape[1])] bins_by_default_name = [ds_no_names.feature_num_bin(name) for name in default_names] assert bins_by_default_name == expected_num_bins # check for feature indices outside of range @@ -892,9 +845,9 @@ def test_feature_num_bin(min_data_in_bin): with pytest.raises( lgb.basic.LightGBMError, match=( - f'Tried to retrieve number of bins for feature index {num_features}, ' - f'but the valid feature indices are \\[0, {num_features - 1}\\].' - ) + f"Tried to retrieve number of bins for feature index {num_features}, " + f"but the valid feature indices are \\[0, {num_features - 1}\\]." 
+ ), ): ds.feature_num_bin(num_features) @@ -902,7 +855,7 @@ def test_feature_num_bin(min_data_in_bin): def test_feature_num_bin_with_max_bin_by_feature(): X = np.random.rand(100, 3) max_bin_by_feature = np.random.randint(3, 30, size=X.shape[1]) - ds = lgb.Dataset(X, params={'max_bin_by_feature': max_bin_by_feature}).construct() + ds = lgb.Dataset(X, params={"max_bin_by_feature": max_bin_by_feature}).construct() actual_num_bins = [ds.feature_num_bin(i) for i in range(X.shape[1])] np.testing.assert_equal(actual_num_bins, max_bin_by_feature) @@ -910,7 +863,7 @@ def test_feature_num_bin_with_max_bin_by_feature(): def test_set_leaf_output(): X, y = load_breast_cancer(return_X_y=True) ds = lgb.Dataset(X, y) - bst = lgb.Booster({'num_leaves': 2}, ds) + bst = lgb.Booster({"num_leaves": 2}, ds) bst.update() y_pred = bst.predict(X) for leaf_id in range(2): diff --git a/tests/python_package_test/test_callback.py b/tests/python_package_test/test_callback.py index f93ca837f..a13ee9c0e 100644 --- a/tests/python_package_test/test_callback.py +++ b/tests/python_package_test/test_callback.py @@ -10,7 +10,7 @@ def reset_feature_fraction(boosting_round): return 0.6 if boosting_round < 15 else 0.8 -@pytest.mark.parametrize('serializer', SERIALIZERS) +@pytest.mark.parametrize("serializer", SERIALIZERS) def test_early_stopping_callback_is_picklable(serializer): rounds = 5 callback = lgb.early_stopping(stopping_rounds=rounds) @@ -32,7 +32,7 @@ def test_early_stopping_callback_rejects_invalid_stopping_rounds_with_informativ lgb.early_stopping(stopping_rounds="neverrrr") -@pytest.mark.parametrize('serializer', SERIALIZERS) +@pytest.mark.parametrize("serializer", SERIALIZERS) def test_log_evaluation_callback_is_picklable(serializer): periods = 42 callback = lgb.log_evaluation(period=periods) @@ -43,7 +43,7 @@ def test_log_evaluation_callback_is_picklable(serializer): assert callback.period == periods -@pytest.mark.parametrize('serializer', SERIALIZERS) +@pytest.mark.parametrize("serializer", SERIALIZERS) def test_record_evaluation_callback_is_picklable(serializer): results = {} callback = lgb.record_evaluation(eval_result=results) @@ -54,12 +54,9 @@ def test_record_evaluation_callback_is_picklable(serializer): assert callback.eval_result is results -@pytest.mark.parametrize('serializer', SERIALIZERS) +@pytest.mark.parametrize("serializer", SERIALIZERS) def test_reset_parameter_callback_is_picklable(serializer): - params = { - 'bagging_fraction': [0.7] * 5 + [0.6] * 5, - 'feature_fraction': reset_feature_fraction - } + params = {"bagging_fraction": [0.7] * 5 + [0.6] * 5, "feature_fraction": reset_feature_fraction} callback = lgb.reset_parameter(**params) callback_from_disk = pickle_and_unpickle_object(obj=callback, serializer=serializer) assert callback_from_disk.order == 10 diff --git a/tests/python_package_test/test_consistency.py b/tests/python_package_test/test_consistency.py index b8610ed44..4f5bca249 100644 --- a/tests/python_package_test/test_consistency.py +++ b/tests/python_package_test/test_consistency.py @@ -6,22 +6,21 @@ from sklearn.datasets import load_svmlight_file import lightgbm as lgb -EXAMPLES_DIR = Path(__file__).absolute().parents[2] / 'examples' +EXAMPLES_DIR = Path(__file__).absolute().parents[2] / "examples" class FileLoader: - - def __init__(self, directory, prefix, config_file='train.conf'): + def __init__(self, directory, prefix, config_file="train.conf"): self.directory = directory self.prefix = prefix - self.params = {'gpu_use_dp': True} - with open(self.directory / config_file, 'r') as 
f: + self.params = {"gpu_use_dp": True} + with open(self.directory / config_file, "r") as f: for line in f.readlines(): line = line.strip() - if line and not line.startswith('#'): - key, value = [token.strip() for token in line.split('=')] - if 'early_stopping' not in key: # disable early_stopping - self.params[key] = value if key not in {'num_trees', 'num_threads'} else int(value) + if line and not line.startswith("#"): + key, value = [token.strip() for token in line.split("=")] + if "early_stopping" not in key: # disable early_stopping + self.params[key] = value if key not in {"num_trees", "num_threads"} else int(value) def load_dataset(self, suffix, is_sparse=False): filename = str(self.path(suffix)) @@ -33,14 +32,14 @@ class FileLoader: return mat[:, 1:], mat[:, 0], filename def load_field(self, suffix): - return np.loadtxt(str(self.directory / f'{self.prefix}{suffix}')) + return np.loadtxt(str(self.directory / f"{self.prefix}{suffix}")) - def load_cpp_result(self, result_file='LightGBM_predict_result.txt'): + def load_cpp_result(self, result_file="LightGBM_predict_result.txt"): return np.loadtxt(str(self.directory / result_file)) def train_predict_check(self, lgb_train, X_test, X_test_fn, sk_pred): params = dict(self.params) - params['force_row_wise'] = True + params["force_row_wise"] = True gbm = lgb.train(params, lgb_train) y_pred = gbm.predict(X_test) cpp_pred = gbm.predict(X_test_fn) @@ -49,7 +48,7 @@ class FileLoader: def file_load_check(self, lgb_train, name): lgb_train_f = lgb.Dataset(self.path(name), params=self.params).construct() - for f in ('num_data', 'num_feature', 'get_label', 'get_weight', 'get_init_score', 'get_group'): + for f in ("num_data", "num_feature", "get_label", "get_weight", "get_init_score", "get_group"): a = getattr(lgb_train, f)() b = getattr(lgb_train_f, f)() if a is None and b is None: @@ -62,83 +61,83 @@ class FileLoader: assert a == b, f def path(self, suffix): - return self.directory / f'{self.prefix}{suffix}' + return self.directory / f"{self.prefix}{suffix}" def test_binary(): - fd = FileLoader(EXAMPLES_DIR / 'binary_classification', 'binary') - X_train, y_train, _ = fd.load_dataset('.train') - X_test, _, X_test_fn = fd.load_dataset('.test') - weight_train = fd.load_field('.train.weight') + fd = FileLoader(EXAMPLES_DIR / "binary_classification", "binary") + X_train, y_train, _ = fd.load_dataset(".train") + X_test, _, X_test_fn = fd.load_dataset(".test") + weight_train = fd.load_field(".train.weight") lgb_train = lgb.Dataset(X_train, y_train, params=fd.params, weight=weight_train) gbm = lgb.LGBMClassifier(**fd.params) gbm.fit(X_train, y_train, sample_weight=weight_train) sk_pred = gbm.predict_proba(X_test)[:, 1] fd.train_predict_check(lgb_train, X_test, X_test_fn, sk_pred) - fd.file_load_check(lgb_train, '.train') + fd.file_load_check(lgb_train, ".train") def test_binary_linear(): - fd = FileLoader(EXAMPLES_DIR / 'binary_classification', 'binary', 'train_linear.conf') - X_train, y_train, _ = fd.load_dataset('.train') - X_test, _, X_test_fn = fd.load_dataset('.test') - weight_train = fd.load_field('.train.weight') + fd = FileLoader(EXAMPLES_DIR / "binary_classification", "binary", "train_linear.conf") + X_train, y_train, _ = fd.load_dataset(".train") + X_test, _, X_test_fn = fd.load_dataset(".test") + weight_train = fd.load_field(".train.weight") lgb_train = lgb.Dataset(X_train, y_train, params=fd.params, weight=weight_train) gbm = lgb.LGBMClassifier(**fd.params) gbm.fit(X_train, y_train, sample_weight=weight_train) sk_pred = 
gbm.predict_proba(X_test)[:, 1] fd.train_predict_check(lgb_train, X_test, X_test_fn, sk_pred) - fd.file_load_check(lgb_train, '.train') + fd.file_load_check(lgb_train, ".train") def test_multiclass(): - fd = FileLoader(EXAMPLES_DIR / 'multiclass_classification', 'multiclass') - X_train, y_train, _ = fd.load_dataset('.train') - X_test, _, X_test_fn = fd.load_dataset('.test') + fd = FileLoader(EXAMPLES_DIR / "multiclass_classification", "multiclass") + X_train, y_train, _ = fd.load_dataset(".train") + X_test, _, X_test_fn = fd.load_dataset(".test") lgb_train = lgb.Dataset(X_train, y_train) gbm = lgb.LGBMClassifier(**fd.params) gbm.fit(X_train, y_train) sk_pred = gbm.predict_proba(X_test) fd.train_predict_check(lgb_train, X_test, X_test_fn, sk_pred) - fd.file_load_check(lgb_train, '.train') + fd.file_load_check(lgb_train, ".train") def test_regression(): - fd = FileLoader(EXAMPLES_DIR / 'regression', 'regression') - X_train, y_train, _ = fd.load_dataset('.train') - X_test, _, X_test_fn = fd.load_dataset('.test') - init_score_train = fd.load_field('.train.init') + fd = FileLoader(EXAMPLES_DIR / "regression", "regression") + X_train, y_train, _ = fd.load_dataset(".train") + X_test, _, X_test_fn = fd.load_dataset(".test") + init_score_train = fd.load_field(".train.init") lgb_train = lgb.Dataset(X_train, y_train, init_score=init_score_train) gbm = lgb.LGBMRegressor(**fd.params) gbm.fit(X_train, y_train, init_score=init_score_train) sk_pred = gbm.predict(X_test) fd.train_predict_check(lgb_train, X_test, X_test_fn, sk_pred) - fd.file_load_check(lgb_train, '.train') + fd.file_load_check(lgb_train, ".train") def test_lambdarank(): - fd = FileLoader(EXAMPLES_DIR / 'lambdarank', 'rank') - X_train, y_train, _ = fd.load_dataset('.train', is_sparse=True) - X_test, _, X_test_fn = fd.load_dataset('.test', is_sparse=True) - group_train = fd.load_field('.train.query') + fd = FileLoader(EXAMPLES_DIR / "lambdarank", "rank") + X_train, y_train, _ = fd.load_dataset(".train", is_sparse=True) + X_test, _, X_test_fn = fd.load_dataset(".test", is_sparse=True) + group_train = fd.load_field(".train.query") lgb_train = lgb.Dataset(X_train, y_train, group=group_train) params = dict(fd.params) - params['force_col_wise'] = True + params["force_col_wise"] = True gbm = lgb.LGBMRanker(**params) gbm.fit(X_train, y_train, group=group_train) sk_pred = gbm.predict(X_test) fd.train_predict_check(lgb_train, X_test, X_test_fn, sk_pred) - fd.file_load_check(lgb_train, '.train') + fd.file_load_check(lgb_train, ".train") def test_xendcg(): - fd = FileLoader(EXAMPLES_DIR / 'xendcg', 'rank') - X_train, y_train, _ = fd.load_dataset('.train', is_sparse=True) - X_test, _, X_test_fn = fd.load_dataset('.test', is_sparse=True) - group_train = fd.load_field('.train.query') + fd = FileLoader(EXAMPLES_DIR / "xendcg", "rank") + X_train, y_train, _ = fd.load_dataset(".train", is_sparse=True) + X_test, _, X_test_fn = fd.load_dataset(".test", is_sparse=True) + group_train = fd.load_field(".train.query") lgb_train = lgb.Dataset(X_train, y_train, group=group_train) gbm = lgb.LGBMRanker(**fd.params) gbm.fit(X_train, y_train, group=group_train) sk_pred = gbm.predict(X_test) fd.train_predict_check(lgb_train, X_test, X_test_fn, sk_pred) - fd.file_load_check(lgb_train, '.train') + fd.file_load_check(lgb_train, ".train") diff --git a/tests/python_package_test/test_dask.py b/tests/python_package_test/test_dask.py index 9da509453..9fe4da18f 100644 --- a/tests/python_package_test/test_dask.py +++ b/tests/python_package_test/test_dask.py @@ -17,12 +17,12 @@ 
import lightgbm as lgb from .utils import sklearn_multiclass_custom_objective -if not platform.startswith('linux'): - pytest.skip('lightgbm.dask is currently supported in Linux environments', allow_module_level=True) -if machine() != 'x86_64': - pytest.skip('lightgbm.dask tests are currently skipped on some architectures like arm64', allow_module_level=True) +if not platform.startswith("linux"): + pytest.skip("lightgbm.dask is currently supported in Linux environments", allow_module_level=True) +if machine() != "x86_64": + pytest.skip("lightgbm.dask tests are currently skipped on some architectures like arm64", allow_module_level=True) if not lgb.compat.DASK_INSTALLED: - pytest.skip('Dask is not installed', allow_module_level=True) + pytest.skip("Dask is not installed", allow_module_level=True) import dask.array as da import dask.dataframe as dd @@ -37,46 +37,46 @@ from sklearn.datasets import make_blobs, make_regression from .utils import make_ranking, pickle_obj, unpickle_obj -tasks = ['binary-classification', 'multiclass-classification', 'regression', 'ranking'] -distributed_training_algorithms = ['data', 'voting'] -data_output = ['array', 'scipy_csr_matrix', 'dataframe', 'dataframe-with-categorical'] -boosting_types = ['gbdt', 'dart', 'goss', 'rf'] +tasks = ["binary-classification", "multiclass-classification", "regression", "ranking"] +distributed_training_algorithms = ["data", "voting"] +data_output = ["array", "scipy_csr_matrix", "dataframe", "dataframe-with-categorical"] +boosting_types = ["gbdt", "dart", "goss", "rf"] group_sizes = [5, 5, 5, 10, 10, 10, 20, 20, 20, 50, 50] task_to_dask_factory = { - 'regression': lgb.DaskLGBMRegressor, - 'binary-classification': lgb.DaskLGBMClassifier, - 'multiclass-classification': lgb.DaskLGBMClassifier, - 'ranking': lgb.DaskLGBMRanker + "regression": lgb.DaskLGBMRegressor, + "binary-classification": lgb.DaskLGBMClassifier, + "multiclass-classification": lgb.DaskLGBMClassifier, + "ranking": lgb.DaskLGBMRanker, } task_to_local_factory = { - 'regression': lgb.LGBMRegressor, - 'binary-classification': lgb.LGBMClassifier, - 'multiclass-classification': lgb.LGBMClassifier, - 'ranking': lgb.LGBMRanker + "regression": lgb.LGBMRegressor, + "binary-classification": lgb.LGBMClassifier, + "multiclass-classification": lgb.LGBMClassifier, + "ranking": lgb.LGBMRanker, } pytestmark = [ - pytest.mark.skipif(getenv('TASK', '') == 'mpi', reason='Fails to run with MPI interface'), - pytest.mark.skipif(getenv('TASK', '') == 'gpu', reason='Fails to run with GPU interface'), - pytest.mark.skipif(getenv('TASK', '') == 'cuda', reason='Fails to run with CUDA interface') + pytest.mark.skipif(getenv("TASK", "") == "mpi", reason="Fails to run with MPI interface"), + pytest.mark.skipif(getenv("TASK", "") == "gpu", reason="Fails to run with GPU interface"), + pytest.mark.skipif(getenv("TASK", "") == "cuda", reason="Fails to run with CUDA interface"), ] -@pytest.fixture(scope='module') +@pytest.fixture(scope="module") def cluster(): dask_cluster = LocalCluster(n_workers=2, threads_per_worker=2, dashboard_address=None) yield dask_cluster dask_cluster.close() -@pytest.fixture(scope='module') +@pytest.fixture(scope="module") def cluster2(): dask_cluster = LocalCluster(n_workers=2, threads_per_worker=2, dashboard_address=None) yield dask_cluster dask_cluster.close() -@pytest.fixture(scope='module') +@pytest.fixture(scope="module") def cluster_three_workers(): dask_cluster = LocalCluster(n_workers=3, threads_per_worker=1, dashboard_address=None) yield dask_cluster @@ -93,46 +93,43 
@@ listen_port.port = 13000 def _get_workers_hostname(cluster: LocalCluster) -> str: - one_worker_address = next(iter(cluster.scheduler_info['workers'])) + one_worker_address = next(iter(cluster.scheduler_info["workers"])) return urlparse(one_worker_address).hostname -def _create_ranking_data(n_samples=100, output='array', chunk_size=50, **kwargs): +def _create_ranking_data(n_samples=100, output="array", chunk_size=50, **kwargs): X, y, g = make_ranking(n_samples=n_samples, random_state=42, **kwargs) rnd = np.random.RandomState(42) w = rnd.rand(X.shape[0]) * 0.01 g_rle = np.array([len(list(grp)) for _, grp in groupby(g)]) - if output.startswith('dataframe'): + if output.startswith("dataframe"): # add target, weight, and group to DataFrame so that partitions abide by group boundaries. - X_df = pd.DataFrame(X, columns=[f'feature_{i}' for i in range(X.shape[1])]) - if output == 'dataframe-with-categorical': + X_df = pd.DataFrame(X, columns=[f"feature_{i}" for i in range(X.shape[1])]) + if output == "dataframe-with-categorical": for i in range(5): col_name = f"cat_col{i}" - cat_values = rnd.choice(['a', 'b'], X.shape[0]) - cat_series = pd.Series( - cat_values, - dtype='category' - ) + cat_values = rnd.choice(["a", "b"], X.shape[0]) + cat_series = pd.Series(cat_values, dtype="category") X_df[col_name] = cat_series X = X_df.copy() X_df = X_df.assign(y=y, g=g, w=w) # set_index ensures partitions are based on group id. # See https://stackoverflow.com/questions/49532824/dask-dataframe-split-partitions-based-on-a-column-or-function. - X_df.set_index('g', inplace=True) + X_df.set_index("g", inplace=True) dX = dd.from_pandas(X_df, chunksize=chunk_size) # separate target, weight from features. - dy = dX['y'] - dw = dX['w'] - dX = dX.drop(columns=['y', 'w']) + dy = dX["y"] + dw = dX["w"] + dX = dX.drop(columns=["y", "w"]) dg = dX.index.to_series() # encode group identifiers into run-length encoding, the format LightGBMRanker is expecting # so that within each partition, sum(g) = n_samples. - dg = dg.map_partitions(lambda p: p.groupby('g', sort=False).apply(lambda z: z.shape[0])) - elif output == 'array': + dg = dg.map_partitions(lambda p: p.groupby("g", sort=False).apply(lambda z: z.shape[0])) + elif output == "array": # ranking arrays: one chunk per group. Each chunk must include all columns. 
p = X.shape[1] dX, dy, dw, dg = [], [], [], [] @@ -148,71 +145,63 @@ def _create_ranking_data(n_samples=100, output='array', chunk_size=50, **kwargs) dw = da.concatenate(dw, axis=0) dg = da.concatenate(dg, axis=0) else: - raise ValueError('Ranking data creation only supported for Dask arrays and dataframes') + raise ValueError("Ranking data creation only supported for Dask arrays and dataframes") return X, y, w, g_rle, dX, dy, dw, dg -def _create_data(objective, n_samples=1_000, output='array', chunk_size=500, **kwargs): - if objective.endswith('classification'): - if objective == 'binary-classification': +def _create_data(objective, n_samples=1_000, output="array", chunk_size=500, **kwargs): + if objective.endswith("classification"): + if objective == "binary-classification": centers = [[-4, -4], [4, 4]] - elif objective == 'multiclass-classification': + elif objective == "multiclass-classification": centers = [[-4, -4], [4, 4], [-4, 4]] else: raise ValueError(f"Unknown classification task '{objective}'") X, y = make_blobs(n_samples=n_samples, centers=centers, random_state=42) - elif objective == 'regression': + elif objective == "regression": X, y = make_regression(n_samples=n_samples, n_features=4, n_informative=2, random_state=42) - elif objective == 'ranking': - return _create_ranking_data( - n_samples=n_samples, - output=output, - chunk_size=chunk_size, - **kwargs - ) + elif objective == "ranking": + return _create_ranking_data(n_samples=n_samples, output=output, chunk_size=chunk_size, **kwargs) else: raise ValueError(f"Unknown objective '{objective}'") rnd = np.random.RandomState(42) weights = rnd.random(X.shape[0]) * 0.01 - if output == 'array': + if output == "array": dX = da.from_array(X, (chunk_size, X.shape[1])) dy = da.from_array(y, chunk_size) dw = da.from_array(weights, chunk_size) - elif output.startswith('dataframe'): - X_df = pd.DataFrame(X, columns=[f'feature_{i}' for i in range(X.shape[1])]) - if output == 'dataframe-with-categorical': + elif output.startswith("dataframe"): + X_df = pd.DataFrame(X, columns=[f"feature_{i}" for i in range(X.shape[1])]) + if output == "dataframe-with-categorical": num_cat_cols = 2 for i in range(num_cat_cols): col_name = f"cat_col{i}" - cat_values = rnd.choice(['a', 'b'], X.shape[0]) - cat_series = pd.Series( - cat_values, - dtype='category' - ) + cat_values = rnd.choice(["a", "b"], X.shape[0]) + cat_series = pd.Series(cat_values, dtype="category") X_df[col_name] = cat_series X = np.hstack((X, cat_series.cat.codes.values.reshape(-1, 1))) # make one categorical feature relevant to the target - cat_col_is_a = X_df['cat_col0'] == 'a' - if objective == 'regression': + cat_col_is_a = X_df["cat_col0"] == "a" + if objective == "regression": y = np.where(cat_col_is_a, y, 2 * y) - elif objective == 'binary-classification': + elif objective == "binary-classification": y = np.where(cat_col_is_a, y, 1 - y) - elif objective == 'multiclass-classification': + elif objective == "multiclass-classification": n_classes = 3 y = np.where(cat_col_is_a, y, (1 + y) % n_classes) - y_df = pd.Series(y, name='target') + y_df = pd.Series(y, name="target") dX = dd.from_pandas(X_df, chunksize=chunk_size) dy = dd.from_pandas(y_df, chunksize=chunk_size) dw = dd.from_array(weights, chunksize=chunk_size) - elif output == 'scipy_csr_matrix': + elif output == "scipy_csr_matrix": dX = da.from_array(X, chunks=(chunk_size, X.shape[1])).map_blocks(csr_matrix) dy = da.from_array(y, chunks=chunk_size) dw = da.from_array(weights, chunk_size) X = csr_matrix(X) - elif output == 
'scipy_csc_matrix': + elif output == "scipy_csc_matrix": dX = da.from_array(X, chunks=(chunk_size, X.shape[1])).map_blocks(csc_matrix) dy = da.from_array(y, chunks=chunk_size) dw = da.from_array(weights, chunk_size) @@ -234,7 +223,7 @@ def _accuracy_score(dy_true, dy_pred): def _constant_metric(y_true, y_pred): - metric_name = 'constant_metric' + metric_name = "constant_metric" value = 0.708 is_higher_better = False return metric_name, value, is_higher_better @@ -253,46 +242,32 @@ def _objective_logistic_regression(y_true, y_pred): return grad, hess -@pytest.mark.parametrize('output', data_output) -@pytest.mark.parametrize('task', ['binary-classification', 'multiclass-classification']) -@pytest.mark.parametrize('boosting_type', boosting_types) -@pytest.mark.parametrize('tree_learner', distributed_training_algorithms) +@pytest.mark.parametrize("output", data_output) +@pytest.mark.parametrize("task", ["binary-classification", "multiclass-classification"]) +@pytest.mark.parametrize("boosting_type", boosting_types) +@pytest.mark.parametrize("tree_learner", distributed_training_algorithms) def test_classifier(output, task, boosting_type, tree_learner, cluster): with Client(cluster) as client: - X, y, w, _, dX, dy, dw, _ = _create_data( - objective=task, - output=output - ) + X, y, w, _, dX, dy, dw, _ = _create_data(objective=task, output=output) - params = { - "boosting_type": boosting_type, - "tree_learner": tree_learner, - "n_estimators": 50, - "num_leaves": 31 - } - if boosting_type == 'rf': - params.update({ - 'bagging_freq': 1, - 'bagging_fraction': 0.9, - }) - elif boosting_type == 'goss': - params['top_rate'] = 0.5 + params = {"boosting_type": boosting_type, "tree_learner": tree_learner, "n_estimators": 50, "num_leaves": 31} + if boosting_type == "rf": + params.update( + { + "bagging_freq": 1, + "bagging_fraction": 0.9, + } + ) + elif boosting_type == "goss": + params["top_rate"] = 0.5 - dask_classifier = lgb.DaskLGBMClassifier( - client=client, - time_out=5, - **params - ) + dask_classifier = lgb.DaskLGBMClassifier(client=client, time_out=5, **params) dask_classifier = dask_classifier.fit(dX, dy, sample_weight=dw) p1 = dask_classifier.predict(dX) p1_raw = dask_classifier.predict(dX, raw_score=True).compute() p1_first_iter_raw = dask_classifier.predict(dX, start_iteration=0, num_iteration=1, raw_score=True).compute() p1_early_stop_raw = dask_classifier.predict( - dX, - pred_early_stop=True, - pred_early_stop_margin=1.0, - pred_early_stop_freq=2, - raw_score=True + dX, pred_early_stop=True, pred_early_stop_margin=1.0, pred_early_stop_freq=2, raw_score=True ).compute() p1_proba = dask_classifier.predict_proba(dX).compute() p1_pred_leaf = dask_classifier.predict(dX, pred_leaf=True) @@ -306,7 +281,7 @@ def test_classifier(output, task, boosting_type, tree_learner, cluster): p2_proba = local_classifier.predict_proba(X) s2 = local_classifier.score(X, y) - if boosting_type == 'rf': + if boosting_type == "rf": # https://github.com/microsoft/LightGBM/issues/4118 assert_eq(s1, s2, atol=0.01) assert_eq(p1_proba, p2_proba, atol=0.8) @@ -329,47 +304,30 @@ def test_classifier(output, task, boosting_type, tree_learner, cluster): # pref_leaf values should have the right shape # and values that look like valid tree nodes pred_leaf_vals = p1_pred_leaf.compute() - assert pred_leaf_vals.shape == ( - X.shape[0], - dask_classifier.booster_.num_trees() - ) - assert np.max(pred_leaf_vals) <= params['num_leaves'] + assert pred_leaf_vals.shape == (X.shape[0], dask_classifier.booster_.num_trees()) + assert 
np.max(pred_leaf_vals) <= params["num_leaves"] assert np.min(pred_leaf_vals) >= 0 - assert len(np.unique(pred_leaf_vals)) <= params['num_leaves'] + assert len(np.unique(pred_leaf_vals)) <= params["num_leaves"] # be sure LightGBM actually used at least one categorical column, # and that it was correctly treated as a categorical feature - if output == 'dataframe-with-categorical': - cat_cols = [ - col for col in dX.columns - if dX.dtypes[col].name == 'category' - ] + if output == "dataframe-with-categorical": + cat_cols = [col for col in dX.columns if dX.dtypes[col].name == "category"] tree_df = dask_classifier.booster_.trees_to_dataframe() - node_uses_cat_col = tree_df['split_feature'].isin(cat_cols) + node_uses_cat_col = tree_df["split_feature"].isin(cat_cols) assert node_uses_cat_col.sum() > 0 - assert tree_df.loc[node_uses_cat_col, "decision_type"].unique()[0] == '==' + assert tree_df.loc[node_uses_cat_col, "decision_type"].unique()[0] == "==" -@pytest.mark.parametrize('output', data_output + ['scipy_csc_matrix']) -@pytest.mark.parametrize('task', ['binary-classification', 'multiclass-classification']) +@pytest.mark.parametrize("output", data_output + ["scipy_csc_matrix"]) +@pytest.mark.parametrize("task", ["binary-classification", "multiclass-classification"]) def test_classifier_pred_contrib(output, task, cluster): with Client(cluster) as client: - X, y, w, _, dX, dy, dw, _ = _create_data( - objective=task, - output=output - ) + X, y, w, _, dX, dy, dw, _ = _create_data(objective=task, output=output) - params = { - "n_estimators": 10, - "num_leaves": 10 - } + params = {"n_estimators": 10, "num_leaves": 10} - dask_classifier = lgb.DaskLGBMClassifier( - client=client, - time_out=5, - tree_learner='data', - **params - ) + dask_classifier = lgb.DaskLGBMClassifier(client=client, time_out=5, tree_learner="data", **params) dask_classifier = dask_classifier.fit(dX, dy, sample_weight=dw) preds_with_contrib = dask_classifier.predict(dX, pred_contrib=True) @@ -390,10 +348,10 @@ def test_classifier_pred_contrib(output, task, cluster): # # since that case is so different than all other cases, check the relevant things here # and then return early - if output.startswith('scipy') and task == 'multiclass-classification': - if output == 'scipy_csr_matrix': + if output.startswith("scipy") and task == "multiclass-classification": + if output == "scipy_csr_matrix": expected_type = csr_matrix - elif output == 'scipy_csc_matrix': + elif output == "scipy_csc_matrix": expected_type = csc_matrix else: raise ValueError(f"Unrecognized output type: {output}") @@ -415,20 +373,17 @@ def test_classifier_pred_contrib(output, task, cluster): return preds_with_contrib = preds_with_contrib.compute() - if output.startswith('scipy'): + if output.startswith("scipy"): preds_with_contrib = preds_with_contrib.toarray() # be sure LightGBM actually used at least one categorical column, # and that it was correctly treated as a categorical feature - if output == 'dataframe-with-categorical': - cat_cols = [ - col for col in dX.columns - if dX.dtypes[col].name == 'category' - ] + if output == "dataframe-with-categorical": + cat_cols = [col for col in dX.columns if dX.dtypes[col].name == "category"] tree_df = dask_classifier.booster_.trees_to_dataframe() - node_uses_cat_col = tree_df['split_feature'].isin(cat_cols) + node_uses_cat_col = tree_df["split_feature"].isin(cat_cols) assert node_uses_cat_col.sum() > 0 - assert tree_df.loc[node_uses_cat_col, "decision_type"].unique()[0] == '==' + assert tree_df.loc[node_uses_cat_col, 
"decision_type"].unique()[0] == "==" # * shape depends on whether it is binary or multiclass classification # * matrix for binary classification is of the form [feature_contrib, base_value], @@ -446,8 +401,8 @@ def test_classifier_pred_contrib(output, task, cluster): assert len(np.unique(preds_with_contrib[:, base_value_col]) == 1) -@pytest.mark.parametrize('output', data_output) -@pytest.mark.parametrize('task', ['binary-classification', 'multiclass-classification']) +@pytest.mark.parametrize("output", data_output) +@pytest.mark.parametrize("task", ["binary-classification", "multiclass-classification"]) def test_classifier_custom_objective(output, task, cluster): with Client(cluster) as client: X, y, w, _, dX, dy, dw, _ = _create_data( @@ -461,25 +416,19 @@ def test_classifier_custom_objective(output, task, cluster): "verbose": -1, "seed": 708, "deterministic": True, - "force_col_wise": True + "force_col_wise": True, } - if task == 'binary-classification': - params.update({ - 'objective': _objective_logistic_regression, - }) - elif task == 'multiclass-classification': - params.update({ - 'objective': sklearn_multiclass_custom_objective, - 'num_classes': 3 - }) + if task == "binary-classification": + params.update( + { + "objective": _objective_logistic_regression, + } + ) + elif task == "multiclass-classification": + params.update({"objective": sklearn_multiclass_custom_objective, "num_classes": 3}) - dask_classifier = lgb.DaskLGBMClassifier( - client=client, - time_out=5, - tree_learner='data', - **params - ) + dask_classifier = lgb.DaskLGBMClassifier(client=client, time_out=5, tree_learner="data", **params) dask_classifier = dask_classifier.fit(dX, dy, sample_weight=dw) dask_classifier_local = dask_classifier.to_local() p1_raw = dask_classifier.predict(dX, raw_score=True).compute() @@ -490,14 +439,14 @@ def test_classifier_custom_objective(output, task, cluster): p2_raw = local_classifier.predict(X, raw_score=True) # with a custom objective, prediction result is a raw score instead of predicted class - if task == 'binary-classification': + if task == "binary-classification": p1_proba = 1.0 / (1.0 + np.exp(-p1_raw)) p1_class = (p1_proba > 0.5).astype(np.int64) p1_proba_local = 1.0 / (1.0 + np.exp(-p1_raw_local)) p1_class_local = (p1_proba_local > 0.5).astype(np.int64) p2_proba = 1.0 / (1.0 + np.exp(-p2_raw)) p2_class = (p2_proba > 0.5).astype(np.int64) - elif task == 'multiclass-classification': + elif task == "multiclass-classification": p1_proba = np.exp(p1_raw) / np.sum(np.exp(p1_raw), axis=1).reshape(-1, 1) p1_class = p1_proba.argmax(axis=1) p1_proba_local = np.exp(p1_raw_local) / np.sum(np.exp(p1_raw_local), axis=1).reshape(-1, 1) @@ -520,7 +469,7 @@ def test_classifier_custom_objective(output, task, cluster): def test_machines_to_worker_map_unparseable_host_names(): - workers = {'0.0.0.1:80': {}, '0.0.0.2:80': {}} + workers = {"0.0.0.1:80": {}, "0.0.0.2:80": {}} machines = "0.0.0.1:80,0.0.0.2:80" with pytest.raises(ValueError, match="Could not parse host name from worker address '0.0.0.1:80'"): lgb.dask._machines_to_worker_map(machines=machines, worker_addresses=workers.keys()) @@ -528,18 +477,13 @@ def test_machines_to_worker_map_unparseable_host_names(): def test_training_does_not_fail_on_port_conflicts(cluster): with Client(cluster) as client: - _, _, _, _, dX, dy, dw, _ = _create_data('binary-classification', output='array') + _, _, _, _, dX, dy, dw, _ = _create_data("binary-classification", output="array") lightgbm_default_port = 12400 workers_hostname = 
_get_workers_hostname(cluster) with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: s.bind((workers_hostname, lightgbm_default_port)) - dask_classifier = lgb.DaskLGBMClassifier( - client=client, - time_out=5, - n_estimators=5, - num_leaves=5 - ) + dask_classifier = lgb.DaskLGBMClassifier(client=client, time_out=5, n_estimators=5, num_leaves=5) for _ in range(5): dask_classifier.fit( X=dX, @@ -549,15 +493,12 @@ def test_training_does_not_fail_on_port_conflicts(cluster): assert dask_classifier.booster_ -@pytest.mark.parametrize('output', data_output) -@pytest.mark.parametrize('boosting_type', boosting_types) -@pytest.mark.parametrize('tree_learner', distributed_training_algorithms) +@pytest.mark.parametrize("output", data_output) +@pytest.mark.parametrize("boosting_type", boosting_types) +@pytest.mark.parametrize("tree_learner", distributed_training_algorithms) def test_regressor(output, boosting_type, tree_learner, cluster): with Client(cluster) as client: - X, y, w, _, dX, dy, dw, _ = _create_data( - objective='regression', - output=output - ) + X, y, w, _, dX, dy, dw, _ = _create_data(objective="regression", output=output) params = { "boosting_type": boosting_type, @@ -565,18 +506,15 @@ def test_regressor(output, boosting_type, tree_learner, cluster): "num_leaves": 31, "n_estimators": 20, } - if boosting_type == 'rf': - params.update({ - 'bagging_freq': 1, - 'bagging_fraction': 0.9, - }) + if boosting_type == "rf": + params.update( + { + "bagging_freq": 1, + "bagging_fraction": 0.9, + } + ) - dask_regressor = lgb.DaskLGBMRegressor( - client=client, - time_out=5, - tree=tree_learner, - **params - ) + dask_regressor = lgb.DaskLGBMRegressor(client=client, time_out=5, tree=tree_learner, **params) dask_regressor = dask_regressor.fit(dX, dy, sample_weight=dw) p1 = dask_regressor.predict(dX) p1_pred_leaf = dask_regressor.predict(dX, pred_leaf=True) @@ -603,16 +541,13 @@ def test_regressor(output, boosting_type, tree_learner, cluster): # pref_leaf values should have the right shape # and values that look like valid tree nodes pred_leaf_vals = p1_pred_leaf.compute() - assert pred_leaf_vals.shape == ( - X.shape[0], - dask_regressor.booster_.num_trees() - ) - assert np.max(pred_leaf_vals) <= params['num_leaves'] + assert pred_leaf_vals.shape == (X.shape[0], dask_regressor.booster_.num_trees()) + assert np.max(pred_leaf_vals) <= params["num_leaves"] assert np.min(pred_leaf_vals) >= 0 - assert len(np.unique(pred_leaf_vals)) <= params['num_leaves'] + assert len(np.unique(pred_leaf_vals)) <= params["num_leaves"] - assert_eq(p1, y, rtol=0.5, atol=50.) - assert_eq(p2, y, rtol=0.5, atol=50.) 
+ assert_eq(p1, y, rtol=0.5, atol=50.0) + assert_eq(p2, y, rtol=0.5, atol=50.0) # extra predict() parameters should be passed through correctly with pytest.raises(AssertionError): @@ -620,36 +555,22 @@ def test_regressor(output, boosting_type, tree_learner, cluster): # be sure LightGBM actually used at least one categorical column, # and that it was correctly treated as a categorical feature - if output == 'dataframe-with-categorical': - cat_cols = [ - col for col in dX.columns - if dX.dtypes[col].name == 'category' - ] + if output == "dataframe-with-categorical": + cat_cols = [col for col in dX.columns if dX.dtypes[col].name == "category"] tree_df = dask_regressor.booster_.trees_to_dataframe() - node_uses_cat_col = tree_df['split_feature'].isin(cat_cols) + node_uses_cat_col = tree_df["split_feature"].isin(cat_cols) assert node_uses_cat_col.sum() > 0 - assert tree_df.loc[node_uses_cat_col, "decision_type"].unique()[0] == '==' + assert tree_df.loc[node_uses_cat_col, "decision_type"].unique()[0] == "==" -@pytest.mark.parametrize('output', data_output) +@pytest.mark.parametrize("output", data_output) def test_regressor_pred_contrib(output, cluster): with Client(cluster) as client: - X, y, w, _, dX, dy, dw, _ = _create_data( - objective='regression', - output=output - ) + X, y, w, _, dX, dy, dw, _ = _create_data(objective="regression", output=output) - params = { - "n_estimators": 10, - "num_leaves": 10 - } + params = {"n_estimators": 10, "num_leaves": 10} - dask_regressor = lgb.DaskLGBMRegressor( - client=client, - time_out=5, - tree_learner='data', - **params - ) + dask_regressor = lgb.DaskLGBMRegressor(client=client, time_out=5, tree_learner="data", **params) dask_regressor = dask_regressor.fit(dX, dy, sample_weight=dw) preds_with_contrib = dask_regressor.predict(dX, pred_contrib=True).compute() @@ -668,39 +589,23 @@ def test_regressor_pred_contrib(output, cluster): # be sure LightGBM actually used at least one categorical column, # and that it was correctly treated as a categorical feature - if output == 'dataframe-with-categorical': - cat_cols = [ - col for col in dX.columns - if dX.dtypes[col].name == 'category' - ] + if output == "dataframe-with-categorical": + cat_cols = [col for col in dX.columns if dX.dtypes[col].name == "category"] tree_df = dask_regressor.booster_.trees_to_dataframe() - node_uses_cat_col = tree_df['split_feature'].isin(cat_cols) + node_uses_cat_col = tree_df["split_feature"].isin(cat_cols) assert node_uses_cat_col.sum() > 0 - assert tree_df.loc[node_uses_cat_col, "decision_type"].unique()[0] == '==' + assert tree_df.loc[node_uses_cat_col, "decision_type"].unique()[0] == "==" -@pytest.mark.parametrize('output', data_output) -@pytest.mark.parametrize('alpha', [.1, .5, .9]) +@pytest.mark.parametrize("output", data_output) +@pytest.mark.parametrize("alpha", [0.1, 0.5, 0.9]) def test_regressor_quantile(output, alpha, cluster): with Client(cluster) as client: - X, y, w, _, dX, dy, dw, _ = _create_data( - objective='regression', - output=output - ) + X, y, w, _, dX, dy, dw, _ = _create_data(objective="regression", output=output) - params = { - "objective": "quantile", - "alpha": alpha, - "random_state": 42, - "n_estimators": 10, - "num_leaves": 10 - } + params = {"objective": "quantile", "alpha": alpha, "random_state": 42, "n_estimators": 10, "num_leaves": 10} - dask_regressor = lgb.DaskLGBMRegressor( - client=client, - tree_learner_type='data_parallel', - **params - ) + dask_regressor = lgb.DaskLGBMRegressor(client=client, tree_learner_type="data_parallel", **params) 
dask_regressor = dask_regressor.fit(dX, dy, sample_weight=dw) p1 = dask_regressor.predict(dX).compute() q1 = np.count_nonzero(y < p1) / y.shape[0] @@ -716,37 +621,22 @@ def test_regressor_quantile(output, alpha, cluster): # be sure LightGBM actually used at least one categorical column, # and that it was correctly treated as a categorical feature - if output == 'dataframe-with-categorical': - cat_cols = [ - col for col in dX.columns - if dX.dtypes[col].name == 'category' - ] + if output == "dataframe-with-categorical": + cat_cols = [col for col in dX.columns if dX.dtypes[col].name == "category"] tree_df = dask_regressor.booster_.trees_to_dataframe() - node_uses_cat_col = tree_df['split_feature'].isin(cat_cols) + node_uses_cat_col = tree_df["split_feature"].isin(cat_cols) assert node_uses_cat_col.sum() > 0 - assert tree_df.loc[node_uses_cat_col, "decision_type"].unique()[0] == '==' + assert tree_df.loc[node_uses_cat_col, "decision_type"].unique()[0] == "==" -@pytest.mark.parametrize('output', data_output) +@pytest.mark.parametrize("output", data_output) def test_regressor_custom_objective(output, cluster): with Client(cluster) as client: - X, y, w, _, dX, dy, dw, _ = _create_data( - objective='regression', - output=output - ) + X, y, w, _, dX, dy, dw, _ = _create_data(objective="regression", output=output) - params = { - "n_estimators": 10, - "num_leaves": 10, - "objective": _objective_least_squares - } + params = {"n_estimators": 10, "num_leaves": 10, "objective": _objective_least_squares} - dask_regressor = lgb.DaskLGBMRegressor( - client=client, - time_out=5, - tree_learner='data', - **params - ) + dask_regressor = lgb.DaskLGBMRegressor(client=client, time_out=5, tree_learner="data", **params) dask_regressor = dask_regressor.fit(dX, dy, sample_weight=dw) dask_regressor_local = dask_regressor.to_local() p1 = dask_regressor.predict(dX) @@ -772,34 +662,26 @@ def test_regressor_custom_objective(output, cluster): assert_eq(p1, p1_local) # predictions should be better than random - assert_precision = {"rtol": 0.5, "atol": 50.} + assert_precision = {"rtol": 0.5, "atol": 50.0} assert_eq(p1, y, **assert_precision) assert_eq(p2, y, **assert_precision) -@pytest.mark.parametrize('output', ['array', 'dataframe', 'dataframe-with-categorical']) -@pytest.mark.parametrize('group', [None, group_sizes]) -@pytest.mark.parametrize('boosting_type', boosting_types) -@pytest.mark.parametrize('tree_learner', distributed_training_algorithms) +@pytest.mark.parametrize("output", ["array", "dataframe", "dataframe-with-categorical"]) +@pytest.mark.parametrize("group", [None, group_sizes]) +@pytest.mark.parametrize("boosting_type", boosting_types) +@pytest.mark.parametrize("tree_learner", distributed_training_algorithms) def test_ranker(output, group, boosting_type, tree_learner, cluster): with Client(cluster) as client: - if output == 'dataframe-with-categorical': + if output == "dataframe-with-categorical": X, y, w, g, dX, dy, dw, dg = _create_data( - objective='ranking', - output=output, - group=group, - n_features=1, - n_informative=1 + objective="ranking", output=output, group=group, n_features=1, n_informative=1 ) else: - X, y, w, g, dX, dy, dw, dg = _create_data( - objective='ranking', - output=output, - group=group - ) + X, y, w, g, dX, dy, dw, dg = _create_data(objective="ranking", output=output, group=group) # rebalance small dask.Array dataset for better performance. 
- if output == 'array': + if output == "array": dX = dX.persist() dy = dy.persist() dw = dw.persist() @@ -814,20 +696,17 @@ def test_ranker(output, group, boosting_type, tree_learner, cluster): "random_state": 42, "n_estimators": 50, "num_leaves": 20, - "min_child_samples": 1 + "min_child_samples": 1, } - if boosting_type == 'rf': - params.update({ - 'bagging_freq': 1, - 'bagging_fraction': 0.9, - }) + if boosting_type == "rf": + params.update( + { + "bagging_freq": 1, + "bagging_fraction": 0.9, + } + ) - dask_ranker = lgb.DaskLGBMRanker( - client=client, - time_out=5, - tree_learner_type=tree_learner, - **params - ) + dask_ranker = lgb.DaskLGBMRanker(client=client, time_out=5, tree_learner_type=tree_learner, **params) dask_ranker = dask_ranker.fit(dX, dy, sample_weight=dw, group=dg) rnkvec_dask = dask_ranker.predict(dX) rnkvec_dask = rnkvec_dask.compute() @@ -835,11 +714,7 @@ def test_ranker(output, group, boosting_type, tree_learner, cluster): p1_raw = dask_ranker.predict(dX, raw_score=True).compute() p1_first_iter_raw = dask_ranker.predict(dX, start_iteration=0, num_iteration=1, raw_score=True).compute() p1_early_stop_raw = dask_ranker.predict( - dX, - pred_early_stop=True, - pred_early_stop_margin=1.0, - pred_early_stop_freq=2, - raw_score=True + dX, pred_early_stop=True, pred_early_stop_margin=1.0, pred_early_stop_freq=2, raw_score=True ).compute() rnkvec_dask_local = dask_ranker.to_local().predict(X) @@ -864,47 +739,33 @@ def test_ranker(output, group, boosting_type, tree_learner, cluster): # pref_leaf values should have the right shape # and values that look like valid tree nodes pred_leaf_vals = p1_pred_leaf.compute() - assert pred_leaf_vals.shape == ( - X.shape[0], - dask_ranker.booster_.num_trees() - ) - assert np.max(pred_leaf_vals) <= params['num_leaves'] + assert pred_leaf_vals.shape == (X.shape[0], dask_ranker.booster_.num_trees()) + assert np.max(pred_leaf_vals) <= params["num_leaves"] assert np.min(pred_leaf_vals) >= 0 - assert len(np.unique(pred_leaf_vals)) <= params['num_leaves'] + assert len(np.unique(pred_leaf_vals)) <= params["num_leaves"] # be sure LightGBM actually used at least one categorical column, # and that it was correctly treated as a categorical feature - if output == 'dataframe-with-categorical': - cat_cols = [ - col for col in dX.columns - if dX.dtypes[col].name == 'category' - ] + if output == "dataframe-with-categorical": + cat_cols = [col for col in dX.columns if dX.dtypes[col].name == "category"] tree_df = dask_ranker.booster_.trees_to_dataframe() - node_uses_cat_col = tree_df['split_feature'].isin(cat_cols) + node_uses_cat_col = tree_df["split_feature"].isin(cat_cols) assert node_uses_cat_col.sum() > 0 - assert tree_df.loc[node_uses_cat_col, "decision_type"].unique()[0] == '==' + assert tree_df.loc[node_uses_cat_col, "decision_type"].unique()[0] == "==" -@pytest.mark.parametrize('output', ['array', 'dataframe', 'dataframe-with-categorical']) +@pytest.mark.parametrize("output", ["array", "dataframe", "dataframe-with-categorical"]) def test_ranker_custom_objective(output, cluster): with Client(cluster) as client: - if output == 'dataframe-with-categorical': + if output == "dataframe-with-categorical": X, y, w, g, dX, dy, dw, dg = _create_data( - objective='ranking', - output=output, - group=group_sizes, - n_features=1, - n_informative=1 + objective="ranking", output=output, group=group_sizes, n_features=1, n_informative=1 ) else: - X, y, w, g, dX, dy, dw, dg = _create_data( - objective='ranking', - output=output, - group=group_sizes - ) + X, y, w, g, 
dX, dy, dw, dg = _create_data(objective="ranking", output=output, group=group_sizes) # rebalance small dask.Array dataset for better performance. - if output == 'array': + if output == "array": dX = dX.persist() dy = dy.persist() dw = dw.persist() @@ -917,15 +778,10 @@ def test_ranker_custom_objective(output, cluster): "n_estimators": 50, "num_leaves": 20, "min_child_samples": 1, - "objective": _objective_least_squares + "objective": _objective_least_squares, } - dask_ranker = lgb.DaskLGBMRanker( - client=client, - time_out=5, - tree_learner_type="data", - **params - ) + dask_ranker = lgb.DaskLGBMRanker(client=client, time_out=5, tree_learner_type="data", **params) dask_ranker = dask_ranker.fit(dX, dy, sample_weight=dw, group=dg) rnkvec_dask = dask_ranker.predict(dX).compute() dask_ranker_local = dask_ranker.to_local() @@ -946,13 +802,13 @@ def test_ranker_custom_objective(output, cluster): assert callable(dask_ranker_local.objective_) -@pytest.mark.parametrize('task', tasks) -@pytest.mark.parametrize('output', data_output) -@pytest.mark.parametrize('eval_sizes', [[0.5, 1, 1.5], [0]]) -@pytest.mark.parametrize('eval_names_prefix', ['specified', None]) +@pytest.mark.parametrize("task", tasks) +@pytest.mark.parametrize("output", data_output) +@pytest.mark.parametrize("eval_sizes", [[0.5, 1, 1.5], [0]]) +@pytest.mark.parametrize("eval_names_prefix", ["specified", None]) def test_eval_set_no_early_stopping(task, output, eval_sizes, eval_names_prefix, cluster): - if task == 'ranking' and output == 'scipy_csr_matrix': - pytest.skip('LGBMRanker is not currently tested on sparse matrices') + if task == "ranking" and output == "scipy_csr_matrix": + pytest.skip("LGBMRanker is not currently tested on sparse matrices") with Client(cluster) as client: # Use larger trainset to prevent premature stopping due to zero loss, causing num_trees() < n_estimators. @@ -966,36 +822,33 @@ def test_eval_set_no_early_stopping(task, output, eval_sizes, eval_names_prefix, eval_init_score = None if eval_names_prefix: - eval_names = [f'{eval_names_prefix}_{i}' for i in range(len(eval_sizes))] + eval_names = [f"{eval_names_prefix}_{i}" for i in range(len(eval_sizes))] else: eval_names = None X, y, w, g, dX, dy, dw, dg = _create_data( - objective=task, - n_samples=n_samples, - output=output, - chunk_size=chunk_size + objective=task, n_samples=n_samples, output=output, chunk_size=chunk_size ) - if task == 'ranking': - eval_metrics = ['ndcg'] + if task == "ranking": + eval_metrics = ["ndcg"] eval_at = (5, 6) - eval_metric_names = [f'ndcg@{k}' for k in eval_at] + eval_metric_names = [f"ndcg@{k}" for k in eval_at] eval_group = [] else: # test eval_class_weight, eval_init_score on binary-classification task. # Note: objective's default `metric` will be evaluated in evals_result_ in addition to all eval_metrics. 
- if task == 'binary-classification': - eval_metrics = ['binary_error', 'auc'] - eval_metric_names = ['binary_logloss', 'binary_error', 'auc'] + if task == "binary-classification": + eval_metrics = ["binary_error", "auc"] + eval_metric_names = ["binary_logloss", "binary_error", "auc"] eval_class_weight = [] eval_init_score = [] - elif task == 'multiclass-classification': - eval_metrics = ['multi_error'] - eval_metric_names = ['multi_logloss', 'multi_error'] - elif task == 'regression': - eval_metrics = ['l1'] - eval_metric_names = ['l2', 'l1'] + elif task == "multiclass-classification": + eval_metrics = ["multi_error"] + eval_metric_names = ["multi_logloss", "multi_error"] + elif task == "regression": + eval_metrics = ["l1"] + eval_metric_names = ["l2", "l1"] # create eval_sets by creating new datasets or copying training data. for eval_size in eval_sizes: @@ -1008,23 +861,20 @@ def test_eval_set_no_early_stopping(task, output, eval_sizes, eval_names_prefix, else: n_eval_samples = max(chunk_size, int(n_samples * eval_size)) _, y_e, _, _, dX_e, dy_e, dw_e, dg_e = _create_data( - objective=task, - n_samples=n_eval_samples, - output=output, - chunk_size=chunk_size + objective=task, n_samples=n_eval_samples, output=output, chunk_size=chunk_size ) eval_set.append((dX_e, dy_e)) eval_sample_weight.append(dw_e) - if task == 'ranking': + if task == "ranking": eval_group.append(dg_e) - if task == 'binary-classification': + if task == "binary-classification": n_neg = np.sum(y_e == 0) n_pos = np.sum(y_e == 1) eval_class_weight.append({0: n_neg / n_pos, 1: n_pos / n_neg}) init_score_value = np.log(np.mean(y_e) / (1 - np.mean(y_e))) - if 'dataframe' in output: + if "dataframe" in output: d_init_score = dy_e.map_partitions(lambda x, val=init_score_value: pd.Series([val] * x.size)) else: d_init_score = dy_e.map_blocks(lambda x, val=init_score_value: np.repeat(val, x.size)) @@ -1032,44 +882,36 @@ def test_eval_set_no_early_stopping(task, output, eval_sizes, eval_names_prefix, eval_init_score.append(d_init_score) fit_trees = 50 - params = { - "random_state": 42, - "n_estimators": fit_trees, - "num_leaves": 2 - } + params = {"random_state": 42, "n_estimators": fit_trees, "num_leaves": 2} model_factory = task_to_dask_factory[task] - dask_model = model_factory( - client=client, - **params - ) + dask_model = model_factory(client=client, **params) fit_params = { - 'X': dX, - 'y': dy, - 'eval_set': eval_set, - 'eval_names': eval_names, - 'eval_sample_weight': eval_sample_weight, - 'eval_init_score': eval_init_score, - 'eval_metric': eval_metrics + "X": dX, + "y": dy, + "eval_set": eval_set, + "eval_names": eval_names, + "eval_sample_weight": eval_sample_weight, + "eval_init_score": eval_init_score, + "eval_metric": eval_metrics, } - if task == 'ranking': - fit_params.update( - {'group': dg, - 'eval_group': eval_group, - 'eval_at': eval_at} - ) - elif task == 'binary-classification': - fit_params.update({'eval_class_weight': eval_class_weight}) + if task == "ranking": + fit_params.update({"group": dg, "eval_group": eval_group, "eval_at": eval_at}) + elif task == "binary-classification": + fit_params.update({"eval_class_weight": eval_class_weight}) if eval_sizes == [0]: - with pytest.warns(UserWarning, match='Worker (.*) was not allocated eval_set data. Therefore evals_result_ and best_score_ data may be unreliable.'): + with pytest.warns( + UserWarning, + match="Worker (.*) was not allocated eval_set data. 
Therefore evals_result_ and best_score_ data may be unreliable.", + ): dask_model.fit(**fit_params) else: dask_model = dask_model.fit(**fit_params) # total number of trees scales up for ova classifier. - if task == 'multiclass-classification': + if task == "multiclass-classification": model_trees = fit_trees * dask_model.n_classes_ else: model_trees = fit_trees @@ -1098,67 +940,45 @@ def test_eval_set_no_early_stopping(task, output, eval_sizes, eval_names_prefix, assert len(evals_result[eval_name][metric]) == fit_trees -@pytest.mark.parametrize('task', ['binary-classification', 'regression', 'ranking']) +@pytest.mark.parametrize("task", ["binary-classification", "regression", "ranking"]) def test_eval_set_with_custom_eval_metric(task, cluster): with Client(cluster) as client: n_samples = 1000 n_eval_samples = int(n_samples * 0.5) chunk_size = 10 - output = 'array' + output = "array" X, y, w, g, dX, dy, dw, dg = _create_data( - objective=task, - n_samples=n_samples, - output=output, - chunk_size=chunk_size + objective=task, n_samples=n_samples, output=output, chunk_size=chunk_size ) _, _, _, _, dX_e, dy_e, _, dg_e = _create_data( - objective=task, - n_samples=n_eval_samples, - output=output, - chunk_size=chunk_size + objective=task, n_samples=n_eval_samples, output=output, chunk_size=chunk_size ) - if task == 'ranking': + if task == "ranking": eval_at = (5, 6) - eval_metrics = ['ndcg', _constant_metric] - eval_metric_names = [f'ndcg@{k}' for k in eval_at] + ['constant_metric'] - elif task == 'binary-classification': - eval_metrics = ['binary_error', 'auc', _constant_metric] - eval_metric_names = ['binary_logloss', 'binary_error', 'auc', 'constant_metric'] + eval_metrics = ["ndcg", _constant_metric] + eval_metric_names = [f"ndcg@{k}" for k in eval_at] + ["constant_metric"] + elif task == "binary-classification": + eval_metrics = ["binary_error", "auc", _constant_metric] + eval_metric_names = ["binary_logloss", "binary_error", "auc", "constant_metric"] else: - eval_metrics = ['l1', _constant_metric] - eval_metric_names = ['l2', 'l1', 'constant_metric'] + eval_metrics = ["l1", _constant_metric] + eval_metric_names = ["l2", "l1", "constant_metric"] fit_trees = 50 - params = { - "random_state": 42, - "n_estimators": fit_trees, - "num_leaves": 2 - } + params = {"random_state": 42, "n_estimators": fit_trees, "num_leaves": 2} model_factory = task_to_dask_factory[task] - dask_model = model_factory( - client=client, - **params - ) + dask_model = model_factory(client=client, **params) eval_set = [(dX_e, dy_e)] - fit_params = { - 'X': dX, - 'y': dy, - 'eval_set': eval_set, - 'eval_metric': eval_metrics - } - if task == 'ranking': - fit_params.update( - {'group': dg, - 'eval_group': [dg_e], - 'eval_at': eval_at} - ) + fit_params = {"X": dX, "y": dy, "eval_set": eval_set, "eval_metric": eval_metrics} + if task == "ranking": + fit_params.update({"group": dg, "eval_group": [dg_e], "eval_at": eval_at}) dask_model = dask_model.fit(**fit_params) - eval_name = 'valid_0' + eval_name = "valid_0" evals_result = dask_model.evals_result_ assert len(evals_result) == 1 assert eval_name in evals_result @@ -1167,29 +987,21 @@ def test_eval_set_with_custom_eval_metric(task, cluster): assert metric in evals_result[eval_name] assert len(evals_result[eval_name][metric]) == fit_trees - np.testing.assert_allclose(evals_result[eval_name]['constant_metric'], 0.708) + np.testing.assert_allclose(evals_result[eval_name]["constant_metric"], 0.708) -@pytest.mark.parametrize('task', tasks) +@pytest.mark.parametrize("task", tasks) 
def test_training_works_if_client_not_provided_or_set_after_construction(task, cluster): with Client(cluster) as client: - _, _, _, _, dX, dy, _, dg = _create_data( - objective=task, - output='array', - group=None - ) + _, _, _, _, dX, dy, _, dg = _create_data(objective=task, output="array", group=None) model_factory = task_to_dask_factory[task] - params = { - "time_out": 5, - "n_estimators": 1, - "num_leaves": 2 - } + params = {"time_out": 5, "n_estimators": 1, "num_leaves": 2} # should be able to use the class without specifying a client dask_model = model_factory(**params) assert dask_model.client is None - with pytest.raises(lgb.compat.LGBMNotFittedError, match='Cannot access property client_ before calling fit'): + with pytest.raises(lgb.compat.LGBMNotFittedError, match="Cannot access property client_ before calling fit"): dask_model.client_ dask_model.fit(dX, dy, group=dg) @@ -1213,7 +1025,7 @@ def test_training_works_if_client_not_provided_or_set_after_construction(task, c dask_model.set_params(client=client) assert dask_model.client == client - with pytest.raises(lgb.compat.LGBMNotFittedError, match='Cannot access property client_ before calling fit'): + with pytest.raises(lgb.compat.LGBMNotFittedError, match="Cannot access property client_ before calling fit"): dask_model.client_ dask_model.fit(dX, dy, group=dg) @@ -1233,34 +1045,23 @@ def test_training_works_if_client_not_provided_or_set_after_construction(task, c local_model.client_ -@pytest.mark.parametrize('serializer', ['pickle', 'joblib', 'cloudpickle']) -@pytest.mark.parametrize('task', tasks) -@pytest.mark.parametrize('set_client', [True, False]) -def test_model_and_local_version_are_picklable_whether_or_not_client_set_explicitly(serializer, task, set_client, tmp_path, cluster, cluster2): - +@pytest.mark.parametrize("serializer", ["pickle", "joblib", "cloudpickle"]) +@pytest.mark.parametrize("task", tasks) +@pytest.mark.parametrize("set_client", [True, False]) +def test_model_and_local_version_are_picklable_whether_or_not_client_set_explicitly( + serializer, task, set_client, tmp_path, cluster, cluster2 +): with Client(cluster) as client1: # data on cluster1 - X_1, _, _, _, dX_1, dy_1, _, dg_1 = _create_data( - objective=task, - output='array', - group=None - ) + X_1, _, _, _, dX_1, dy_1, _, dg_1 = _create_data(objective=task, output="array", group=None) with Client(cluster2) as client2: # create identical data on cluster2 - X_2, _, _, _, dX_2, dy_2, _, dg_2 = _create_data( - objective=task, - output='array', - group=None - ) + X_2, _, _, _, dX_2, dy_2, _, dg_2 = _create_data(objective=task, output="array", group=None) model_factory = task_to_dask_factory[task] - params = { - "time_out": 5, - "n_estimators": 1, - "num_leaves": 2 - } + params = {"time_out": 5, "n_estimators": 1, "num_leaves": 2} # at this point, the result of default_client() is client2 since it was the most recently # created. 
So setting client to client1 here to test that you can select a non-default client @@ -1277,33 +1078,21 @@ def test_model_and_local_version_are_picklable_whether_or_not_client_set_explici else: assert dask_model.client is None - with pytest.raises(lgb.compat.LGBMNotFittedError, match='Cannot access property client_ before calling fit'): + with pytest.raises( + lgb.compat.LGBMNotFittedError, match="Cannot access property client_ before calling fit" + ): dask_model.client_ assert "client" not in local_model.get_params() assert getattr(local_model, "client", None) is None tmp_file = tmp_path / "model-1.pkl" - pickle_obj( - obj=dask_model, - filepath=tmp_file, - serializer=serializer - ) - model_from_disk = unpickle_obj( - filepath=tmp_file, - serializer=serializer - ) + pickle_obj(obj=dask_model, filepath=tmp_file, serializer=serializer) + model_from_disk = unpickle_obj(filepath=tmp_file, serializer=serializer) local_tmp_file = tmp_path / "local-model-1.pkl" - pickle_obj( - obj=local_model, - filepath=local_tmp_file, - serializer=serializer - ) - local_model_from_disk = unpickle_obj( - filepath=local_tmp_file, - serializer=serializer - ) + pickle_obj(obj=local_model, filepath=local_tmp_file, serializer=serializer) + local_model_from_disk = unpickle_obj(filepath=local_tmp_file, serializer=serializer) assert model_from_disk.client is None @@ -1312,7 +1101,9 @@ def test_model_and_local_version_are_picklable_whether_or_not_client_set_explici else: assert dask_model.client is None - with pytest.raises(lgb.compat.LGBMNotFittedError, match='Cannot access property client_ before calling fit'): + with pytest.raises( + lgb.compat.LGBMNotFittedError, match="Cannot access property client_ before calling fit" + ): dask_model.client_ # client will always be None after unpickling @@ -1340,26 +1131,12 @@ def test_model_and_local_version_are_picklable_whether_or_not_client_set_explici local_model.client_ tmp_file2 = tmp_path / "model-2.pkl" - pickle_obj( - obj=dask_model, - filepath=tmp_file2, - serializer=serializer - ) - fitted_model_from_disk = unpickle_obj( - filepath=tmp_file2, - serializer=serializer - ) + pickle_obj(obj=dask_model, filepath=tmp_file2, serializer=serializer) + fitted_model_from_disk = unpickle_obj(filepath=tmp_file2, serializer=serializer) local_tmp_file2 = tmp_path / "local-model-2.pkl" - pickle_obj( - obj=local_model, - filepath=local_tmp_file2, - serializer=serializer - ) - local_fitted_model_from_disk = unpickle_obj( - filepath=local_tmp_file2, - serializer=serializer - ) + pickle_obj(obj=local_model, filepath=local_tmp_file2, serializer=serializer) + local_fitted_model_from_disk = unpickle_obj(filepath=local_tmp_file2, serializer=serializer) if set_client: assert dask_model.client == client1 @@ -1405,35 +1182,25 @@ def test_warns_and_continues_on_unrecognized_tree_learner(cluster): X = da.random.random((1e3, 10)) y = da.random.random((1e3, 1)) dask_regressor = lgb.DaskLGBMRegressor( - client=client, - time_out=5, - tree_learner='some-nonsense-value', - n_estimators=1, - num_leaves=2 + client=client, time_out=5, tree_learner="some-nonsense-value", n_estimators=1, num_leaves=2 ) - with pytest.warns(UserWarning, match='Parameter tree_learner set to some-nonsense-value'): + with pytest.warns(UserWarning, match="Parameter tree_learner set to some-nonsense-value"): dask_regressor = dask_regressor.fit(X, y) assert dask_regressor.fitted_ -@pytest.mark.parametrize('tree_learner', ['data_parallel', 'voting_parallel']) +@pytest.mark.parametrize("tree_learner", ["data_parallel", 
"voting_parallel"]) def test_training_respects_tree_learner_aliases(tree_learner, cluster): with Client(cluster) as client: - task = 'regression' - _, _, _, _, dX, dy, dw, dg = _create_data(objective=task, output='array') + task = "regression" + _, _, _, _, dX, dy, dw, dg = _create_data(objective=task, output="array") dask_factory = task_to_dask_factory[task] - dask_model = dask_factory( - client=client, - tree_learner=tree_learner, - time_out=5, - n_estimators=10, - num_leaves=15 - ) + dask_model = dask_factory(client=client, tree_learner=tree_learner, time_out=5, n_estimators=10, num_leaves=15) dask_model.fit(dX, dy, sample_weight=dw, group=dg) assert dask_model.fitted_ - assert dask_model.get_params()['tree_learner'] == tree_learner + assert dask_model.get_params()["tree_learner"] == tree_learner def test_error_on_feature_parallel_tree_learner(cluster): @@ -1444,39 +1211,30 @@ def test_error_on_feature_parallel_tree_learner(cluster): _ = wait([X, y]) client.rebalance() dask_regressor = lgb.DaskLGBMRegressor( - client=client, - time_out=5, - tree_learner='feature_parallel', - n_estimators=1, - num_leaves=2 + client=client, time_out=5, tree_learner="feature_parallel", n_estimators=1, num_leaves=2 ) - with pytest.raises(lgb.basic.LightGBMError, match='Do not support feature parallel in c api'): + with pytest.raises(lgb.basic.LightGBMError, match="Do not support feature parallel in c api"): dask_regressor = dask_regressor.fit(X, y) def test_errors(cluster): with Client(cluster) as client: + def f(part): - raise Exception('foo') + raise Exception("foo") df = dd.demo.make_timeseries() df = df.map_partitions(f, meta=df._meta) with pytest.raises(Exception) as info: - lgb.dask._train( - client=client, - data=df, - label=df.x, - params={}, - model_factory=lgb.LGBMClassifier - ) - assert 'foo' in str(info.value) + lgb.dask._train(client=client, data=df, label=df.x, params={}, model_factory=lgb.LGBMClassifier) + assert "foo" in str(info.value) -@pytest.mark.parametrize('task', tasks) -@pytest.mark.parametrize('output', data_output) +@pytest.mark.parametrize("task", tasks) +@pytest.mark.parametrize("output", data_output) def test_training_succeeds_even_if_some_workers_do_not_have_any_data(task, output, cluster_three_workers): - if task == 'ranking' and output == 'scipy_csr_matrix': - pytest.skip('LGBMRanker is not currently tested on sparse matrices') + if task == "ranking" and output == "scipy_csr_matrix": + pytest.skip("LGBMRanker is not currently tested on sparse matrices") with Client(cluster_three_workers) as client: _, y, _, _, dX, dy, dw, dg = _create_data( @@ -1489,7 +1247,7 @@ def test_training_succeeds_even_if_some_workers_do_not_have_any_data(task, outpu dask_model_factory = task_to_dask_factory[task] - workers = list(client.scheduler_info()['workers'].keys()) + workers = list(client.scheduler_info()["workers"].keys()) assert len(workers) == 3 first_two_workers = workers[:2] @@ -1506,33 +1264,28 @@ def test_training_succeeds_even_if_some_workers_do_not_have_any_data(task, outpu assert len(workers_with_data) == 2 params = { - 'time_out': 5, - 'random_state': 42, - 'num_leaves': 10, - 'n_estimators': 20, + "time_out": 5, + "random_state": 42, + "num_leaves": 10, + "n_estimators": 20, } - dask_model = dask_model_factory(tree='data', client=client, **params) + dask_model = dask_model_factory(tree="data", client=client, **params) dask_model.fit(dX, dy, group=dg, sample_weight=dw) dask_preds = dask_model.predict(dX).compute() - if task == 'regression': + if task == "regression": score = 
r2_score(y, dask_preds) - elif task.endswith('classification'): + elif task.endswith("classification"): score = accuracy_score(y, dask_preds) else: score = spearmanr(dask_preds, y).correlation assert score > 0.9 -@pytest.mark.parametrize('task', tasks) +@pytest.mark.parametrize("task", tasks) def test_network_params_not_required_but_respected_if_given(task, listen_port, cluster): with Client(cluster) as client: - _, _, _, _, dX, dy, _, dg = _create_data( - objective=task, - output='array', - chunk_size=10, - group=None - ) + _, _, _, _, dX, dy, _, dg = _create_data(objective=task, output="array", chunk_size=10, group=None) dask_model_factory = task_to_dask_factory[task] @@ -1547,11 +1300,11 @@ def test_network_params_not_required_but_respected_if_given(task, listen_port, c dask_model1.fit(dX, dy, group=dg) assert dask_model1.fitted_ params = dask_model1.get_params() - assert 'local_listen_port' not in params - assert 'machines' not in params + assert "local_listen_port" not in params + assert "machines" not in params # model 2 - machines given - workers = list(client.scheduler_info()['workers']) + workers = list(client.scheduler_info()["workers"]) workers_hostname = _get_workers_hostname(cluster) remote_sockets, open_ports = lgb.dask._assign_open_ports_to_workers(client, workers) for s in remote_sockets.values(): @@ -1559,58 +1312,43 @@ def test_network_params_not_required_but_respected_if_given(task, listen_port, c dask_model2 = dask_model_factory( n_estimators=5, num_leaves=5, - machines=",".join([ - f"{workers_hostname}:{port}" - for port in open_ports.values() - ]), + machines=",".join([f"{workers_hostname}:{port}" for port in open_ports.values()]), ) dask_model2.fit(dX, dy, group=dg) assert dask_model2.fitted_ params = dask_model2.get_params() - assert 'local_listen_port' not in params - assert 'machines' in params + assert "local_listen_port" not in params + assert "machines" in params # model 3 - local_listen_port given # training should fail because LightGBM will try to use the same # port for multiple worker processes on the same machine - dask_model3 = dask_model_factory( - n_estimators=5, - num_leaves=5, - local_listen_port=listen_port - ) + dask_model3 = dask_model_factory(n_estimators=5, num_leaves=5, local_listen_port=listen_port) error_msg = "has multiple Dask worker processes running on it" with pytest.raises(lgb.basic.LightGBMError, match=error_msg): dask_model3.fit(dX, dy, group=dg) -@pytest.mark.parametrize('task', tasks) +@pytest.mark.parametrize("task", tasks) def test_machines_should_be_used_if_provided(task, cluster): pytest.skip("skipping due to timeout issues discussed in https://github.com/microsoft/LightGBM/issues/5390") with Client(cluster) as client: - _, _, _, _, dX, dy, _, dg = _create_data( - objective=task, - output='array', - chunk_size=10, - group=None - ) + _, _, _, _, dX, dy, _, dg = _create_data(objective=task, output="array", chunk_size=10, group=None) dask_model_factory = task_to_dask_factory[task] # rebalance data to be sure that each worker has a piece of the data client.rebalance() - n_workers = len(client.scheduler_info()['workers']) + n_workers = len(client.scheduler_info()["workers"]) assert n_workers > 1 workers_hostname = _get_workers_hostname(cluster) open_ports = lgb.dask._find_n_open_ports(n_workers) dask_model = dask_model_factory( n_estimators=5, num_leaves=5, - machines=",".join([ - f"{workers_hostname}:{port}" - for port in open_ports - ]), + machines=",".join([f"{workers_hostname}:{port}" for port in open_ports]), ) # test that 
"machines" is actually respected by creating a socket that uses @@ -1626,12 +1364,7 @@ def test_machines_should_be_used_if_provided(task, cluster): # an informative error should be raised if "machines" has duplicates one_open_port = lgb.dask._find_n_open_ports(1) - dask_model.set_params( - machines=",".join([ - f"127.0.0.1:{one_open_port}" - for _ in range(n_workers) - ]) - ) + dask_model.set_params(machines=",".join([f"127.0.0.1:{one_open_port}" for _ in range(n_workers)])) with pytest.raises(ValueError, match="Found duplicates in 'machines'"): dask_model.fit(dX, dy, group=dg) @@ -1641,8 +1374,8 @@ def test_machines_should_be_used_if_provided(task, cluster): [ (lgb.DaskLGBMClassifier, lgb.LGBMClassifier), (lgb.DaskLGBMRegressor, lgb.LGBMRegressor), - (lgb.DaskLGBMRanker, lgb.LGBMRanker) - ] + (lgb.DaskLGBMRanker, lgb.LGBMRanker), + ], ) def test_dask_classes_and_sklearn_equivalents_have_identical_constructors_except_client_arg(classes): dask_spec = inspect.getfullargspec(classes[0]) @@ -1655,7 +1388,7 @@ def test_dask_classes_and_sklearn_equivalents_have_identical_constructors_except # "client" should be the only different, and the final argument assert dask_spec.args[:-1] == sklearn_spec.args assert dask_spec.defaults[:-1] == sklearn_spec.defaults - assert dask_spec.args[-1] == 'client' + assert dask_spec.args[-1] == "client" assert dask_spec.defaults[-1] is None @@ -1668,18 +1401,18 @@ def test_dask_classes_and_sklearn_equivalents_have_identical_constructors_except (lgb.DaskLGBMRegressor.fit, lgb.LGBMRegressor.fit), (lgb.DaskLGBMRegressor.predict, lgb.LGBMRegressor.predict), (lgb.DaskLGBMRanker.fit, lgb.LGBMRanker.fit), - (lgb.DaskLGBMRanker.predict, lgb.LGBMRanker.predict) - ] + (lgb.DaskLGBMRanker.predict, lgb.LGBMRanker.predict), + ], ) def test_dask_methods_and_sklearn_equivalents_have_similar_signatures(methods): dask_spec = inspect.getfullargspec(methods[0]) sklearn_spec = inspect.getfullargspec(methods[1]) dask_params = inspect.signature(methods[0]).parameters sklearn_params = inspect.signature(methods[1]).parameters - assert dask_spec.args == sklearn_spec.args[:len(dask_spec.args)] + assert dask_spec.args == sklearn_spec.args[: len(dask_spec.args)] assert dask_spec.varargs == sklearn_spec.varargs if sklearn_spec.varkw: - assert dask_spec.varkw == sklearn_spec.varkw[:len(dask_spec.varkw)] + assert dask_spec.varkw == sklearn_spec.varkw[: len(dask_spec.varkw)] assert dask_spec.kwonlyargs == sklearn_spec.kwonlyargs assert dask_spec.kwonlydefaults == sklearn_spec.kwonlydefaults for param in dask_spec.args: @@ -1687,14 +1420,10 @@ def test_dask_methods_and_sklearn_equivalents_have_similar_signatures(methods): assert dask_params[param].default == sklearn_params[param].default, error_msg -@pytest.mark.parametrize('task', tasks) +@pytest.mark.parametrize("task", tasks) def test_training_succeeds_when_data_is_dataframe_and_label_is_column_array(task, cluster): with Client(cluster): - _, _, _, _, dX, dy, dw, dg = _create_data( - objective=task, - output='dataframe', - group=None - ) + _, _, _, _, dX, dy, dw, dg = _create_data(objective=task, output="dataframe", group=None) model_factory = task_to_dask_factory[task] @@ -1702,58 +1431,41 @@ def test_training_succeeds_when_data_is_dataframe_and_label_is_column_array(task dy_col_array = dy.reshape(-1, 1) assert len(dy_col_array.shape) == 2 and dy_col_array.shape[1] == 1 - params = { - 'n_estimators': 1, - 'num_leaves': 3, - 'random_state': 0, - 'time_out': 5 - } + params = {"n_estimators": 1, "num_leaves": 3, "random_state": 0, "time_out": 5} 
model = model_factory(**params) model.fit(dX, dy_col_array, sample_weight=dw, group=dg) assert model.fitted_ -@pytest.mark.parametrize('task', tasks) -@pytest.mark.parametrize('output', data_output) +@pytest.mark.parametrize("task", tasks) +@pytest.mark.parametrize("output", data_output) def test_init_score(task, output, cluster): - if task == 'ranking' and output == 'scipy_csr_matrix': - pytest.skip('LGBMRanker is not currently tested on sparse matrices') + if task == "ranking" and output == "scipy_csr_matrix": + pytest.skip("LGBMRanker is not currently tested on sparse matrices") with Client(cluster) as client: - _, _, _, _, dX, dy, dw, dg = _create_data( - objective=task, - output=output, - group=None - ) + _, _, _, _, dX, dy, dw, dg = _create_data(objective=task, output=output, group=None) model_factory = task_to_dask_factory[task] - params = { - 'n_estimators': 1, - 'num_leaves': 2, - 'time_out': 5 - } + params = {"n_estimators": 1, "num_leaves": 2, "time_out": 5} init_score = random.random() size_factor = 1 - if task == 'multiclass-classification': + if task == "multiclass-classification": size_factor = 3 # number of classes - if output.startswith('dataframe'): + if output.startswith("dataframe"): init_scores = dy.map_partitions(lambda x: pd.DataFrame([[init_score] * size_factor] * x.size)) else: init_scores = dy.map_blocks(lambda x: np.full((x.size, size_factor), init_score)) model = model_factory(client=client, **params) model.fit(dX, dy, sample_weight=dw, init_score=init_scores, group=dg) # value of the root node is 0 when init_score is set - assert model.booster_.trees_to_dataframe()['value'][0] == 0 + assert model.booster_.trees_to_dataframe()["value"][0] == 0 def sklearn_checks_to_run(): - check_names = [ - "check_estimator_get_tags_default_keys", - "check_get_params_invariance", - "check_set_params" - ] + check_names = ["check_estimator_get_tags_default_keys", "check_get_params_invariance", "check_set_params"] for check_name in check_names: check_func = getattr(sklearn_checks, check_name, None) if check_func: @@ -1782,79 +1494,58 @@ def test_parameters_default_constructible(estimator): sklearn_checks.check_parameters_default_constructible(name, Estimator) -@pytest.mark.parametrize('task', tasks) -@pytest.mark.parametrize('output', data_output) +@pytest.mark.parametrize("task", tasks) +@pytest.mark.parametrize("output", data_output) def test_predict_with_raw_score(task, output, cluster): - if task == 'ranking' and output == 'scipy_csr_matrix': - pytest.skip('LGBMRanker is not currently tested on sparse matrices') + if task == "ranking" and output == "scipy_csr_matrix": + pytest.skip("LGBMRanker is not currently tested on sparse matrices") with Client(cluster) as client: - _, _, _, _, dX, dy, _, dg = _create_data( - objective=task, - output=output, - group=None - ) + _, _, _, _, dX, dy, _, dg = _create_data(objective=task, output=output, group=None) model_factory = task_to_dask_factory[task] - params = { - 'client': client, - 'n_estimators': 1, - 'num_leaves': 2, - 'time_out': 5, - 'min_sum_hessian': 0 - } + params = {"client": client, "n_estimators": 1, "num_leaves": 2, "time_out": 5, "min_sum_hessian": 0} model = model_factory(**params) model.fit(dX, dy, group=dg) raw_predictions = model.predict(dX, raw_score=True).compute() trees_df = model.booster_.trees_to_dataframe() leaves_df = trees_df[trees_df.node_depth == 2] - if task == 'multiclass-classification': + if task == "multiclass-classification": for i in range(model.n_classes_): class_df = leaves_df[leaves_df.tree_index == 
i] - assert set(raw_predictions[:, i]) == set(class_df['value']) + assert set(raw_predictions[:, i]) == set(class_df["value"]) else: - assert set(raw_predictions) == set(leaves_df['value']) + assert set(raw_predictions) == set(leaves_df["value"]) - if task.endswith('classification'): + if task.endswith("classification"): pred_proba_raw = model.predict_proba(dX, raw_score=True).compute() assert_eq(raw_predictions, pred_proba_raw) def test_distributed_quantized_training(cluster): with Client(cluster) as client: - X, y, w, _, dX, dy, dw, _ = _create_data( - objective='regression', - output='array' - ) + X, y, w, _, dX, dy, dw, _ = _create_data(objective="regression", output="array") np.savetxt("data_dask.csv", np.hstack([np.array([y]).T, X]), fmt="%f,%f,%f,%f,%f") params = { - "boosting_type": 'gbdt', + "boosting_type": "gbdt", "n_estimators": 50, "num_leaves": 31, - 'use_quantized_grad': True, - 'num_grad_quant_bins': 30, - 'quant_train_renew_leaf': True, - 'verbose': -1, + "use_quantized_grad": True, + "num_grad_quant_bins": 30, + "quant_train_renew_leaf": True, + "verbose": -1, } - quant_dask_classifier = lgb.DaskLGBMRegressor( - client=client, - time_out=5, - **params - ) + quant_dask_classifier = lgb.DaskLGBMRegressor(client=client, time_out=5, **params) quant_dask_classifier = quant_dask_classifier.fit(dX, dy, sample_weight=dw) quant_p1 = quant_dask_classifier.predict(dX) quant_rmse = np.sqrt(np.mean((quant_p1.compute() - y) ** 2)) params["use_quantized_grad"] = False - dask_classifier = lgb.DaskLGBMRegressor( - client=client, - time_out=5, - **params - ) + dask_classifier = lgb.DaskLGBMRegressor(client=client, time_out=5, **params) dask_classifier = dask_classifier.fit(dX, dy, sample_weight=dw) p1 = dask_classifier.predict(dX) rmse = np.sqrt(np.mean((p1.compute() - y) ** 2)) diff --git a/tests/python_package_test/test_dual.py b/tests/python_package_test/test_dual.py index 75c54c83e..5aa7d9ec1 100644 --- a/tests/python_package_test/test_dual.py +++ b/tests/python_package_test/test_dual.py @@ -28,7 +28,7 @@ def test_cpu_and_gpu_work(): params_gpu = params_cpu.copy() params_gpu["device"] = "gpu" # Double-precision floats are only supported on x86_64 with PoCL - params_gpu["gpu_use_dp"] = (platform.machine() == "x86_64") + params_gpu["gpu_use_dp"] = platform.machine() == "x86_64" gpu_bst = lgb.train(params_gpu, data, num_boost_round=10) gpu_score = log_loss(y, gpu_bst.predict(X)) diff --git a/tests/python_package_test/test_engine.py b/tests/python_package_test/test_engine.py index e355e5ab0..ccde38977 100644 --- a/tests/python_package_test/test_engine.py +++ b/tests/python_package_test/test_engine.py @@ -22,9 +22,19 @@ from sklearn.model_selection import GroupKFold, TimeSeriesSplit, train_test_spli import lightgbm as lgb from lightgbm.compat import PANDAS_INSTALLED, pd_DataFrame, pd_Series -from .utils import (SERIALIZERS, dummy_obj, load_breast_cancer, load_digits, load_iris, logistic_sigmoid, - make_synthetic_regression, mse_obj, pickle_and_unpickle_object, sklearn_multiclass_custom_objective, - softmax) +from .utils import ( + SERIALIZERS, + dummy_obj, + load_breast_cancer, + load_digits, + load_iris, + logistic_sigmoid, + make_synthetic_regression, + mse_obj, + pickle_and_unpickle_object, + sklearn_multiclass_custom_objective, + softmax, +) decreasing_generator = itertools.count(0, -1) @@ -49,11 +59,11 @@ def top_k_error(y_true, y_pred, k): def constant_metric(preds, train_data): - return ('error', 0.0, False) + return ("error", 0.0, False) def decreasing_metric(preds, train_data): - 
return ('decreasing_metric', next(decreasing_generator), False) + return ("decreasing_metric", next(decreasing_generator), False) def categorize(continuous_x): @@ -64,87 +74,71 @@ def test_binary(): X, y = load_breast_cancer(return_X_y=True) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) params = { - 'objective': 'binary', - 'metric': 'binary_logloss', - 'verbose': -1, - 'num_iteration': 50 # test num_iteration in dict here + "objective": "binary", + "metric": "binary_logloss", + "verbose": -1, + "num_iteration": 50, # test num_iteration in dict here } lgb_train = lgb.Dataset(X_train, y_train) lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train) evals_result = {} gbm = lgb.train( - params, - lgb_train, - num_boost_round=20, - valid_sets=lgb_eval, - callbacks=[lgb.record_evaluation(evals_result)] + params, lgb_train, num_boost_round=20, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)] ) ret = log_loss(y_test, gbm.predict(X_test)) assert ret < 0.14 - assert len(evals_result['valid_0']['binary_logloss']) == 50 - assert evals_result['valid_0']['binary_logloss'][-1] == pytest.approx(ret) + assert len(evals_result["valid_0"]["binary_logloss"]) == 50 + assert evals_result["valid_0"]["binary_logloss"][-1] == pytest.approx(ret) def test_rf(): X, y = load_breast_cancer(return_X_y=True) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) params = { - 'boosting_type': 'rf', - 'objective': 'binary', - 'bagging_freq': 1, - 'bagging_fraction': 0.5, - 'feature_fraction': 0.5, - 'num_leaves': 50, - 'metric': 'binary_logloss', - 'verbose': -1 + "boosting_type": "rf", + "objective": "binary", + "bagging_freq": 1, + "bagging_fraction": 0.5, + "feature_fraction": 0.5, + "num_leaves": 50, + "metric": "binary_logloss", + "verbose": -1, } lgb_train = lgb.Dataset(X_train, y_train) lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train) evals_result = {} gbm = lgb.train( - params, - lgb_train, - num_boost_round=50, - valid_sets=lgb_eval, - callbacks=[lgb.record_evaluation(evals_result)] + params, lgb_train, num_boost_round=50, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)] ) ret = log_loss(y_test, gbm.predict(X_test)) assert ret < 0.19 - assert evals_result['valid_0']['binary_logloss'][-1] == pytest.approx(ret) + assert evals_result["valid_0"]["binary_logloss"][-1] == pytest.approx(ret) -@pytest.mark.parametrize('objective', ['regression', 'regression_l1', 'huber', 'fair', 'poisson', 'quantile']) +@pytest.mark.parametrize("objective", ["regression", "regression_l1", "huber", "fair", "poisson", "quantile"]) def test_regression(objective): X, y = make_synthetic_regression() y = np.abs(y) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) - params = { - 'objective': objective, - 'metric': 'l2', - 'verbose': -1 - } + params = {"objective": objective, "metric": "l2", "verbose": -1} lgb_train = lgb.Dataset(X_train, y_train) lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train) evals_result = {} gbm = lgb.train( - params, - lgb_train, - num_boost_round=50, - valid_sets=lgb_eval, - callbacks=[lgb.record_evaluation(evals_result)] + params, lgb_train, num_boost_round=50, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)] ) ret = mean_squared_error(y_test, gbm.predict(X_test)) - if objective == 'huber': + if objective == "huber": assert ret < 430 - elif objective == 'fair': + elif objective == "fair": assert ret < 296 - elif 
objective == 'poisson': + elif objective == "poisson": assert ret < 193 - elif objective == 'quantile': + elif objective == "quantile": assert ret < 1311 else: assert ret < 343 - assert evals_result['valid_0']['l2'][-1] == pytest.approx(ret) + assert evals_result["valid_0"]["l2"][-1] == pytest.approx(ret) def test_missing_value_handle(): @@ -157,22 +151,14 @@ def test_missing_value_handle(): lgb_train = lgb.Dataset(X_train, y_train) lgb_eval = lgb.Dataset(X_train, y_train) - params = { - 'metric': 'l2', - 'verbose': -1, - 'boost_from_average': False - } + params = {"metric": "l2", "verbose": -1, "boost_from_average": False} evals_result = {} gbm = lgb.train( - params, - lgb_train, - num_boost_round=20, - valid_sets=lgb_eval, - callbacks=[lgb.record_evaluation(evals_result)] + params, lgb_train, num_boost_round=20, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)] ) ret = mean_squared_error(y_train, gbm.predict(X_train)) assert ret < 0.005 - assert evals_result['valid_0']['l2'][-1] == pytest.approx(ret) + assert evals_result["valid_0"]["l2"][-1] == pytest.approx(ret) def test_missing_value_handle_more_na(): @@ -185,22 +171,14 @@ def test_missing_value_handle_more_na(): lgb_train = lgb.Dataset(X_train, y_train) lgb_eval = lgb.Dataset(X_train, y_train) - params = { - 'metric': 'l2', - 'verbose': -1, - 'boost_from_average': False - } + params = {"metric": "l2", "verbose": -1, "boost_from_average": False} evals_result = {} gbm = lgb.train( - params, - lgb_train, - num_boost_round=20, - valid_sets=lgb_eval, - callbacks=[lgb.record_evaluation(evals_result)] + params, lgb_train, num_boost_round=20, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)] ) ret = mean_squared_error(y_train, gbm.predict(X_train)) assert ret < 0.005 - assert evals_result['valid_0']['l2'][-1] == pytest.approx(ret) + assert evals_result["valid_0"]["l2"][-1] == pytest.approx(ret) def test_missing_value_handle_na(): @@ -213,29 +191,25 @@ def test_missing_value_handle_na(): lgb_eval = lgb.Dataset(X_train, y_train) params = { - 'objective': 'regression', - 'metric': 'auc', - 'verbose': -1, - 'boost_from_average': False, - 'min_data': 1, - 'num_leaves': 2, - 'learning_rate': 1, - 'min_data_in_bin': 1, - 'zero_as_missing': False + "objective": "regression", + "metric": "auc", + "verbose": -1, + "boost_from_average": False, + "min_data": 1, + "num_leaves": 2, + "learning_rate": 1, + "min_data_in_bin": 1, + "zero_as_missing": False, } evals_result = {} gbm = lgb.train( - params, - lgb_train, - num_boost_round=1, - valid_sets=lgb_eval, - callbacks=[lgb.record_evaluation(evals_result)] + params, lgb_train, num_boost_round=1, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)] ) pred = gbm.predict(X_train) np.testing.assert_allclose(pred, y) ret = roc_auc_score(y_train, pred) assert ret > 0.999 - assert evals_result['valid_0']['auc'][-1] == pytest.approx(ret) + assert evals_result["valid_0"]["auc"][-1] == pytest.approx(ret) def test_missing_value_handle_zero(): @@ -248,29 +222,25 @@ def test_missing_value_handle_zero(): lgb_eval = lgb.Dataset(X_train, y_train) params = { - 'objective': 'regression', - 'metric': 'auc', - 'verbose': -1, - 'boost_from_average': False, - 'min_data': 1, - 'num_leaves': 2, - 'learning_rate': 1, - 'min_data_in_bin': 1, - 'zero_as_missing': True + "objective": "regression", + "metric": "auc", + "verbose": -1, + "boost_from_average": False, + "min_data": 1, + "num_leaves": 2, + "learning_rate": 1, + "min_data_in_bin": 1, + "zero_as_missing": True, } 
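The missing-value tests in this file all lean on the same record_evaluation pattern: the last value the callback recorded for a metric should match that metric recomputed from the final model's predictions. A small self-contained sketch of that pattern, on synthetic data with illustrative parameters:

    import numpy as np
    import lightgbm as lgb
    from sklearn.datasets import make_regression
    from sklearn.metrics import mean_squared_error

    X, y = make_regression(n_samples=500, n_features=5, random_state=0)
    lgb_train = lgb.Dataset(X, y)
    lgb_eval = lgb.Dataset(X, y, reference=lgb_train)
    evals_result = {}
    booster = lgb.train(
        {"objective": "regression", "metric": "l2", "verbose": -1},
        lgb_train,
        num_boost_round=20,
        valid_sets=[lgb_eval],
        callbacks=[lgb.record_evaluation(evals_result)],
    )
    # last recorded l2 equals the l2 recomputed from the final model's predictions
    assert np.isclose(evals_result["valid_0"]["l2"][-1], mean_squared_error(y, booster.predict(X)))
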
evals_result = {} gbm = lgb.train( - params, - lgb_train, - num_boost_round=1, - valid_sets=lgb_eval, - callbacks=[lgb.record_evaluation(evals_result)] + params, lgb_train, num_boost_round=1, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)] ) pred = gbm.predict(X_train) np.testing.assert_allclose(pred, y) ret = roc_auc_score(y_train, pred) assert ret > 0.999 - assert evals_result['valid_0']['auc'][-1] == pytest.approx(ret) + assert evals_result["valid_0"]["auc"][-1] == pytest.approx(ret) def test_missing_value_handle_none(): @@ -283,30 +253,26 @@ def test_missing_value_handle_none(): lgb_eval = lgb.Dataset(X_train, y_train) params = { - 'objective': 'regression', - 'metric': 'auc', - 'verbose': -1, - 'boost_from_average': False, - 'min_data': 1, - 'num_leaves': 2, - 'learning_rate': 1, - 'min_data_in_bin': 1, - 'use_missing': False + "objective": "regression", + "metric": "auc", + "verbose": -1, + "boost_from_average": False, + "min_data": 1, + "num_leaves": 2, + "learning_rate": 1, + "min_data_in_bin": 1, + "use_missing": False, } evals_result = {} gbm = lgb.train( - params, - lgb_train, - num_boost_round=1, - valid_sets=lgb_eval, - callbacks=[lgb.record_evaluation(evals_result)] + params, lgb_train, num_boost_round=1, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)] ) pred = gbm.predict(X_train) assert pred[0] == pytest.approx(pred[1]) assert pred[-1] == pytest.approx(pred[0]) ret = roc_auc_score(y_train, pred) assert ret > 0.83 - assert evals_result['valid_0']['auc'][-1] == pytest.approx(ret) + assert evals_result["valid_0"]["auc"][-1] == pytest.approx(ret) def test_categorical_handle(): @@ -319,34 +285,30 @@ def test_categorical_handle(): lgb_eval = lgb.Dataset(X_train, y_train) params = { - 'objective': 'regression', - 'metric': 'auc', - 'verbose': -1, - 'boost_from_average': False, - 'min_data': 1, - 'num_leaves': 2, - 'learning_rate': 1, - 'min_data_in_bin': 1, - 'min_data_per_group': 1, - 'cat_smooth': 1, - 'cat_l2': 0, - 'max_cat_to_onehot': 1, - 'zero_as_missing': True, - 'categorical_column': 0 + "objective": "regression", + "metric": "auc", + "verbose": -1, + "boost_from_average": False, + "min_data": 1, + "num_leaves": 2, + "learning_rate": 1, + "min_data_in_bin": 1, + "min_data_per_group": 1, + "cat_smooth": 1, + "cat_l2": 0, + "max_cat_to_onehot": 1, + "zero_as_missing": True, + "categorical_column": 0, } evals_result = {} gbm = lgb.train( - params, - lgb_train, - num_boost_round=1, - valid_sets=lgb_eval, - callbacks=[lgb.record_evaluation(evals_result)] + params, lgb_train, num_boost_round=1, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)] ) pred = gbm.predict(X_train) np.testing.assert_allclose(pred, y) ret = roc_auc_score(y_train, pred) assert ret > 0.999 - assert evals_result['valid_0']['auc'][-1] == pytest.approx(ret) + assert evals_result["valid_0"]["auc"][-1] == pytest.approx(ret) def test_categorical_handle_na(): @@ -359,34 +321,30 @@ def test_categorical_handle_na(): lgb_eval = lgb.Dataset(X_train, y_train) params = { - 'objective': 'regression', - 'metric': 'auc', - 'verbose': -1, - 'boost_from_average': False, - 'min_data': 1, - 'num_leaves': 2, - 'learning_rate': 1, - 'min_data_in_bin': 1, - 'min_data_per_group': 1, - 'cat_smooth': 1, - 'cat_l2': 0, - 'max_cat_to_onehot': 1, - 'zero_as_missing': False, - 'categorical_column': 0 + "objective": "regression", + "metric": "auc", + "verbose": -1, + "boost_from_average": False, + "min_data": 1, + "num_leaves": 2, + "learning_rate": 1, + "min_data_in_bin": 1, 
+ "min_data_per_group": 1, + "cat_smooth": 1, + "cat_l2": 0, + "max_cat_to_onehot": 1, + "zero_as_missing": False, + "categorical_column": 0, } evals_result = {} gbm = lgb.train( - params, - lgb_train, - num_boost_round=1, - valid_sets=lgb_eval, - callbacks=[lgb.record_evaluation(evals_result)] + params, lgb_train, num_boost_round=1, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)] ) pred = gbm.predict(X_train) np.testing.assert_allclose(pred, y) ret = roc_auc_score(y_train, pred) assert ret > 0.999 - assert evals_result['valid_0']['auc'][-1] == pytest.approx(ret) + assert evals_result["valid_0"]["auc"][-1] == pytest.approx(ret) def test_categorical_non_zero_inputs(): @@ -399,107 +357,82 @@ def test_categorical_non_zero_inputs(): lgb_eval = lgb.Dataset(X_train, y_train) params = { - 'objective': 'regression', - 'metric': 'auc', - 'verbose': -1, - 'boost_from_average': False, - 'min_data': 1, - 'num_leaves': 2, - 'learning_rate': 1, - 'min_data_in_bin': 1, - 'min_data_per_group': 1, - 'cat_smooth': 1, - 'cat_l2': 0, - 'max_cat_to_onehot': 1, - 'zero_as_missing': False, - 'categorical_column': 0 + "objective": "regression", + "metric": "auc", + "verbose": -1, + "boost_from_average": False, + "min_data": 1, + "num_leaves": 2, + "learning_rate": 1, + "min_data_in_bin": 1, + "min_data_per_group": 1, + "cat_smooth": 1, + "cat_l2": 0, + "max_cat_to_onehot": 1, + "zero_as_missing": False, + "categorical_column": 0, } evals_result = {} gbm = lgb.train( - params, - lgb_train, - num_boost_round=1, - valid_sets=lgb_eval, - callbacks=[lgb.record_evaluation(evals_result)] + params, lgb_train, num_boost_round=1, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)] ) pred = gbm.predict(X_train) np.testing.assert_allclose(pred, y) ret = roc_auc_score(y_train, pred) assert ret > 0.999 - assert evals_result['valid_0']['auc'][-1] == pytest.approx(ret) + assert evals_result["valid_0"]["auc"][-1] == pytest.approx(ret) def test_multiclass(): X, y = load_digits(n_class=10, return_X_y=True) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) - params = { - 'objective': 'multiclass', - 'metric': 'multi_logloss', - 'num_class': 10, - 'verbose': -1 - } + params = {"objective": "multiclass", "metric": "multi_logloss", "num_class": 10, "verbose": -1} lgb_train = lgb.Dataset(X_train, y_train, params=params) lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, params=params) evals_result = {} gbm = lgb.train( - params, - lgb_train, - num_boost_round=50, - valid_sets=lgb_eval, - callbacks=[lgb.record_evaluation(evals_result)] + params, lgb_train, num_boost_round=50, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)] ) ret = multi_logloss(y_test, gbm.predict(X_test)) assert ret < 0.16 - assert evals_result['valid_0']['multi_logloss'][-1] == pytest.approx(ret) + assert evals_result["valid_0"]["multi_logloss"][-1] == pytest.approx(ret) def test_multiclass_rf(): X, y = load_digits(n_class=10, return_X_y=True) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) params = { - 'boosting_type': 'rf', - 'objective': 'multiclass', - 'metric': 'multi_logloss', - 'bagging_freq': 1, - 'bagging_fraction': 0.6, - 'feature_fraction': 0.6, - 'num_class': 10, - 'num_leaves': 50, - 'min_data': 1, - 'verbose': -1, - 'gpu_use_dp': True + "boosting_type": "rf", + "objective": "multiclass", + "metric": "multi_logloss", + "bagging_freq": 1, + "bagging_fraction": 0.6, + "feature_fraction": 0.6, + "num_class": 10, 
+ "num_leaves": 50, + "min_data": 1, + "verbose": -1, + "gpu_use_dp": True, } lgb_train = lgb.Dataset(X_train, y_train, params=params) lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, params=params) evals_result = {} gbm = lgb.train( - params, - lgb_train, - num_boost_round=50, - valid_sets=lgb_eval, - callbacks=[lgb.record_evaluation(evals_result)] + params, lgb_train, num_boost_round=50, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)] ) ret = multi_logloss(y_test, gbm.predict(X_test)) assert ret < 0.23 - assert evals_result['valid_0']['multi_logloss'][-1] == pytest.approx(ret) + assert evals_result["valid_0"]["multi_logloss"][-1] == pytest.approx(ret) def test_multiclass_prediction_early_stopping(): X, y = load_digits(n_class=10, return_X_y=True) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) - params = { - 'objective': 'multiclass', - 'metric': 'multi_logloss', - 'num_class': 10, - 'verbose': -1 - } + params = {"objective": "multiclass", "metric": "multi_logloss", "num_class": 10, "verbose": -1} lgb_train = lgb.Dataset(X_train, y_train, params=params) - gbm = lgb.train(params, lgb_train, - num_boost_round=50) + gbm = lgb.train(params, lgb_train, num_boost_round=50) - pred_parameter = {"pred_early_stop": True, - "pred_early_stop_freq": 5, - "pred_early_stop_margin": 1.5} + pred_parameter = {"pred_early_stop": True, "pred_early_stop_freq": 5, "pred_early_stop_margin": 1.5} ret = multi_logloss(y_test, gbm.predict(X_test, **pred_parameter)) assert ret < 0.8 assert ret > 0.6 # loss will be higher than when evaluating the full model @@ -511,136 +444,96 @@ def test_multiclass_prediction_early_stopping(): def test_multi_class_error(): X, y = load_digits(n_class=10, return_X_y=True) - params = {'objective': 'multiclass', 'num_classes': 10, 'metric': 'multi_error', - 'num_leaves': 4, 'verbose': -1} + params = {"objective": "multiclass", "num_classes": 10, "metric": "multi_error", "num_leaves": 4, "verbose": -1} lgb_data = lgb.Dataset(X, label=y) est = lgb.train(params, lgb_data, num_boost_round=10) predict_default = est.predict(X) results = {} est = lgb.train( - dict( - params, - multi_error_top_k=1 - ), + dict(params, multi_error_top_k=1), lgb_data, num_boost_round=10, valid_sets=[lgb_data], - callbacks=[lgb.record_evaluation(results)] + callbacks=[lgb.record_evaluation(results)], ) predict_1 = est.predict(X) # check that default gives same result as k = 1 np.testing.assert_allclose(predict_1, predict_default) # check against independent calculation for k = 1 err = top_k_error(y, predict_1, 1) - assert results['training']['multi_error'][-1] == pytest.approx(err) + assert results["training"]["multi_error"][-1] == pytest.approx(err) # check against independent calculation for k = 2 results = {} est = lgb.train( - dict( - params, - multi_error_top_k=2 - ), + dict(params, multi_error_top_k=2), lgb_data, num_boost_round=10, valid_sets=[lgb_data], - callbacks=[lgb.record_evaluation(results)] + callbacks=[lgb.record_evaluation(results)], ) predict_2 = est.predict(X) err = top_k_error(y, predict_2, 2) - assert results['training']['multi_error@2'][-1] == pytest.approx(err) + assert results["training"]["multi_error@2"][-1] == pytest.approx(err) # check against independent calculation for k = 10 results = {} est = lgb.train( - dict( - params, - multi_error_top_k=10 - ), + dict(params, multi_error_top_k=10), lgb_data, num_boost_round=10, valid_sets=[lgb_data], - callbacks=[lgb.record_evaluation(results)] + 
callbacks=[lgb.record_evaluation(results)], ) predict_3 = est.predict(X) err = top_k_error(y, predict_3, 10) - assert results['training']['multi_error@10'][-1] == pytest.approx(err) + assert results["training"]["multi_error@10"][-1] == pytest.approx(err) # check cases where predictions are equal X = np.array([[0, 0], [0, 0]]) y = np.array([0, 1]) lgb_data = lgb.Dataset(X, label=y) - params['num_classes'] = 2 + params["num_classes"] = 2 + results = {} + lgb.train(params, lgb_data, num_boost_round=10, valid_sets=[lgb_data], callbacks=[lgb.record_evaluation(results)]) + assert results["training"]["multi_error"][-1] == pytest.approx(1) results = {} lgb.train( - params, + dict(params, multi_error_top_k=2), lgb_data, num_boost_round=10, valid_sets=[lgb_data], - callbacks=[lgb.record_evaluation(results)] + callbacks=[lgb.record_evaluation(results)], ) - assert results['training']['multi_error'][-1] == pytest.approx(1) - results = {} - lgb.train( - dict( - params, - multi_error_top_k=2 - ), - lgb_data, - num_boost_round=10, - valid_sets=[lgb_data], - callbacks=[lgb.record_evaluation(results)] - ) - assert results['training']['multi_error@2'][-1] == pytest.approx(0) + assert results["training"]["multi_error@2"][-1] == pytest.approx(0) -@pytest.mark.skipif(getenv('TASK', '') == 'cuda', reason='Skip due to differences in implementation details of CUDA version') +@pytest.mark.skipif( + getenv("TASK", "") == "cuda", reason="Skip due to differences in implementation details of CUDA version" +) def test_auc_mu(): # should give same result as binary auc for 2 classes X, y = load_digits(n_class=10, return_X_y=True) y_new = np.zeros((len(y))) y_new[y != 0] = 1 lgb_X = lgb.Dataset(X, label=y_new) - params = {'objective': 'multiclass', - 'metric': 'auc_mu', - 'verbose': -1, - 'num_classes': 2, - 'seed': 0} + params = {"objective": "multiclass", "metric": "auc_mu", "verbose": -1, "num_classes": 2, "seed": 0} results_auc_mu = {} - lgb.train( - params, - lgb_X, - num_boost_round=10, - valid_sets=[lgb_X], - callbacks=[lgb.record_evaluation(results_auc_mu)] - ) - params = {'objective': 'binary', - 'metric': 'auc', - 'verbose': -1, - 'seed': 0} + lgb.train(params, lgb_X, num_boost_round=10, valid_sets=[lgb_X], callbacks=[lgb.record_evaluation(results_auc_mu)]) + params = {"objective": "binary", "metric": "auc", "verbose": -1, "seed": 0} results_auc = {} - lgb.train( - params, - lgb_X, - num_boost_round=10, - valid_sets=[lgb_X], - callbacks=[lgb.record_evaluation(results_auc)] - ) - np.testing.assert_allclose(results_auc_mu['training']['auc_mu'], results_auc['training']['auc']) + lgb.train(params, lgb_X, num_boost_round=10, valid_sets=[lgb_X], callbacks=[lgb.record_evaluation(results_auc)]) + np.testing.assert_allclose(results_auc_mu["training"]["auc_mu"], results_auc["training"]["auc"]) # test the case where all predictions are equal lgb_X = lgb.Dataset(X[:10], label=y_new[:10]) - params = {'objective': 'multiclass', - 'metric': 'auc_mu', - 'verbose': -1, - 'num_classes': 2, - 'min_data_in_leaf': 20, - 'seed': 0} + params = { + "objective": "multiclass", + "metric": "auc_mu", + "verbose": -1, + "num_classes": 2, + "min_data_in_leaf": 20, + "seed": 0, + } results_auc_mu = {} - lgb.train( - params, - lgb_X, - num_boost_round=10, - valid_sets=[lgb_X], - callbacks=[lgb.record_evaluation(results_auc_mu)] - ) - assert results_auc_mu['training']['auc_mu'][-1] == pytest.approx(0.5) + lgb.train(params, lgb_X, num_boost_round=10, valid_sets=[lgb_X], callbacks=[lgb.record_evaluation(results_auc_mu)]) + assert 
results_auc_mu["training"]["auc_mu"][-1] == pytest.approx(0.5) # test that weighted data gives different auc_mu lgb_X = lgb.Dataset(X, label=y) lgb_X_weighted = lgb.Dataset(X, label=y, weight=np.abs(np.random.normal(size=y.shape))) @@ -648,21 +541,17 @@ def test_auc_mu(): results_weighted = {} params = dict(params, num_classes=10, num_leaves=5) lgb.train( - params, - lgb_X, - num_boost_round=10, - valid_sets=[lgb_X], - callbacks=[lgb.record_evaluation(results_unweighted)] + params, lgb_X, num_boost_round=10, valid_sets=[lgb_X], callbacks=[lgb.record_evaluation(results_unweighted)] ) lgb.train( params, lgb_X_weighted, num_boost_round=10, valid_sets=[lgb_X_weighted], - callbacks=[lgb.record_evaluation(results_weighted)] + callbacks=[lgb.record_evaluation(results_weighted)], ) - assert results_weighted['training']['auc_mu'][-1] < 1 - assert results_unweighted['training']['auc_mu'][-1] != results_weighted['training']['auc_mu'][-1] + assert results_weighted["training"]["auc_mu"][-1] < 1 + assert results_unweighted["training"]["auc_mu"][-1] != results_weighted["training"]["auc_mu"][-1] # test that equal data weights give same auc_mu as unweighted data lgb_X_weighted = lgb.Dataset(X, label=y, weight=np.ones(y.shape) * 0.5) lgb.train( @@ -670,76 +559,54 @@ def test_auc_mu(): lgb_X_weighted, num_boost_round=10, valid_sets=[lgb_X_weighted], - callbacks=[lgb.record_evaluation(results_weighted)] + callbacks=[lgb.record_evaluation(results_weighted)], + ) + assert results_unweighted["training"]["auc_mu"][-1] == pytest.approx( + results_weighted["training"]["auc_mu"][-1], abs=1e-5 ) - assert results_unweighted['training']['auc_mu'][-1] == pytest.approx( - results_weighted['training']['auc_mu'][-1], abs=1e-5) # should give 1 when accuracy = 1 X = X[:10, :] y = y[:10] lgb_X = lgb.Dataset(X, label=y) - params = {'objective': 'multiclass', - 'metric': 'auc_mu', - 'num_classes': 10, - 'min_data_in_leaf': 1, - 'verbose': -1} + params = {"objective": "multiclass", "metric": "auc_mu", "num_classes": 10, "min_data_in_leaf": 1, "verbose": -1} results = {} - lgb.train( - params, - lgb_X, - num_boost_round=100, - valid_sets=[lgb_X], - callbacks=[lgb.record_evaluation(results)] - ) - assert results['training']['auc_mu'][-1] == pytest.approx(1) + lgb.train(params, lgb_X, num_boost_round=100, valid_sets=[lgb_X], callbacks=[lgb.record_evaluation(results)]) + assert results["training"]["auc_mu"][-1] == pytest.approx(1) # test loading class weights Xy = np.loadtxt( - str(Path(__file__).absolute().parents[2] / 'examples' / 'multiclass_classification' / 'multiclass.train') + str(Path(__file__).absolute().parents[2] / "examples" / "multiclass_classification" / "multiclass.train") ) y = Xy[:, 0] X = Xy[:, 1:] lgb_X = lgb.Dataset(X, label=y) - params = {'objective': 'multiclass', - 'metric': 'auc_mu', - 'auc_mu_weights': [0, 2, 2, 2, 2, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0], - 'num_classes': 5, - 'verbose': -1, - 'seed': 0} + params = { + "objective": "multiclass", + "metric": "auc_mu", + "auc_mu_weights": [0, 2, 2, 2, 2, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0], + "num_classes": 5, + "verbose": -1, + "seed": 0, + } results_weight = {} - lgb.train( - params, - lgb_X, - num_boost_round=5, - valid_sets=[lgb_X], - callbacks=[lgb.record_evaluation(results_weight)] - ) - params['auc_mu_weights'] = [] + lgb.train(params, lgb_X, num_boost_round=5, valid_sets=[lgb_X], callbacks=[lgb.record_evaluation(results_weight)]) + params["auc_mu_weights"] = [] results_no_weight = {} lgb.train( - params, - 
lgb_X, - num_boost_round=5, - valid_sets=[lgb_X], - callbacks=[lgb.record_evaluation(results_no_weight)] + params, lgb_X, num_boost_round=5, valid_sets=[lgb_X], callbacks=[lgb.record_evaluation(results_no_weight)] ) - assert results_weight['training']['auc_mu'][-1] != results_no_weight['training']['auc_mu'][-1] + assert results_weight["training"]["auc_mu"][-1] != results_no_weight["training"]["auc_mu"][-1] def test_ranking_prediction_early_stopping(): - rank_example_dir = Path(__file__).absolute().parents[2] / 'examples' / 'lambdarank' - X_train, y_train = load_svmlight_file(str(rank_example_dir / 'rank.train')) - q_train = np.loadtxt(str(rank_example_dir / 'rank.train.query')) - X_test, _ = load_svmlight_file(str(rank_example_dir / 'rank.test')) - params = { - 'objective': 'rank_xendcg', - 'verbose': -1 - } + rank_example_dir = Path(__file__).absolute().parents[2] / "examples" / "lambdarank" + X_train, y_train = load_svmlight_file(str(rank_example_dir / "rank.train")) + q_train = np.loadtxt(str(rank_example_dir / "rank.train.query")) + X_test, _ = load_svmlight_file(str(rank_example_dir / "rank.test")) + params = {"objective": "rank_xendcg", "verbose": -1} lgb_train = lgb.Dataset(X_train, y_train, group=q_train, params=params) gbm = lgb.train(params, lgb_train, num_boost_round=50) - pred_parameter = {"pred_early_stop": True, - "pred_early_stop_freq": 5, - "pred_early_stop_margin": 1.5} + pred_parameter = {"pred_early_stop": True, "pred_early_stop_freq": 5, "pred_early_stop_margin": 1.5} ret_early = gbm.predict(X_test, **pred_parameter) pred_parameter["pred_early_stop_margin"] = 5.5 @@ -770,15 +637,16 @@ def simulate_position_bias(file_dataset_in, file_query_in, file_dataset_out, bas return 0.8 else: return 0.9 + # an instantiation of a cascade model where the user stops with probability 0.2 after observing each document pstop = 0.2 - f_dataset_in = open(file_dataset_in, 'r') - f_dataset_out = open(file_dataset_out, 'w') + f_dataset_in = open(file_dataset_in, "r") + f_dataset_out = open(file_dataset_out, "w") random.seed(10) positions_all = [] for line in open(file_query_in): - docs_num = int (line) + docs_num = int(line) lines = [] index_values = [] positions = [0] * docs_num @@ -805,108 +673,124 @@ def simulate_position_bias(file_dataset_in, file_query_in, file_dataset_out, bas lines[index][0] = str(new_label) positions[index] = pos for features in lines: - f_dataset_out.write(' '.join(features) + '\n') + f_dataset_out.write(" ".join(features) + "\n") positions_all.extend(positions) f_dataset_out.close() return positions_all -@pytest.mark.skipif(getenv('TASK', '') == 'cuda', reason='Positions in learning to rank is not supported in CUDA version yet') +@pytest.mark.skipif( + getenv("TASK", "") == "cuda", reason="Positions in learning to rank is not supported in CUDA version yet" +) def test_ranking_with_position_information_with_file(tmp_path): - rank_example_dir = Path(__file__).absolute().parents[2] / 'examples' / 'lambdarank' + rank_example_dir = Path(__file__).absolute().parents[2] / "examples" / "lambdarank" params = { - 'objective': 'lambdarank', - 'verbose': -1, - 'eval_at': [3], - 'metric': 'ndcg', - 'bagging_freq': 1, - 'bagging_fraction': 0.9, - 'min_data_in_leaf': 50, - 'min_sum_hessian_in_leaf': 5.0 + "objective": "lambdarank", + "verbose": -1, + "eval_at": [3], + "metric": "ndcg", + "bagging_freq": 1, + "bagging_fraction": 0.9, + "min_data_in_leaf": 50, + "min_sum_hessian_in_leaf": 5.0, } # simulate position bias for the train dataset and put the train dataset with biased 
labels to temp directory - positions = simulate_position_bias(str(rank_example_dir / 'rank.train'), str(rank_example_dir / 'rank.train.query'), str(tmp_path / 'rank.train'), baseline_feature=34) - copyfile(str(rank_example_dir / 'rank.train.query'), str(tmp_path / 'rank.train.query')) - copyfile(str(rank_example_dir / 'rank.test'), str(tmp_path / 'rank.test')) - copyfile(str(rank_example_dir / 'rank.test.query'), str(tmp_path / 'rank.test.query')) + positions = simulate_position_bias( + str(rank_example_dir / "rank.train"), + str(rank_example_dir / "rank.train.query"), + str(tmp_path / "rank.train"), + baseline_feature=34, + ) + copyfile(str(rank_example_dir / "rank.train.query"), str(tmp_path / "rank.train.query")) + copyfile(str(rank_example_dir / "rank.test"), str(tmp_path / "rank.test")) + copyfile(str(rank_example_dir / "rank.test.query"), str(tmp_path / "rank.test.query")) - lgb_train = lgb.Dataset(str(tmp_path / 'rank.train'), params=params) - lgb_valid = [lgb_train.create_valid(str(tmp_path / 'rank.test'))] - gbm_baseline = lgb.train(params, lgb_train, valid_sets = lgb_valid, num_boost_round=50) + lgb_train = lgb.Dataset(str(tmp_path / "rank.train"), params=params) + lgb_valid = [lgb_train.create_valid(str(tmp_path / "rank.test"))] + gbm_baseline = lgb.train(params, lgb_train, valid_sets=lgb_valid, num_boost_round=50) - f_positions_out = open(str(tmp_path / 'rank.train.position'), 'w') + f_positions_out = open(str(tmp_path / "rank.train.position"), "w") for pos in positions: - f_positions_out.write(str(pos) + '\n') + f_positions_out.write(str(pos) + "\n") f_positions_out.close() - lgb_train = lgb.Dataset(str(tmp_path / 'rank.train'), params=params) - lgb_valid = [lgb_train.create_valid(str(tmp_path / 'rank.test'))] - gbm_unbiased_with_file = lgb.train(params, lgb_train, valid_sets = lgb_valid, num_boost_round=50) + lgb_train = lgb.Dataset(str(tmp_path / "rank.train"), params=params) + lgb_valid = [lgb_train.create_valid(str(tmp_path / "rank.test"))] + gbm_unbiased_with_file = lgb.train(params, lgb_train, valid_sets=lgb_valid, num_boost_round=50) # the performance of the unbiased LambdaMART should outperform the plain LambdaMART on the dataset with position bias - assert gbm_baseline.best_score['valid_0']['ndcg@3'] + 0.03 <= gbm_unbiased_with_file.best_score['valid_0']['ndcg@3'] + assert gbm_baseline.best_score["valid_0"]["ndcg@3"] + 0.03 <= gbm_unbiased_with_file.best_score["valid_0"]["ndcg@3"] # add extra row to position file - with open(str(tmp_path / 'rank.train.position'), 'a') as file: - file.write('pos_1000\n') + with open(str(tmp_path / "rank.train.position"), "a") as file: + file.write("pos_1000\n") file.close() - lgb_train = lgb.Dataset(str(tmp_path / 'rank.train'), params=params) - lgb_valid = [lgb_train.create_valid(str(tmp_path / 'rank.test'))] + lgb_train = lgb.Dataset(str(tmp_path / "rank.train"), params=params) + lgb_valid = [lgb_train.create_valid(str(tmp_path / "rank.test"))] with pytest.raises(lgb.basic.LightGBMError, match=r"Positions size \(3006\) doesn't match data size"): - lgb.train(params, lgb_train, valid_sets = lgb_valid, num_boost_round=50) + lgb.train(params, lgb_train, valid_sets=lgb_valid, num_boost_round=50) -@pytest.mark.skipif(getenv('TASK', '') == 'cuda', reason='Positions in learning to rank is not supported in CUDA version yet') +@pytest.mark.skipif( + getenv("TASK", "") == "cuda", reason="Positions in learning to rank is not supported in CUDA version yet" +) def test_ranking_with_position_information_with_dataset_constructor(tmp_path): - 
rank_example_dir = Path(__file__).absolute().parents[2] / 'examples' / 'lambdarank' + rank_example_dir = Path(__file__).absolute().parents[2] / "examples" / "lambdarank" params = { - 'objective': 'lambdarank', - 'verbose': -1, - 'eval_at': [3], - 'metric': 'ndcg', - 'bagging_freq': 1, - 'bagging_fraction': 0.9, - 'min_data_in_leaf': 50, - 'min_sum_hessian_in_leaf': 5.0, - 'num_threads': 1, - 'deterministic': True, - 'seed': 0 + "objective": "lambdarank", + "verbose": -1, + "eval_at": [3], + "metric": "ndcg", + "bagging_freq": 1, + "bagging_fraction": 0.9, + "min_data_in_leaf": 50, + "min_sum_hessian_in_leaf": 5.0, + "num_threads": 1, + "deterministic": True, + "seed": 0, } # simulate position bias for the train dataset and put the train dataset with biased labels to temp directory - positions = simulate_position_bias(str(rank_example_dir / 'rank.train'), str(rank_example_dir / 'rank.train.query'), str(tmp_path / 'rank.train'), baseline_feature=34) - copyfile(str(rank_example_dir / 'rank.train.query'), str(tmp_path / 'rank.train.query')) - copyfile(str(rank_example_dir / 'rank.test'), str(tmp_path / 'rank.test')) - copyfile(str(rank_example_dir / 'rank.test.query'), str(tmp_path / 'rank.test.query')) + positions = simulate_position_bias( + str(rank_example_dir / "rank.train"), + str(rank_example_dir / "rank.train.query"), + str(tmp_path / "rank.train"), + baseline_feature=34, + ) + copyfile(str(rank_example_dir / "rank.train.query"), str(tmp_path / "rank.train.query")) + copyfile(str(rank_example_dir / "rank.test"), str(tmp_path / "rank.test")) + copyfile(str(rank_example_dir / "rank.test.query"), str(tmp_path / "rank.test.query")) - lgb_train = lgb.Dataset(str(tmp_path / 'rank.train'), params=params) - lgb_valid = [lgb_train.create_valid(str(tmp_path / 'rank.test'))] - gbm_baseline = lgb.train(params, lgb_train, valid_sets = lgb_valid, num_boost_round=50) + lgb_train = lgb.Dataset(str(tmp_path / "rank.train"), params=params) + lgb_valid = [lgb_train.create_valid(str(tmp_path / "rank.test"))] + gbm_baseline = lgb.train(params, lgb_train, valid_sets=lgb_valid, num_boost_round=50) positions = np.array(positions) # test setting positions through Dataset constructor with numpy array - lgb_train = lgb.Dataset(str(tmp_path / 'rank.train'), params=params, position=positions) - lgb_valid = [lgb_train.create_valid(str(tmp_path / 'rank.test'))] - gbm_unbiased = lgb.train(params, lgb_train, valid_sets = lgb_valid, num_boost_round=50) + lgb_train = lgb.Dataset(str(tmp_path / "rank.train"), params=params, position=positions) + lgb_valid = [lgb_train.create_valid(str(tmp_path / "rank.test"))] + gbm_unbiased = lgb.train(params, lgb_train, valid_sets=lgb_valid, num_boost_round=50) # the performance of the unbiased LambdaMART should outperform the plain LambdaMART on the dataset with position bias - assert gbm_baseline.best_score['valid_0']['ndcg@3'] + 0.03 <= gbm_unbiased.best_score['valid_0']['ndcg@3'] + assert gbm_baseline.best_score["valid_0"]["ndcg@3"] + 0.03 <= gbm_unbiased.best_score["valid_0"]["ndcg@3"] if PANDAS_INSTALLED: # test setting positions through Dataset constructor with pandas Series - lgb_train = lgb.Dataset(str(tmp_path / 'rank.train'), params=params, position=pd_Series(positions)) - lgb_valid = [lgb_train.create_valid(str(tmp_path / 'rank.test'))] - gbm_unbiased_pandas_series = lgb.train(params, lgb_train, valid_sets = lgb_valid, num_boost_round=50) - assert gbm_unbiased.best_score['valid_0']['ndcg@3'] == gbm_unbiased_pandas_series.best_score['valid_0']['ndcg@3'] + lgb_train = 
lgb.Dataset(str(tmp_path / "rank.train"), params=params, position=pd_Series(positions)) + lgb_valid = [lgb_train.create_valid(str(tmp_path / "rank.test"))] + gbm_unbiased_pandas_series = lgb.train(params, lgb_train, valid_sets=lgb_valid, num_boost_round=50) + assert ( + gbm_unbiased.best_score["valid_0"]["ndcg@3"] == gbm_unbiased_pandas_series.best_score["valid_0"]["ndcg@3"] + ) # test setting positions through set_position - lgb_train = lgb.Dataset(str(tmp_path / 'rank.train'), params=params) - lgb_valid = [lgb_train.create_valid(str(tmp_path / 'rank.test'))] + lgb_train = lgb.Dataset(str(tmp_path / "rank.train"), params=params) + lgb_valid = [lgb_train.create_valid(str(tmp_path / "rank.test"))] lgb_train.set_position(positions) - gbm_unbiased_set_position = lgb.train(params, lgb_train, valid_sets = lgb_valid, num_boost_round=50) - assert gbm_unbiased.best_score['valid_0']['ndcg@3'] == gbm_unbiased_set_position.best_score['valid_0']['ndcg@3'] + gbm_unbiased_set_position = lgb.train(params, lgb_train, valid_sets=lgb_valid, num_boost_round=50) + assert gbm_unbiased.best_score["valid_0"]["ndcg@3"] == gbm_unbiased_set_position.best_score["valid_0"]["ndcg@3"] # test get_position works positions_from_get = lgb_train.get_position() @@ -915,36 +799,38 @@ def test_ranking_with_position_information_with_dataset_constructor(tmp_path): def test_early_stopping(): X, y = load_breast_cancer(return_X_y=True) - params = { - 'objective': 'binary', - 'metric': 'binary_logloss', - 'verbose': -1 - } + params = {"objective": "binary", "metric": "binary_logloss", "verbose": -1} X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) lgb_train = lgb.Dataset(X_train, y_train) lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train) - valid_set_name = 'valid_set' + valid_set_name = "valid_set" # no early stopping - gbm = lgb.train(params, lgb_train, - num_boost_round=10, - valid_sets=lgb_eval, - valid_names=valid_set_name, - callbacks=[lgb.early_stopping(stopping_rounds=5)]) + gbm = lgb.train( + params, + lgb_train, + num_boost_round=10, + valid_sets=lgb_eval, + valid_names=valid_set_name, + callbacks=[lgb.early_stopping(stopping_rounds=5)], + ) assert gbm.best_iteration == 10 assert valid_set_name in gbm.best_score - assert 'binary_logloss' in gbm.best_score[valid_set_name] + assert "binary_logloss" in gbm.best_score[valid_set_name] # early stopping occurs - gbm = lgb.train(params, lgb_train, - num_boost_round=40, - valid_sets=lgb_eval, - valid_names=valid_set_name, - callbacks=[lgb.early_stopping(stopping_rounds=5)]) + gbm = lgb.train( + params, + lgb_train, + num_boost_round=40, + valid_sets=lgb_eval, + valid_names=valid_set_name, + callbacks=[lgb.early_stopping(stopping_rounds=5)], + ) assert gbm.best_iteration <= 39 assert valid_set_name in gbm.best_score - assert 'binary_logloss' in gbm.best_score[valid_set_name] + assert "binary_logloss" in gbm.best_score[valid_set_name] -@pytest.mark.parametrize('use_valid', [True, False]) +@pytest.mark.parametrize("use_valid", [True, False]) def test_early_stopping_ignores_training_set(use_valid): x = np.linspace(-1, 1, 100) X = x.reshape(-1, 1) @@ -954,98 +840,97 @@ def test_early_stopping_ignores_training_set(use_valid): train_ds = lgb.Dataset(X_train, y_train) valid_ds = lgb.Dataset(X_valid, y_valid) valid_sets = [train_ds] - valid_names = ['train'] + valid_names = ["train"] if use_valid: valid_sets.append(valid_ds) - valid_names.append('valid') + valid_names.append("valid") eval_result = {} def train_fn(): return lgb.train( - 
{'num_leaves': 5}, + {"num_leaves": 5}, train_ds, num_boost_round=2, valid_sets=valid_sets, valid_names=valid_names, - callbacks=[lgb.early_stopping(1), lgb.record_evaluation(eval_result)] + callbacks=[lgb.early_stopping(1), lgb.record_evaluation(eval_result)], ) + if use_valid: bst = train_fn() assert bst.best_iteration == 1 - assert eval_result['train']['l2'][1] < eval_result['train']['l2'][0] # train improved - assert eval_result['valid']['l2'][1] > eval_result['valid']['l2'][0] # valid didn't + assert eval_result["train"]["l2"][1] < eval_result["train"]["l2"][0] # train improved + assert eval_result["valid"]["l2"][1] > eval_result["valid"]["l2"][0] # valid didn't else: - with pytest.warns(UserWarning, match='Only training set found, disabling early stopping.'): + with pytest.warns(UserWarning, match="Only training set found, disabling early stopping."): bst = train_fn() assert bst.current_iteration() == 2 assert bst.best_iteration == 0 -@pytest.mark.parametrize('first_metric_only', [True, False]) +@pytest.mark.parametrize("first_metric_only", [True, False]) def test_early_stopping_via_global_params(first_metric_only): X, y = load_breast_cancer(return_X_y=True) num_trees = 5 params = { - 'num_trees': num_trees, - 'objective': 'binary', - 'metric': 'None', - 'verbose': -1, - 'early_stopping_round': 2, - 'first_metric_only': first_metric_only + "num_trees": num_trees, + "objective": "binary", + "metric": "None", + "verbose": -1, + "early_stopping_round": 2, + "first_metric_only": first_metric_only, } X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) lgb_train = lgb.Dataset(X_train, y_train) lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train) - valid_set_name = 'valid_set' - gbm = lgb.train(params, - lgb_train, - feval=[decreasing_metric, constant_metric], - valid_sets=lgb_eval, - valid_names=valid_set_name) + valid_set_name = "valid_set" + gbm = lgb.train( + params, lgb_train, feval=[decreasing_metric, constant_metric], valid_sets=lgb_eval, valid_names=valid_set_name + ) if first_metric_only: assert gbm.best_iteration == num_trees else: assert gbm.best_iteration == 1 assert valid_set_name in gbm.best_score - assert 'decreasing_metric' in gbm.best_score[valid_set_name] - assert 'error' in gbm.best_score[valid_set_name] + assert "decreasing_metric" in gbm.best_score[valid_set_name] + assert "error" in gbm.best_score[valid_set_name] -@pytest.mark.parametrize('first_only', [True, False]) -@pytest.mark.parametrize('single_metric', [True, False]) -@pytest.mark.parametrize('greater_is_better', [True, False]) +@pytest.mark.parametrize("first_only", [True, False]) +@pytest.mark.parametrize("single_metric", [True, False]) +@pytest.mark.parametrize("greater_is_better", [True, False]) def test_early_stopping_min_delta(first_only, single_metric, greater_is_better): if single_metric and not first_only: pytest.skip("first_metric_only doesn't affect single metric.") metric2min_delta = { - 'auc': 0.001, - 'binary_logloss': 0.01, - 'average_precision': 0.001, - 'mape': 0.01, + "auc": 0.001, + "binary_logloss": 0.01, + "average_precision": 0.001, + "mape": 0.01, } if single_metric: if greater_is_better: - metric = 'auc' + metric = "auc" else: - metric = 'binary_logloss' + metric = "binary_logloss" else: if first_only: if greater_is_better: - metric = ['auc', 'binary_logloss'] + metric = ["auc", "binary_logloss"] else: - metric = ['binary_logloss', 'auc'] + metric = ["binary_logloss", "auc"] else: if greater_is_better: - metric = ['auc', 'average_precision'] + 
metric = ["auc", "average_precision"] else: - metric = ['binary_logloss', 'mape'] + metric = ["binary_logloss", "mape"] X, y = load_breast_cancer(return_X_y=True) X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.2, random_state=0) train_ds = lgb.Dataset(X_train, y_train) valid_ds = lgb.Dataset(X_valid, y_valid, reference=train_ds) - params = {'objective': 'binary', 'metric': metric, 'verbose': -1} + params = {"objective": "binary", "metric": metric, "verbose": -1} if isinstance(metric, str): min_delta = metric2min_delta[metric] elif first_only: @@ -1057,33 +942,33 @@ def test_early_stopping_min_delta(first_only, single_metric, greater_is_better): "train_set": train_ds, "num_boost_round": 50, "valid_sets": [train_ds, valid_ds], - "valid_names": ['training', 'valid'], + "valid_names": ["training", "valid"], } # regular early stopping evals_result = {} - train_kwargs['callbacks'] = [ + train_kwargs["callbacks"] = [ lgb.callback.early_stopping(10, first_only, verbose=False), - lgb.record_evaluation(evals_result) + lgb.record_evaluation(evals_result), ] bst = lgb.train(**train_kwargs) - scores = np.vstack(list(evals_result['valid'].values())).T + scores = np.vstack(list(evals_result["valid"].values())).T # positive min_delta delta_result = {} - train_kwargs['callbacks'] = [ + train_kwargs["callbacks"] = [ lgb.callback.early_stopping(10, first_only, verbose=False, min_delta=min_delta), - lgb.record_evaluation(delta_result) + lgb.record_evaluation(delta_result), ] delta_bst = lgb.train(**train_kwargs) - delta_scores = np.vstack(list(delta_result['valid'].values())).T + delta_scores = np.vstack(list(delta_result["valid"].values())).T if first_only: scores = scores[:, 0] delta_scores = delta_scores[:, 0] assert delta_bst.num_trees() < bst.num_trees() - np.testing.assert_allclose(scores[:len(delta_scores)], delta_scores) + np.testing.assert_allclose(scores[: len(delta_scores)], delta_scores) last_score = delta_scores[-1] best_score = delta_scores[delta_bst.num_trees() - 1] if greater_is_better: @@ -1098,20 +983,15 @@ def test_early_stopping_can_be_triggered_via_custom_callback(): def _early_stop_after_seventh_iteration(env): if env.iteration == 6: exc = lgb.EarlyStopException( - best_iteration=6, - best_score=[("some_validation_set", "some_metric", 0.708, True)] + best_iteration=6, best_score=[("some_validation_set", "some_metric", 0.708, True)] ) raise exc bst = lgb.train( - params={ - "objective": "regression", - "verbose": -1, - "num_leaves": 2 - }, + params={"objective": "regression", "verbose": -1, "num_leaves": 2}, train_set=lgb.Dataset(X, label=y), num_boost_round=23, - callbacks=[_early_stop_after_seventh_iteration] + callbacks=[_early_stop_after_seventh_iteration], ) assert bst.num_trees() == 7 assert bst.best_score["some_validation_set"]["some_metric"] == 0.708 @@ -1122,15 +1002,11 @@ def test_early_stopping_can_be_triggered_via_custom_callback(): def test_continue_train(): X, y = make_synthetic_regression() X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) - params = { - 'objective': 'regression', - 'metric': 'l1', - 'verbose': -1 - } + params = {"objective": "regression", "metric": "l1", "verbose": -1} lgb_train = lgb.Dataset(X_train, y_train, free_raw_data=False) lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, free_raw_data=False) init_gbm = lgb.train(params, lgb_train, num_boost_round=20) - model_name = 'model.txt' + model_name = "model.txt" init_gbm.save_model(model_name) evals_result = {} gbm = lgb.train( @@ 
-1139,22 +1015,19 @@ def test_continue_train(): num_boost_round=30, valid_sets=lgb_eval, # test custom eval metrics - feval=(lambda p, d: ('custom_mae', mean_absolute_error(p, d.get_label()), False)), + feval=(lambda p, d: ("custom_mae", mean_absolute_error(p, d.get_label()), False)), callbacks=[lgb.record_evaluation(evals_result)], - init_model='model.txt' + init_model="model.txt", ) ret = mean_absolute_error(y_test, gbm.predict(X_test)) assert ret < 13.6 - assert evals_result['valid_0']['l1'][-1] == pytest.approx(ret) - np.testing.assert_allclose(evals_result['valid_0']['l1'], evals_result['valid_0']['custom_mae']) + assert evals_result["valid_0"]["l1"][-1] == pytest.approx(ret) + np.testing.assert_allclose(evals_result["valid_0"]["l1"], evals_result["valid_0"]["custom_mae"]) def test_continue_train_reused_dataset(): X, y = make_synthetic_regression() - params = { - 'objective': 'regression', - 'verbose': -1 - } + params = {"objective": "regression", "verbose": -1} lgb_train = lgb.Dataset(X, y, free_raw_data=False) init_gbm = lgb.train(params, lgb_train, num_boost_round=5) init_gbm_2 = lgb.train(params, lgb_train, num_boost_round=5, init_model=init_gbm) @@ -1166,12 +1039,7 @@ def test_continue_train_reused_dataset(): def test_continue_train_dart(): X, y = make_synthetic_regression() X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) - params = { - 'boosting_type': 'dart', - 'objective': 'regression', - 'metric': 'l1', - 'verbose': -1 - } + params = {"boosting_type": "dart", "objective": "regression", "metric": "l1", "verbose": -1} lgb_train = lgb.Dataset(X_train, y_train, free_raw_data=False) lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, free_raw_data=False) init_gbm = lgb.train(params, lgb_train, num_boost_round=50) @@ -1182,22 +1050,17 @@ def test_continue_train_dart(): num_boost_round=50, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)], - init_model=init_gbm + init_model=init_gbm, ) ret = mean_absolute_error(y_test, gbm.predict(X_test)) assert ret < 13.6 - assert evals_result['valid_0']['l1'][-1] == pytest.approx(ret) + assert evals_result["valid_0"]["l1"][-1] == pytest.approx(ret) def test_continue_train_multiclass(): X, y = load_iris(return_X_y=True) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) - params = { - 'objective': 'multiclass', - 'metric': 'multi_logloss', - 'num_class': 3, - 'verbose': -1 - } + params = {"objective": "multiclass", "metric": "multi_logloss", "num_class": 3, "verbose": -1} lgb_train = lgb.Dataset(X_train, y_train, params=params, free_raw_data=False) lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, params=params, free_raw_data=False) init_gbm = lgb.train(params, lgb_train, num_boost_round=20) @@ -1208,78 +1071,88 @@ def test_continue_train_multiclass(): num_boost_round=30, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)], - init_model=init_gbm + init_model=init_gbm, ) ret = multi_logloss(y_test, gbm.predict(X_test)) assert ret < 0.1 - assert evals_result['valid_0']['multi_logloss'][-1] == pytest.approx(ret) + assert evals_result["valid_0"]["multi_logloss"][-1] == pytest.approx(ret) def test_cv(): X_train, y_train = make_synthetic_regression() - params = {'verbose': -1} + params = {"verbose": -1} lgb_train = lgb.Dataset(X_train, y_train) # shuffle = False, override metric in params - params_with_metric = {'metric': 'l2', 'verbose': -1} - cv_res = lgb.cv(params_with_metric, lgb_train, num_boost_round=10, - nfold=3, 
stratified=False, shuffle=False, metrics='l1') - assert 'valid l1-mean' in cv_res - assert 'valid l2-mean' not in cv_res - assert len(cv_res['valid l1-mean']) == 10 + params_with_metric = {"metric": "l2", "verbose": -1} + cv_res = lgb.cv( + params_with_metric, lgb_train, num_boost_round=10, nfold=3, stratified=False, shuffle=False, metrics="l1" + ) + assert "valid l1-mean" in cv_res + assert "valid l2-mean" not in cv_res + assert len(cv_res["valid l1-mean"]) == 10 # shuffle = True, callbacks - cv_res = lgb.cv(params, lgb_train, num_boost_round=10, nfold=3, - stratified=False, shuffle=True, metrics='l1', - callbacks=[lgb.reset_parameter(learning_rate=lambda i: 0.1 - 0.001 * i)]) - assert 'valid l1-mean' in cv_res - assert len(cv_res['valid l1-mean']) == 10 + cv_res = lgb.cv( + params, + lgb_train, + num_boost_round=10, + nfold=3, + stratified=False, + shuffle=True, + metrics="l1", + callbacks=[lgb.reset_parameter(learning_rate=lambda i: 0.1 - 0.001 * i)], + ) + assert "valid l1-mean" in cv_res + assert len(cv_res["valid l1-mean"]) == 10 # enable display training loss - cv_res = lgb.cv(params_with_metric, lgb_train, num_boost_round=10, - nfold=3, stratified=False, shuffle=False, - metrics='l1', eval_train_metric=True) - assert 'train l1-mean' in cv_res - assert 'valid l1-mean' in cv_res - assert 'train l2-mean' not in cv_res - assert 'valid l2-mean' not in cv_res - assert len(cv_res['train l1-mean']) == 10 - assert len(cv_res['valid l1-mean']) == 10 + cv_res = lgb.cv( + params_with_metric, + lgb_train, + num_boost_round=10, + nfold=3, + stratified=False, + shuffle=False, + metrics="l1", + eval_train_metric=True, + ) + assert "train l1-mean" in cv_res + assert "valid l1-mean" in cv_res + assert "train l2-mean" not in cv_res + assert "valid l2-mean" not in cv_res + assert len(cv_res["train l1-mean"]) == 10 + assert len(cv_res["valid l1-mean"]) == 10 # self defined folds tss = TimeSeriesSplit(3) folds = tss.split(X_train) cv_res_gen = lgb.cv(params_with_metric, lgb_train, num_boost_round=10, folds=folds) cv_res_obj = lgb.cv(params_with_metric, lgb_train, num_boost_round=10, folds=tss) - np.testing.assert_allclose(cv_res_gen['valid l2-mean'], cv_res_obj['valid l2-mean']) + np.testing.assert_allclose(cv_res_gen["valid l2-mean"], cv_res_obj["valid l2-mean"]) # LambdaRank - rank_example_dir = Path(__file__).absolute().parents[2] / 'examples' / 'lambdarank' - X_train, y_train = load_svmlight_file(str(rank_example_dir / 'rank.train')) - q_train = np.loadtxt(str(rank_example_dir / 'rank.train.query')) - params_lambdarank = {'objective': 'lambdarank', 'verbose': -1, 'eval_at': 3} + rank_example_dir = Path(__file__).absolute().parents[2] / "examples" / "lambdarank" + X_train, y_train = load_svmlight_file(str(rank_example_dir / "rank.train")) + q_train = np.loadtxt(str(rank_example_dir / "rank.train.query")) + params_lambdarank = {"objective": "lambdarank", "verbose": -1, "eval_at": 3} lgb_train = lgb.Dataset(X_train, y_train, group=q_train) # ... with l2 metric - cv_res_lambda = lgb.cv(params_lambdarank, lgb_train, num_boost_round=10, nfold=3, metrics='l2') + cv_res_lambda = lgb.cv(params_lambdarank, lgb_train, num_boost_round=10, nfold=3, metrics="l2") assert len(cv_res_lambda) == 2 - assert not np.isnan(cv_res_lambda['valid l2-mean']).any() + assert not np.isnan(cv_res_lambda["valid l2-mean"]).any() # ... 
with NDCG (default) metric cv_res_lambda = lgb.cv(params_lambdarank, lgb_train, num_boost_round=10, nfold=3) assert len(cv_res_lambda) == 2 - assert not np.isnan(cv_res_lambda['valid ndcg@3-mean']).any() + assert not np.isnan(cv_res_lambda["valid ndcg@3-mean"]).any() # self defined folds with lambdarank - cv_res_lambda_obj = lgb.cv(params_lambdarank, lgb_train, num_boost_round=10, - folds=GroupKFold(n_splits=3)) - np.testing.assert_allclose(cv_res_lambda['valid ndcg@3-mean'], cv_res_lambda_obj['valid ndcg@3-mean']) + cv_res_lambda_obj = lgb.cv(params_lambdarank, lgb_train, num_boost_round=10, folds=GroupKFold(n_splits=3)) + np.testing.assert_allclose(cv_res_lambda["valid ndcg@3-mean"], cv_res_lambda_obj["valid ndcg@3-mean"]) def test_cv_works_with_init_model(tmp_path): X, y = make_synthetic_regression() - params = {'objective': 'regression', 'verbose': -1} + params = {"objective": "regression", "verbose": -1} num_train_rounds = 2 lgb_train = lgb.Dataset(X, y, free_raw_data=False) - bst = lgb.train( - params=params, - train_set=lgb_train, - num_boost_round=num_train_rounds - ) + bst = lgb.train(params=params, train_set=lgb_train, num_boost_round=num_train_rounds) preds_raw = bst.predict(X, raw_score=True) - model_path_txt = str(tmp_path / 'lgb.model') + model_path_txt = str(tmp_path / "lgb.model") bst.save_model(model_path_txt) num_cv_rounds = 5 @@ -1290,42 +1163,27 @@ def test_cv_works_with_init_model(tmp_path): "shuffle": False, "seed": 708, "return_cvbooster": True, - "params": params + "params": params, } # init_model from an in-memory Booster - cv_res = lgb.cv( - train_set=lgb_train, - init_model=bst, - **cv_kwargs - ) + cv_res = lgb.cv(train_set=lgb_train, init_model=bst, **cv_kwargs) cv_bst_w_in_mem_init_model = cv_res["cvbooster"] assert cv_bst_w_in_mem_init_model.current_iteration() == [num_train_rounds + num_cv_rounds] * 3 for booster in cv_bst_w_in_mem_init_model.boosters: - np.testing.assert_allclose( - preds_raw, - booster.predict(X, raw_score=True, num_iteration=num_train_rounds) - ) + np.testing.assert_allclose(preds_raw, booster.predict(X, raw_score=True, num_iteration=num_train_rounds)) # init_model from a text file - cv_res = lgb.cv( - train_set=lgb_train, - init_model=model_path_txt, - **cv_kwargs - ) + cv_res = lgb.cv(train_set=lgb_train, init_model=model_path_txt, **cv_kwargs) cv_bst_w_file_init_model = cv_res["cvbooster"] assert cv_bst_w_file_init_model.current_iteration() == [num_train_rounds + num_cv_rounds] * 3 for booster in cv_bst_w_file_init_model.boosters: - np.testing.assert_allclose( - preds_raw, - booster.predict(X, raw_score=True, num_iteration=num_train_rounds) - ) + np.testing.assert_allclose(preds_raw, booster.predict(X, raw_score=True, num_iteration=num_train_rounds)) # predictions should be identical for i in range(3): np.testing.assert_allclose( - cv_bst_w_in_mem_init_model.boosters[i].predict(X), - cv_bst_w_file_init_model.boosters[i].predict(X) + cv_bst_w_in_mem_init_model.boosters[i].predict(X), cv_bst_w_file_init_model.boosters[i].predict(X) ) @@ -1333,20 +1191,23 @@ def test_cvbooster(): X, y = load_breast_cancer(return_X_y=True) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) params = { - 'objective': 'binary', - 'metric': 'binary_logloss', - 'verbose': -1, + "objective": "binary", + "metric": "binary_logloss", + "verbose": -1, } nfold = 3 lgb_train = lgb.Dataset(X_train, y_train) # with early stopping - cv_res = lgb.cv(params, lgb_train, - num_boost_round=25, - nfold=nfold, - 
callbacks=[lgb.early_stopping(stopping_rounds=5)], - return_cvbooster=True) - assert 'cvbooster' in cv_res - cvb = cv_res['cvbooster'] + cv_res = lgb.cv( + params, + lgb_train, + num_boost_round=25, + nfold=nfold, + callbacks=[lgb.early_stopping(stopping_rounds=5)], + return_cvbooster=True, + ) + assert "cvbooster" in cv_res + cvb = cv_res["cvbooster"] assert isinstance(cvb, lgb.CVBooster) assert isinstance(cvb.boosters, list) assert len(cvb.boosters) == nfold @@ -1366,11 +1227,8 @@ def test_cvbooster(): ret = log_loss(y_test, avg_pred) assert ret < 0.13 # without early stopping - cv_res = lgb.cv(params, lgb_train, - num_boost_round=20, - nfold=3, - return_cvbooster=True) - cvb = cv_res['cvbooster'] + cv_res = lgb.cv(params, lgb_train, num_boost_round=20, nfold=3, return_cvbooster=True) + cvb = cv_res["cvbooster"] assert cvb.best_iteration == -1 preds = cvb.predict(X_test) avg_pred = np.mean(preds, axis=0) @@ -1382,23 +1240,26 @@ def test_cvbooster_save_load(tmp_path): X, y = load_breast_cancer(return_X_y=True) X_train, X_test, y_train, _ = train_test_split(X, y, test_size=0.1, random_state=42) params = { - 'objective': 'binary', - 'metric': 'binary_logloss', - 'verbose': -1, + "objective": "binary", + "metric": "binary_logloss", + "verbose": -1, } nfold = 3 lgb_train = lgb.Dataset(X_train, y_train) - cv_res = lgb.cv(params, lgb_train, - num_boost_round=10, - nfold=nfold, - callbacks=[lgb.early_stopping(stopping_rounds=5)], - return_cvbooster=True) - cvbooster = cv_res['cvbooster'] + cv_res = lgb.cv( + params, + lgb_train, + num_boost_round=10, + nfold=nfold, + callbacks=[lgb.early_stopping(stopping_rounds=5)], + return_cvbooster=True, + ) + cvbooster = cv_res["cvbooster"] preds = cvbooster.predict(X_test) best_iteration = cvbooster.best_iteration - model_path_txt = str(tmp_path / 'lgb.model') + model_path_txt = str(tmp_path / "lgb.model") cvbooster.save_model(model_path_txt) model_string = cvbooster.model_to_string() @@ -1411,24 +1272,27 @@ def test_cvbooster_save_load(tmp_path): np.testing.assert_array_equal(preds, cvbooster_loaded.predict(X_test)) -@pytest.mark.parametrize('serializer', SERIALIZERS) +@pytest.mark.parametrize("serializer", SERIALIZERS) def test_cvbooster_picklable(serializer): X, y = load_breast_cancer(return_X_y=True) X_train, X_test, y_train, _ = train_test_split(X, y, test_size=0.1, random_state=42) params = { - 'objective': 'binary', - 'metric': 'binary_logloss', - 'verbose': -1, + "objective": "binary", + "metric": "binary_logloss", + "verbose": -1, } nfold = 3 lgb_train = lgb.Dataset(X_train, y_train) - cv_res = lgb.cv(params, lgb_train, - num_boost_round=10, - nfold=nfold, - callbacks=[lgb.early_stopping(stopping_rounds=5)], - return_cvbooster=True) - cvbooster = cv_res['cvbooster'] + cv_res = lgb.cv( + params, + lgb_train, + num_boost_round=10, + nfold=nfold, + callbacks=[lgb.early_stopping(stopping_rounds=5)], + return_cvbooster=True, + ) + cvbooster = cv_res["cvbooster"] preds = cvbooster.predict(X_test) best_iteration = cvbooster.best_iteration @@ -1443,13 +1307,13 @@ def test_cvbooster_picklable(serializer): def test_feature_name(): X_train, y_train = make_synthetic_regression() - params = {'verbose': -1} + params = {"verbose": -1} lgb_train = lgb.Dataset(X_train, y_train) - feature_names = [f'f_{i}' for i in range(X_train.shape[-1])] + feature_names = [f"f_{i}" for i in range(X_train.shape[-1])] gbm = lgb.train(params, lgb_train, num_boost_round=5, feature_name=feature_names) assert feature_names == gbm.feature_name() # test feature_names with whitespaces - 
feature_names_with_space = [f'f {i}' for i in range(X_train.shape[-1])] + feature_names_with_space = [f"f {i}" for i in range(X_train.shape[-1])] gbm = lgb.train(params, lgb_train, num_boost_round=5, feature_name=feature_names_with_space) assert feature_names == gbm.feature_name() @@ -1458,15 +1322,15 @@ def test_feature_name_with_non_ascii(): X_train = np.random.normal(size=(100, 4)) y_train = np.random.random(100) # This has non-ascii strings. - feature_names = [u'F_零', u'F_一', u'F_二', u'F_三'] - params = {'verbose': -1} + feature_names = ["F_零", "F_一", "F_二", "F_三"] + params = {"verbose": -1} lgb_train = lgb.Dataset(X_train, y_train) gbm = lgb.train(params, lgb_train, num_boost_round=5, feature_name=feature_names) assert feature_names == gbm.feature_name() - gbm.save_model('lgb.model') + gbm.save_model("lgb.model") - gbm2 = lgb.Booster(model_file='lgb.model') + gbm2 = lgb.Booster(model_file="lgb.model") assert feature_names == gbm2.feature_name() @@ -1475,25 +1339,25 @@ def test_parameters_are_loaded_from_model_file(tmp_path, capsys): y = np.random.rand(100) ds = lgb.Dataset(X, y) params = { - 'bagging_fraction': 0.8, - 'bagging_freq': 2, - 'boosting': 'rf', - 'feature_contri': [0.5, 0.5, 0.5], - 'feature_fraction': 0.7, - 'boost_from_average': False, - 'interaction_constraints': [[0, 1], [0]], - 'metric': ['l2', 'rmse'], - 'num_leaves': 5, - 'num_threads': 1, + "bagging_fraction": 0.8, + "bagging_freq": 2, + "boosting": "rf", + "feature_contri": [0.5, 0.5, 0.5], + "feature_fraction": 0.7, + "boost_from_average": False, + "interaction_constraints": [[0, 1], [0]], + "metric": ["l2", "rmse"], + "num_leaves": 5, + "num_threads": 1, } - model_file = tmp_path / 'model.txt' + model_file = tmp_path / "model.txt" orig_bst = lgb.train(params, ds, num_boost_round=1, categorical_feature=[1, 2]) orig_bst.save_model(model_file) - with model_file.open('rt') as f: + with model_file.open("rt") as f: model_contents = f.readlines() - params_start = model_contents.index('parameters:\n') - model_contents.insert(params_start + 1, '[max_conflict_rate: 0]\n') - with model_file.open('wt') as f: + params_start = model_contents.index("parameters:\n") + model_contents.insert(params_start + 1, "[max_conflict_rate: 0]\n") + with model_file.open("wt") as f: f.writelines(model_contents) bst = lgb.Booster(model_file=model_file) expected_msg = "[LightGBM] [Warning] Ignoring unrecognized parameter 'max_conflict_rate' found in model string." 
@@ -1501,11 +1365,11 @@ def test_parameters_are_loaded_from_model_file(tmp_path, capsys): assert expected_msg in stdout set_params = {k: bst.params[k] for k in params.keys()} assert set_params == params - assert bst.params['categorical_feature'] == [1, 2] + assert bst.params["categorical_feature"] == [1, 2] # check that passing parameters to the constructor raises warning and ignores them - with pytest.warns(UserWarning, match='Ignoring params argument'): - bst2 = lgb.Booster(params={'num_leaves': 7}, model_file=model_file) + with pytest.warns(UserWarning, match="Ignoring params argument"): + bst2 = lgb.Booster(params={"num_leaves": 7}, model_file=model_file) assert bst.params == bst2.params # check inference isn't affected by unknown parameter @@ -1518,11 +1382,7 @@ def test_save_load_copy_pickle(): def train_and_predict(init_model=None, return_model=False): X, y = make_synthetic_regression() X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) - params = { - 'objective': 'regression', - 'metric': 'l2', - 'verbose': -1 - } + params = {"objective": "regression", "metric": "l2", "verbose": -1} lgb_train = lgb.Dataset(X_train, y_train) gbm_template = lgb.train(params, lgb_train, num_boost_round=10, init_model=init_model) return gbm_template if return_model else mean_squared_error(y_test, gbm_template.predict(X_test)) @@ -1530,17 +1390,17 @@ def test_save_load_copy_pickle(): gbm = train_and_predict(return_model=True) ret_origin = train_and_predict(init_model=gbm) other_ret = [] - gbm.save_model('lgb.model') - with open('lgb.model') as f: # check all params are logged into model file correctly + gbm.save_model("lgb.model") + with open("lgb.model") as f: # check all params are logged into model file correctly assert f.read().find("[num_iterations: 10]") != -1 - other_ret.append(train_and_predict(init_model='lgb.model')) - gbm_load = lgb.Booster(model_file='lgb.model') + other_ret.append(train_and_predict(init_model="lgb.model")) + gbm_load = lgb.Booster(model_file="lgb.model") other_ret.append(train_and_predict(init_model=gbm_load)) other_ret.append(train_and_predict(init_model=copy.copy(gbm))) other_ret.append(train_and_predict(init_model=copy.deepcopy(gbm))) - with open('lgb.pkl', 'wb') as f: + with open("lgb.pkl", "wb") as f: pickle.dump(gbm, f) - with open('lgb.pkl', 'rb') as f: + with open("lgb.pkl", "rb") as f: gbm_pickle = pickle.load(f) other_ret.append(train_and_predict(init_model=gbm_pickle)) gbm_pickles = pickle.loads(pickle.dumps(gbm)) @@ -1552,19 +1412,15 @@ def test_save_load_copy_pickle(): def test_all_expected_params_are_written_out_to_model_text(tmp_path): X, y = make_synthetic_regression() params = { - 'objective': 'mape', - 'metric': ['l2', 'mae'], - 'seed': 708, - 'data_sample_strategy': 'bagging', - 'sub_row': 0.8234, - 'verbose': -1 + "objective": "mape", + "metric": ["l2", "mae"], + "seed": 708, + "data_sample_strategy": "bagging", + "sub_row": 0.8234, + "verbose": -1, } dtrain = lgb.Dataset(data=X, label=y) - gbm = lgb.train( - params=params, - train_set=dtrain, - num_boost_round=3 - ) + gbm = lgb.train(params=params, train_set=dtrain, num_boost_round=3) model_txt_from_memory = gbm.model_to_string() model_file = tmp_path / "out.model" @@ -1703,27 +1559,12 @@ def test_all_expected_params_are_written_out_to_model_text(tmp_path): # # passed-in force_col_wise / force_row_wise parameters are ignored on CUDA and GPU builds... 
# https://github.com/microsoft/LightGBM/blob/1d7ee63686272bceffd522284127573b511df6be/src/io/config.cpp#L375-L377 - if getenv('TASK', '') == 'cuda': - device_entries = [ - "[force_col_wise: 0]", - "[force_row_wise: 1]", - "[device_type: cuda]", - "[gpu_use_dp: 1]" - ] - elif getenv('TASK', '') == 'gpu': - device_entries = [ - "[force_col_wise: 1]", - "[force_row_wise: 0]", - "[device_type: gpu]", - "[gpu_use_dp: 0]" - ] + if getenv("TASK", "") == "cuda": + device_entries = ["[force_col_wise: 0]", "[force_row_wise: 1]", "[device_type: cuda]", "[gpu_use_dp: 1]"] + elif getenv("TASK", "") == "gpu": + device_entries = ["[force_col_wise: 1]", "[force_row_wise: 0]", "[device_type: gpu]", "[gpu_use_dp: 0]"] else: - device_entries = [ - "[force_col_wise: 0]", - "[force_row_wise: 0]", - "[device_type: cpu]", - "[gpu_use_dp: 0]" - ] + device_entries = ["[force_col_wise: 0]", "[force_row_wise: 0]", "[device_type: cpu]", "[gpu_use_dp: 0]"] all_param_entries += device_entries @@ -1749,48 +1590,50 @@ def test_all_expected_params_are_written_out_to_model_text(tmp_path): def test_pandas_categorical(): pd = pytest.importorskip("pandas") np.random.seed(42) # sometimes there is no difference how cols are treated (cat or not cat) - X = pd.DataFrame({"A": np.random.permutation(['a', 'b', 'c', 'd'] * 75), # str - "B": np.random.permutation([1, 2, 3] * 100), # int - "C": np.random.permutation([0.1, 0.2, -0.1, -0.1, 0.2] * 60), # float - "D": np.random.permutation([True, False] * 150), # bool - "E": pd.Categorical(np.random.permutation(['z', 'y', 'x', 'w', 'v'] * 60), - ordered=True)}) # str and ordered categorical + X = pd.DataFrame( + { + "A": np.random.permutation(["a", "b", "c", "d"] * 75), # str + "B": np.random.permutation([1, 2, 3] * 100), # int + "C": np.random.permutation([0.1, 0.2, -0.1, -0.1, 0.2] * 60), # float + "D": np.random.permutation([True, False] * 150), # bool + "E": pd.Categorical(np.random.permutation(["z", "y", "x", "w", "v"] * 60), ordered=True), + } + ) # str and ordered categorical y = np.random.permutation([0, 1] * 150) - X_test = pd.DataFrame({"A": np.random.permutation(['a', 'b', 'e'] * 20), # unseen category - "B": np.random.permutation([1, 3] * 30), - "C": np.random.permutation([0.1, -0.1, 0.2, 0.2] * 15), - "D": np.random.permutation([True, False] * 30), - "E": pd.Categorical(np.random.permutation(['z', 'y'] * 30), - ordered=True)}) + X_test = pd.DataFrame( + { + "A": np.random.permutation(["a", "b", "e"] * 20), # unseen category + "B": np.random.permutation([1, 3] * 30), + "C": np.random.permutation([0.1, -0.1, 0.2, 0.2] * 15), + "D": np.random.permutation([True, False] * 30), + "E": pd.Categorical(np.random.permutation(["z", "y"] * 30), ordered=True), + } + ) np.random.seed() # reset seed cat_cols_actual = ["A", "B", "C", "D"] cat_cols_to_store = cat_cols_actual + ["E"] - X[cat_cols_actual] = X[cat_cols_actual].astype('category') - X_test[cat_cols_actual] = X_test[cat_cols_actual].astype('category') + X[cat_cols_actual] = X[cat_cols_actual].astype("category") + X_test[cat_cols_actual] = X_test[cat_cols_actual].astype("category") cat_values = [X[col].cat.categories.tolist() for col in cat_cols_to_store] - params = { - 'objective': 'binary', - 'metric': 'binary_logloss', - 'verbose': -1 - } + params = {"objective": "binary", "metric": "binary_logloss", "verbose": -1} lgb_train = lgb.Dataset(X, y) gbm0 = lgb.train(params, lgb_train, num_boost_round=10) pred0 = gbm0.predict(X_test) - assert lgb_train.categorical_feature == 'auto' + assert lgb_train.categorical_feature == "auto" 
lgb_train = lgb.Dataset(X, pd.DataFrame(y)) # also test that label can be one-column pd.DataFrame gbm1 = lgb.train(params, lgb_train, num_boost_round=10, categorical_feature=[0]) pred1 = gbm1.predict(X_test) assert lgb_train.categorical_feature == [0] lgb_train = lgb.Dataset(X, pd.Series(y)) # also test that label can be pd.Series - gbm2 = lgb.train(params, lgb_train, num_boost_round=10, categorical_feature=['A']) + gbm2 = lgb.train(params, lgb_train, num_boost_round=10, categorical_feature=["A"]) pred2 = gbm2.predict(X_test) - assert lgb_train.categorical_feature == ['A'] + assert lgb_train.categorical_feature == ["A"] lgb_train = lgb.Dataset(X, y) - gbm3 = lgb.train(params, lgb_train, num_boost_round=10, categorical_feature=['A', 'B', 'C', 'D']) + gbm3 = lgb.train(params, lgb_train, num_boost_round=10, categorical_feature=["A", "B", "C", "D"]) pred3 = gbm3.predict(X_test) - assert lgb_train.categorical_feature == ['A', 'B', 'C', 'D'] - gbm3.save_model('categorical.model') - gbm4 = lgb.Booster(model_file='categorical.model') + assert lgb_train.categorical_feature == ["A", "B", "C", "D"] + gbm3.save_model("categorical.model") + gbm4 = lgb.Booster(model_file="categorical.model") pred4 = gbm4.predict(X_test) model_str = gbm4.model_to_string() gbm4.model_from_string(model_str) @@ -1798,9 +1641,9 @@ def test_pandas_categorical(): gbm5 = lgb.Booster(model_str=model_str) pred6 = gbm5.predict(X_test) lgb_train = lgb.Dataset(X, y) - gbm6 = lgb.train(params, lgb_train, num_boost_round=10, categorical_feature=['A', 'B', 'C', 'D', 'E']) + gbm6 = lgb.train(params, lgb_train, num_boost_round=10, categorical_feature=["A", "B", "C", "D", "E"]) pred7 = gbm6.predict(X_test) - assert lgb_train.categorical_feature == ['A', 'B', 'C', 'D', 'E'] + assert lgb_train.categorical_feature == ["A", "B", "C", "D", "E"] lgb_train = lgb.Dataset(X, y) gbm7 = lgb.train(params, lgb_train, num_boost_round=10, categorical_feature=[]) pred8 = gbm7.predict(X_test) @@ -1830,23 +1673,28 @@ def test_pandas_categorical(): def test_pandas_sparse(): pd = pytest.importorskip("pandas") - X = pd.DataFrame({"A": pd.arrays.SparseArray(np.random.permutation([0, 1, 2] * 100)), - "B": pd.arrays.SparseArray(np.random.permutation([0.0, 0.1, 0.2, -0.1, 0.2] * 60)), - "C": pd.arrays.SparseArray(np.random.permutation([True, False] * 150))}) + X = pd.DataFrame( + { + "A": pd.arrays.SparseArray(np.random.permutation([0, 1, 2] * 100)), + "B": pd.arrays.SparseArray(np.random.permutation([0.0, 0.1, 0.2, -0.1, 0.2] * 60)), + "C": pd.arrays.SparseArray(np.random.permutation([True, False] * 150)), + } + ) y = pd.Series(pd.arrays.SparseArray(np.random.permutation([0, 1] * 150))) - X_test = pd.DataFrame({"A": pd.arrays.SparseArray(np.random.permutation([0, 2] * 30)), - "B": pd.arrays.SparseArray(np.random.permutation([0.0, 0.1, 0.2, -0.1] * 15)), - "C": pd.arrays.SparseArray(np.random.permutation([True, False] * 30))}) + X_test = pd.DataFrame( + { + "A": pd.arrays.SparseArray(np.random.permutation([0, 2] * 30)), + "B": pd.arrays.SparseArray(np.random.permutation([0.0, 0.1, 0.2, -0.1] * 15)), + "C": pd.arrays.SparseArray(np.random.permutation([True, False] * 30)), + } + ) for dtype in pd.concat([X.dtypes, X_test.dtypes, pd.Series(y.dtypes)]): assert pd.api.types.is_sparse(dtype) - params = { - 'objective': 'binary', - 'verbose': -1 - } + params = {"objective": "binary", "verbose": -1} lgb_train = lgb.Dataset(X, y) gbm = lgb.train(params, lgb_train, num_boost_round=10) pred_sparse = gbm.predict(X_test, raw_score=True) - if hasattr(X_test, 'sparse'): + if 
hasattr(X_test, "sparse"): pred_dense = gbm.predict(X_test.sparse.to_dense(), raw_score=True) else: pred_dense = gbm.predict(X_test.to_dense(), raw_score=True) @@ -1860,48 +1708,48 @@ def test_reference_chain(): # take subsets and train tmp_dat_train = tmp_dat.subset(np.arange(80)) tmp_dat_val = tmp_dat.subset(np.arange(80, 100)).subset(np.arange(18)) - params = {'objective': 'regression_l2', 'metric': 'rmse'} + params = {"objective": "regression_l2", "metric": "rmse"} evals_result = {} lgb.train( params, tmp_dat_train, num_boost_round=20, valid_sets=[tmp_dat_train, tmp_dat_val], - callbacks=[lgb.record_evaluation(evals_result)] + callbacks=[lgb.record_evaluation(evals_result)], ) - assert len(evals_result['training']['rmse']) == 20 - assert len(evals_result['valid_1']['rmse']) == 20 + assert len(evals_result["training"]["rmse"]) == 20 + assert len(evals_result["valid_1"]["rmse"]) == 20 def test_contribs(): X, y = load_breast_cancer(return_X_y=True) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) params = { - 'objective': 'binary', - 'metric': 'binary_logloss', - 'verbose': -1, + "objective": "binary", + "metric": "binary_logloss", + "verbose": -1, } lgb_train = lgb.Dataset(X_train, y_train) gbm = lgb.train(params, lgb_train, num_boost_round=20) - assert (np.linalg.norm(gbm.predict(X_test, raw_score=True) - - np.sum(gbm.predict(X_test, pred_contrib=True), axis=1)) < 1e-4) + assert ( + np.linalg.norm(gbm.predict(X_test, raw_score=True) - np.sum(gbm.predict(X_test, pred_contrib=True), axis=1)) + < 1e-4 + ) def test_contribs_sparse(): n_features = 20 n_samples = 100 # generate CSR sparse dataset - X, y = make_multilabel_classification(n_samples=n_samples, - sparse=True, - n_features=n_features, - n_classes=1, - n_labels=2) + X, y = make_multilabel_classification( + n_samples=n_samples, sparse=True, n_features=n_features, n_classes=1, n_labels=2 + ) y = y.flatten() X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) params = { - 'objective': 'binary', - 'verbose': -1, + "objective": "binary", + "verbose": -1, } lgb_train = lgb.Dataset(X_train, y_train) gbm = lgb.train(params, lgb_train, num_boost_round=20) @@ -1910,18 +1758,17 @@ def test_contribs_sparse(): # convert data to dense and get back same contribs contribs_dense = gbm.predict(X_test.toarray(), pred_contrib=True) # validate the values are the same - if platform.machine() == 'aarch64': + if platform.machine() == "aarch64": np.testing.assert_allclose(contribs_csr.toarray(), contribs_dense, rtol=1, atol=1e-12) else: np.testing.assert_allclose(contribs_csr.toarray(), contribs_dense) - assert (np.linalg.norm(gbm.predict(X_test, raw_score=True) - - np.sum(contribs_dense, axis=1)) < 1e-4) + assert np.linalg.norm(gbm.predict(X_test, raw_score=True) - np.sum(contribs_dense, axis=1)) < 1e-4 # validate using CSC matrix X_test_csc = X_test.tocsc() contribs_csc = gbm.predict(X_test_csc, pred_contrib=True) assert isspmatrix_csc(contribs_csc) # validate the values are the same - if platform.machine() == 'aarch64': + if platform.machine() == "aarch64": np.testing.assert_allclose(contribs_csc.toarray(), contribs_dense, rtol=1, atol=1e-12) else: np.testing.assert_allclose(contribs_csc.toarray(), contribs_dense) @@ -1932,17 +1779,15 @@ def test_contribs_sparse_multiclass(): n_samples = 100 n_labels = 4 # generate CSR sparse dataset - X, y = make_multilabel_classification(n_samples=n_samples, - sparse=True, - n_features=n_features, - n_classes=1, - n_labels=n_labels) + X, y = 
make_multilabel_classification( + n_samples=n_samples, sparse=True, n_features=n_features, n_classes=1, n_labels=n_labels + ) y = y.flatten() X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) params = { - 'objective': 'multiclass', - 'num_class': n_labels, - 'verbose': -1, + "objective": "multiclass", + "num_class": n_labels, + "verbose": -1, } lgb_train = lgb.Dataset(X_train, y_train) gbm = lgb.train(params, lgb_train, num_boost_round=20) @@ -1954,9 +1799,10 @@ def test_contribs_sparse_multiclass(): contribs_dense = gbm.predict(X_test.toarray(), pred_contrib=True) # validate the values are the same contribs_csr_array = np.swapaxes(np.array([sparse_array.toarray() for sparse_array in contribs_csr]), 0, 1) - contribs_csr_arr_re = contribs_csr_array.reshape((contribs_csr_array.shape[0], - contribs_csr_array.shape[1] * contribs_csr_array.shape[2])) - if platform.machine() == 'aarch64': + contribs_csr_arr_re = contribs_csr_array.reshape( + (contribs_csr_array.shape[0], contribs_csr_array.shape[1] * contribs_csr_array.shape[2]) + ) + if platform.machine() == "aarch64": np.testing.assert_allclose(contribs_csr_arr_re, contribs_dense, rtol=1, atol=1e-12) else: np.testing.assert_allclose(contribs_csr_arr_re, contribs_dense) @@ -1970,19 +1816,18 @@ def test_contribs_sparse_multiclass(): assert isspmatrix_csc(perclass_contribs_csc) # validate the values are the same contribs_csc_array = np.swapaxes(np.array([sparse_array.toarray() for sparse_array in contribs_csc]), 0, 1) - contribs_csc_array = contribs_csc_array.reshape((contribs_csc_array.shape[0], - contribs_csc_array.shape[1] * contribs_csc_array.shape[2])) - if platform.machine() == 'aarch64': + contribs_csc_array = contribs_csc_array.reshape( + (contribs_csc_array.shape[0], contribs_csc_array.shape[1] * contribs_csc_array.shape[2]) + ) + if platform.machine() == "aarch64": np.testing.assert_allclose(contribs_csc_array, contribs_dense, rtol=1, atol=1e-12) else: np.testing.assert_allclose(contribs_csc_array, contribs_dense) -@pytest.mark.skipif(psutil.virtual_memory().available / 1024 / 1024 / 1024 < 3, reason='not enough RAM') +@pytest.mark.skipif(psutil.virtual_memory().available / 1024 / 1024 / 1024 < 3, reason="not enough RAM") def test_int32_max_sparse_contribs(): - params = { - 'objective': 'binary' - } + params = {"objective": "binary"} train_features = np.random.rand(100, 1000) train_targets = [0] * 50 + [1] * 50 lgb_train = lgb.Dataset(train_features, train_targets) @@ -2005,9 +1850,9 @@ def test_sliced_data(): def train_and_get_predictions(features, labels): dataset = lgb.Dataset(features, label=labels) lgb_params = { - 'application': 'binary', - 'verbose': -1, - 'min_data': 5, + "application": "binary", + "verbose": -1, + "min_data": 5, } gbm = lgb.train( params=lgb_params, @@ -2019,8 +1864,9 @@ def test_sliced_data(): num_samples = 100 features = np.random.rand(num_samples, 5) positive_samples = int(num_samples * 0.25) - labels = np.append(np.ones(positive_samples, dtype=np.float32), - np.zeros(num_samples - positive_samples, dtype=np.float32)) + labels = np.append( + np.ones(positive_samples, dtype=np.float32), np.zeros(num_samples - positive_samples, dtype=np.float32) + ) # test sliced labels origin_pred = train_and_get_predictions(features, labels) stacked_labels = np.column_stack((labels, np.ones(num_samples, dtype=np.float32))) @@ -2058,34 +1904,19 @@ def test_init_with_subset(): subset_data_1 = lgb_train.subset(subset_index_1) subset_index_2 = np.random.choice(np.arange(50), 20, 
replace=False) subset_data_2 = lgb_train.subset(subset_index_2) - params = { - 'objective': 'binary', - 'verbose': -1 - } - init_gbm = lgb.train(params=params, - train_set=subset_data_1, - num_boost_round=10, - keep_training_booster=True) - lgb.train(params=params, - train_set=subset_data_2, - num_boost_round=10, - init_model=init_gbm) + params = {"objective": "binary", "verbose": -1} + init_gbm = lgb.train(params=params, train_set=subset_data_1, num_boost_round=10, keep_training_booster=True) + lgb.train(params=params, train_set=subset_data_2, num_boost_round=10, init_model=init_gbm) assert lgb_train.get_data().shape[0] == 50 assert subset_data_1.get_data().shape[0] == 30 assert subset_data_2.get_data().shape[0] == 20 lgb_train.save_binary("lgb_train_data.bin") - lgb_train_from_file = lgb.Dataset('lgb_train_data.bin', free_raw_data=False) + lgb_train_from_file = lgb.Dataset("lgb_train_data.bin", free_raw_data=False) subset_data_3 = lgb_train_from_file.subset(subset_index_1) subset_data_4 = lgb_train_from_file.subset(subset_index_2) - init_gbm_2 = lgb.train(params=params, - train_set=subset_data_3, - num_boost_round=10, - keep_training_booster=True) + init_gbm_2 = lgb.train(params=params, train_set=subset_data_3, num_boost_round=10, keep_training_booster=True) with np.testing.assert_raises_regex(lgb.basic.LightGBMError, "Unknown format of training data"): - lgb.train(params=params, - train_set=subset_data_4, - num_boost_round=10, - init_model=init_gbm_2) + lgb.train(params=params, train_set=subset_data_4, num_boost_round=10, init_model=init_gbm_2) assert lgb_train_from_file.get_data() == "lgb_train_data.bin" assert subset_data_3.get_data() == "lgb_train_data.bin" assert subset_data_4.get_data() == "lgb_train_data.bin" @@ -2109,26 +1940,31 @@ def generate_trainset_for_monotone_constraints_tests(x3_to_category=True): x2_negatively_correlated_with_y = np.random.random(size=number_of_dpoints) x3_negatively_correlated_with_y = np.random.random(size=number_of_dpoints) x = np.column_stack( - (x1_positively_correlated_with_y, + ( + x1_positively_correlated_with_y, x2_negatively_correlated_with_y, - categorize(x3_negatively_correlated_with_y) if x3_to_category else x3_negatively_correlated_with_y)) + categorize(x3_negatively_correlated_with_y) if x3_to_category else x3_negatively_correlated_with_y, + ) + ) zs = np.random.normal(loc=0.0, scale=0.01, size=number_of_dpoints) - scales = 10. 
* (np.random.random(6) + 0.5) - y = (scales[0] * x1_positively_correlated_with_y - + np.sin(scales[1] * np.pi * x1_positively_correlated_with_y) - - scales[2] * x2_negatively_correlated_with_y - - np.cos(scales[3] * np.pi * x2_negatively_correlated_with_y) - - scales[4] * x3_negatively_correlated_with_y - - np.cos(scales[5] * np.pi * x3_negatively_correlated_with_y) - + zs) + scales = 10.0 * (np.random.random(6) + 0.5) + y = ( + scales[0] * x1_positively_correlated_with_y + + np.sin(scales[1] * np.pi * x1_positively_correlated_with_y) + - scales[2] * x2_negatively_correlated_with_y + - np.cos(scales[3] * np.pi * x2_negatively_correlated_with_y) + - scales[4] * x3_negatively_correlated_with_y + - np.cos(scales[5] * np.pi * x3_negatively_correlated_with_y) + + zs + ) categorical_features = [] if x3_to_category: categorical_features = [2] return lgb.Dataset(x, label=y, categorical_feature=categorical_features, free_raw_data=False) -@pytest.mark.skipif(getenv('TASK', '') == 'cuda', reason='Monotone constraints are not yet supported by CUDA version') +@pytest.mark.skipif(getenv("TASK", "") == "cuda", reason="Monotone constraints are not yet supported by CUDA version") @pytest.mark.parametrize("test_with_categorical_variable", [True, False]) def test_monotone_constraints(test_with_categorical_variable): def is_increasing(y): @@ -2187,18 +2023,16 @@ def test_monotone_constraints(test_with_categorical_variable): return n > 1 tree_features = parse_tree_features(gbm) - has_interaction_flag = np.array( - [has_interaction(treef) for treef in tree_features] - ) + has_interaction_flag = np.array([has_interaction(treef) for treef in tree_features]) return not has_interaction_flag.any() - trainset = generate_trainset_for_monotone_constraints_tests( - test_with_categorical_variable - ) + trainset = generate_trainset_for_monotone_constraints_tests(test_with_categorical_variable) for test_with_interaction_constraints in [True, False]: - error_msg = ("Model not correctly constrained " - f"(test_with_interaction_constraints={test_with_interaction_constraints})") + error_msg = ( + "Model not correctly constrained " + f"(test_with_interaction_constraints={test_with_interaction_constraints})" + ) for monotone_constraints_method in ["basic", "intermediate", "advanced"]: params = { "min_data": 20, @@ -2210,15 +2044,13 @@ def test_monotone_constraints(test_with_categorical_variable): if test_with_interaction_constraints: params["interaction_constraints"] = [[0], [1], [2]] constrained_model = lgb.train(params, trainset) - assert is_correctly_constrained( - constrained_model, test_with_categorical_variable - ), error_msg + assert is_correctly_constrained(constrained_model, test_with_categorical_variable), error_msg if test_with_interaction_constraints: feature_sets = [["Column_0"], ["Column_1"], "Column_2"] assert are_interactions_enforced(constrained_model, feature_sets) -@pytest.mark.skipif(getenv('TASK', '') == 'cuda', reason='Monotone constraints are not yet supported by CUDA version') +@pytest.mark.skipif(getenv("TASK", "") == "cuda", reason="Monotone constraints are not yet supported by CUDA version") def test_monotone_penalty(): def are_first_splits_non_monotone(tree, n, monotone_constraints): if n <= 0: @@ -2227,16 +2059,18 @@ def test_monotone_penalty(): return True if monotone_constraints[tree["split_feature"]] != 0: return False - return (are_first_splits_non_monotone(tree["left_child"], n - 1, monotone_constraints) - and are_first_splits_non_monotone(tree["right_child"], n - 1, monotone_constraints)) + 
return are_first_splits_non_monotone( + tree["left_child"], n - 1, monotone_constraints + ) and are_first_splits_non_monotone(tree["right_child"], n - 1, monotone_constraints) def are_there_monotone_splits(tree, monotone_constraints): if "leaf_value" in tree: return False if monotone_constraints[tree["split_feature"]] != 0: return True - return (are_there_monotone_splits(tree["left_child"], monotone_constraints) - or are_there_monotone_splits(tree["right_child"], monotone_constraints)) + return are_there_monotone_splits(tree["left_child"], monotone_constraints) or are_there_monotone_splits( + tree["right_child"], monotone_constraints + ) max_depth = 5 monotone_constraints = [1, -1, 0] @@ -2244,21 +2078,22 @@ def test_monotone_penalty(): trainset = generate_trainset_for_monotone_constraints_tests(x3_to_category=False) for monotone_constraints_method in ["basic", "intermediate", "advanced"]: params = { - 'max_depth': max_depth, - 'monotone_constraints': monotone_constraints, - 'monotone_penalty': penalization_parameter, + "max_depth": max_depth, + "monotone_constraints": monotone_constraints, + "monotone_penalty": penalization_parameter, "monotone_constraints_method": monotone_constraints_method, } constrained_model = lgb.train(params, trainset, 10) dumped_model = constrained_model.dump_model()["tree_info"] for tree in dumped_model: - assert are_first_splits_non_monotone(tree["tree_structure"], int(penalization_parameter), - monotone_constraints) + assert are_first_splits_non_monotone( + tree["tree_structure"], int(penalization_parameter), monotone_constraints + ) assert are_there_monotone_splits(tree["tree_structure"], monotone_constraints) # test if a penalty as high as the depth indeed prohibits all monotone splits -@pytest.mark.skipif(getenv('TASK', '') == 'cuda', reason='Monotone constraints are not yet supported by CUDA version') +@pytest.mark.skipif(getenv("TASK", "") == "cuda", reason="Monotone constraints are not yet supported by CUDA version") def test_monotone_penalty_max(): max_depth = 5 monotone_constraints = [1, -1, 0] @@ -2269,8 +2104,8 @@ def test_monotone_penalty_max(): x3_negatively_correlated_with_y = x[:, 2] trainset_unconstrained_model = lgb.Dataset(x3_negatively_correlated_with_y.reshape(-1, 1), label=y) params_constrained_model = { - 'monotone_constraints': monotone_constraints, - 'monotone_penalty': penalization_parameter, + "monotone_constraints": monotone_constraints, + "monotone_penalty": penalization_parameter, "max_depth": max_depth, "gpu_use_dp": True, } @@ -2280,9 +2115,7 @@ def test_monotone_penalty_max(): } unconstrained_model = lgb.train(params_unconstrained_model, trainset_unconstrained_model, 10) - unconstrained_model_predictions = unconstrained_model.predict( - x3_negatively_correlated_with_y.reshape(-1, 1) - ) + unconstrained_model_predictions = unconstrained_model.predict(x3_negatively_correlated_with_y.reshape(-1, 1)) for monotone_constraints_method in ["basic", "intermediate", "advanced"]: params_constrained_model["monotone_constraints_method"] = monotone_constraints_method @@ -2300,18 +2133,18 @@ def test_max_bin_by_feature(): X = np.concatenate([col1, col2], axis=1) y = np.arange(0, 100) params = { - 'objective': 'regression_l2', - 'verbose': -1, - 'num_leaves': 100, - 'min_data_in_leaf': 1, - 'min_sum_hessian_in_leaf': 0, - 'min_data_in_bin': 1, - 'max_bin_by_feature': [100, 2] + "objective": "regression_l2", + "verbose": -1, + "num_leaves": 100, + "min_data_in_leaf": 1, + "min_sum_hessian_in_leaf": 0, + "min_data_in_bin": 1, + 
"max_bin_by_feature": [100, 2], } lgb_data = lgb.Dataset(X, label=y) est = lgb.train(params, lgb_data, num_boost_round=1) assert len(np.unique(est.predict(X))) == 100 - params['max_bin_by_feature'] = [2, 100] + params["max_bin_by_feature"] = [2, 100] lgb_data = lgb.Dataset(X, label=y) est = lgb.train(params, lgb_data, num_boost_round=1) assert len(np.unique(est.predict(X))) == 3 @@ -2323,15 +2156,11 @@ def test_small_max_bin(): x = np.ones((100, 1)) x[:30, 0] = -1 x[60:, 0] = 2 - params = {'objective': 'binary', - 'seed': 0, - 'min_data_in_leaf': 1, - 'verbose': -1, - 'max_bin': 2} + params = {"objective": "binary", "seed": 0, "min_data_in_leaf": 1, "verbose": -1, "max_bin": 2} lgb_x = lgb.Dataset(x, label=y) lgb.train(params, lgb_x, num_boost_round=5) x[0, 0] = np.nan - params['max_bin'] = 3 + params["max_bin"] = 3 lgb_x = lgb.Dataset(x, label=y) lgb.train(params, lgb_x, num_boost_round=5) np.random.seed() # reset seed @@ -2340,12 +2169,7 @@ def test_small_max_bin(): def test_refit(): X, y = load_breast_cancer(return_X_y=True) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) - params = { - 'objective': 'binary', - 'metric': 'binary_logloss', - 'verbose': -1, - 'min_data': 10 - } + params = {"objective": "binary", "metric": "binary_logloss", "verbose": -1, "min_data": 10} lgb_train = lgb.Dataset(X_train, y_train) gbm = lgb.train(params, lgb_train, num_boost_round=20) err_pred = log_loss(y_test, gbm.predict(X_test)) @@ -2358,18 +2182,14 @@ def test_refit_dataset_params(): # check refit accepts dataset_params X, y = load_breast_cancer(return_X_y=True) lgb_train = lgb.Dataset(X, y, init_score=np.zeros(y.size)) - train_params = { - 'objective': 'binary', - 'verbose': -1, - 'seed': 123 - } + train_params = {"objective": "binary", "verbose": -1, "seed": 123} gbm = lgb.train(train_params, lgb_train, num_boost_round=10) non_weight_err_pred = log_loss(y, gbm.predict(X)) refit_weight = np.random.rand(y.shape[0]) dataset_params = { - 'max_bin': 260, - 'min_data_in_bin': 5, - 'data_random_seed': 123, + "max_bin": 260, + "min_data_in_bin": 5, + "data_random_seed": 123, } new_gbm = gbm.refit( data=X, @@ -2388,18 +2208,18 @@ def test_refit_dataset_params(): np.testing.assert_allclose(stored_weights, refit_weight) -@pytest.mark.parametrize('boosting_type', ['rf', 'dart']) +@pytest.mark.parametrize("boosting_type", ["rf", "dart"]) def test_mape_for_specific_boosting_types(boosting_type): X, y = make_synthetic_regression() y = abs(y) params = { - 'boosting_type': boosting_type, - 'objective': 'mape', - 'verbose': -1, - 'bagging_freq': 1, - 'bagging_fraction': 0.8, - 'feature_fraction': 0.8, - 'boost_from_average': True + "boosting_type": boosting_type, + "objective": "mape", + "verbose": -1, + "bagging_freq": 1, + "bagging_fraction": 0.8, + "feature_fraction": 0.8, + "boost_from_average": True, } lgb_train = lgb.Dataset(X, y) gbm = lgb.train(params, lgb_train, num_boost_round=20) @@ -2414,14 +2234,14 @@ def check_constant_features(y_true, expected_pred, more_params): X_train = np.ones((len(y_true), 1)) y_train = np.array(y_true) params = { - 'objective': 'regression', - 'num_class': 1, - 'verbose': -1, - 'min_data': 1, - 'num_leaves': 2, - 'learning_rate': 1, - 'min_data_in_bin': 1, - 'boost_from_average': True + "objective": "regression", + "num_class": 1, + "verbose": -1, + "min_data": 1, + "num_leaves": 2, + "learning_rate": 1, + "min_data_in_bin": 1, + "boost_from_average": True, } params.update(more_params) lgb_train = lgb.Dataset(X_train, y_train, 
params=params) @@ -2431,36 +2251,26 @@ def check_constant_features(y_true, expected_pred, more_params): def test_constant_features_regression(): - params = { - 'objective': 'regression' - } + params = {"objective": "regression"} check_constant_features([0.0, 10.0, 0.0, 10.0], 5.0, params) check_constant_features([0.0, 1.0, 2.0, 3.0], 1.5, params) check_constant_features([-1.0, 1.0, -2.0, 2.0], 0.0, params) def test_constant_features_binary(): - params = { - 'objective': 'binary' - } + params = {"objective": "binary"} check_constant_features([0.0, 10.0, 0.0, 10.0], 0.5, params) check_constant_features([0.0, 1.0, 2.0, 3.0], 0.75, params) def test_constant_features_multiclass(): - params = { - 'objective': 'multiclass', - 'num_class': 3 - } + params = {"objective": "multiclass", "num_class": 3} check_constant_features([0.0, 1.0, 2.0, 0.0], [0.5, 0.25, 0.25], params) check_constant_features([0.0, 1.0, 2.0, 1.0], [0.25, 0.5, 0.25], params) def test_constant_features_multiclassova(): - params = { - 'objective': 'multiclassova', - 'num_class': 3 - } + params = {"objective": "multiclassova", "num_class": 3} check_constant_features([0.0, 1.0, 2.0, 0.0], [0.5, 0.25, 0.25], params) check_constant_features([0.0, 1.0, 2.0, 1.0], [0.25, 0.5, 0.25], params) @@ -2475,15 +2285,15 @@ def test_fpreproc(): dtest.label[-5:] = 3 dtrain = lgb.Dataset(train_data, dtrain.label) dtest = lgb.Dataset(test_data, dtest.label, reference=dtrain) - params['num_class'] = 4 + params["num_class"] = 4 return dtrain, dtest, params X, y = load_iris(return_X_y=True) dataset = lgb.Dataset(X, y, free_raw_data=False) - params = {'objective': 'multiclass', 'num_class': 3, 'verbose': -1} + params = {"objective": "multiclass", "num_class": 3, "verbose": -1} results = lgb.cv(params, dataset, num_boost_round=10, fpreproc=preprocess_data) - assert 'valid multi_logloss-mean' in results - assert len(results['valid multi_logloss-mean']) == 10 + assert "valid multi_logloss-mean" in results + assert len(results["valid multi_logloss-mean"]) == 10 def test_metrics(): @@ -2493,21 +2303,27 @@ def test_metrics(): lgb_valid = lgb.Dataset(X_test, y_test, reference=lgb_train) evals_result = {} - params_dummy_obj_verbose = {'verbose': -1, 'objective': dummy_obj} - params_obj_verbose = {'objective': 'binary', 'verbose': -1} - params_obj_metric_log_verbose = {'objective': 'binary', 'metric': 'binary_logloss', 'verbose': -1} - params_obj_metric_err_verbose = {'objective': 'binary', 'metric': 'binary_error', 'verbose': -1} - params_obj_metric_inv_verbose = {'objective': 'binary', 'metric': 'invalid_metric', 'verbose': -1} - params_obj_metric_quant_verbose = {'objective': 'regression', 'metric': 'quantile', 'verbose': 2} - params_obj_metric_multi_verbose = {'objective': 'binary', - 'metric': ['binary_logloss', 'binary_error'], - 'verbose': -1} - params_obj_metric_none_verbose = {'objective': 'binary', 'metric': 'None', 'verbose': -1} - params_dummy_obj_metric_log_verbose = {'objective': dummy_obj, 'metric': 'binary_logloss', 'verbose': -1} - params_dummy_obj_metric_err_verbose = {'objective': dummy_obj, 'metric': 'binary_error', 'verbose': -1} - params_dummy_obj_metric_inv_verbose = {'objective': dummy_obj, 'metric_types': 'invalid_metric', 'verbose': -1} - params_dummy_obj_metric_multi_verbose = {'objective': dummy_obj, 'metric': ['binary_logloss', 'binary_error'], 'verbose': -1} - params_dummy_obj_metric_none_verbose = {'objective': dummy_obj, 'metric': 'None', 'verbose': -1} + params_dummy_obj_verbose = {"verbose": -1, "objective": dummy_obj} + 
params_obj_verbose = {"objective": "binary", "verbose": -1} + params_obj_metric_log_verbose = {"objective": "binary", "metric": "binary_logloss", "verbose": -1} + params_obj_metric_err_verbose = {"objective": "binary", "metric": "binary_error", "verbose": -1} + params_obj_metric_inv_verbose = {"objective": "binary", "metric": "invalid_metric", "verbose": -1} + params_obj_metric_quant_verbose = {"objective": "regression", "metric": "quantile", "verbose": 2} + params_obj_metric_multi_verbose = { + "objective": "binary", + "metric": ["binary_logloss", "binary_error"], + "verbose": -1, + } + params_obj_metric_none_verbose = {"objective": "binary", "metric": "None", "verbose": -1} + params_dummy_obj_metric_log_verbose = {"objective": dummy_obj, "metric": "binary_logloss", "verbose": -1} + params_dummy_obj_metric_err_verbose = {"objective": dummy_obj, "metric": "binary_error", "verbose": -1} + params_dummy_obj_metric_inv_verbose = {"objective": dummy_obj, "metric_types": "invalid_metric", "verbose": -1} + params_dummy_obj_metric_multi_verbose = { + "objective": dummy_obj, + "metric": ["binary_logloss", "binary_error"], + "verbose": -1, + } + params_dummy_obj_metric_none_verbose = {"objective": dummy_obj, "metric": "None", "verbose": -1} def get_cv_result(params=params_obj_verbose, **kwargs): return lgb.cv(params, lgb_train, num_boost_round=2, **kwargs) @@ -2519,58 +2335,58 @@ def test_metrics(): num_boost_round=2, valid_sets=[lgb_valid], callbacks=[lgb.record_evaluation(evals_result)], - **kwargs + **kwargs, ) # no custom objective, no feval # default metric res = get_cv_result() assert len(res) == 2 - assert 'valid binary_logloss-mean' in res + assert "valid binary_logloss-mean" in res # non-default metric in params res = get_cv_result(params=params_obj_metric_err_verbose) assert len(res) == 2 - assert 'valid binary_error-mean' in res + assert "valid binary_error-mean" in res # default metric in args - res = get_cv_result(metrics='binary_logloss') + res = get_cv_result(metrics="binary_logloss") assert len(res) == 2 - assert 'valid binary_logloss-mean' in res + assert "valid binary_logloss-mean" in res # non-default metric in args - res = get_cv_result(metrics='binary_error') + res = get_cv_result(metrics="binary_error") assert len(res) == 2 - assert 'valid binary_error-mean' in res + assert "valid binary_error-mean" in res # metric in args overwrites one in params - res = get_cv_result(params=params_obj_metric_inv_verbose, metrics='binary_error') + res = get_cv_result(params=params_obj_metric_inv_verbose, metrics="binary_error") assert len(res) == 2 - assert 'valid binary_error-mean' in res + assert "valid binary_error-mean" in res # metric in args overwrites one in params res = get_cv_result(params=params_obj_metric_quant_verbose) assert len(res) == 2 - assert 'valid quantile-mean' in res + assert "valid quantile-mean" in res # multiple metrics in params res = get_cv_result(params=params_obj_metric_multi_verbose) assert len(res) == 4 - assert 'valid binary_logloss-mean' in res - assert 'valid binary_error-mean' in res + assert "valid binary_logloss-mean" in res + assert "valid binary_error-mean" in res # multiple metrics in args - res = get_cv_result(metrics=['binary_logloss', 'binary_error']) + res = get_cv_result(metrics=["binary_logloss", "binary_error"]) assert len(res) == 4 - assert 'valid binary_logloss-mean' in res - assert 'valid binary_error-mean' in res + assert "valid binary_logloss-mean" in res + assert "valid binary_error-mean" in res # remove default metric by 'None' in list - 
res = get_cv_result(metrics=['None']) + res = get_cv_result(metrics=["None"]) assert len(res) == 0 # remove default metric by 'None' aliases - for na_alias in ('None', 'na', 'null', 'custom'): + for na_alias in ("None", "na", "null", "custom"): res = get_cv_result(metrics=na_alias) assert len(res) == 0 @@ -2582,152 +2398,150 @@ def test_metrics(): # metric in params res = get_cv_result(params=params_dummy_obj_metric_err_verbose) assert len(res) == 2 - assert 'valid binary_error-mean' in res + assert "valid binary_error-mean" in res # metric in args - res = get_cv_result(params=params_dummy_obj_verbose, metrics='binary_error') + res = get_cv_result(params=params_dummy_obj_verbose, metrics="binary_error") assert len(res) == 2 - assert 'valid binary_error-mean' in res + assert "valid binary_error-mean" in res # metric in args overwrites its' alias in params - res = get_cv_result(params=params_dummy_obj_metric_inv_verbose, metrics='binary_error') + res = get_cv_result(params=params_dummy_obj_metric_inv_verbose, metrics="binary_error") assert len(res) == 2 - assert 'valid binary_error-mean' in res + assert "valid binary_error-mean" in res # multiple metrics in params res = get_cv_result(params=params_dummy_obj_metric_multi_verbose) assert len(res) == 4 - assert 'valid binary_logloss-mean' in res - assert 'valid binary_error-mean' in res + assert "valid binary_logloss-mean" in res + assert "valid binary_error-mean" in res # multiple metrics in args - res = get_cv_result(params=params_dummy_obj_verbose, - metrics=['binary_logloss', 'binary_error']) + res = get_cv_result(params=params_dummy_obj_verbose, metrics=["binary_logloss", "binary_error"]) assert len(res) == 4 - assert 'valid binary_logloss-mean' in res - assert 'valid binary_error-mean' in res + assert "valid binary_logloss-mean" in res + assert "valid binary_error-mean" in res # no custom objective, feval # default metric with custom one res = get_cv_result(feval=constant_metric) assert len(res) == 4 - assert 'valid binary_logloss-mean' in res - assert 'valid error-mean' in res + assert "valid binary_logloss-mean" in res + assert "valid error-mean" in res # non-default metric in params with custom one res = get_cv_result(params=params_obj_metric_err_verbose, feval=constant_metric) assert len(res) == 4 - assert 'valid binary_error-mean' in res - assert 'valid error-mean' in res + assert "valid binary_error-mean" in res + assert "valid error-mean" in res # default metric in args with custom one - res = get_cv_result(metrics='binary_logloss', feval=constant_metric) + res = get_cv_result(metrics="binary_logloss", feval=constant_metric) assert len(res) == 4 - assert 'valid binary_logloss-mean' in res - assert 'valid error-mean' in res + assert "valid binary_logloss-mean" in res + assert "valid error-mean" in res # non-default metric in args with custom one - res = get_cv_result(metrics='binary_error', feval=constant_metric) + res = get_cv_result(metrics="binary_error", feval=constant_metric) assert len(res) == 4 - assert 'valid binary_error-mean' in res - assert 'valid error-mean' in res + assert "valid binary_error-mean" in res + assert "valid error-mean" in res # metric in args overwrites one in params, custom one is evaluated too - res = get_cv_result(params=params_obj_metric_inv_verbose, metrics='binary_error', feval=constant_metric) + res = get_cv_result(params=params_obj_metric_inv_verbose, metrics="binary_error", feval=constant_metric) assert len(res) == 4 - assert 'valid binary_error-mean' in res - assert 'valid error-mean' in res + 
assert "valid binary_error-mean" in res + assert "valid error-mean" in res # multiple metrics in params with custom one res = get_cv_result(params=params_obj_metric_multi_verbose, feval=constant_metric) assert len(res) == 6 - assert 'valid binary_logloss-mean' in res - assert 'valid binary_error-mean' in res - assert 'valid error-mean' in res + assert "valid binary_logloss-mean" in res + assert "valid binary_error-mean" in res + assert "valid error-mean" in res # multiple metrics in args with custom one - res = get_cv_result(metrics=['binary_logloss', 'binary_error'], feval=constant_metric) + res = get_cv_result(metrics=["binary_logloss", "binary_error"], feval=constant_metric) assert len(res) == 6 - assert 'valid binary_logloss-mean' in res - assert 'valid binary_error-mean' in res - assert 'valid error-mean' in res + assert "valid binary_logloss-mean" in res + assert "valid binary_error-mean" in res + assert "valid error-mean" in res # custom metric is evaluated despite 'None' is passed - res = get_cv_result(metrics=['None'], feval=constant_metric) + res = get_cv_result(metrics=["None"], feval=constant_metric) assert len(res) == 2 - assert 'valid error-mean' in res + assert "valid error-mean" in res # custom objective, feval # no default metric, only custom one res = get_cv_result(params=params_dummy_obj_verbose, feval=constant_metric) assert len(res) == 2 - assert 'valid error-mean' in res + assert "valid error-mean" in res # metric in params with custom one res = get_cv_result(params=params_dummy_obj_metric_err_verbose, feval=constant_metric) assert len(res) == 4 - assert 'valid binary_error-mean' in res - assert 'valid error-mean' in res + assert "valid binary_error-mean" in res + assert "valid error-mean" in res # metric in args with custom one - res = get_cv_result(params=params_dummy_obj_verbose, - feval=constant_metric, metrics='binary_error') + res = get_cv_result(params=params_dummy_obj_verbose, feval=constant_metric, metrics="binary_error") assert len(res) == 4 - assert 'valid binary_error-mean' in res - assert 'valid error-mean' in res + assert "valid binary_error-mean" in res + assert "valid error-mean" in res # metric in args overwrites one in params, custom one is evaluated too - res = get_cv_result(params=params_dummy_obj_metric_inv_verbose, - feval=constant_metric, metrics='binary_error') + res = get_cv_result(params=params_dummy_obj_metric_inv_verbose, feval=constant_metric, metrics="binary_error") assert len(res) == 4 - assert 'valid binary_error-mean' in res - assert 'valid error-mean' in res + assert "valid binary_error-mean" in res + assert "valid error-mean" in res # multiple metrics in params with custom one res = get_cv_result(params=params_dummy_obj_metric_multi_verbose, feval=constant_metric) assert len(res) == 6 - assert 'valid binary_logloss-mean' in res - assert 'valid binary_error-mean' in res - assert 'valid error-mean' in res + assert "valid binary_logloss-mean" in res + assert "valid binary_error-mean" in res + assert "valid error-mean" in res # multiple metrics in args with custom one - res = get_cv_result(params=params_dummy_obj_verbose, feval=constant_metric, - metrics=['binary_logloss', 'binary_error']) + res = get_cv_result( + params=params_dummy_obj_verbose, feval=constant_metric, metrics=["binary_logloss", "binary_error"] + ) assert len(res) == 6 - assert 'valid binary_logloss-mean' in res - assert 'valid binary_error-mean' in res - assert 'valid error-mean' in res + assert "valid binary_logloss-mean" in res + assert "valid binary_error-mean" in 
res + assert "valid error-mean" in res # custom metric is evaluated despite 'None' is passed res = get_cv_result(params=params_dummy_obj_metric_none_verbose, feval=constant_metric) assert len(res) == 2 - assert 'valid error-mean' in res + assert "valid error-mean" in res # no custom objective, no feval # default metric train_booster() - assert len(evals_result['valid_0']) == 1 - assert 'binary_logloss' in evals_result['valid_0'] + assert len(evals_result["valid_0"]) == 1 + assert "binary_logloss" in evals_result["valid_0"] # default metric in params train_booster(params=params_obj_metric_log_verbose) - assert len(evals_result['valid_0']) == 1 - assert 'binary_logloss' in evals_result['valid_0'] + assert len(evals_result["valid_0"]) == 1 + assert "binary_logloss" in evals_result["valid_0"] # non-default metric in params train_booster(params=params_obj_metric_err_verbose) - assert len(evals_result['valid_0']) == 1 - assert 'binary_error' in evals_result['valid_0'] + assert len(evals_result["valid_0"]) == 1 + assert "binary_error" in evals_result["valid_0"] # multiple metrics in params train_booster(params=params_obj_metric_multi_verbose) - assert len(evals_result['valid_0']) == 2 - assert 'binary_logloss' in evals_result['valid_0'] - assert 'binary_error' in evals_result['valid_0'] + assert len(evals_result["valid_0"]) == 2 + assert "binary_logloss" in evals_result["valid_0"] + assert "binary_error" in evals_result["valid_0"] # remove default metric by 'None' aliases - for na_alias in ('None', 'na', 'null', 'custom'): - params = {'objective': 'binary', 'metric': na_alias, 'verbose': -1} + for na_alias in ("None", "na", "null", "custom"): + params = {"objective": "binary", "metric": na_alias, "verbose": -1} train_booster(params=params) assert len(evals_result) == 0 @@ -2738,145 +2552,144 @@ def test_metrics(): # metric in params train_booster(params=params_dummy_obj_metric_log_verbose) - assert len(evals_result['valid_0']) == 1 - assert 'binary_logloss' in evals_result['valid_0'] + assert len(evals_result["valid_0"]) == 1 + assert "binary_logloss" in evals_result["valid_0"] # multiple metrics in params train_booster(params=params_dummy_obj_metric_multi_verbose) - assert len(evals_result['valid_0']) == 2 - assert 'binary_logloss' in evals_result['valid_0'] - assert 'binary_error' in evals_result['valid_0'] + assert len(evals_result["valid_0"]) == 2 + assert "binary_logloss" in evals_result["valid_0"] + assert "binary_error" in evals_result["valid_0"] # no custom objective, feval # default metric with custom one train_booster(feval=constant_metric) - assert len(evals_result['valid_0']) == 2 - assert 'binary_logloss' in evals_result['valid_0'] - assert 'error' in evals_result['valid_0'] + assert len(evals_result["valid_0"]) == 2 + assert "binary_logloss" in evals_result["valid_0"] + assert "error" in evals_result["valid_0"] # default metric in params with custom one train_booster(params=params_obj_metric_log_verbose, feval=constant_metric) - assert len(evals_result['valid_0']) == 2 - assert 'binary_logloss' in evals_result['valid_0'] - assert 'error' in evals_result['valid_0'] + assert len(evals_result["valid_0"]) == 2 + assert "binary_logloss" in evals_result["valid_0"] + assert "error" in evals_result["valid_0"] # non-default metric in params with custom one train_booster(params=params_obj_metric_err_verbose, feval=constant_metric) - assert len(evals_result['valid_0']) == 2 - assert 'binary_error' in evals_result['valid_0'] - assert 'error' in evals_result['valid_0'] + assert 
len(evals_result["valid_0"]) == 2 + assert "binary_error" in evals_result["valid_0"] + assert "error" in evals_result["valid_0"] # multiple metrics in params with custom one train_booster(params=params_obj_metric_multi_verbose, feval=constant_metric) - assert len(evals_result['valid_0']) == 3 - assert 'binary_logloss' in evals_result['valid_0'] - assert 'binary_error' in evals_result['valid_0'] - assert 'error' in evals_result['valid_0'] + assert len(evals_result["valid_0"]) == 3 + assert "binary_logloss" in evals_result["valid_0"] + assert "binary_error" in evals_result["valid_0"] + assert "error" in evals_result["valid_0"] # custom metric is evaluated despite 'None' is passed train_booster(params=params_obj_metric_none_verbose, feval=constant_metric) assert len(evals_result) == 1 - assert 'error' in evals_result['valid_0'] + assert "error" in evals_result["valid_0"] # custom objective, feval # no default metric, only custom one train_booster(params=params_dummy_obj_verbose, feval=constant_metric) - assert len(evals_result['valid_0']) == 1 - assert 'error' in evals_result['valid_0'] + assert len(evals_result["valid_0"]) == 1 + assert "error" in evals_result["valid_0"] # metric in params with custom one train_booster(params=params_dummy_obj_metric_log_verbose, feval=constant_metric) - assert len(evals_result['valid_0']) == 2 - assert 'binary_logloss' in evals_result['valid_0'] - assert 'error' in evals_result['valid_0'] + assert len(evals_result["valid_0"]) == 2 + assert "binary_logloss" in evals_result["valid_0"] + assert "error" in evals_result["valid_0"] # multiple metrics in params with custom one train_booster(params=params_dummy_obj_metric_multi_verbose, feval=constant_metric) - assert len(evals_result['valid_0']) == 3 - assert 'binary_logloss' in evals_result['valid_0'] - assert 'binary_error' in evals_result['valid_0'] - assert 'error' in evals_result['valid_0'] + assert len(evals_result["valid_0"]) == 3 + assert "binary_logloss" in evals_result["valid_0"] + assert "binary_error" in evals_result["valid_0"] + assert "error" in evals_result["valid_0"] # custom metric is evaluated despite 'None' is passed train_booster(params=params_dummy_obj_metric_none_verbose, feval=constant_metric) assert len(evals_result) == 1 - assert 'error' in evals_result['valid_0'] + assert "error" in evals_result["valid_0"] X, y = load_digits(n_class=3, return_X_y=True) lgb_train = lgb.Dataset(X, y) - obj_multi_aliases = ['multiclass', 'softmax', 'multiclassova', 'multiclass_ova', 'ova', 'ovr'] + obj_multi_aliases = ["multiclass", "softmax", "multiclassova", "multiclass_ova", "ova", "ovr"] for obj_multi_alias in obj_multi_aliases: # Custom objective replaces multiclass - params_obj_class_3_verbose = {'objective': obj_multi_alias, 'num_class': 3, 'verbose': -1} - params_dummy_obj_class_3_verbose = {'objective': dummy_obj, 'num_class': 3, 'verbose': -1} - params_dummy_obj_class_1_verbose = {'objective': dummy_obj, 'num_class': 1, 'verbose': -1} - params_obj_verbose = {'objective': obj_multi_alias, 'verbose': -1} - params_dummy_obj_verbose = {'objective': dummy_obj, 'verbose': -1} + params_obj_class_3_verbose = {"objective": obj_multi_alias, "num_class": 3, "verbose": -1} + params_dummy_obj_class_3_verbose = {"objective": dummy_obj, "num_class": 3, "verbose": -1} + params_dummy_obj_class_1_verbose = {"objective": dummy_obj, "num_class": 1, "verbose": -1} + params_obj_verbose = {"objective": obj_multi_alias, "verbose": -1} + params_dummy_obj_verbose = {"objective": dummy_obj, "verbose": -1} # multiclass default 
metric res = get_cv_result(params_obj_class_3_verbose) assert len(res) == 2 - assert 'valid multi_logloss-mean' in res + assert "valid multi_logloss-mean" in res # multiclass default metric with custom one res = get_cv_result(params_obj_class_3_verbose, feval=constant_metric) assert len(res) == 4 - assert 'valid multi_logloss-mean' in res - assert 'valid error-mean' in res + assert "valid multi_logloss-mean" in res + assert "valid error-mean" in res # multiclass metric alias with custom one for custom objective res = get_cv_result(params_dummy_obj_class_3_verbose, feval=constant_metric) assert len(res) == 2 - assert 'valid error-mean' in res + assert "valid error-mean" in res # no metric for invalid class_num res = get_cv_result(params_dummy_obj_class_1_verbose) assert len(res) == 0 # custom metric for invalid class_num res = get_cv_result(params_dummy_obj_class_1_verbose, feval=constant_metric) assert len(res) == 2 - assert 'valid error-mean' in res + assert "valid error-mean" in res # multiclass metric alias with custom one with invalid class_num with pytest.raises(lgb.basic.LightGBMError): - get_cv_result(params_dummy_obj_class_1_verbose, metrics=obj_multi_alias, - feval=constant_metric) + get_cv_result(params_dummy_obj_class_1_verbose, metrics=obj_multi_alias, feval=constant_metric) # multiclass default metric without num_class with pytest.raises(lgb.basic.LightGBMError): get_cv_result(params_obj_verbose) - for metric_multi_alias in obj_multi_aliases + ['multi_logloss']: + for metric_multi_alias in obj_multi_aliases + ["multi_logloss"]: # multiclass metric alias res = get_cv_result(params_obj_class_3_verbose, metrics=metric_multi_alias) assert len(res) == 2 - assert 'valid multi_logloss-mean' in res + assert "valid multi_logloss-mean" in res # multiclass metric - res = get_cv_result(params_obj_class_3_verbose, metrics='multi_error') + res = get_cv_result(params_obj_class_3_verbose, metrics="multi_error") assert len(res) == 2 - assert 'valid multi_error-mean' in res + assert "valid multi_error-mean" in res # non-valid metric for multiclass objective with pytest.raises(lgb.basic.LightGBMError): - get_cv_result(params_obj_class_3_verbose, metrics='binary_logloss') - params_class_3_verbose = {'num_class': 3, 'verbose': -1} + get_cv_result(params_obj_class_3_verbose, metrics="binary_logloss") + params_class_3_verbose = {"num_class": 3, "verbose": -1} # non-default num_class for default objective with pytest.raises(lgb.basic.LightGBMError): get_cv_result(params_class_3_verbose) # no metric with non-default num_class for custom objective res = get_cv_result(params_dummy_obj_class_3_verbose) assert len(res) == 0 - for metric_multi_alias in obj_multi_aliases + ['multi_logloss']: + for metric_multi_alias in obj_multi_aliases + ["multi_logloss"]: # multiclass metric alias for custom objective res = get_cv_result(params_dummy_obj_class_3_verbose, metrics=metric_multi_alias) assert len(res) == 2 - assert 'valid multi_logloss-mean' in res + assert "valid multi_logloss-mean" in res # multiclass metric for custom objective - res = get_cv_result(params_dummy_obj_class_3_verbose, metrics='multi_error') + res = get_cv_result(params_dummy_obj_class_3_verbose, metrics="multi_error") assert len(res) == 2 - assert 'valid multi_error-mean' in res + assert "valid multi_error-mean" in res # binary metric with non-default num_class for custom objective with pytest.raises(lgb.basic.LightGBMError): - get_cv_result(params_dummy_obj_class_3_verbose, metrics='binary_error') + 
get_cv_result(params_dummy_obj_class_3_verbose, metrics="binary_error") def test_multiple_feval_train(): X, y = load_breast_cancer(return_X_y=True) - params = {'verbose': -1, 'objective': 'binary', 'metric': 'binary_logloss'} + params = {"verbose": -1, "objective": "binary", "metric": "binary_logloss"} X_train, X_validation, y_train, y_validation = train_test_split(X, y, test_size=0.2) @@ -2889,76 +2702,47 @@ def test_multiple_feval_train(): valid_sets=validation_dataset, num_boost_round=5, feval=[constant_metric, decreasing_metric], - callbacks=[lgb.record_evaluation(evals_result)] + callbacks=[lgb.record_evaluation(evals_result)], ) - assert len(evals_result['valid_0']) == 3 - assert 'binary_logloss' in evals_result['valid_0'] - assert 'error' in evals_result['valid_0'] - assert 'decreasing_metric' in evals_result['valid_0'] + assert len(evals_result["valid_0"]) == 3 + assert "binary_logloss" in evals_result["valid_0"] + assert "error" in evals_result["valid_0"] + assert "decreasing_metric" in evals_result["valid_0"] def test_objective_callable_train_binary_classification(): X, y = load_breast_cancer(return_X_y=True) - params = { - 'verbose': -1, - 'objective': logloss_obj, - 'learning_rate': 0.01 - } + params = {"verbose": -1, "objective": logloss_obj, "learning_rate": 0.01} train_dataset = lgb.Dataset(X, y) - booster = lgb.train( - params=params, - train_set=train_dataset, - num_boost_round=20 - ) + booster = lgb.train(params=params, train_set=train_dataset, num_boost_round=20) y_pred = logistic_sigmoid(booster.predict(X)) logloss_error = log_loss(y, y_pred) rocauc_error = roc_auc_score(y, y_pred) - assert booster.params['objective'] == 'none' + assert booster.params["objective"] == "none" assert logloss_error == pytest.approx(0.547907) assert rocauc_error == pytest.approx(0.995944) def test_objective_callable_train_regression(): X, y = make_synthetic_regression() - params = { - 'verbose': -1, - 'objective': mse_obj - } + params = {"verbose": -1, "objective": mse_obj} lgb_train = lgb.Dataset(X, y) - booster = lgb.train( - params, - lgb_train, - num_boost_round=20 - ) + booster = lgb.train(params, lgb_train, num_boost_round=20) y_pred = booster.predict(X) mse_error = mean_squared_error(y, y_pred) - assert booster.params['objective'] == 'none' + assert booster.params["objective"] == "none" assert mse_error == pytest.approx(286.724194) def test_objective_callable_cv_binary_classification(): X, y = load_breast_cancer(return_X_y=True) - params = { - 'verbose': -1, - 'objective': logloss_obj, - 'learning_rate': 0.01 - } + params = {"verbose": -1, "objective": logloss_obj, "learning_rate": 0.01} train_dataset = lgb.Dataset(X, y) - cv_res = lgb.cv( - params, - train_dataset, - num_boost_round=20, - nfold=3, - return_cvbooster=True - ) - cv_booster = cv_res['cvbooster'].boosters - cv_logloss_errors = [ - log_loss(y, logistic_sigmoid(cb.predict(X))) < 0.56 for cb in cv_booster - ] - cv_objs = [ - cb.params['objective'] == 'none' for cb in cv_booster - ] + cv_res = lgb.cv(params, train_dataset, num_boost_round=20, nfold=3, return_cvbooster=True) + cv_booster = cv_res["cvbooster"].boosters + cv_logloss_errors = [log_loss(y, logistic_sigmoid(cb.predict(X))) < 0.56 for cb in cv_booster] + cv_objs = [cb.params["objective"] == "none" for cb in cv_booster] assert all(cv_objs) assert all(cv_logloss_errors) @@ -2966,25 +2750,11 @@ def test_objective_callable_cv_binary_classification(): def test_objective_callable_cv_regression(): X, y = make_synthetic_regression() lgb_train = lgb.Dataset(X, y) - params = 
{ - 'verbose': -1, - 'objective': mse_obj - } - cv_res = lgb.cv( - params, - lgb_train, - num_boost_round=20, - nfold=3, - stratified=False, - return_cvbooster=True - ) - cv_booster = cv_res['cvbooster'].boosters - cv_mse_errors = [ - mean_squared_error(y, cb.predict(X)) < 463 for cb in cv_booster - ] - cv_objs = [ - cb.params['objective'] == 'none' for cb in cv_booster - ] + params = {"verbose": -1, "objective": mse_obj} + cv_res = lgb.cv(params, lgb_train, num_boost_round=20, nfold=3, stratified=False, return_cvbooster=True) + cv_booster = cv_res["cvbooster"].boosters + cv_mse_errors = [mean_squared_error(y, cb.predict(X)) < 463 for cb in cv_booster] + cv_objs = [cb.params["objective"] == "none" for cb in cv_booster] assert all(cv_objs) assert all(cv_mse_errors) @@ -2992,24 +2762,22 @@ def test_objective_callable_cv_regression(): def test_multiple_feval_cv(): X, y = load_breast_cancer(return_X_y=True) - params = {'verbose': -1, 'objective': 'binary', 'metric': 'binary_logloss'} + params = {"verbose": -1, "objective": "binary", "metric": "binary_logloss"} train_dataset = lgb.Dataset(data=X, label=y) cv_results = lgb.cv( - params=params, - train_set=train_dataset, - num_boost_round=5, - feval=[constant_metric, decreasing_metric]) + params=params, train_set=train_dataset, num_boost_round=5, feval=[constant_metric, decreasing_metric] + ) # Expect three metrics but mean and stdv for each metric assert len(cv_results) == 6 - assert 'valid binary_logloss-mean' in cv_results - assert 'valid error-mean' in cv_results - assert 'valid decreasing_metric-mean' in cv_results - assert 'valid binary_logloss-stdv' in cv_results - assert 'valid error-stdv' in cv_results - assert 'valid decreasing_metric-stdv' in cv_results + assert "valid binary_logloss-mean" in cv_results + assert "valid error-mean" in cv_results + assert "valid decreasing_metric-mean" in cv_results + assert "valid binary_logloss-stdv" in cv_results + assert "valid error-stdv" in cv_results + assert "valid decreasing_metric-stdv" in cv_results def test_default_objective_and_metric(): @@ -3018,22 +2786,22 @@ def test_default_objective_and_metric(): train_dataset = lgb.Dataset(data=X_train, label=y_train) validation_dataset = lgb.Dataset(data=X_test, label=y_test, reference=train_dataset) evals_result = {} - params = {'verbose': -1} + params = {"verbose": -1} lgb.train( params=params, train_set=train_dataset, valid_sets=validation_dataset, num_boost_round=5, - callbacks=[lgb.record_evaluation(evals_result)] + callbacks=[lgb.record_evaluation(evals_result)], ) - assert 'valid_0' in evals_result - assert len(evals_result['valid_0']) == 1 - assert 'l2' in evals_result['valid_0'] - assert len(evals_result['valid_0']['l2']) == 5 + assert "valid_0" in evals_result + assert len(evals_result["valid_0"]) == 1 + assert "l2" in evals_result["valid_0"] + assert len(evals_result["valid_0"]["l2"]) == 5 -@pytest.mark.parametrize('use_weight', [True, False]) +@pytest.mark.parametrize("use_weight", [True, False]) def test_multiclass_custom_objective(use_weight): def custom_obj(y_pred, ds): y_true = ds.get_label() @@ -3047,24 +2815,24 @@ def test_multiclass_custom_objective(use_weight): ds = lgb.Dataset(X, y) if use_weight: ds.set_weight(weight) - params = {'objective': 'multiclass', 'num_class': 3, 'num_leaves': 7} + params = {"objective": "multiclass", "num_class": 3, "num_leaves": 7} builtin_obj_bst = lgb.train(params, ds, num_boost_round=10) builtin_obj_preds = builtin_obj_bst.predict(X) - params['objective'] = custom_obj + params["objective"] = 
custom_obj custom_obj_bst = lgb.train(params, ds, num_boost_round=10) custom_obj_preds = softmax(custom_obj_bst.predict(X)) np.testing.assert_allclose(builtin_obj_preds, custom_obj_preds, rtol=0.01) -@pytest.mark.parametrize('use_weight', [True, False]) +@pytest.mark.parametrize("use_weight", [True, False]) def test_multiclass_custom_eval(use_weight): def custom_eval(y_pred, ds): y_true = ds.get_label() weight = ds.get_weight() # weight is None when not set loss = log_loss(y_true, y_pred, sample_weight=weight) - return 'custom_logloss', loss, False + return "custom_logloss", loss, False centers = [[-4, -4], [4, 4], [-4, 4]] X, y = make_blobs(n_samples=1_000, centers=centers, random_state=42) @@ -3077,43 +2845,43 @@ def test_multiclass_custom_eval(use_weight): if use_weight: train_ds.set_weight(weight_train) valid_ds.set_weight(weight_valid) - params = {'objective': 'multiclass', 'num_class': 3, 'num_leaves': 7} + params = {"objective": "multiclass", "num_class": 3, "num_leaves": 7} eval_result = {} bst = lgb.train( params, train_ds, num_boost_round=10, valid_sets=[train_ds, valid_ds], - valid_names=['train', 'valid'], + valid_names=["train", "valid"], feval=custom_eval, callbacks=[lgb.record_evaluation(eval_result)], keep_training_booster=True, ) - for key, ds in zip(['train', 'valid'], [train_ds, valid_ds]): - np.testing.assert_allclose(eval_result[key]['multi_logloss'], eval_result[key]['custom_logloss']) + for key, ds in zip(["train", "valid"], [train_ds, valid_ds]): + np.testing.assert_allclose(eval_result[key]["multi_logloss"], eval_result[key]["custom_logloss"]) _, metric, value, _ = bst.eval(ds, key, feval=custom_eval)[1] # first element is multi_logloss - assert metric == 'custom_logloss' + assert metric == "custom_logloss" np.testing.assert_allclose(value, eval_result[key][metric][-1]) -@pytest.mark.skipif(psutil.virtual_memory().available / 1024 / 1024 / 1024 < 3, reason='not enough RAM') +@pytest.mark.skipif(psutil.virtual_memory().available / 1024 / 1024 / 1024 < 3, reason="not enough RAM") def test_model_size(): X, y = make_synthetic_regression() data = lgb.Dataset(X, y) - bst = lgb.train({'verbose': -1}, data, num_boost_round=2) + bst = lgb.train({"verbose": -1}, data, num_boost_round=2) y_pred = bst.predict(X) model_str = bst.model_to_string() - one_tree = model_str[model_str.find('Tree=1'):model_str.find('end of trees')] + one_tree = model_str[model_str.find("Tree=1") : model_str.find("end of trees")] one_tree_size = len(one_tree) - one_tree = one_tree.replace('Tree=1', 'Tree={}') + one_tree = one_tree.replace("Tree=1", "Tree={}") multiplier = 100 total_trees = multiplier + 2 try: - before_tree_sizes = model_str[:model_str.find('tree_sizes')] - trees = model_str[model_str.find('Tree=0'):model_str.find('end of trees')] + before_tree_sizes = model_str[: model_str.find("tree_sizes")] + trees = model_str[model_str.find("Tree=0") : model_str.find("end of trees")] more_trees = (one_tree * multiplier).format(*range(2, total_trees)) - after_trees = model_str[model_str.find('end of trees'):] + after_trees = model_str[model_str.find("end of trees") :] num_end_spaces = 2**31 - one_tree_size * total_trees new_model_str = f"{before_tree_sizes}\n\n{trees}{more_trees}{after_trees}{'':{num_end_spaces}}" assert len(new_model_str) > 2**31 @@ -3122,19 +2890,21 @@ def test_model_size(): y_pred_new = bst.predict(X, num_iteration=2) np.testing.assert_allclose(y_pred, y_pred_new) except MemoryError: - pytest.skipTest('not enough RAM') + pytest.skipTest("not enough RAM") 
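
For reference, the callable-objective and custom-eval (feval) pattern exercised by the tests above reduces to the following standalone sketch. It is illustrative only and not part of this patch: mse_obj, rmse_eval, and the synthetic data are stand-ins rather than the test suite's own helpers.

import numpy as np
import lightgbm as lgb
from sklearn.datasets import make_regression


def mse_obj(y_pred, dataset):
    # gradient and hessian of 0.5 * (pred - y)^2 with respect to the raw score
    y_true = dataset.get_label()
    return y_pred - y_true, np.ones_like(y_pred)


def rmse_eval(y_pred, dataset):
    # feval contract: (metric name, value, is_higher_better)
    y_true = dataset.get_label()
    return "custom_rmse", float(np.sqrt(np.mean((y_pred - y_true) ** 2))), False


X, y = make_regression(n_samples=500, n_features=10, random_state=42)
train_ds = lgb.Dataset(X, label=y)
evals = {}
booster = lgb.train(
    params={"verbose": -1, "objective": mse_obj},  # callable objective; recorded as "none" in booster.params
    train_set=train_ds,
    num_boost_round=20,
    valid_sets=[train_ds],
    valid_names=["train"],
    feval=rmse_eval,
    callbacks=[lgb.record_evaluation(evals)],
)
assert booster.params["objective"] == "none"  # same invariant the tests above assert
print(evals["train"]["custom_rmse"][-1])

Because feval returns a (name, value, is_higher_better) tuple, the recorded results are keyed by that name, which is why the assertions in these tests look up entries such as evals_result["valid_0"]["custom_logloss"].
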
-@pytest.mark.skipif(getenv('TASK', '') == 'cuda', reason='Skip due to differences in implementation details of CUDA version') +@pytest.mark.skipif( + getenv("TASK", "") == "cuda", reason="Skip due to differences in implementation details of CUDA version" +) def test_get_split_value_histogram(): X, y = make_synthetic_regression() X = np.repeat(X, 3, axis=0) y = np.repeat(y, 3, axis=0) X[:, 2] = np.random.default_rng(0).integers(0, 20, size=X.shape[0]) lgb_train = lgb.Dataset(X, y, categorical_feature=[2]) - gbm = lgb.train({'verbose': -1}, lgb_train, num_boost_round=20) + gbm = lgb.train({"verbose": -1}, lgb_train, num_boost_round=20) # test XGBoost-style return value - params = {'feature': 0, 'xgboost_style': True} + params = {"feature": 0, "xgboost_style": True} assert gbm.get_split_value_histogram(**params).shape == (12, 2) assert gbm.get_split_value_histogram(bins=999, **params).shape == (12, 2) assert gbm.get_split_value_histogram(bins=-1, **params).shape == (1, 2) @@ -3146,20 +2916,20 @@ def test_get_split_value_histogram(): if lgb.compat.PANDAS_INSTALLED: np.testing.assert_allclose( gbm.get_split_value_histogram(0, xgboost_style=True).values, - gbm.get_split_value_histogram(gbm.feature_name()[0], xgboost_style=True).values + gbm.get_split_value_histogram(gbm.feature_name()[0], xgboost_style=True).values, ) np.testing.assert_allclose( gbm.get_split_value_histogram(X.shape[-1] - 1, xgboost_style=True).values, - gbm.get_split_value_histogram(gbm.feature_name()[X.shape[-1] - 1], xgboost_style=True).values + gbm.get_split_value_histogram(gbm.feature_name()[X.shape[-1] - 1], xgboost_style=True).values, ) else: np.testing.assert_allclose( gbm.get_split_value_histogram(0, xgboost_style=True), - gbm.get_split_value_histogram(gbm.feature_name()[0], xgboost_style=True) + gbm.get_split_value_histogram(gbm.feature_name()[0], xgboost_style=True), ) np.testing.assert_allclose( gbm.get_split_value_histogram(X.shape[-1] - 1, xgboost_style=True), - gbm.get_split_value_histogram(gbm.feature_name()[X.shape[-1] - 1], xgboost_style=True) + gbm.get_split_value_histogram(gbm.feature_name()[X.shape[-1] - 1], xgboost_style=True), ) # test numpy-style return value hist, bins = gbm.get_split_value_histogram(0) @@ -3193,12 +2963,12 @@ def test_get_split_value_histogram(): np.testing.assert_array_equal(hist_idx, hist_name) np.testing.assert_allclose(bins_idx, bins_name) # test bins string type - hist_vals, bin_edges = gbm.get_split_value_histogram(0, bins='auto') - hist = gbm.get_split_value_histogram(0, bins='auto', xgboost_style=True) + hist_vals, bin_edges = gbm.get_split_value_histogram(0, bins="auto") + hist = gbm.get_split_value_histogram(0, bins="auto", xgboost_style=True) if lgb.compat.PANDAS_INSTALLED: mask = hist_vals > 0 - np.testing.assert_array_equal(hist_vals[mask], hist['Count'].values) - np.testing.assert_allclose(bin_edges[1:][mask], hist['SplitValue'].values) + np.testing.assert_array_equal(hist_vals[mask], hist["Count"].values) + np.testing.assert_allclose(bin_edges[1:][mask], hist["SplitValue"].values) else: mask = hist_vals > 0 np.testing.assert_array_equal(hist_vals[mask], hist[:, 1]) @@ -3208,18 +2978,18 @@ def test_get_split_value_histogram(): gbm.get_split_value_histogram(2) -@pytest.mark.skipif(getenv('TASK', '') == 'cuda', reason='Skip due to differences in implementation details of CUDA version') +@pytest.mark.skipif( + getenv("TASK", "") == "cuda", reason="Skip due to differences in implementation details of CUDA version" +) def test_early_stopping_for_only_first_metric(): - - def 
metrics_combination_train_regression(valid_sets, metric_list, assumed_iteration, - first_metric_only, feval=None): + def metrics_combination_train_regression(valid_sets, metric_list, assumed_iteration, first_metric_only, feval=None): params = { - 'objective': 'regression', - 'learning_rate': 1.1, - 'num_leaves': 10, - 'metric': metric_list, - 'verbose': -1, - 'seed': 123 + "objective": "regression", + "learning_rate": 1.1, + "num_leaves": 10, + "metric": metric_list, + "verbose": -1, + "seed": 123, } gbm = lgb.train( params, @@ -3227,20 +2997,21 @@ def test_early_stopping_for_only_first_metric(): num_boost_round=25, valid_sets=valid_sets, feval=feval, - callbacks=[lgb.early_stopping(stopping_rounds=5, first_metric_only=first_metric_only)] + callbacks=[lgb.early_stopping(stopping_rounds=5, first_metric_only=first_metric_only)], ) assert assumed_iteration == gbm.best_iteration - def metrics_combination_cv_regression(metric_list, assumed_iteration, - first_metric_only, eval_train_metric, feval=None): + def metrics_combination_cv_regression( + metric_list, assumed_iteration, first_metric_only, eval_train_metric, feval=None + ): params = { - 'objective': 'regression', - 'learning_rate': 0.9, - 'num_leaves': 10, - 'metric': metric_list, - 'verbose': -1, - 'seed': 123, - 'gpu_use_dp': True + "objective": "regression", + "learning_rate": 0.9, + "num_leaves": 10, + "metric": metric_list, + "verbose": -1, + "seed": 123, + "gpu_use_dp": True, } ret = lgb.cv( params, @@ -3249,7 +3020,7 @@ def test_early_stopping_for_only_first_metric(): stratified=False, feval=feval, callbacks=[lgb.early_stopping(stopping_rounds=5, first_metric_only=first_metric_only)], - eval_train_metric=eval_train_metric + eval_train_metric=eval_train_metric, ) assert assumed_iteration == len(ret[list(ret.keys())[0]]) @@ -3279,82 +3050,102 @@ def test_early_stopping_for_only_first_metric(): metrics_combination_train_regression(lgb_valid1, [], iter_valid1_l2, True) metrics_combination_train_regression(lgb_valid1, None, iter_valid1_l2, False) metrics_combination_train_regression(lgb_valid1, None, iter_valid1_l2, True) - metrics_combination_train_regression(lgb_valid1, 'l2', iter_valid1_l2, True) - metrics_combination_train_regression(lgb_valid1, 'l1', iter_valid1_l1, True) - metrics_combination_train_regression(lgb_valid1, ['l2', 'l1'], iter_valid1_l2, True) - metrics_combination_train_regression(lgb_valid1, ['l1', 'l2'], iter_valid1_l1, True) - metrics_combination_train_regression(lgb_valid1, ['l2', 'l1'], iter_min_valid1, False) - metrics_combination_train_regression(lgb_valid1, ['l1', 'l2'], iter_min_valid1, False) + metrics_combination_train_regression(lgb_valid1, "l2", iter_valid1_l2, True) + metrics_combination_train_regression(lgb_valid1, "l1", iter_valid1_l1, True) + metrics_combination_train_regression(lgb_valid1, ["l2", "l1"], iter_valid1_l2, True) + metrics_combination_train_regression(lgb_valid1, ["l1", "l2"], iter_valid1_l1, True) + metrics_combination_train_regression(lgb_valid1, ["l2", "l1"], iter_min_valid1, False) + metrics_combination_train_regression(lgb_valid1, ["l1", "l2"], iter_min_valid1, False) # test feval for lgb.train - metrics_combination_train_regression(lgb_valid1, 'None', 1, False, - feval=lambda preds, train_data: [decreasing_metric(preds, train_data), - constant_metric(preds, train_data)]) - metrics_combination_train_regression(lgb_valid1, 'None', 25, True, - feval=lambda preds, train_data: [decreasing_metric(preds, train_data), - constant_metric(preds, train_data)]) - 
metrics_combination_train_regression(lgb_valid1, 'None', 1, True, - feval=lambda preds, train_data: [constant_metric(preds, train_data), - decreasing_metric(preds, train_data)]) + metrics_combination_train_regression( + lgb_valid1, + "None", + 1, + False, + feval=lambda preds, train_data: [decreasing_metric(preds, train_data), constant_metric(preds, train_data)], + ) + metrics_combination_train_regression( + lgb_valid1, + "None", + 25, + True, + feval=lambda preds, train_data: [decreasing_metric(preds, train_data), constant_metric(preds, train_data)], + ) + metrics_combination_train_regression( + lgb_valid1, + "None", + 1, + True, + feval=lambda preds, train_data: [constant_metric(preds, train_data), decreasing_metric(preds, train_data)], + ) # test with two valid data for lgb.train - metrics_combination_train_regression([lgb_valid1, lgb_valid2], ['l2', 'l1'], iter_min_l2, True) - metrics_combination_train_regression([lgb_valid2, lgb_valid1], ['l2', 'l1'], iter_min_l2, True) - metrics_combination_train_regression([lgb_valid1, lgb_valid2], ['l1', 'l2'], iter_min_l1, True) - metrics_combination_train_regression([lgb_valid2, lgb_valid1], ['l1', 'l2'], iter_min_l1, True) + metrics_combination_train_regression([lgb_valid1, lgb_valid2], ["l2", "l1"], iter_min_l2, True) + metrics_combination_train_regression([lgb_valid2, lgb_valid1], ["l2", "l1"], iter_min_l2, True) + metrics_combination_train_regression([lgb_valid1, lgb_valid2], ["l1", "l2"], iter_min_l1, True) + metrics_combination_train_regression([lgb_valid2, lgb_valid1], ["l1", "l2"], iter_min_l1, True) # test for lgb.cv metrics_combination_cv_regression(None, iter_cv_l2, True, False) - metrics_combination_cv_regression('l2', iter_cv_l2, True, False) - metrics_combination_cv_regression('l1', iter_cv_l1, True, False) - metrics_combination_cv_regression(['l2', 'l1'], iter_cv_l2, True, False) - metrics_combination_cv_regression(['l1', 'l2'], iter_cv_l1, True, False) - metrics_combination_cv_regression(['l2', 'l1'], iter_cv_min, False, False) - metrics_combination_cv_regression(['l1', 'l2'], iter_cv_min, False, False) + metrics_combination_cv_regression("l2", iter_cv_l2, True, False) + metrics_combination_cv_regression("l1", iter_cv_l1, True, False) + metrics_combination_cv_regression(["l2", "l1"], iter_cv_l2, True, False) + metrics_combination_cv_regression(["l1", "l2"], iter_cv_l1, True, False) + metrics_combination_cv_regression(["l2", "l1"], iter_cv_min, False, False) + metrics_combination_cv_regression(["l1", "l2"], iter_cv_min, False, False) metrics_combination_cv_regression(None, iter_cv_l2, True, True) - metrics_combination_cv_regression('l2', iter_cv_l2, True, True) - metrics_combination_cv_regression('l1', iter_cv_l1, True, True) - metrics_combination_cv_regression(['l2', 'l1'], iter_cv_l2, True, True) - metrics_combination_cv_regression(['l1', 'l2'], iter_cv_l1, True, True) - metrics_combination_cv_regression(['l2', 'l1'], iter_cv_min, False, True) - metrics_combination_cv_regression(['l1', 'l2'], iter_cv_min, False, True) + metrics_combination_cv_regression("l2", iter_cv_l2, True, True) + metrics_combination_cv_regression("l1", iter_cv_l1, True, True) + metrics_combination_cv_regression(["l2", "l1"], iter_cv_l2, True, True) + metrics_combination_cv_regression(["l1", "l2"], iter_cv_l1, True, True) + metrics_combination_cv_regression(["l2", "l1"], iter_cv_min, False, True) + metrics_combination_cv_regression(["l1", "l2"], iter_cv_min, False, True) # test feval for lgb.cv - metrics_combination_cv_regression('None', 1, False, False, - 
feval=lambda preds, train_data: [decreasing_metric(preds, train_data), - constant_metric(preds, train_data)]) - metrics_combination_cv_regression('None', 25, True, False, - feval=lambda preds, train_data: [decreasing_metric(preds, train_data), - constant_metric(preds, train_data)]) - metrics_combination_cv_regression('None', 1, True, False, - feval=lambda preds, train_data: [constant_metric(preds, train_data), - decreasing_metric(preds, train_data)]) + metrics_combination_cv_regression( + "None", + 1, + False, + False, + feval=lambda preds, train_data: [decreasing_metric(preds, train_data), constant_metric(preds, train_data)], + ) + metrics_combination_cv_regression( + "None", + 25, + True, + False, + feval=lambda preds, train_data: [decreasing_metric(preds, train_data), constant_metric(preds, train_data)], + ) + metrics_combination_cv_regression( + "None", + 1, + True, + False, + feval=lambda preds, train_data: [constant_metric(preds, train_data), decreasing_metric(preds, train_data)], + ) def test_node_level_subcol(): X, y = load_breast_cancer(return_X_y=True) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) params = { - 'objective': 'binary', - 'metric': 'binary_logloss', - 'feature_fraction_bynode': 0.8, - 'feature_fraction': 1.0, - 'verbose': -1 + "objective": "binary", + "metric": "binary_logloss", + "feature_fraction_bynode": 0.8, + "feature_fraction": 1.0, + "verbose": -1, } lgb_train = lgb.Dataset(X_train, y_train) lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train) evals_result = {} gbm = lgb.train( - params, - lgb_train, - num_boost_round=25, - valid_sets=lgb_eval, - callbacks=[lgb.record_evaluation(evals_result)] + params, lgb_train, num_boost_round=25, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)] ) ret = log_loss(y_test, gbm.predict(X_test)) assert ret < 0.14 - assert evals_result['valid_0']['binary_logloss'][-1] == pytest.approx(ret) - params['feature_fraction'] = 0.5 + assert evals_result["valid_0"]["binary_logloss"][-1] == pytest.approx(ret) + params["feature_fraction"] = 0.5 gbm2 = lgb.train(params, lgb_train, num_boost_round=25) ret2 = log_loss(y_test, gbm2.predict(X_test)) assert ret != ret2 @@ -3371,10 +3162,7 @@ def test_forced_split_feature_indices(tmp_path): with open(tmp_split_file, "w") as f: f.write(json.dumps(forced_split)) lgb_train = lgb.Dataset(X, y) - params = { - "objective": "regression", - "forcedsplits_filename": tmp_split_file - } + params = {"objective": "regression", "forcedsplits_filename": tmp_split_file} with pytest.raises(lgb.basic.LightGBMError, match="Forced splits file includes feature index"): lgb.train(params, lgb_train) @@ -3384,15 +3172,15 @@ def test_forced_bins(): x[:, 0] = np.arange(0, 1, 0.01) x[:, 1] = -np.arange(0, 1, 0.01) y = np.arange(0, 1, 0.01) - forcedbins_filename = ( - Path(__file__).absolute().parents[2] / 'examples' / 'regression' / 'forced_bins.json' - ) - params = {'objective': 'regression_l1', - 'max_bin': 5, - 'forcedbins_filename': forcedbins_filename, - 'num_leaves': 2, - 'min_data_in_leaf': 1, - 'verbose': -1} + forcedbins_filename = Path(__file__).absolute().parents[2] / "examples" / "regression" / "forced_bins.json" + params = { + "objective": "regression_l1", + "max_bin": 5, + "forcedbins_filename": forcedbins_filename, + "num_leaves": 2, + "min_data_in_leaf": 1, + "verbose": -1, + } lgb_x = lgb.Dataset(x, label=y) est = lgb.train(params, lgb_x, num_boost_round=20) new_x = np.zeros((3, x.shape[1])) @@ -3403,15 +3191,15 @@ def 
test_forced_bins(): new_x[:, 1] = [-0.9, -0.6, -0.3] predicted = est.predict(new_x) assert len(np.unique(predicted)) == 1 - params['forcedbins_filename'] = '' + params["forcedbins_filename"] = "" lgb_x = lgb.Dataset(x, label=y) est = lgb.train(params, lgb_x, num_boost_round=20) predicted = est.predict(new_x) assert len(np.unique(predicted)) == 3 - params['forcedbins_filename'] = ( - Path(__file__).absolute().parents[2] / 'examples' / 'regression' / 'forced_bins2.json' + params["forcedbins_filename"] = ( + Path(__file__).absolute().parents[2] / "examples" / "regression" / "forced_bins2.json" ) - params['max_bin'] = 11 + params["max_bin"] = 11 lgb_x = lgb.Dataset(x[:, :1], label=y) est = lgb.train(params, lgb_x, num_boost_round=50) predicted = est.predict(x[1:, :1]) @@ -3426,12 +3214,14 @@ def test_binning_same_sign(): x[:, 0] = np.arange(0.01, 1, 0.01) x[:, 1] = -np.arange(0.01, 1, 0.01) y = np.arange(0.01, 1, 0.01) - params = {'objective': 'regression_l1', - 'max_bin': 5, - 'num_leaves': 2, - 'min_data_in_leaf': 1, - 'verbose': -1, - 'seed': 0} + params = { + "objective": "regression_l1", + "max_bin": 5, + "num_leaves": 2, + "min_data_in_leaf": 1, + "verbose": -1, + "seed": 0, + } lgb_x = lgb.Dataset(x, label=y) est = lgb.train(params, lgb_x, num_boost_round=20) new_x = np.zeros((3, 2)) @@ -3447,50 +3237,54 @@ def test_binning_same_sign(): def test_dataset_update_params(): - default_params = {"max_bin": 100, - "max_bin_by_feature": [20, 10], - "bin_construct_sample_cnt": 10000, - "min_data_in_bin": 1, - "use_missing": False, - "zero_as_missing": False, - "categorical_feature": [0], - "feature_pre_filter": True, - "pre_partition": False, - "enable_bundle": True, - "data_random_seed": 0, - "is_enable_sparse": True, - "header": True, - "two_round": True, - "label_column": 0, - "weight_column": 0, - "group_column": 0, - "ignore_column": 0, - "min_data_in_leaf": 10, - "linear_tree": False, - "precise_float_parser": True, - "verbose": -1} - unchangeable_params = {"max_bin": 150, - "max_bin_by_feature": [30, 5], - "bin_construct_sample_cnt": 5000, - "min_data_in_bin": 2, - "use_missing": True, - "zero_as_missing": True, - "categorical_feature": [0, 1], - "feature_pre_filter": False, - "pre_partition": True, - "enable_bundle": False, - "data_random_seed": 1, - "is_enable_sparse": False, - "header": False, - "two_round": False, - "label_column": 1, - "weight_column": 1, - "group_column": 1, - "ignore_column": 1, - "forcedbins_filename": "/some/path/forcedbins.json", - "min_data_in_leaf": 2, - "linear_tree": True, - "precise_float_parser": False} + default_params = { + "max_bin": 100, + "max_bin_by_feature": [20, 10], + "bin_construct_sample_cnt": 10000, + "min_data_in_bin": 1, + "use_missing": False, + "zero_as_missing": False, + "categorical_feature": [0], + "feature_pre_filter": True, + "pre_partition": False, + "enable_bundle": True, + "data_random_seed": 0, + "is_enable_sparse": True, + "header": True, + "two_round": True, + "label_column": 0, + "weight_column": 0, + "group_column": 0, + "ignore_column": 0, + "min_data_in_leaf": 10, + "linear_tree": False, + "precise_float_parser": True, + "verbose": -1, + } + unchangeable_params = { + "max_bin": 150, + "max_bin_by_feature": [30, 5], + "bin_construct_sample_cnt": 5000, + "min_data_in_bin": 2, + "use_missing": True, + "zero_as_missing": True, + "categorical_feature": [0, 1], + "feature_pre_filter": False, + "pre_partition": True, + "enable_bundle": False, + "data_random_seed": 1, + "is_enable_sparse": False, + "header": False, + "two_round": 
False, + "label_column": 1, + "weight_column": 1, + "group_column": 1, + "ignore_column": 1, + "forcedbins_filename": "/some/path/forcedbins.json", + "min_data_in_leaf": 2, + "linear_tree": True, + "precise_float_parser": False, + } X = np.random.random((100, 2)) y = np.random.random(100) @@ -3525,9 +3319,11 @@ def test_dataset_update_params(): param_name = key else: param_name = "forced bins" - err_msg = ("Reducing `min_data_in_leaf` with `feature_pre_filter=true` may cause *" - if key == "min_data_in_leaf" - else f"Cannot change {param_name} *") + err_msg = ( + "Reducing `min_data_in_leaf` with `feature_pre_filter=true` may cause *" + if key == "min_data_in_leaf" + else f"Cannot change {param_name} *" + ) with np.testing.assert_raises_regex(lgb.basic.LightGBMError, err_msg): lgb.train(new_params, lgb_data, num_boost_round=3) @@ -3549,15 +3345,11 @@ def test_extra_trees(): # check extra trees increases regularization X, y = make_synthetic_regression() lgb_x = lgb.Dataset(X, label=y) - params = {'objective': 'regression', - 'num_leaves': 32, - 'verbose': -1, - 'extra_trees': False, - 'seed': 0} + params = {"objective": "regression", "num_leaves": 32, "verbose": -1, "extra_trees": False, "seed": 0} est = lgb.train(params, lgb_x, num_boost_round=10) predicted = est.predict(X) err = mean_squared_error(y, predicted) - params['extra_trees'] = True + params["extra_trees"] = True est = lgb.train(params, lgb_x, num_boost_round=10) predicted_new = est.predict(X) err_new = mean_squared_error(y, predicted_new) @@ -3568,14 +3360,11 @@ def test_path_smoothing(): # check path smoothing increases regularization X, y = make_synthetic_regression() lgb_x = lgb.Dataset(X, label=y) - params = {'objective': 'regression', - 'num_leaves': 32, - 'verbose': -1, - 'seed': 0} + params = {"objective": "regression", "num_leaves": 32, "verbose": -1, "seed": 0} est = lgb.train(params, lgb_x, num_boost_round=10) predicted = est.predict(X) err = mean_squared_error(y, predicted) - params['path_smooth'] = 1 + params["path_smooth"] = 1 est = lgb.train(params, lgb_x, num_boost_round=10) predicted_new = est.predict(X) err_new = mean_squared_error(y, predicted_new) @@ -3586,30 +3375,24 @@ def test_trees_to_dataframe(): pytest.importorskip("pandas") def _imptcs_to_numpy(X, impcts_dict): - cols = [f'Column_{i}' for i in range(X.shape[1])] - return [impcts_dict.get(col, 0.) 
for col in cols] + cols = [f"Column_{i}" for i in range(X.shape[1])] + return [impcts_dict.get(col, 0.0) for col in cols] X, y = load_breast_cancer(return_X_y=True) data = lgb.Dataset(X, label=y) num_trees = 10 bst = lgb.train({"objective": "binary", "verbose": -1}, data, num_trees) tree_df = bst.trees_to_dataframe() - split_dict = (tree_df[~tree_df['split_gain'].isnull()] - .groupby('split_feature') - .size() - .to_dict()) + split_dict = tree_df[~tree_df["split_gain"].isnull()].groupby("split_feature").size().to_dict() - gains_dict = (tree_df - .groupby('split_feature')['split_gain'] - .sum() - .to_dict()) + gains_dict = tree_df.groupby("split_feature")["split_gain"].sum().to_dict() tree_split = _imptcs_to_numpy(X, split_dict) tree_gains = _imptcs_to_numpy(X, gains_dict) - mod_split = bst.feature_importance('split') - mod_gains = bst.feature_importance('gain') - num_trees_from_df = tree_df['tree_index'].nunique() - obs_counts_from_df = tree_df.loc[tree_df['node_depth'] == 1, 'count'].values + mod_split = bst.feature_importance("split") + mod_gains = bst.feature_importance("gain") + num_trees_from_df = tree_df["tree_index"].nunique() + obs_counts_from_df = tree_df.loc[tree_df["node_depth"] == 1, "count"].values np.testing.assert_equal(tree_split, mod_split) np.testing.assert_allclose(tree_gains, mod_gains) @@ -3624,13 +3407,23 @@ def test_trees_to_dataframe(): tree_df = bst.trees_to_dataframe() assert len(tree_df) == 1 - assert tree_df.loc[0, 'tree_index'] == 0 - assert tree_df.loc[0, 'node_depth'] == 1 - assert tree_df.loc[0, 'node_index'] == "0-L0" - assert tree_df.loc[0, 'value'] is not None - for col in ('left_child', 'right_child', 'parent_index', 'split_feature', - 'split_gain', 'threshold', 'decision_type', 'missing_direction', - 'missing_type', 'weight', 'count'): + assert tree_df.loc[0, "tree_index"] == 0 + assert tree_df.loc[0, "node_depth"] == 1 + assert tree_df.loc[0, "node_index"] == "0-L0" + assert tree_df.loc[0, "value"] is not None + for col in ( + "left_child", + "right_child", + "parent_index", + "split_feature", + "split_gain", + "threshold", + "decision_type", + "missing_direction", + "missing_type", + "weight", + "count", + ): assert tree_df.loc[0, col] is None @@ -3639,12 +3432,10 @@ def test_interaction_constraints(): num_features = X.shape[1] train_data = lgb.Dataset(X, label=y) # check that constraint containing all features is equivalent to no constraint - params = {'verbose': -1, - 'seed': 0} + params = {"verbose": -1, "seed": 0} est = lgb.train(params, train_data, num_boost_round=10) pred1 = est.predict(X) - est = lgb.train(dict(params, interaction_constraints=[list(range(num_features))]), train_data, - num_boost_round=10) + est = lgb.train(dict(params, interaction_constraints=[list(range(num_features))]), train_data, num_boost_round=10) pred2 = est.predict(X) np.testing.assert_allclose(pred1, pred2) # check that constraint partitioning the features reduces train accuracy @@ -3652,17 +3443,20 @@ def test_interaction_constraints(): pred3 = est.predict(X) assert mean_squared_error(y, pred1) < mean_squared_error(y, pred3) # check that constraints consisting of single features reduce accuracy further - est = lgb.train(dict(params, interaction_constraints=[[i] for i in range(num_features)]), train_data, - num_boost_round=10) + est = lgb.train( + dict(params, interaction_constraints=[[i] for i in range(num_features)]), train_data, num_boost_round=10 + ) pred4 = est.predict(X) assert mean_squared_error(y, pred3) < mean_squared_error(y, pred4) # test that interaction 
constraints work when not all features are used X = np.concatenate([np.zeros((X.shape[0], 1)), X], axis=1) num_features = X.shape[1] train_data = lgb.Dataset(X, label=y) - est = lgb.train(dict(params, interaction_constraints=[[0] + list(range(2, num_features)), - [1] + list(range(2, num_features))]), - train_data, num_boost_round=10) + est = lgb.train( + dict(params, interaction_constraints=[[0] + list(range(2, num_features)), [1] + list(range(2, num_features))]), + train_data, + num_boost_round=10, + ) def test_linear_trees_num_threads(): @@ -3672,11 +3466,7 @@ def test_linear_trees_num_threads(): y = 2 * x + np.random.normal(0, 0.1, len(x)) x = x[:, np.newaxis] lgb_train = lgb.Dataset(x, label=y) - params = {'verbose': -1, - 'objective': 'regression', - 'seed': 0, - 'linear_tree': True, - 'num_threads': 2} + params = {"verbose": -1, "objective": "regression", "seed": 0, "linear_tree": True, "num_threads": 2} est = lgb.train(params, lgb_train, num_boost_round=100) pred1 = est.predict(x) params["num_threads"] = 4 @@ -3692,27 +3482,21 @@ def test_linear_trees(tmp_path): y = 2 * x + np.random.normal(0, 0.1, len(x)) x = x[:, np.newaxis] lgb_train = lgb.Dataset(x, label=y) - params = {'verbose': -1, - 'metric': 'mse', - 'seed': 0, - 'num_leaves': 2} + params = {"verbose": -1, "metric": "mse", "seed": 0, "num_leaves": 2} est = lgb.train(params, lgb_train, num_boost_round=10) pred1 = est.predict(x) lgb_train = lgb.Dataset(x, label=y) res = {} est = lgb.train( - dict( - params, - linear_tree=True - ), + dict(params, linear_tree=True), lgb_train, num_boost_round=10, valid_sets=[lgb_train], - valid_names=['train'], - callbacks=[lgb.record_evaluation(res)] + valid_names=["train"], + callbacks=[lgb.record_evaluation(res)], ) pred2 = est.predict(x) - assert res['train']['l2'][-1] == pytest.approx(mean_squared_error(y, pred2), abs=1e-1) + assert res["train"]["l2"][-1] == pytest.approx(mean_squared_error(y, pred2), abs=1e-1) assert mean_squared_error(y, pred2) < mean_squared_error(y, pred1) # test again with nans in data x[:10] = np.nan @@ -3722,36 +3506,28 @@ def test_linear_trees(tmp_path): lgb_train = lgb.Dataset(x, label=y) res = {} est = lgb.train( - dict( - params, - linear_tree=True - ), + dict(params, linear_tree=True), lgb_train, num_boost_round=10, valid_sets=[lgb_train], - valid_names=['train'], - callbacks=[lgb.record_evaluation(res)] + valid_names=["train"], + callbacks=[lgb.record_evaluation(res)], ) pred2 = est.predict(x) - assert res['train']['l2'][-1] == pytest.approx(mean_squared_error(y, pred2), abs=1e-1) + assert res["train"]["l2"][-1] == pytest.approx(mean_squared_error(y, pred2), abs=1e-1) assert mean_squared_error(y, pred2) < mean_squared_error(y, pred1) # test again with bagging res = {} est = lgb.train( - dict( - params, - linear_tree=True, - subsample=0.8, - bagging_freq=1 - ), + dict(params, linear_tree=True, subsample=0.8, bagging_freq=1), lgb_train, num_boost_round=10, valid_sets=[lgb_train], - valid_names=['train'], - callbacks=[lgb.record_evaluation(res)] + valid_names=["train"], + callbacks=[lgb.record_evaluation(res)], ) pred = est.predict(x) - assert res['train']['l2'][-1] == pytest.approx(mean_squared_error(y, pred), abs=1e-1) + assert res["train"]["l2"][-1] == pytest.approx(mean_squared_error(y, pred), abs=1e-1) # test with a feature that has only one non-nan value x = np.concatenate([np.ones([x.shape[0], 1]), x], 1) x[500:, 1] = np.nan @@ -3759,26 +3535,25 @@ def test_linear_trees(tmp_path): lgb_train = lgb.Dataset(x, label=y) res = {} est = lgb.train( - dict( - 
params, - linear_tree=True, - subsample=0.8, - bagging_freq=1 - ), + dict(params, linear_tree=True, subsample=0.8, bagging_freq=1), lgb_train, num_boost_round=10, valid_sets=[lgb_train], - valid_names=['train'], - callbacks=[lgb.record_evaluation(res)] + valid_names=["train"], + callbacks=[lgb.record_evaluation(res)], ) pred = est.predict(x) - assert res['train']['l2'][-1] == pytest.approx(mean_squared_error(y, pred), abs=1e-1) + assert res["train"]["l2"][-1] == pytest.approx(mean_squared_error(y, pred), abs=1e-1) # test with a categorical feature x[:250, 0] = 0 y[:250] += 10 lgb_train = lgb.Dataset(x, label=y) - est = lgb.train(dict(params, linear_tree=True, subsample=0.8, bagging_freq=1), lgb_train, - num_boost_round=10, categorical_feature=[0]) + est = lgb.train( + dict(params, linear_tree=True, subsample=0.8, bagging_freq=1), + lgb_train, + num_boost_round=10, + categorical_feature=[0], + ) # test refit: same results on same data est2 = est.refit(x, label=y) p1 = est.predict(x) @@ -3799,10 +3574,7 @@ def test_linear_trees(tmp_path): assert np.mean(np.abs(p2 - p1)) > np.abs(np.max(p3 - p1)) # test when num_leaves - 1 < num_features and when num_leaves - 1 > num_features X_train, _, y_train, _ = train_test_split(*load_breast_cancer(return_X_y=True), test_size=0.1, random_state=2) - params = {'linear_tree': True, - 'verbose': -1, - 'metric': 'mse', - 'seed': 0} + params = {"linear_tree": True, "verbose": -1, "metric": "mse", "seed": 0} train_data = lgb.Dataset(X_train, label=y_train, params=dict(params, num_leaves=2)) est = lgb.train(params, train_data, num_boost_round=10, categorical_feature=[0]) train_data = lgb.Dataset(X_train, label=y_train, params=dict(params, num_leaves=60)) @@ -3810,24 +3582,25 @@ def test_linear_trees(tmp_path): def test_save_and_load_linear(tmp_path): - X_train, X_test, y_train, y_test = train_test_split(*load_breast_cancer(return_X_y=True), test_size=0.1, - random_state=2) + X_train, X_test, y_train, y_test = train_test_split( + *load_breast_cancer(return_X_y=True), test_size=0.1, random_state=2 + ) X_train = np.concatenate([np.ones((X_train.shape[0], 1)), X_train], 1) - X_train[:X_train.shape[0] // 2, 0] = 0 - y_train[:X_train.shape[0] // 2] = 1 - params = {'linear_tree': True} + X_train[: X_train.shape[0] // 2, 0] = 0 + y_train[: X_train.shape[0] // 2] = 1 + params = {"linear_tree": True} train_data_1 = lgb.Dataset(X_train, label=y_train, params=params) est_1 = lgb.train(params, train_data_1, num_boost_round=10, categorical_feature=[0]) pred_1 = est_1.predict(X_train) - tmp_dataset = str(tmp_path / 'temp_dataset.bin') + tmp_dataset = str(tmp_path / "temp_dataset.bin") train_data_1.save_binary(tmp_dataset) train_data_2 = lgb.Dataset(tmp_dataset) est_2 = lgb.train(params, train_data_2, num_boost_round=10) pred_2 = est_2.predict(X_train) np.testing.assert_allclose(pred_1, pred_2) - model_file = str(tmp_path / 'model.txt') + model_file = str(tmp_path / "model.txt") est_2.save_model(model_file) est_3 = lgb.Booster(model_file=model_file) pred_3 = est_3.predict(X_train) @@ -3837,11 +3610,7 @@ def test_save_and_load_linear(tmp_path): def test_linear_single_leaf(): X_train, y_train = load_breast_cancer(return_X_y=True) train_data = lgb.Dataset(X_train, label=y_train) - params = { - "objective": "binary", - "linear_tree": True, - "min_sum_hessian": 5000 - } + params = {"objective": "binary", "linear_tree": True, "min_sum_hessian": 5000} bst = lgb.train(params, train_data, num_boost_round=5) y_pred = bst.predict(X_train) assert log_loss(y_train, y_pred) < 0.661 @@ 
-3853,13 +3622,7 @@ def test_predict_with_start_iteration(): train_data = lgb.Dataset(X_train, label=y_train) valid_data = lgb.Dataset(X_test, label=y_test) callbacks = [lgb.early_stopping(early_stopping_rounds)] if early_stopping_rounds is not None else [] - booster = lgb.train( - params, - train_data, - num_boost_round=50, - valid_sets=[valid_data], - callbacks=callbacks - ) + booster = lgb.train(params, train_data, num_boost_round=50, valid_sets=[valid_data], callbacks=callbacks) # test that the predict once with all iterations equals summed results with start_iteration and num_iteration all_pred = booster.predict(X, raw_score=True) @@ -3901,12 +3664,7 @@ def test_predict_with_start_iteration(): # test for regression X, y = make_synthetic_regression() - params = { - 'objective': 'regression', - 'verbose': -1, - 'metric': 'l2', - 'learning_rate': 0.5 - } + params = {"objective": "regression", "verbose": -1, "metric": "l2", "learning_rate": 0.5} # test both with and without early stopping inner_test(X, y, params, early_stopping_rounds=1) inner_test(X, y, params, early_stopping_rounds=5) @@ -3914,12 +3672,7 @@ def test_predict_with_start_iteration(): # test for multi-class X, y = load_iris(return_X_y=True) - params = { - 'objective': 'multiclass', - 'num_class': 3, - 'verbose': -1, - 'metric': 'multi_error' - } + params = {"objective": "multiclass", "num_class": 3, "verbose": -1, "metric": "multi_error"} # test both with and without early stopping inner_test(X, y, params, early_stopping_rounds=1) inner_test(X, y, params, early_stopping_rounds=5) @@ -3927,11 +3680,7 @@ def test_predict_with_start_iteration(): # test for binary X, y = load_breast_cancer(return_X_y=True) - params = { - 'objective': 'binary', - 'verbose': -1, - 'metric': 'auc' - } + params = {"objective": "binary", "verbose": -1, "metric": "auc"} # test both with and without early stopping inner_test(X, y, params, early_stopping_rounds=1) inner_test(X, y, params, early_stopping_rounds=5) @@ -3941,21 +3690,11 @@ def test_predict_with_start_iteration(): def test_average_precision_metric(): # test against sklearn average precision metric X, y = load_breast_cancer(return_X_y=True) - params = { - 'objective': 'binary', - 'metric': 'average_precision', - 'verbose': -1 - } + params = {"objective": "binary", "metric": "average_precision", "verbose": -1} res = {} lgb_X = lgb.Dataset(X, label=y) - est = lgb.train( - params, - lgb_X, - num_boost_round=10, - valid_sets=[lgb_X], - callbacks=[lgb.record_evaluation(res)] - ) - ap = res['training']['average_precision'][-1] + est = lgb.train(params, lgb_X, num_boost_round=10, valid_sets=[lgb_X], callbacks=[lgb.record_evaluation(res)]) + ap = res["training"]["average_precision"][-1] pred = est.predict(X) sklearn_ap = average_precision_score(y, pred) assert ap == pytest.approx(sklearn_ap) @@ -3963,37 +3702,28 @@ def test_average_precision_metric(): y = y.copy() y[:] = 1 lgb_X = lgb.Dataset(X, label=y) - lgb.train( - params, - lgb_X, - num_boost_round=1, - valid_sets=[lgb_X], - callbacks=[lgb.record_evaluation(res)] - ) - assert res['training']['average_precision'][-1] == pytest.approx(1) + lgb.train(params, lgb_X, num_boost_round=1, valid_sets=[lgb_X], callbacks=[lgb.record_evaluation(res)]) + assert res["training"]["average_precision"][-1] == pytest.approx(1) def test_reset_params_works_with_metric_num_class_and_boosting(): X, y = load_breast_cancer(return_X_y=True) dataset_params = {"max_bin": 150} booster_params = { - 'objective': 'multiclass', - 'max_depth': 4, - 'bagging_fraction': 0.8, - 
'metric': ['multi_logloss', 'multi_error'], - 'boosting': 'gbdt', - 'num_class': 5 + "objective": "multiclass", + "max_depth": 4, + "bagging_fraction": 0.8, + "metric": ["multi_logloss", "multi_error"], + "boosting": "gbdt", + "num_class": 5, } dtrain = lgb.Dataset(X, y, params=dataset_params) - bst = lgb.Booster( - params=booster_params, - train_set=dtrain - ) + bst = lgb.Booster(params=booster_params, train_set=dtrain) expected_params = dict(dataset_params, **booster_params) assert bst.params == expected_params - booster_params['bagging_fraction'] += 0.1 + booster_params["bagging_fraction"] += 0.1 new_bst = bst.reset_parameter(booster_params) expected_params = dict(dataset_params, **booster_params) @@ -4004,10 +3734,7 @@ def test_reset_params_works_with_metric_num_class_and_boosting(): def test_dump_model(): X, y = load_breast_cancer(return_X_y=True) train_data = lgb.Dataset(X, label=y) - params = { - "objective": "binary", - "verbose": -1 - } + params = {"objective": "binary", "verbose": -1} bst = lgb.train(params, train_data, num_boost_round=5) dumped_model_str = str(bst.dump_model(5, 0)) assert "leaf_features" not in dumped_model_str @@ -4015,7 +3742,7 @@ def test_dump_model(): assert "leaf_const" not in dumped_model_str assert "leaf_value" in dumped_model_str assert "leaf_count" in dumped_model_str - params['linear_tree'] = True + params["linear_tree"] = True train_data = lgb.Dataset(X, label=y) bst = lgb.train(params, train_data, num_boost_round=5) dumped_model_str = str(bst.dump_model(5, 0)) @@ -4027,39 +3754,28 @@ def test_dump_model(): def test_dump_model_hook(): - def hook(obj): - if 'leaf_value' in obj: - obj['LV'] = obj['leaf_value'] - del obj['leaf_value'] + if "leaf_value" in obj: + obj["LV"] = obj["leaf_value"] + del obj["leaf_value"] return obj X, y = load_breast_cancer(return_X_y=True) train_data = lgb.Dataset(X, label=y) - params = { - "objective": "binary", - "verbose": -1 - } + params = {"objective": "binary", "verbose": -1} bst = lgb.train(params, train_data, num_boost_round=5) dumped_model_str = str(bst.dump_model(5, 0, object_hook=hook)) assert "leaf_value" not in dumped_model_str assert "LV" in dumped_model_str -@pytest.mark.skipif(getenv('TASK', '') == 'cuda', reason='Forced splits are not yet supported by CUDA version') +@pytest.mark.skipif(getenv("TASK", "") == "cuda", reason="Forced splits are not yet supported by CUDA version") def test_force_split_with_feature_fraction(tmp_path): X, y = make_synthetic_regression() X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) lgb_train = lgb.Dataset(X_train, y_train) - forced_split = { - "feature": 0, - "threshold": 0.5, - "right": { - "feature": 2, - "threshold": 10.0 - } - } + forced_split = {"feature": 0, "threshold": 0.5, "right": {"feature": 2, "threshold": 10.0}} tmp_split_file = tmp_path / "forced_split.json" with open(tmp_split_file, "w") as f: @@ -4070,7 +3786,7 @@ def test_force_split_with_feature_fraction(tmp_path): "feature_fraction": 0.6, "force_col_wise": True, "feature_fraction_seed": 1, - "forcedsplits_filename": tmp_split_file + "forcedsplits_filename": tmp_split_file, } gbm = lgb.train(params, lgb_train) @@ -4081,7 +3797,7 @@ def test_force_split_with_feature_fraction(tmp_path): assert len(tree_info) > 1 for tree in tree_info: tree_structure = tree["tree_structure"] - assert tree_structure['split_feature'] == 0 + assert tree_structure["split_feature"] == 0 def test_goss_boosting_and_strategy_equivalent(): @@ -4090,27 +3806,25 @@ def 
test_goss_boosting_and_strategy_equivalent(): lgb_train = lgb.Dataset(X_train, y_train) lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train) base_params = { - 'metric': 'l2', - 'verbose': -1, - 'bagging_seed': 0, - 'learning_rate': 0.05, - 'num_threads': 1, - 'force_row_wise': True, - 'gpu_use_dp': True, + "metric": "l2", + "verbose": -1, + "bagging_seed": 0, + "learning_rate": 0.05, + "num_threads": 1, + "force_row_wise": True, + "gpu_use_dp": True, } - params1 = {**base_params, 'boosting': 'goss'} + params1 = {**base_params, "boosting": "goss"} evals_result1 = {} - lgb.train(params1, lgb_train, - num_boost_round=10, - valid_sets=lgb_eval, - callbacks=[lgb.record_evaluation(evals_result1)]) - params2 = {**base_params, 'data_sample_strategy': 'goss'} + lgb.train( + params1, lgb_train, num_boost_round=10, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result1)] + ) + params2 = {**base_params, "data_sample_strategy": "goss"} evals_result2 = {} - lgb.train(params2, lgb_train, - num_boost_round=10, - valid_sets=lgb_eval, - callbacks=[lgb.record_evaluation(evals_result2)]) - assert evals_result1['valid_0']['l2'] == evals_result2['valid_0']['l2'] + lgb.train( + params2, lgb_train, num_boost_round=10, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result2)] + ) + assert evals_result1["valid_0"]["l2"] == evals_result2["valid_0"]["l2"] def test_sample_strategy_with_boosting(): @@ -4120,53 +3834,49 @@ def test_sample_strategy_with_boosting(): lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train) base_params = { - 'metric': 'l2', - 'verbose': -1, - 'num_threads': 1, - 'force_row_wise': True, - 'gpu_use_dp': True, + "metric": "l2", + "verbose": -1, + "num_threads": 1, + "force_row_wise": True, + "gpu_use_dp": True, } - params1 = {**base_params, 'boosting': 'dart', 'data_sample_strategy': 'goss'} + params1 = {**base_params, "boosting": "dart", "data_sample_strategy": "goss"} evals_result = {} - gbm = lgb.train(params1, lgb_train, - num_boost_round=10, - valid_sets=lgb_eval, - callbacks=[lgb.record_evaluation(evals_result)]) - eval_res1 = evals_result['valid_0']['l2'][-1] + gbm = lgb.train( + params1, lgb_train, num_boost_round=10, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)] + ) + eval_res1 = evals_result["valid_0"]["l2"][-1] test_res1 = mean_squared_error(y_test, gbm.predict(X_test)) assert test_res1 == pytest.approx(3149.393862, abs=1.0) assert eval_res1 == pytest.approx(test_res1) - params2 = {**base_params, 'boosting': 'gbdt', 'data_sample_strategy': 'goss'} + params2 = {**base_params, "boosting": "gbdt", "data_sample_strategy": "goss"} evals_result = {} - gbm = lgb.train(params2, lgb_train, - num_boost_round=10, - valid_sets=lgb_eval, - callbacks=[lgb.record_evaluation(evals_result)]) - eval_res2 = evals_result['valid_0']['l2'][-1] + gbm = lgb.train( + params2, lgb_train, num_boost_round=10, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)] + ) + eval_res2 = evals_result["valid_0"]["l2"][-1] test_res2 = mean_squared_error(y_test, gbm.predict(X_test)) assert test_res2 == pytest.approx(2547.715968, abs=1.0) assert eval_res2 == pytest.approx(test_res2) - params3 = {**base_params, 'boosting': 'goss', 'data_sample_strategy': 'goss'} + params3 = {**base_params, "boosting": "goss", "data_sample_strategy": "goss"} evals_result = {} - gbm = lgb.train(params3, lgb_train, - num_boost_round=10, - valid_sets=lgb_eval, - callbacks=[lgb.record_evaluation(evals_result)]) - eval_res3 = evals_result['valid_0']['l2'][-1] + gbm = 
lgb.train( + params3, lgb_train, num_boost_round=10, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)] + ) + eval_res3 = evals_result["valid_0"]["l2"][-1] test_res3 = mean_squared_error(y_test, gbm.predict(X_test)) assert test_res3 == pytest.approx(2547.715968, abs=1.0) assert eval_res3 == pytest.approx(test_res3) - params4 = {**base_params, 'boosting': 'rf', 'data_sample_strategy': 'goss'} + params4 = {**base_params, "boosting": "rf", "data_sample_strategy": "goss"} evals_result = {} - gbm = lgb.train(params4, lgb_train, - num_boost_round=10, - valid_sets=lgb_eval, - callbacks=[lgb.record_evaluation(evals_result)]) - eval_res4 = evals_result['valid_0']['l2'][-1] + gbm = lgb.train( + params4, lgb_train, num_boost_round=10, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)] + ) + eval_res4 = evals_result["valid_0"]["l2"][-1] test_res4 = mean_squared_error(y_test, gbm.predict(X_test)) assert test_res4 == pytest.approx(2095.538735, abs=1.0) assert eval_res4 == pytest.approx(test_res4) @@ -4180,37 +3890,52 @@ def test_sample_strategy_with_boosting(): assert eval_res2 != eval_res4 assert test_res2 != test_res4 - params5 = {**base_params, 'boosting': 'dart', 'data_sample_strategy': 'bagging', 'bagging_freq': 1, 'bagging_fraction': 0.5} + params5 = { + **base_params, + "boosting": "dart", + "data_sample_strategy": "bagging", + "bagging_freq": 1, + "bagging_fraction": 0.5, + } evals_result = {} - gbm = lgb.train(params5, lgb_train, - num_boost_round=10, - valid_sets=lgb_eval, - callbacks=[lgb.record_evaluation(evals_result)]) - eval_res5 = evals_result['valid_0']['l2'][-1] + gbm = lgb.train( + params5, lgb_train, num_boost_round=10, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)] + ) + eval_res5 = evals_result["valid_0"]["l2"][-1] test_res5 = mean_squared_error(y_test, gbm.predict(X_test)) assert test_res5 == pytest.approx(3134.866931, abs=1.0) assert eval_res5 == pytest.approx(test_res5) - params6 = {**base_params, 'boosting': 'gbdt', 'data_sample_strategy': 'bagging', 'bagging_freq': 1, 'bagging_fraction': 0.5} + params6 = { + **base_params, + "boosting": "gbdt", + "data_sample_strategy": "bagging", + "bagging_freq": 1, + "bagging_fraction": 0.5, + } evals_result = {} - gbm = lgb.train(params6, lgb_train, - num_boost_round=10, - valid_sets=lgb_eval, - callbacks=[lgb.record_evaluation(evals_result)]) - eval_res6 = evals_result['valid_0']['l2'][-1] + gbm = lgb.train( + params6, lgb_train, num_boost_round=10, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)] + ) + eval_res6 = evals_result["valid_0"]["l2"][-1] test_res6 = mean_squared_error(y_test, gbm.predict(X_test)) assert test_res6 == pytest.approx(2539.792378, abs=1.0) assert eval_res6 == pytest.approx(test_res6) assert test_res5 != test_res6 assert eval_res5 != eval_res6 - params7 = {**base_params, 'boosting': 'rf', 'data_sample_strategy': 'bagging', 'bagging_freq': 1, 'bagging_fraction': 0.5} + params7 = { + **base_params, + "boosting": "rf", + "data_sample_strategy": "bagging", + "bagging_freq": 1, + "bagging_fraction": 0.5, + } evals_result = {} - gbm = lgb.train(params7, lgb_train, - num_boost_round=10, - valid_sets=lgb_eval, - callbacks=[lgb.record_evaluation(evals_result)]) - eval_res7 = evals_result['valid_0']['l2'][-1] + gbm = lgb.train( + params7, lgb_train, num_boost_round=10, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)] + ) + eval_res7 = evals_result["valid_0"]["l2"][-1] test_res7 = mean_squared_error(y_test, gbm.predict(X_test)) 
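    # params7 pairs random-forest boosting with explicit row bagging (bagging_freq=1,
    # bagging_fraction=0.5); the resulting l2 is checked against a fixed reference value below.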
assert test_res7 == pytest.approx(1518.704481, abs=1.0) assert eval_res7 == pytest.approx(test_res7) @@ -4225,71 +3950,73 @@ def test_record_evaluation_with_train(): ds = lgb.Dataset(X, y) eval_result = {} callbacks = [lgb.record_evaluation(eval_result)] - params = {'objective': 'l2', 'num_leaves': 3} + params = {"objective": "l2", "num_leaves": 3} num_boost_round = 5 bst = lgb.train(params, ds, num_boost_round=num_boost_round, valid_sets=[ds], callbacks=callbacks) - assert list(eval_result.keys()) == ['training'] + assert list(eval_result.keys()) == ["training"] train_mses = [] for i in range(num_boost_round): pred = bst.predict(X, num_iteration=i + 1) mse = mean_squared_error(y, pred) train_mses.append(mse) - np.testing.assert_allclose(eval_result['training']['l2'], train_mses) + np.testing.assert_allclose(eval_result["training"]["l2"], train_mses) -@pytest.mark.parametrize('train_metric', [False, True]) +@pytest.mark.parametrize("train_metric", [False, True]) def test_record_evaluation_with_cv(train_metric): X, y = make_synthetic_regression() ds = lgb.Dataset(X, y) eval_result = {} callbacks = [lgb.record_evaluation(eval_result)] - metrics = ['l2', 'rmse'] - params = {'objective': 'l2', 'num_leaves': 3, 'metric': metrics} - cv_hist = lgb.cv(params, ds, num_boost_round=5, stratified=False, callbacks=callbacks, eval_train_metric=train_metric) - expected_datasets = {'valid'} + metrics = ["l2", "rmse"] + params = {"objective": "l2", "num_leaves": 3, "metric": metrics} + cv_hist = lgb.cv( + params, ds, num_boost_round=5, stratified=False, callbacks=callbacks, eval_train_metric=train_metric + ) + expected_datasets = {"valid"} if train_metric: - expected_datasets.add('train') + expected_datasets.add("train") assert set(eval_result.keys()) == expected_datasets for dataset in expected_datasets: for metric in metrics: - for agg in ('mean', 'stdv'): - key = f'{dataset} {metric}-{agg}' - np.testing.assert_allclose( - cv_hist[key], eval_result[dataset][f'{metric}-{agg}'] - ) + for agg in ("mean", "stdv"): + key = f"{dataset} {metric}-{agg}" + np.testing.assert_allclose(cv_hist[key], eval_result[dataset][f"{metric}-{agg}"]) def test_pandas_with_numpy_regular_dtypes(): - pd = pytest.importorskip('pandas') - uints = ['uint8', 'uint16', 'uint32', 'uint64'] - ints = ['int8', 'int16', 'int32', 'int64'] - bool_and_floats = ['bool', 'float16', 'float32', 'float64'] + pd = pytest.importorskip("pandas") + uints = ["uint8", "uint16", "uint32", "uint64"] + ints = ["int8", "int16", "int32", "int64"] + bool_and_floats = ["bool", "float16", "float32", "float64"] rng = np.random.RandomState(42) n_samples = 100 # data as float64 - df = pd.DataFrame({ - 'x1': rng.randint(0, 2, n_samples), - 'x2': rng.randint(1, 3, n_samples), - 'x3': 10 * rng.randint(1, 3, n_samples), - 'x4': 100 * rng.randint(1, 3, n_samples), - }) + df = pd.DataFrame( + { + "x1": rng.randint(0, 2, n_samples), + "x2": rng.randint(1, 3, n_samples), + "x3": 10 * rng.randint(1, 3, n_samples), + "x4": 100 * rng.randint(1, 3, n_samples), + } + ) df = df.astype(np.float64) - y = df['x1'] * (df['x2'] + df['x3'] + df['x4']) + y = df["x1"] * (df["x2"] + df["x3"] + df["x4"]) ds = lgb.Dataset(df, y) - params = {'objective': 'l2', 'num_leaves': 31, 'min_child_samples': 1} + params = {"objective": "l2", "num_leaves": 31, "min_child_samples": 1} bst = lgb.train(params, ds, num_boost_round=5) preds = bst.predict(df) # test all features were used - assert bst.trees_to_dataframe()['split_feature'].nunique() == df.shape[1] + assert 
bst.trees_to_dataframe()["split_feature"].nunique() == df.shape[1] # test the score is better than predicting the mean baseline = np.full_like(y, y.mean()) assert mean_squared_error(y, preds) < mean_squared_error(y, baseline) # test all predictions are equal using different input dtypes for target_dtypes in [uints, ints, bool_and_floats]: - df2 = df.astype({f'x{i}': dtype for i, dtype in enumerate(target_dtypes, start=1)}) + df2 = df.astype({f"x{i}": dtype for i, dtype in enumerate(target_dtypes, start=1)}) assert df2.dtypes.tolist() == target_dtypes ds2 = lgb.Dataset(df2, y) bst2 = lgb.train(params, ds2, num_boost_round=5) @@ -4298,34 +4025,36 @@ def test_pandas_with_numpy_regular_dtypes(): def test_pandas_nullable_dtypes(): - pd = pytest.importorskip('pandas') + pd = pytest.importorskip("pandas") rng = np.random.RandomState(0) - df = pd.DataFrame({ - 'x1': rng.randint(1, 3, size=100), - 'x2': np.linspace(-1, 1, 100), - 'x3': pd.arrays.SparseArray(rng.randint(0, 11, size=100)), - 'x4': rng.rand(100) < 0.5, - }) + df = pd.DataFrame( + { + "x1": rng.randint(1, 3, size=100), + "x2": np.linspace(-1, 1, 100), + "x3": pd.arrays.SparseArray(rng.randint(0, 11, size=100)), + "x4": rng.rand(100) < 0.5, + } + ) # introduce some missing values - df.loc[1, 'x1'] = np.nan - df.loc[2, 'x2'] = np.nan - df.loc[3, 'x4'] = np.nan + df.loc[1, "x1"] = np.nan + df.loc[2, "x2"] = np.nan + df.loc[3, "x4"] = np.nan # the previous line turns x3 into object dtype in recent versions of pandas - df['x4'] = df['x4'].astype(np.float64) - y = df['x1'] * df['x2'] + df['x3'] * (1 + df['x4']) + df["x4"] = df["x4"].astype(np.float64) + y = df["x1"] * df["x2"] + df["x3"] * (1 + df["x4"]) y = y.fillna(0) # train with regular dtypes - params = {'objective': 'l2', 'num_leaves': 31, 'min_child_samples': 1} + params = {"objective": "l2", "num_leaves": 31, "min_child_samples": 1} ds = lgb.Dataset(df, y) bst = lgb.train(params, ds, num_boost_round=5) preds = bst.predict(df) # convert to nullable dtypes df2 = df.copy() - df2['x1'] = df2['x1'].astype('Int32') - df2['x2'] = df2['x2'].astype('Float64') - df2['x4'] = df2['x4'].astype('boolean') + df2["x1"] = df2["x1"].astype("Int32") + df2["x2"] = df2["x2"].astype("Float64") + df2["x4"] = df2["x4"].astype("boolean") # test training succeeds ds_nullable_dtypes = lgb.Dataset(df2, y) @@ -4334,7 +4063,7 @@ def test_pandas_nullable_dtypes(): trees_df = bst_nullable_dtypes.trees_to_dataframe() # test all features were used - assert trees_df['split_feature'].nunique() == df.shape[1] + assert trees_df["split_feature"].nunique() == df.shape[1] # test the score is better than predicting the mean baseline = np.full_like(y, y.mean()) assert mean_squared_error(y, preds) < mean_squared_error(y, baseline) @@ -4346,13 +4075,17 @@ def test_pandas_nullable_dtypes(): def test_boost_from_average_with_single_leaf_trees(): # test data are taken from bug report # https://github.com/microsoft/LightGBM/issues/4708 - X = np.array([ - [1021.0589, 1018.9578], - [1023.85754, 1018.7854], - [1024.5468, 1018.88513], - [1019.02954, 1018.88513], - [1016.79926, 1018.88513], - [1007.6, 1018.88513]], dtype=np.float32) + X = np.array( + [ + [1021.0589, 1018.9578], + [1023.85754, 1018.7854], + [1024.5468, 1018.88513], + [1019.02954, 1018.88513], + [1016.79926, 1018.88513], + [1007.6, 1018.88513], + ], + dtype=np.float32, + ) y = np.array([1023.8, 1024.6, 1024.4, 1023.8, 1022.0, 1014.4], dtype=np.float32) params = { "extra_trees": True, @@ -4395,19 +4128,19 @@ def test_cegb_split_buffer_clean(): train = 
lgb.Dataset(train_data, train_y, free_raw_data=True) params = { - 'boosting_type': 'gbdt', - 'objective': 'regression', - 'max_bin': 255, - 'num_leaves': 31, - 'seed': 0, - 'learning_rate': 0.1, - 'min_data_in_leaf': 0, - 'verbose': -1, - 'min_split_gain': 1000.0, - 'cegb_penalty_feature_coupled': 5 * np.arange(C), - 'cegb_penalty_split': 0.0002, - 'cegb_tradeoff': 10.0, - 'force_col_wise': True, + "boosting_type": "gbdt", + "objective": "regression", + "max_bin": 255, + "num_leaves": 31, + "seed": 0, + "learning_rate": 0.1, + "min_data_in_leaf": 0, + "verbose": -1, + "min_split_gain": 1000.0, + "cegb_penalty_feature_coupled": 5 * np.arange(C), + "cegb_penalty_split": 0.0002, + "cegb_tradeoff": 10.0, + "force_col_wise": True, } model = lgb.train(params, train, num_boost_round=10) @@ -4420,54 +4153,51 @@ def test_verbosity_and_verbose(capsys): X, y = make_synthetic_regression() ds = lgb.Dataset(X, y) params = { - 'num_leaves': 3, - 'verbose': 1, - 'verbosity': 0, + "num_leaves": 3, + "verbose": 1, + "verbosity": 0, } lgb.train(params, ds, num_boost_round=1) - expected_msg = ( - '[LightGBM] [Warning] verbosity is set=0, verbose=1 will be ignored. ' - 'Current value: verbosity=0' - ) + expected_msg = "[LightGBM] [Warning] verbosity is set=0, verbose=1 will be ignored. " "Current value: verbosity=0" stdout = capsys.readouterr().out assert expected_msg in stdout -@pytest.mark.parametrize('verbosity_param', lgb.basic._ConfigAliases.get("verbosity")) -@pytest.mark.parametrize('verbosity', [-1, 0]) +@pytest.mark.parametrize("verbosity_param", lgb.basic._ConfigAliases.get("verbosity")) +@pytest.mark.parametrize("verbosity", [-1, 0]) def test_verbosity_can_suppress_alias_warnings(capsys, verbosity_param, verbosity): X, y = make_synthetic_regression() ds = lgb.Dataset(X, y) params = { - 'num_leaves': 3, - 'subsample': 0.75, - 'bagging_fraction': 0.8, - 'force_col_wise': True, + "num_leaves": 3, + "subsample": 0.75, + "bagging_fraction": 0.8, + "force_col_wise": True, verbosity_param: verbosity, } lgb.train(params, ds, num_boost_round=1) expected_msg = ( - '[LightGBM] [Warning] bagging_fraction is set=0.8, subsample=0.75 will be ignored. ' - 'Current value: bagging_fraction=0.8' + "[LightGBM] [Warning] bagging_fraction is set=0.8, subsample=0.75 will be ignored. 
" + "Current value: bagging_fraction=0.8" ) stdout = capsys.readouterr().out if verbosity >= 0: assert expected_msg in stdout else: - assert re.search(r'\[LightGBM\]', stdout) is None + assert re.search(r"\[LightGBM\]", stdout) is None -@pytest.mark.skipif(not PANDAS_INSTALLED, reason='pandas is not installed') +@pytest.mark.skipif(not PANDAS_INSTALLED, reason="pandas is not installed") def test_validate_features(): X, y = make_synthetic_regression() - features = ['x1', 'x2', 'x3', 'x4'] + features = ["x1", "x2", "x3", "x4"] df = pd_DataFrame(X, columns=features) ds = lgb.Dataset(df, y) - bst = lgb.train({'num_leaves': 15, 'verbose': -1}, ds, num_boost_round=10) + bst = lgb.train({"num_leaves": 15, "verbose": -1}, ds, num_boost_round=10) assert bst.feature_name() == features # try to predict with a different feature - df2 = df.rename(columns={'x3': 'z'}) + df2 = df.rename(columns={"x3": "z"}) with pytest.raises(lgb.basic.LightGBMError, match="Expected 'x3' at position 2 but found 'z'"): bst.predict(df2, validate_features=True) @@ -4489,7 +4219,7 @@ def test_train_and_cv_raise_informative_error_for_train_set_of_wrong_type(): lgb.cv({}, train_set=[]) -@pytest.mark.parametrize('num_boost_round', [-7, -1, 0]) +@pytest.mark.parametrize("num_boost_round", [-7, -1, 0]) def test_train_and_cv_raise_informative_error_for_impossible_num_boost_round(num_boost_round): X, y = make_synthetic_regression(n_samples=100) error_msg = rf"num_boost_round must be greater than 0\. Got {num_boost_round}\." @@ -4502,15 +4232,13 @@ def test_train_and_cv_raise_informative_error_for_impossible_num_boost_round(num def test_train_raises_informative_error_if_any_valid_sets_are_not_dataset_objects(): X, y = make_synthetic_regression(n_samples=100) X_valid = X * 2.0 - with pytest.raises(TypeError, match=r"Every item in valid_sets must be a Dataset object\. Item 1 has type 'tuple'\."): + with pytest.raises( + TypeError, match=r"Every item in valid_sets must be a Dataset object\. Item 1 has type 'tuple'\." 
+ ): lgb.train( params={}, train_set=lgb.Dataset(X, y), - valid_sets=[ - lgb.Dataset(X_valid, y), - ([1.0], [2.0]), - [5.6, 5.7, 5.8] - ] + valid_sets=[lgb.Dataset(X_valid, y), ([1.0], [2.0]), [5.6, 5.7, 5.8]], ) @@ -4518,21 +4246,23 @@ def test_train_raises_informative_error_for_params_of_wrong_type(): X, y = make_synthetic_regression() params = {"num_leaves": "too-many"} dtrain = lgb.Dataset(X, label=y) - with pytest.raises(lgb.basic.LightGBMError, match="Parameter num_leaves should be of type int, got \"too-many\""): + with pytest.raises(lgb.basic.LightGBMError, match='Parameter num_leaves should be of type int, got "too-many"'): lgb.train(params, dtrain) def test_quantized_training(): X, y = make_synthetic_regression() ds = lgb.Dataset(X, label=y) - bst_params = {'num_leaves': 15, 'verbose': -1, 'seed': 0} + bst_params = {"num_leaves": 15, "verbose": -1, "seed": 0} bst = lgb.train(bst_params, ds, num_boost_round=10) rmse = np.sqrt(np.mean((bst.predict(X) - y) ** 2)) - bst_params.update({ - 'use_quantized_grad': True, - 'num_grad_quant_bins': 30, - 'quant_train_renew_leaf': True, - }) + bst_params.update( + { + "use_quantized_grad": True, + "num_grad_quant_bins": 30, + "quant_train_renew_leaf": True, + } + ) quant_bst = lgb.train(bst_params, ds, num_boost_round=10) quant_rmse = np.sqrt(np.mean((quant_bst.predict(X) - y) ** 2)) assert quant_rmse < rmse + 6.0 diff --git a/tests/python_package_test/test_plotting.py b/tests/python_package_test/test_plotting.py index 39eebabaf..2d68ead6a 100644 --- a/tests/python_package_test/test_plotting.py +++ b/tests/python_package_test/test_plotting.py @@ -9,7 +9,8 @@ from lightgbm.compat import GRAPHVIZ_INSTALLED, MATPLOTLIB_INSTALLED, PANDAS_INS if MATPLOTLIB_INSTALLED: import matplotlib - matplotlib.use('Agg') + + matplotlib.use("Agg") if GRAPHVIZ_INSTALLED: import graphviz @@ -18,8 +19,7 @@ from .utils import load_breast_cancer, make_synthetic_regression @pytest.fixture(scope="module") def breast_cancer_split(): - return train_test_split(*load_breast_cancer(return_X_y=True), - test_size=0.1, random_state=1) + return train_test_split(*load_breast_cancer(return_X_y=True), test_size=0.1, random_state=1) def _categorical_data(category_values_lower_bound, category_values_upper_bound): @@ -41,51 +41,51 @@ def train_data(breast_cancer_split): @pytest.fixture def params(): - return {"objective": "binary", - "verbose": -1, - "num_leaves": 3} + return {"objective": "binary", "verbose": -1, "num_leaves": 3} -@pytest.mark.skipif(not MATPLOTLIB_INSTALLED, reason='matplotlib is not installed') +@pytest.mark.skipif(not MATPLOTLIB_INSTALLED, reason="matplotlib is not installed") def test_plot_importance(params, breast_cancer_split, train_data): X_train, _, y_train, _ = breast_cancer_split gbm0 = lgb.train(params, train_data, num_boost_round=10) ax0 = lgb.plot_importance(gbm0) assert isinstance(ax0, matplotlib.axes.Axes) - assert ax0.get_title() == 'Feature importance' - assert ax0.get_xlabel() == 'Feature importance' - assert ax0.get_ylabel() == 'Features' + assert ax0.get_title() == "Feature importance" + assert ax0.get_xlabel() == "Feature importance" + assert ax0.get_ylabel() == "Features" assert len(ax0.patches) <= 30 gbm1 = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, verbose=-1) gbm1.fit(X_train, y_train) - ax1 = lgb.plot_importance(gbm1, color='r', title='t', xlabel='x', ylabel='y') + ax1 = lgb.plot_importance(gbm1, color="r", title="t", xlabel="x", ylabel="y") assert isinstance(ax1, matplotlib.axes.Axes) - assert ax1.get_title() == 't' - assert 
ax1.get_xlabel() == 'x' - assert ax1.get_ylabel() == 'y' + assert ax1.get_title() == "t" + assert ax1.get_xlabel() == "x" + assert ax1.get_ylabel() == "y" assert len(ax1.patches) <= 30 for patch in ax1.patches: - assert patch.get_facecolor() == (1., 0, 0, 1.) # red + assert patch.get_facecolor() == (1.0, 0, 0, 1.0) # red - ax2 = lgb.plot_importance(gbm0, color=['r', 'y', 'g', 'b'], title=None, xlabel=None, ylabel=None) + ax2 = lgb.plot_importance(gbm0, color=["r", "y", "g", "b"], title=None, xlabel=None, ylabel=None) assert isinstance(ax2, matplotlib.axes.Axes) - assert ax2.get_title() == '' - assert ax2.get_xlabel() == '' - assert ax2.get_ylabel() == '' + assert ax2.get_title() == "" + assert ax2.get_xlabel() == "" + assert ax2.get_ylabel() == "" assert len(ax2.patches) <= 30 - assert ax2.patches[0].get_facecolor() == (1., 0, 0, 1.) # r - assert ax2.patches[1].get_facecolor() == (.75, .75, 0, 1.) # y - assert ax2.patches[2].get_facecolor() == (0, .5, 0, 1.) # g - assert ax2.patches[3].get_facecolor() == (0, 0, 1., 1.) # b + assert ax2.patches[0].get_facecolor() == (1.0, 0, 0, 1.0) # r + assert ax2.patches[1].get_facecolor() == (0.75, 0.75, 0, 1.0) # y + assert ax2.patches[2].get_facecolor() == (0, 0.5, 0, 1.0) # g + assert ax2.patches[3].get_facecolor() == (0, 0, 1.0, 1.0) # b - ax3 = lgb.plot_importance(gbm0, title='t @importance_type@', xlabel='x @importance_type@', ylabel='y @importance_type@') + ax3 = lgb.plot_importance( + gbm0, title="t @importance_type@", xlabel="x @importance_type@", ylabel="y @importance_type@" + ) assert isinstance(ax3, matplotlib.axes.Axes) - assert ax3.get_title() == 't @importance_type@' - assert ax3.get_xlabel() == 'x split' - assert ax3.get_ylabel() == 'y @importance_type@' + assert ax3.get_title() == "t @importance_type@" + assert ax3.get_xlabel() == "x split" + assert ax3.get_ylabel() == "y @importance_type@" assert len(ax3.patches) <= 30 gbm2 = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, verbose=-1, importance_type="gain") @@ -108,51 +108,59 @@ def test_plot_importance(params, breast_cancer_split, train_data): assert first_bar1 != first_bar3 -@pytest.mark.skipif(not MATPLOTLIB_INSTALLED, reason='matplotlib is not installed') +@pytest.mark.skipif(not MATPLOTLIB_INSTALLED, reason="matplotlib is not installed") def test_plot_split_value_histogram(params, breast_cancer_split, train_data): X_train, _, y_train, _ = breast_cancer_split gbm0 = lgb.train(params, train_data, num_boost_round=10) ax0 = lgb.plot_split_value_histogram(gbm0, 27) assert isinstance(ax0, matplotlib.axes.Axes) - assert ax0.get_title() == 'Split value histogram for feature with index 27' - assert ax0.get_xlabel() == 'Feature split value' - assert ax0.get_ylabel() == 'Count' + assert ax0.get_title() == "Split value histogram for feature with index 27" + assert ax0.get_xlabel() == "Feature split value" + assert ax0.get_ylabel() == "Count" assert len(ax0.patches) <= 2 gbm1 = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, verbose=-1) gbm1.fit(X_train, y_train) - ax1 = lgb.plot_split_value_histogram(gbm1, gbm1.booster_.feature_name()[27], figsize=(10, 5), - title='Histogram for feature @index/name@ @feature@', - xlabel='x', ylabel='y', color='r') + ax1 = lgb.plot_split_value_histogram( + gbm1, + gbm1.booster_.feature_name()[27], + figsize=(10, 5), + title="Histogram for feature @index/name@ @feature@", + xlabel="x", + ylabel="y", + color="r", + ) assert isinstance(ax1, matplotlib.axes.Axes) - title = f'Histogram for feature name {gbm1.booster_.feature_name()[27]}' + title = f"Histogram 
for feature name {gbm1.booster_.feature_name()[27]}" assert ax1.get_title() == title - assert ax1.get_xlabel() == 'x' - assert ax1.get_ylabel() == 'y' + assert ax1.get_xlabel() == "x" + assert ax1.get_ylabel() == "y" assert len(ax1.patches) <= 2 for patch in ax1.patches: - assert patch.get_facecolor() == (1., 0, 0, 1.) # red + assert patch.get_facecolor() == (1.0, 0, 0, 1.0) # red - ax2 = lgb.plot_split_value_histogram(gbm0, 27, bins=10, color=['r', 'y', 'g', 'b'], - title=None, xlabel=None, ylabel=None) + ax2 = lgb.plot_split_value_histogram( + gbm0, 27, bins=10, color=["r", "y", "g", "b"], title=None, xlabel=None, ylabel=None + ) assert isinstance(ax2, matplotlib.axes.Axes) - assert ax2.get_title() == '' - assert ax2.get_xlabel() == '' - assert ax2.get_ylabel() == '' + assert ax2.get_title() == "" + assert ax2.get_xlabel() == "" + assert ax2.get_ylabel() == "" assert len(ax2.patches) == 10 - assert ax2.patches[0].get_facecolor() == (1., 0, 0, 1.) # r - assert ax2.patches[1].get_facecolor() == (.75, .75, 0, 1.) # y - assert ax2.patches[2].get_facecolor() == (0, .5, 0, 1.) # g - assert ax2.patches[3].get_facecolor() == (0, 0, 1., 1.) # b + assert ax2.patches[0].get_facecolor() == (1.0, 0, 0, 1.0) # r + assert ax2.patches[1].get_facecolor() == (0.75, 0.75, 0, 1.0) # y + assert ax2.patches[2].get_facecolor() == (0, 0.5, 0, 1.0) # g + assert ax2.patches[3].get_facecolor() == (0, 0, 1.0, 1.0) # b with pytest.raises(ValueError): lgb.plot_split_value_histogram(gbm0, 0) # was not used in splitting -@pytest.mark.skipif(not MATPLOTLIB_INSTALLED or not GRAPHVIZ_INSTALLED, - reason='matplotlib or graphviz is not installed') +@pytest.mark.skipif( + not MATPLOTLIB_INSTALLED or not GRAPHVIZ_INSTALLED, reason="matplotlib or graphviz is not installed" +) def test_plot_tree(breast_cancer_split): X_train, _, y_train, _ = breast_cancer_split gbm = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, verbose=-1) @@ -161,14 +169,14 @@ def test_plot_tree(breast_cancer_split): with pytest.raises(IndexError): lgb.plot_tree(gbm, tree_index=83) - ax = lgb.plot_tree(gbm, tree_index=3, figsize=(15, 8), show_info=['split_gain']) + ax = lgb.plot_tree(gbm, tree_index=3, figsize=(15, 8), show_info=["split_gain"]) assert isinstance(ax, matplotlib.axes.Axes) w, h = ax.axes.get_figure().get_size_inches() assert int(w) == 15 assert int(h) == 8 -@pytest.mark.skipif(not GRAPHVIZ_INSTALLED, reason='graphviz is not installed') +@pytest.mark.skipif(not GRAPHVIZ_INSTALLED, reason="graphviz is not installed") def test_create_tree_digraph(breast_cancer_split): X_train, _, y_train, _ = breast_cancer_split @@ -179,28 +187,32 @@ def test_create_tree_digraph(breast_cancer_split): with pytest.raises(IndexError): lgb.create_tree_digraph(gbm, tree_index=83) - graph = lgb.create_tree_digraph(gbm, tree_index=3, - show_info=['split_gain', 'internal_value', 'internal_weight'], - name='Tree4', node_attr={'color': 'red'}) + graph = lgb.create_tree_digraph( + gbm, + tree_index=3, + show_info=["split_gain", "internal_value", "internal_weight"], + name="Tree4", + node_attr={"color": "red"}, + ) graph.render(view=False) assert isinstance(graph, graphviz.Digraph) - assert graph.name == 'Tree4' + assert graph.name == "Tree4" assert len(graph.node_attr) == 1 - assert graph.node_attr['color'] == 'red' + assert graph.node_attr["color"] == "red" assert len(graph.graph_attr) == 0 assert len(graph.edge_attr) == 0 - graph_body = ''.join(graph.body) - assert 'leaf' in graph_body - assert 'gain' in graph_body - assert 'value' in graph_body - assert 'weight' in 
graph_body - assert '#ffdddd' in graph_body - assert '#ddffdd' in graph_body - assert 'data' not in graph_body - assert 'count' not in graph_body + graph_body = "".join(graph.body) + assert "leaf" in graph_body + assert "gain" in graph_body + assert "value" in graph_body + assert "weight" in graph_body + assert "#ffdddd" in graph_body + assert "#ddffdd" in graph_body + assert "data" not in graph_body + assert "count" not in graph_body -@pytest.mark.skipif(not GRAPHVIZ_INSTALLED, reason='graphviz is not installed') +@pytest.mark.skipif(not GRAPHVIZ_INSTALLED, reason="graphviz is not installed") def test_tree_with_categories_below_max_category_values(): X_train, y_train = _categorical_data(2, 10) params = { @@ -211,7 +223,7 @@ def test_tree_with_categories_below_max_category_values(): "deterministic": True, "num_threads": 1, "seed": 708, - "verbose": -1 + "verbose": -1, } gbm = lgb.LGBMClassifier(**params) gbm.fit(X_train, y_train) @@ -219,28 +231,32 @@ def test_tree_with_categories_below_max_category_values(): with pytest.raises(IndexError): lgb.create_tree_digraph(gbm, tree_index=83) - graph = lgb.create_tree_digraph(gbm, tree_index=3, - show_info=['split_gain', 'internal_value', 'internal_weight'], - name='Tree4', node_attr={'color': 'red'}, - max_category_values=10) + graph = lgb.create_tree_digraph( + gbm, + tree_index=3, + show_info=["split_gain", "internal_value", "internal_weight"], + name="Tree4", + node_attr={"color": "red"}, + max_category_values=10, + ) graph.render(view=False) assert isinstance(graph, graphviz.Digraph) - assert graph.name == 'Tree4' + assert graph.name == "Tree4" assert len(graph.node_attr) == 1 - assert graph.node_attr['color'] == 'red' + assert graph.node_attr["color"] == "red" assert len(graph.graph_attr) == 0 assert len(graph.edge_attr) == 0 - graph_body = ''.join(graph.body) - assert 'leaf' in graph_body - assert 'gain' in graph_body - assert 'value' in graph_body - assert 'weight' in graph_body - assert 'data' not in graph_body - assert 'count' not in graph_body - assert '||...||' not in graph_body + graph_body = "".join(graph.body) + assert "leaf" in graph_body + assert "gain" in graph_body + assert "value" in graph_body + assert "weight" in graph_body + assert "data" not in graph_body + assert "count" not in graph_body + assert "||...||" not in graph_body -@pytest.mark.skipif(not GRAPHVIZ_INSTALLED, reason='graphviz is not installed') +@pytest.mark.skipif(not GRAPHVIZ_INSTALLED, reason="graphviz is not installed") def test_tree_with_categories_above_max_category_values(): X_train, y_train = _categorical_data(20, 30) params = { @@ -251,7 +267,7 @@ def test_tree_with_categories_above_max_category_values(): "deterministic": True, "num_threads": 1, "seed": 708, - "verbose": -1 + "verbose": -1, } gbm = lgb.LGBMClassifier(**params) gbm.fit(X_train, y_train) @@ -259,32 +275,36 @@ def test_tree_with_categories_above_max_category_values(): with pytest.raises(IndexError): lgb.create_tree_digraph(gbm, tree_index=83) - graph = lgb.create_tree_digraph(gbm, tree_index=9, - show_info=['split_gain', 'internal_value', 'internal_weight'], - name='Tree4', node_attr={'color': 'red'}, - max_category_values=4) + graph = lgb.create_tree_digraph( + gbm, + tree_index=9, + show_info=["split_gain", "internal_value", "internal_weight"], + name="Tree4", + node_attr={"color": "red"}, + max_category_values=4, + ) graph.render(view=False) assert isinstance(graph, graphviz.Digraph) - assert graph.name == 'Tree4' + assert graph.name == "Tree4" assert len(graph.node_attr) == 1 - assert 
graph.node_attr['color'] == 'red' + assert graph.node_attr["color"] == "red" assert len(graph.graph_attr) == 0 assert len(graph.edge_attr) == 0 - graph_body = ''.join(graph.body) - assert 'leaf' in graph_body - assert 'gain' in graph_body - assert 'value' in graph_body - assert 'weight' in graph_body - assert 'data' not in graph_body - assert 'count' not in graph_body - assert '||...||' in graph_body + graph_body = "".join(graph.body) + assert "leaf" in graph_body + assert "gain" in graph_body + assert "value" in graph_body + assert "weight" in graph_body + assert "data" not in graph_body + assert "count" not in graph_body + assert "||...||" in graph_body -@pytest.mark.parametrize('use_missing', [True, False]) -@pytest.mark.parametrize('zero_as_missing', [True, False]) +@pytest.mark.parametrize("use_missing", [True, False]) +@pytest.mark.parametrize("zero_as_missing", [True, False]) def test_numeric_split_direction(use_missing, zero_as_missing): if use_missing and zero_as_missing: - pytest.skip('use_missing and zero_as_missing both set to True') + pytest.skip("use_missing and zero_as_missing both set to True") X, y = make_synthetic_regression() rng = np.random.RandomState(0) zero_mask = rng.rand(X.shape[0]) < 0.05 @@ -294,48 +314,48 @@ def test_numeric_split_direction(use_missing, zero_as_missing): X[nan_mask, :] = np.nan ds = lgb.Dataset(X, y) params = { - 'num_leaves': 127, - 'min_child_samples': 1, - 'use_missing': use_missing, - 'zero_as_missing': zero_as_missing, + "num_leaves": 127, + "min_child_samples": 1, + "use_missing": use_missing, + "zero_as_missing": zero_as_missing, } bst = lgb.train(params, ds, num_boost_round=1) case_with_zero = X[zero_mask][[0]] expected_leaf_zero = bst.predict(case_with_zero, pred_leaf=True)[0] - node = bst.dump_model()['tree_info'][0]['tree_structure'] - while 'decision_type' in node: + node = bst.dump_model()["tree_info"][0]["tree_structure"] + while "decision_type" in node: direction = lgb.plotting._determine_direction_for_numeric_split( - case_with_zero[0][node['split_feature']], node['threshold'], node['missing_type'], node['default_left'] + case_with_zero[0][node["split_feature"]], node["threshold"], node["missing_type"], node["default_left"] ) - node = node['left_child'] if direction == 'left' else node['right_child'] - assert node['leaf_index'] == expected_leaf_zero + node = node["left_child"] if direction == "left" else node["right_child"] + assert node["leaf_index"] == expected_leaf_zero if use_missing: case_with_nan = X[nan_mask][[0]] expected_leaf_nan = bst.predict(case_with_nan, pred_leaf=True)[0] - node = bst.dump_model()['tree_info'][0]['tree_structure'] - while 'decision_type' in node: + node = bst.dump_model()["tree_info"][0]["tree_structure"] + while "decision_type" in node: direction = lgb.plotting._determine_direction_for_numeric_split( - case_with_nan[0][node['split_feature']], node['threshold'], node['missing_type'], node['default_left'] + case_with_nan[0][node["split_feature"]], node["threshold"], node["missing_type"], node["default_left"] ) - node = node['left_child'] if direction == 'left' else node['right_child'] - assert node['leaf_index'] == expected_leaf_nan + node = node["left_child"] if direction == "left" else node["right_child"] + assert node["leaf_index"] == expected_leaf_nan assert expected_leaf_zero != expected_leaf_nan -@pytest.mark.skipif(not GRAPHVIZ_INSTALLED, reason='graphviz is not installed') +@pytest.mark.skipif(not GRAPHVIZ_INSTALLED, reason="graphviz is not installed") def test_example_case_in_tree_digraph(): 
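    # Build a small model with one numeric and one categorical feature, then verify that
    #   lgb.create_tree_digraph(bst, tree_index=i, example_case=X[[0]])
    # draws the split/leaf nodes and edges visited by `example_case` in blue and every
    # other element in black, for numeric and categorical splits alike.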
rng = np.random.RandomState(0) x1 = rng.rand(100) cat = rng.randint(1, 3, size=x1.size) X = np.vstack([x1, cat]).T y = x1 + 2 * cat - feature_name = ['x1', 'cat'] - ds = lgb.Dataset(X, y, feature_name=feature_name, categorical_feature=['cat']) + feature_name = ["x1", "cat"] + ds = lgb.Dataset(X, y, feature_name=feature_name, categorical_feature=["cat"]) num_round = 3 - bst = lgb.train({'num_leaves': 7}, ds, num_boost_round=num_round) + bst = lgb.train({"num_leaves": 7}, ds, num_boost_round=num_round) mod = bst.dump_model() example_case = X[[0]] makes_categorical_splits = False @@ -343,42 +363,46 @@ def test_example_case_in_tree_digraph(): for i in range(num_round): graph = lgb.create_tree_digraph(bst, example_case=example_case, tree_index=i) gbody = graph.body - node = mod['tree_info'][i]['tree_structure'] - while 'decision_type' in node: # iterate through the splits - split_index = node['split_index'] + node = mod["tree_info"][i]["tree_structure"] + while "decision_type" in node: # iterate through the splits + split_index = node["split_index"] - node_in_graph = [n for n in gbody if f'split{split_index}' in n and '->' not in n] + node_in_graph = [n for n in gbody if f"split{split_index}" in n and "->" not in n] assert len(node_in_graph) == 1 seen_indices.add(gbody.index(node_in_graph[0])) - edge_to_node = [e for e in gbody if f'-> split{split_index}' in e] - if node['decision_type'] == '<=': + edge_to_node = [e for e in gbody if f"-> split{split_index}" in e] + if node["decision_type"] == "<=": direction = lgb.plotting._determine_direction_for_numeric_split( - example_case[0][node['split_feature']], node['threshold'], node['missing_type'], node['default_left']) + example_case[0][node["split_feature"]], + node["threshold"], + node["missing_type"], + node["default_left"], + ) else: makes_categorical_splits = True direction = lgb.plotting._determine_direction_for_categorical_split( - example_case[0][node['split_feature']], node['threshold'] + example_case[0][node["split_feature"]], node["threshold"] ) - node = node['left_child'] if direction == 'left' else node['right_child'] - assert 'color=blue' in node_in_graph[0] + node = node["left_child"] if direction == "left" else node["right_child"] + assert "color=blue" in node_in_graph[0] if edge_to_node: assert len(edge_to_node) == 1 - assert 'color=blue' in edge_to_node[0] + assert "color=blue" in edge_to_node[0] seen_indices.add(gbody.index(edge_to_node[0])) # we're in a leaf now - leaf_index = node['leaf_index'] - leaf_in_graph = [n for n in gbody if f'leaf{leaf_index}' in n and '->' not in n] - edge_to_leaf = [e for e in gbody if f'-> leaf{leaf_index}' in e] + leaf_index = node["leaf_index"] + leaf_in_graph = [n for n in gbody if f"leaf{leaf_index}" in n and "->" not in n] + edge_to_leaf = [e for e in gbody if f"-> leaf{leaf_index}" in e] assert len(leaf_in_graph) == 1 - assert 'color=blue' in leaf_in_graph[0] + assert "color=blue" in leaf_in_graph[0] assert len(edge_to_leaf) == 1 - assert 'color=blue' in edge_to_leaf[0] + assert "color=blue" in edge_to_leaf[0] seen_indices.update([gbody.index(leaf_in_graph[0]), gbody.index(edge_to_leaf[0])]) # check that the rest of the elements have black color - remaining_elements = [e for i, e in enumerate(graph.body) if i not in seen_indices and 'graph' not in e] - assert all('color=black' in e for e in remaining_elements) + remaining_elements = [e for i, e in enumerate(graph.body) if i not in seen_indices and "graph" not in e] + assert all("color=black" in e for e in remaining_elements) # check that we 
got to the expected leaf expected_leaf = bst.predict(example_case, start_iteration=i, num_iteration=1, pred_leaf=True)[0] @@ -386,83 +410,86 @@ def test_example_case_in_tree_digraph(): assert makes_categorical_splits -@pytest.mark.skipif(not GRAPHVIZ_INSTALLED, reason='graphviz is not installed') -@pytest.mark.parametrize('input_type', ['array', 'dataframe']) +@pytest.mark.skipif(not GRAPHVIZ_INSTALLED, reason="graphviz is not installed") +@pytest.mark.parametrize("input_type", ["array", "dataframe"]) def test_empty_example_case_on_tree_digraph_raises_error(input_type): X, y = make_synthetic_regression() - if input_type == 'dataframe': + if input_type == "dataframe": if not PANDAS_INSTALLED: - pytest.skip(reason='pandas is not installed') + pytest.skip(reason="pandas is not installed") X = pd_DataFrame(X) ds = lgb.Dataset(X, y) - bst = lgb.train({'num_leaves': 3}, ds, num_boost_round=1) + bst = lgb.train({"num_leaves": 3}, ds, num_boost_round=1) example_case = X[:0] - if input_type == 'dataframe': + if input_type == "dataframe": example_case = pd_DataFrame(example_case) - with pytest.raises(ValueError, match='example_case must have a single row.'): + with pytest.raises(ValueError, match="example_case must have a single row."): lgb.create_tree_digraph(bst, tree_index=0, example_case=example_case) -@pytest.mark.skipif(not MATPLOTLIB_INSTALLED, reason='matplotlib is not installed') +@pytest.mark.skipif(not MATPLOTLIB_INSTALLED, reason="matplotlib is not installed") def test_plot_metrics(params, breast_cancer_split, train_data): X_train, X_test, y_train, y_test = breast_cancer_split test_data = lgb.Dataset(X_test, y_test, reference=train_data) params.update({"metric": {"binary_logloss", "binary_error"}}) evals_result0 = {} - lgb.train(params, train_data, - valid_sets=[train_data, test_data], - valid_names=['v1', 'v2'], - num_boost_round=10, - callbacks=[lgb.record_evaluation(evals_result0)]) + lgb.train( + params, + train_data, + valid_sets=[train_data, test_data], + valid_names=["v1", "v2"], + num_boost_round=10, + callbacks=[lgb.record_evaluation(evals_result0)], + ) with pytest.warns(UserWarning, match="More than one metric available, picking one to plot."): ax0 = lgb.plot_metric(evals_result0) assert isinstance(ax0, matplotlib.axes.Axes) - assert ax0.get_title() == 'Metric during training' - assert ax0.get_xlabel() == 'Iterations' - assert ax0.get_ylabel() in {'binary_logloss', 'binary_error'} + assert ax0.get_title() == "Metric during training" + assert ax0.get_xlabel() == "Iterations" + assert ax0.get_ylabel() in {"binary_logloss", "binary_error"} legend_items = ax0.get_legend().get_texts() assert len(legend_items) == 2 - assert legend_items[0].get_text() == 'v1' - assert legend_items[1].get_text() == 'v2' + assert legend_items[0].get_text() == "v1" + assert legend_items[1].get_text() == "v2" - ax1 = lgb.plot_metric(evals_result0, metric='binary_error') + ax1 = lgb.plot_metric(evals_result0, metric="binary_error") assert isinstance(ax1, matplotlib.axes.Axes) - assert ax1.get_title() == 'Metric during training' - assert ax1.get_xlabel() == 'Iterations' - assert ax1.get_ylabel() == 'binary_error' + assert ax1.get_title() == "Metric during training" + assert ax1.get_xlabel() == "Iterations" + assert ax1.get_ylabel() == "binary_error" legend_items = ax1.get_legend().get_texts() assert len(legend_items) == 2 - assert legend_items[0].get_text() == 'v1' - assert legend_items[1].get_text() == 'v2' + assert legend_items[0].get_text() == "v1" + assert legend_items[1].get_text() == "v2" - ax2 = 
lgb.plot_metric(evals_result0, metric='binary_logloss', dataset_names=['v2']) + ax2 = lgb.plot_metric(evals_result0, metric="binary_logloss", dataset_names=["v2"]) assert isinstance(ax2, matplotlib.axes.Axes) - assert ax2.get_title() == 'Metric during training' - assert ax2.get_xlabel() == 'Iterations' - assert ax2.get_ylabel() == 'binary_logloss' + assert ax2.get_title() == "Metric during training" + assert ax2.get_xlabel() == "Iterations" + assert ax2.get_ylabel() == "binary_logloss" legend_items = ax2.get_legend().get_texts() assert len(legend_items) == 1 - assert legend_items[0].get_text() == 'v2' + assert legend_items[0].get_text() == "v2" ax3 = lgb.plot_metric( evals_result0, - metric='binary_logloss', - dataset_names=['v1'], - title='Metric @metric@', - xlabel='Iterations @metric@', + metric="binary_logloss", + dataset_names=["v1"], + title="Metric @metric@", + xlabel="Iterations @metric@", ylabel='Value of "@metric@"', figsize=(5, 5), dpi=600, - grid=False + grid=False, ) assert isinstance(ax3, matplotlib.axes.Axes) - assert ax3.get_title() == 'Metric @metric@' - assert ax3.get_xlabel() == 'Iterations @metric@' + assert ax3.get_title() == "Metric @metric@" + assert ax3.get_xlabel() == "Iterations @metric@" assert ax3.get_ylabel() == 'Value of "binary_logloss"' legend_items = ax3.get_legend().get_texts() assert len(legend_items) == 1 - assert legend_items[0].get_text() == 'v1' + assert legend_items[0].get_text() == "v1" assert ax3.get_figure().get_figheight() == 5 assert ax3.get_figure().get_figwidth() == 5 assert ax3.get_figure().get_dpi() == 600 @@ -472,9 +499,7 @@ def test_plot_metrics(params, breast_cancer_split, train_data): assert not grid_line.get_visible() evals_result1 = {} - lgb.train(params, train_data, - num_boost_round=10, - callbacks=[lgb.record_evaluation(evals_result1)]) + lgb.train(params, train_data, num_boost_round=10, callbacks=[lgb.record_evaluation(evals_result1)]) with pytest.raises(ValueError, match="eval results cannot be empty."): lgb.plot_metric(evals_result1) @@ -482,9 +507,9 @@ def test_plot_metrics(params, breast_cancer_split, train_data): gbm2.fit(X_train, y_train, eval_set=[(X_test, y_test)]) ax4 = lgb.plot_metric(gbm2, title=None, xlabel=None, ylabel=None) assert isinstance(ax4, matplotlib.axes.Axes) - assert ax4.get_title() == '' - assert ax4.get_xlabel() == '' - assert ax4.get_ylabel() == '' + assert ax4.get_title() == "" + assert ax4.get_xlabel() == "" + assert ax4.get_ylabel() == "" legend_items = ax4.get_legend().get_texts() assert len(legend_items) == 1 - assert legend_items[0].get_text() == 'valid_0' + assert legend_items[0].get_text() == "valid_0" diff --git a/tests/python_package_test/test_sklearn.py b/tests/python_package_test/test_sklearn.py index 06b9ef18f..2fc127b52 100644 --- a/tests/python_package_test/test_sklearn.py +++ b/tests/python_package_test/test_sklearn.py @@ -23,32 +23,40 @@ from sklearn.utils.validation import check_is_fitted import lightgbm as lgb from lightgbm.compat import DATATABLE_INSTALLED, PANDAS_INSTALLED, dt_DataTable, pd_DataFrame, pd_Series -from .utils import (load_breast_cancer, load_digits, load_iris, load_linnerud, make_ranking, make_synthetic_regression, - sklearn_multiclass_custom_objective, softmax) +from .utils import ( + load_breast_cancer, + load_digits, + load_iris, + load_linnerud, + make_ranking, + make_synthetic_regression, + sklearn_multiclass_custom_objective, + softmax, +) decreasing_generator = itertools.count(0, -1) task_to_model_factory = { - 'ranking': lgb.LGBMRanker, - 
'binary-classification': lgb.LGBMClassifier, - 'multiclass-classification': lgb.LGBMClassifier, - 'regression': lgb.LGBMRegressor, + "ranking": lgb.LGBMRanker, + "binary-classification": lgb.LGBMClassifier, + "multiclass-classification": lgb.LGBMClassifier, + "regression": lgb.LGBMRegressor, } def _create_data(task, n_samples=100, n_features=4): - if task == 'ranking': + if task == "ranking": X, y, g = make_ranking(n_features=4, n_samples=n_samples) g = np.bincount(g) - elif task.endswith('classification'): - if task == 'binary-classification': + elif task.endswith("classification"): + if task == "binary-classification": centers = 2 - elif task == 'multiclass-classification': + elif task == "multiclass-classification": centers = 3 else: ValueError(f"Unknown classification task '{task}'") X, y = make_blobs(n_samples=n_samples, n_features=n_features, centers=centers, random_state=42) g = None - elif task == 'regression': + elif task == "regression": X, y = make_synthetic_regression(n_samples=n_samples, n_features=n_features) g = None return X, y, g @@ -70,7 +78,7 @@ def custom_asymmetric_obj(y_true, y_pred): def objective_ls(y_true, y_pred): - grad = (y_pred - y_true) + grad = y_pred - y_true hess = np.ones(len(y_true)) return grad, hess @@ -87,15 +95,15 @@ def custom_dummy_obj(y_true, y_pred): def constant_metric(y_true, y_pred): - return 'error', 0, False + return "error", 0, False def decreasing_metric(y_true, y_pred): - return ('decreasing_metric', next(decreasing_generator), False) + return ("decreasing_metric", next(decreasing_generator), False) def mse(y_true, y_pred): - return 'custom MSE', mean_squared_error(y_true, y_pred), False + return "custom MSE", mean_squared_error(y_true, y_pred), False def binary_error(y_true, y_pred): @@ -117,7 +125,7 @@ def test_binary(): gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], callbacks=[lgb.early_stopping(5)]) ret = log_loss(y_test, gbm.predict_proba(X_test)) assert ret < 0.12 - assert gbm.evals_result_['valid_0']['binary_logloss'][gbm.best_iteration_ - 1] == pytest.approx(ret) + assert gbm.evals_result_["valid_0"]["binary_logloss"][gbm.best_iteration_ - 1] == pytest.approx(ret) def test_regression(): @@ -127,10 +135,12 @@ def test_regression(): gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], callbacks=[lgb.early_stopping(5)]) ret = mean_squared_error(y_test, gbm.predict(X_test)) assert ret < 174 - assert gbm.evals_result_['valid_0']['l2'][gbm.best_iteration_ - 1] == pytest.approx(ret) + assert gbm.evals_result_["valid_0"]["l2"][gbm.best_iteration_ - 1] == pytest.approx(ret) -@pytest.mark.skipif(getenv('TASK', '') == 'cuda', reason='Skip due to differences in implementation details of CUDA version') +@pytest.mark.skipif( + getenv("TASK", "") == "cuda", reason="Skip due to differences in implementation details of CUDA version" +) def test_multiclass(): X, y = load_digits(n_class=10, return_X_y=True) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) @@ -140,16 +150,18 @@ def test_multiclass(): assert ret < 0.05 ret = multi_logloss(y_test, gbm.predict_proba(X_test)) assert ret < 0.16 - assert gbm.evals_result_['valid_0']['multi_logloss'][gbm.best_iteration_ - 1] == pytest.approx(ret) + assert gbm.evals_result_["valid_0"]["multi_logloss"][gbm.best_iteration_ - 1] == pytest.approx(ret) -@pytest.mark.skipif(getenv('TASK', '') == 'cuda', reason='Skip due to differences in implementation details of CUDA version') +@pytest.mark.skipif( + getenv("TASK", "") == "cuda", reason="Skip due to differences in 
implementation details of CUDA version" +) def test_lambdarank(): - rank_example_dir = Path(__file__).absolute().parents[2] / 'examples' / 'lambdarank' - X_train, y_train = load_svmlight_file(str(rank_example_dir / 'rank.train')) - X_test, y_test = load_svmlight_file(str(rank_example_dir / 'rank.test')) - q_train = np.loadtxt(str(rank_example_dir / 'rank.train.query')) - q_test = np.loadtxt(str(rank_example_dir / 'rank.test.query')) + rank_example_dir = Path(__file__).absolute().parents[2] / "examples" / "lambdarank" + X_train, y_train = load_svmlight_file(str(rank_example_dir / "rank.train")) + X_test, y_test = load_svmlight_file(str(rank_example_dir / "rank.test")) + q_train = np.loadtxt(str(rank_example_dir / "rank.train.query")) + q_test = np.loadtxt(str(rank_example_dir / "rank.test.query")) gbm = lgb.LGBMRanker(n_estimators=50) gbm.fit( X_train, @@ -158,23 +170,20 @@ def test_lambdarank(): eval_set=[(X_test, y_test)], eval_group=[q_test], eval_at=[1, 3], - callbacks=[ - lgb.early_stopping(10), - lgb.reset_parameter(learning_rate=lambda x: max(0.01, 0.1 - 0.01 * x)) - ] + callbacks=[lgb.early_stopping(10), lgb.reset_parameter(learning_rate=lambda x: max(0.01, 0.1 - 0.01 * x))], ) assert gbm.best_iteration_ <= 24 - assert gbm.best_score_['valid_0']['ndcg@1'] > 0.5674 - assert gbm.best_score_['valid_0']['ndcg@3'] > 0.578 + assert gbm.best_score_["valid_0"]["ndcg@1"] > 0.5674 + assert gbm.best_score_["valid_0"]["ndcg@3"] > 0.578 def test_xendcg(): - xendcg_example_dir = Path(__file__).absolute().parents[2] / 'examples' / 'xendcg' - X_train, y_train = load_svmlight_file(str(xendcg_example_dir / 'rank.train')) - X_test, y_test = load_svmlight_file(str(xendcg_example_dir / 'rank.test')) - q_train = np.loadtxt(str(xendcg_example_dir / 'rank.train.query')) - q_test = np.loadtxt(str(xendcg_example_dir / 'rank.test.query')) - gbm = lgb.LGBMRanker(n_estimators=50, objective='rank_xendcg', random_state=5, n_jobs=1) + xendcg_example_dir = Path(__file__).absolute().parents[2] / "examples" / "xendcg" + X_train, y_train = load_svmlight_file(str(xendcg_example_dir / "rank.train")) + X_test, y_test = load_svmlight_file(str(xendcg_example_dir / "rank.test")) + q_train = np.loadtxt(str(xendcg_example_dir / "rank.train.query")) + q_test = np.loadtxt(str(xendcg_example_dir / "rank.test.query")) + gbm = lgb.LGBMRanker(n_estimators=50, objective="rank_xendcg", random_state=5, n_jobs=1) gbm.fit( X_train, y_train, @@ -182,28 +191,25 @@ def test_xendcg(): eval_set=[(X_test, y_test)], eval_group=[q_test], eval_at=[1, 3], - eval_metric='ndcg', - callbacks=[ - lgb.early_stopping(10), - lgb.reset_parameter(learning_rate=lambda x: max(0.01, 0.1 - 0.01 * x)) - ] + eval_metric="ndcg", + callbacks=[lgb.early_stopping(10), lgb.reset_parameter(learning_rate=lambda x: max(0.01, 0.1 - 0.01 * x))], ) assert gbm.best_iteration_ <= 24 - assert gbm.best_score_['valid_0']['ndcg@1'] > 0.6211 - assert gbm.best_score_['valid_0']['ndcg@3'] > 0.6253 + assert gbm.best_score_["valid_0"]["ndcg@1"] > 0.6211 + assert gbm.best_score_["valid_0"]["ndcg@3"] > 0.6253 def test_eval_at_aliases(): - rank_example_dir = Path(__file__).absolute().parents[2] / 'examples' / 'lambdarank' - X_train, y_train = load_svmlight_file(str(rank_example_dir / 'rank.train')) - X_test, y_test = load_svmlight_file(str(rank_example_dir / 'rank.test')) - q_train = np.loadtxt(str(rank_example_dir / 'rank.train.query')) - q_test = np.loadtxt(str(rank_example_dir / 'rank.test.query')) - for alias in lgb.basic._ConfigAliases.get('eval_at'): + rank_example_dir = 
Path(__file__).absolute().parents[2] / "examples" / "lambdarank" + X_train, y_train = load_svmlight_file(str(rank_example_dir / "rank.train")) + X_test, y_test = load_svmlight_file(str(rank_example_dir / "rank.test")) + q_train = np.loadtxt(str(rank_example_dir / "rank.train.query")) + q_test = np.loadtxt(str(rank_example_dir / "rank.test.query")) + for alias in lgb.basic._ConfigAliases.get("eval_at"): gbm = lgb.LGBMRanker(n_estimators=5, **{alias: [1, 2, 3, 9]}) with pytest.warns(UserWarning, match=f"Found '{alias}' in params. Will use it instead of 'eval_at' argument"): gbm.fit(X_train, y_train, group=q_train, eval_set=[(X_test, y_test)], eval_group=[q_test]) - assert list(gbm.evals_result_['valid_0'].keys()) == ['ndcg@1', 'ndcg@2', 'ndcg@3', 'ndcg@9'] + assert list(gbm.evals_result_["valid_0"].keys()) == ["ndcg@1", "ndcg@2", "ndcg@3", "ndcg@9"] @pytest.mark.parametrize("custom_objective", [True, False]) @@ -212,20 +218,22 @@ def test_objective_aliases(custom_objective): X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) if custom_objective: obj = custom_dummy_obj - metric_name = 'l2' # default one + metric_name = "l2" # default one else: - obj = 'mape' - metric_name = 'mape' + obj = "mape" + metric_name = "mape" evals = [] - for alias in lgb.basic._ConfigAliases.get('objective'): + for alias in lgb.basic._ConfigAliases.get("objective"): gbm = lgb.LGBMRegressor(n_estimators=5, **{alias: obj}) - if alias != 'objective': - with pytest.warns(UserWarning, match=f"Found '{alias}' in params. Will use it instead of 'objective' argument"): + if alias != "objective": + with pytest.warns( + UserWarning, match=f"Found '{alias}' in params. Will use it instead of 'objective' argument" + ): gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)]) else: gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)]) - assert list(gbm.evals_result_['valid_0'].keys()) == [metric_name] - evals.append(gbm.evals_result_['valid_0'][metric_name]) + assert list(gbm.evals_result_["valid_0"].keys()) == [metric_name] + evals.append(gbm.evals_result_["valid_0"][metric_name]) evals_t = np.array(evals).T for i in range(evals_t.shape[0]): np.testing.assert_allclose(evals_t[i], evals_t[i][0]) @@ -241,7 +249,7 @@ def test_regression_with_custom_objective(): gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], callbacks=[lgb.early_stopping(5)]) ret = mean_squared_error(y_test, gbm.predict(X_test)) assert ret < 174 - assert gbm.evals_result_['valid_0']['l2'][gbm.best_iteration_ - 1] == pytest.approx(ret) + assert gbm.evals_result_["valid_0"]["l2"][gbm.best_iteration_ - 1] == pytest.approx(ret) def test_binary_classification_with_custom_objective(): @@ -260,7 +268,7 @@ def test_binary_classification_with_custom_objective(): def test_dart(): X, y = make_synthetic_regression() X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) - gbm = lgb.LGBMRegressor(boosting_type='dart', n_estimators=50) + gbm = lgb.LGBMRegressor(boosting_type="dart", n_estimators=50) gbm.fit(X_train, y_train) score = gbm.score(X_test, y_test) assert 0.8 <= score <= 1.0 @@ -269,22 +277,21 @@ def test_dart(): def test_stacking_classifier(): X, y = load_iris(return_X_y=True) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) - classifiers = [('gbm1', lgb.LGBMClassifier(n_estimators=3)), - ('gbm2', lgb.LGBMClassifier(n_estimators=3))] - clf = StackingClassifier(estimators=classifiers, - final_estimator=lgb.LGBMClassifier(n_estimators=3), - passthrough=True) + 
classifiers = [("gbm1", lgb.LGBMClassifier(n_estimators=3)), ("gbm2", lgb.LGBMClassifier(n_estimators=3))] + clf = StackingClassifier( + estimators=classifiers, final_estimator=lgb.LGBMClassifier(n_estimators=3), passthrough=True + ) clf.fit(X_train, y_train) score = clf.score(X_test, y_test) assert score >= 0.8 - assert score <= 1. + assert score <= 1.0 assert clf.n_features_in_ == 4 # number of input features - assert len(clf.named_estimators_['gbm1'].feature_importances_) == 4 - assert clf.named_estimators_['gbm1'].n_features_in_ == clf.named_estimators_['gbm2'].n_features_in_ + assert len(clf.named_estimators_["gbm1"].feature_importances_) == 4 + assert clf.named_estimators_["gbm1"].n_features_in_ == clf.named_estimators_["gbm2"].n_features_in_ assert clf.final_estimator_.n_features_in_ == 10 # number of concatenated features assert len(clf.final_estimator_.feature_importances_) == 10 - assert all(clf.named_estimators_['gbm1'].classes_ == clf.named_estimators_['gbm2'].classes_) - assert all(clf.classes_ == clf.named_estimators_['gbm1'].classes_) + assert all(clf.named_estimators_["gbm1"].classes_ == clf.named_estimators_["gbm2"].classes_) + assert all(clf.classes_ == clf.named_estimators_["gbm1"].classes_) def test_stacking_regressor(): @@ -292,18 +299,15 @@ def test_stacking_regressor(): n_features = X.shape[1] n_input_models = 2 X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) - regressors = [('gbm1', lgb.LGBMRegressor(n_estimators=3)), - ('gbm2', lgb.LGBMRegressor(n_estimators=3))] - reg = StackingRegressor(estimators=regressors, - final_estimator=lgb.LGBMRegressor(n_estimators=3), - passthrough=True) + regressors = [("gbm1", lgb.LGBMRegressor(n_estimators=3)), ("gbm2", lgb.LGBMRegressor(n_estimators=3))] + reg = StackingRegressor(estimators=regressors, final_estimator=lgb.LGBMRegressor(n_estimators=3), passthrough=True) reg.fit(X_train, y_train) score = reg.score(X_test, y_test) assert score >= 0.2 - assert score <= 1. 
+ assert score <= 1.0 assert reg.n_features_in_ == n_features # number of input features - assert len(reg.named_estimators_['gbm1'].feature_importances_) == n_features - assert reg.named_estimators_['gbm1'].n_features_in_ == reg.named_estimators_['gbm2'].n_features_in_ + assert len(reg.named_estimators_["gbm1"].feature_importances_) == n_features + assert reg.named_estimators_["gbm1"].n_features_in_ == reg.named_estimators_["gbm2"].n_features_in_ assert reg.final_estimator_.n_features_in_ == n_features + n_input_models # number of concatenated features assert len(reg.final_estimator_.feature_importances_) == n_features + n_input_models @@ -313,91 +317,69 @@ def test_grid_search(): y = y.astype(str) # utilize label encoder at it's max power X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1, random_state=42) - params = { - "subsample": 0.8, - "subsample_freq": 1 - } - grid_params = { - "boosting_type": ['rf', 'gbdt'], - "n_estimators": [4, 6], - "reg_alpha": [0.01, 0.005] - } + params = {"subsample": 0.8, "subsample_freq": 1} + grid_params = {"boosting_type": ["rf", "gbdt"], "n_estimators": [4, 6], "reg_alpha": [0.01, 0.005]} evals_result = {} fit_params = { "eval_set": [(X_val, y_val)], "eval_metric": constant_metric, - "callbacks": [ - lgb.early_stopping(2), - lgb.record_evaluation(evals_result) - ] + "callbacks": [lgb.early_stopping(2), lgb.record_evaluation(evals_result)], } grid = GridSearchCV(estimator=lgb.LGBMClassifier(**params), param_grid=grid_params, cv=2) grid.fit(X_train, y_train, **fit_params) score = grid.score(X_test, y_test) # utilizes GridSearchCV default refit=True - assert grid.best_params_['boosting_type'] in ['rf', 'gbdt'] - assert grid.best_params_['n_estimators'] in [4, 6] - assert grid.best_params_['reg_alpha'] in [0.01, 0.005] - assert grid.best_score_ <= 1. + assert grid.best_params_["boosting_type"] in ["rf", "gbdt"] + assert grid.best_params_["n_estimators"] in [4, 6] + assert grid.best_params_["reg_alpha"] in [0.01, 0.005] + assert grid.best_score_ <= 1.0 assert grid.best_estimator_.best_iteration_ == 1 - assert grid.best_estimator_.best_score_['valid_0']['multi_logloss'] < 0.25 - assert grid.best_estimator_.best_score_['valid_0']['error'] == 0 + assert grid.best_estimator_.best_score_["valid_0"]["multi_logloss"] < 0.25 + assert grid.best_estimator_.best_score_["valid_0"]["error"] == 0 assert score >= 0.2 - assert score <= 1. 
+ assert score <= 1.0 assert evals_result == grid.best_estimator_.evals_result_ def test_random_search(): X, y = load_iris(return_X_y=True) y = y.astype(str) # utilize label encoder at it's max power - X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, - random_state=42) - X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1, - random_state=42) + X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) + X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1, random_state=42) n_iter = 3 # Number of samples - params = { - "subsample": 0.8, - "subsample_freq": 1 - } + params = {"subsample": 0.8, "subsample_freq": 1} param_dist = { - "boosting_type": ['rf', 'gbdt'], + "boosting_type": ["rf", "gbdt"], "n_estimators": [np.random.randint(low=3, high=10) for i in range(n_iter)], - "reg_alpha": [np.random.uniform(low=0.01, high=0.06) for i in range(n_iter)] + "reg_alpha": [np.random.uniform(low=0.01, high=0.06) for i in range(n_iter)], } - fit_params = { - "eval_set": [(X_val, y_val)], - "eval_metric": constant_metric, - "callbacks": [lgb.early_stopping(2)] - } - rand = RandomizedSearchCV(estimator=lgb.LGBMClassifier(**params), - param_distributions=param_dist, cv=2, - n_iter=n_iter, random_state=42) + fit_params = {"eval_set": [(X_val, y_val)], "eval_metric": constant_metric, "callbacks": [lgb.early_stopping(2)]} + rand = RandomizedSearchCV( + estimator=lgb.LGBMClassifier(**params), param_distributions=param_dist, cv=2, n_iter=n_iter, random_state=42 + ) rand.fit(X_train, y_train, **fit_params) score = rand.score(X_test, y_test) # utilizes RandomizedSearchCV default refit=True - assert rand.best_params_['boosting_type'] in ['rf', 'gbdt'] - assert rand.best_params_['n_estimators'] in list(range(3, 10)) - assert rand.best_params_['reg_alpha'] >= 0.01 # Left-closed boundary point - assert rand.best_params_['reg_alpha'] <= 0.06 # Right-closed boundary point - assert rand.best_score_ <= 1. - assert rand.best_estimator_.best_score_['valid_0']['multi_logloss'] < 0.25 - assert rand.best_estimator_.best_score_['valid_0']['error'] == 0 + assert rand.best_params_["boosting_type"] in ["rf", "gbdt"] + assert rand.best_params_["n_estimators"] in list(range(3, 10)) + assert rand.best_params_["reg_alpha"] >= 0.01 # Left-closed boundary point + assert rand.best_params_["reg_alpha"] <= 0.06 # Right-closed boundary point + assert rand.best_score_ <= 1.0 + assert rand.best_estimator_.best_score_["valid_0"]["multi_logloss"] < 0.25 + assert rand.best_estimator_.best_score_["valid_0"]["error"] == 0 assert score >= 0.2 - assert score <= 1. + assert score <= 1.0 def test_multioutput_classifier(): n_outputs = 3 - X, y = make_multilabel_classification(n_samples=100, n_features=20, - n_classes=n_outputs, random_state=0) + X, y = make_multilabel_classification(n_samples=100, n_features=20, n_classes=n_outputs, random_state=0) y = y.astype(str) # utilize label encoder at it's max power - X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, - random_state=42) + X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) clf = MultiOutputClassifier(estimator=lgb.LGBMClassifier(n_estimators=10)) clf.fit(X_train, y_train) score = clf.score(X_test, y_test) assert score >= 0.2 - assert score <= 1. 
- np.testing.assert_array_equal(np.tile(np.unique(y_train), n_outputs), - np.concatenate(clf.classes_)) + assert score <= 1.0 + np.testing.assert_array_equal(np.tile(np.unique(y_train), n_outputs), np.concatenate(clf.classes_)) for classifier in clf.estimators_: assert isinstance(classifier, lgb.LGBMClassifier) assert isinstance(classifier.booster_, lgb.Booster) @@ -405,15 +387,14 @@ def test_multioutput_classifier(): def test_multioutput_regressor(): bunch = load_linnerud(as_frame=True) # returns a Bunch instance - X, y = bunch['data'], bunch['target'] - X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, - random_state=42) + X, y = bunch["data"], bunch["target"] + X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) reg = MultiOutputRegressor(estimator=lgb.LGBMRegressor(n_estimators=10)) reg.fit(X_train, y_train) y_pred = reg.predict(X_test) _, score, _ = mse(y_test, y_pred) assert score >= 0.2 - assert score <= 120. + assert score <= 120.0 for regressor in reg.estimators_: assert isinstance(regressor, lgb.LGBMRegressor) assert isinstance(regressor.booster_, lgb.Booster) @@ -421,19 +402,15 @@ def test_multioutput_regressor(): def test_classifier_chain(): n_outputs = 3 - X, y = make_multilabel_classification(n_samples=100, n_features=20, - n_classes=n_outputs, random_state=0) - X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, - random_state=42) + X, y = make_multilabel_classification(n_samples=100, n_features=20, n_classes=n_outputs, random_state=0) + X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) order = [2, 0, 1] - clf = ClassifierChain(base_estimator=lgb.LGBMClassifier(n_estimators=10), - order=order, random_state=42) + clf = ClassifierChain(base_estimator=lgb.LGBMClassifier(n_estimators=10), order=order, random_state=42) clf.fit(X_train, y_train) score = clf.score(X_test, y_test) assert score >= 0.2 - assert score <= 1. - np.testing.assert_array_equal(np.tile(np.unique(y_train), n_outputs), - np.concatenate(clf.classes_)) + assert score <= 1.0 + np.testing.assert_array_equal(np.tile(np.unique(y_train), n_outputs), np.concatenate(clf.classes_)) assert order == clf.order_ for classifier in clf.estimators_: assert isinstance(classifier, lgb.LGBMClassifier) @@ -442,16 +419,15 @@ def test_classifier_chain(): def test_regressor_chain(): bunch = load_linnerud(as_frame=True) # returns a Bunch instance - X, y = bunch['data'], bunch['target'] + X, y = bunch["data"], bunch["target"] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) order = [2, 0, 1] - reg = RegressorChain(base_estimator=lgb.LGBMRegressor(n_estimators=10), order=order, - random_state=42) + reg = RegressorChain(base_estimator=lgb.LGBMRegressor(n_estimators=10), order=order, random_state=42) reg.fit(X_train, y_train) y_pred = reg.predict(X_test) _, score, _ = mse(y_test, y_pred) assert score >= 0.2 - assert score <= 120. 
+ assert score <= 120.0 assert order == reg.order_ for regressor in reg.estimators_: assert isinstance(regressor, lgb.LGBMRegressor) @@ -489,24 +465,17 @@ def test_clone_and_property(): def test_joblib(): X, y = make_synthetic_regression() X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) - gbm = lgb.LGBMRegressor(n_estimators=10, objective=custom_asymmetric_obj, - verbose=-1, importance_type='split') + gbm = lgb.LGBMRegressor(n_estimators=10, objective=custom_asymmetric_obj, verbose=-1, importance_type="split") gbm.fit( X_train, y_train, - eval_set=[ - (X_train, y_train), - (X_test, y_test) - ], + eval_set=[(X_train, y_train), (X_test, y_test)], eval_metric=mse, - callbacks=[ - lgb.early_stopping(5), - lgb.reset_parameter(learning_rate=list(np.arange(1, 0, -0.1))) - ] + callbacks=[lgb.early_stopping(5), lgb.reset_parameter(learning_rate=list(np.arange(1, 0, -0.1)))], ) - joblib.dump(gbm, 'lgb.pkl') # test model with custom functions - gbm_pickle = joblib.load('lgb.pkl') + joblib.dump(gbm, "lgb.pkl") # test model with custom functions + gbm_pickle = joblib.load("lgb.pkl") assert isinstance(gbm_pickle.booster_, lgb.Booster) assert gbm.get_params() == gbm_pickle.get_params() np.testing.assert_array_equal(gbm.feature_importances_, gbm_pickle.feature_importances_) @@ -515,8 +484,7 @@ def test_joblib(): for eval_set in gbm.evals_result_: for metric in gbm.evals_result_[eval_set]: - np.testing.assert_allclose(gbm.evals_result_[eval_set][metric], - gbm_pickle.evals_result_[eval_set][metric]) + np.testing.assert_allclose(gbm.evals_result_[eval_set][metric], gbm_pickle.evals_result_[eval_set][metric]) pred_origin = gbm.predict(X_test) pred_pickle = gbm_pickle.predict(X_test) np.testing.assert_allclose(pred_origin, pred_pickle) @@ -526,7 +494,7 @@ def test_non_serializable_objects_in_callbacks(tmp_path): unpicklable_callback = UnpicklableCallback() with pytest.raises(Exception, match="This class in not picklable"): - joblib.dump(unpicklable_callback, tmp_path / 'tmp.joblib') + joblib.dump(unpicklable_callback, tmp_path / "tmp.joblib") X, y = make_synthetic_regression() gbm = lgb.LGBMRegressor(n_estimators=5) @@ -578,9 +546,9 @@ def test_feature_importances_type(): data = load_iris(return_X_y=False) clf = lgb.LGBMClassifier(n_estimators=10) clf.fit(data.data, data.target) - clf.set_params(importance_type='split') + clf.set_params(importance_type="split") importances_split = clf.feature_importances_ - clf.set_params(importance_type='gain') + clf.set_params(importance_type="gain") importances_gain = clf.feature_importances_ # Test that the largest element is NOT the same, the smallest can be the same, i.e. 
zero importance_split_top1 = sorted(importances_split, reverse=True)[0] @@ -591,38 +559,44 @@ def test_feature_importances_type(): def test_pandas_categorical(): pd = pytest.importorskip("pandas") np.random.seed(42) # sometimes there is no difference how cols are treated (cat or not cat) - X = pd.DataFrame({"A": np.random.permutation(['a', 'b', 'c', 'd'] * 75), # str - "B": np.random.permutation([1, 2, 3] * 100), # int - "C": np.random.permutation([0.1, 0.2, -0.1, -0.1, 0.2] * 60), # float - "D": np.random.permutation([True, False] * 150), # bool - "E": pd.Categorical(np.random.permutation(['z', 'y', 'x', 'w', 'v'] * 60), - ordered=True)}) # str and ordered categorical + X = pd.DataFrame( + { + "A": np.random.permutation(["a", "b", "c", "d"] * 75), # str + "B": np.random.permutation([1, 2, 3] * 100), # int + "C": np.random.permutation([0.1, 0.2, -0.1, -0.1, 0.2] * 60), # float + "D": np.random.permutation([True, False] * 150), # bool + "E": pd.Categorical(np.random.permutation(["z", "y", "x", "w", "v"] * 60), ordered=True), + } + ) # str and ordered categorical y = np.random.permutation([0, 1] * 150) - X_test = pd.DataFrame({"A": np.random.permutation(['a', 'b', 'e'] * 20), # unseen category - "B": np.random.permutation([1, 3] * 30), - "C": np.random.permutation([0.1, -0.1, 0.2, 0.2] * 15), - "D": np.random.permutation([True, False] * 30), - "E": pd.Categorical(np.random.permutation(['z', 'y'] * 30), - ordered=True)}) + X_test = pd.DataFrame( + { + "A": np.random.permutation(["a", "b", "e"] * 20), # unseen category + "B": np.random.permutation([1, 3] * 30), + "C": np.random.permutation([0.1, -0.1, 0.2, 0.2] * 15), + "D": np.random.permutation([True, False] * 30), + "E": pd.Categorical(np.random.permutation(["z", "y"] * 30), ordered=True), + } + ) np.random.seed() # reset seed cat_cols_actual = ["A", "B", "C", "D"] cat_cols_to_store = cat_cols_actual + ["E"] - X[cat_cols_actual] = X[cat_cols_actual].astype('category') - X_test[cat_cols_actual] = X_test[cat_cols_actual].astype('category') + X[cat_cols_actual] = X[cat_cols_actual].astype("category") + X_test[cat_cols_actual] = X_test[cat_cols_actual].astype("category") cat_values = [X[col].cat.categories.tolist() for col in cat_cols_to_store] gbm0 = lgb.sklearn.LGBMClassifier(n_estimators=10).fit(X, y) pred0 = gbm0.predict(X_test, raw_score=True) pred_prob = gbm0.predict_proba(X_test)[:, 1] gbm1 = lgb.sklearn.LGBMClassifier(n_estimators=10).fit(X, pd.Series(y), categorical_feature=[0]) pred1 = gbm1.predict(X_test, raw_score=True) - gbm2 = lgb.sklearn.LGBMClassifier(n_estimators=10).fit(X, y, categorical_feature=['A']) + gbm2 = lgb.sklearn.LGBMClassifier(n_estimators=10).fit(X, y, categorical_feature=["A"]) pred2 = gbm2.predict(X_test, raw_score=True) - gbm3 = lgb.sklearn.LGBMClassifier(n_estimators=10).fit(X, y, categorical_feature=['A', 'B', 'C', 'D']) + gbm3 = lgb.sklearn.LGBMClassifier(n_estimators=10).fit(X, y, categorical_feature=["A", "B", "C", "D"]) pred3 = gbm3.predict(X_test, raw_score=True) - gbm3.booster_.save_model('categorical.model') - gbm4 = lgb.Booster(model_file='categorical.model') + gbm3.booster_.save_model("categorical.model") + gbm4 = lgb.Booster(model_file="categorical.model") pred4 = gbm4.predict(X_test) - gbm5 = lgb.sklearn.LGBMClassifier(n_estimators=10).fit(X, y, categorical_feature=['A', 'B', 'C', 'D', 'E']) + gbm5 = lgb.sklearn.LGBMClassifier(n_estimators=10).fit(X, y, categorical_feature=["A", "B", "C", "D", "E"]) pred5 = gbm5.predict(X_test, raw_score=True) gbm6 = 
lgb.sklearn.LGBMClassifier(n_estimators=10).fit(X, y, categorical_feature=[]) pred6 = gbm6.predict(X_test, raw_score=True) @@ -648,18 +622,26 @@ def test_pandas_categorical(): def test_pandas_sparse(): pd = pytest.importorskip("pandas") - X = pd.DataFrame({"A": pd.arrays.SparseArray(np.random.permutation([0, 1, 2] * 100)), - "B": pd.arrays.SparseArray(np.random.permutation([0.0, 0.1, 0.2, -0.1, 0.2] * 60)), - "C": pd.arrays.SparseArray(np.random.permutation([True, False] * 150))}) + X = pd.DataFrame( + { + "A": pd.arrays.SparseArray(np.random.permutation([0, 1, 2] * 100)), + "B": pd.arrays.SparseArray(np.random.permutation([0.0, 0.1, 0.2, -0.1, 0.2] * 60)), + "C": pd.arrays.SparseArray(np.random.permutation([True, False] * 150)), + } + ) y = pd.Series(pd.arrays.SparseArray(np.random.permutation([0, 1] * 150))) - X_test = pd.DataFrame({"A": pd.arrays.SparseArray(np.random.permutation([0, 2] * 30)), - "B": pd.arrays.SparseArray(np.random.permutation([0.0, 0.1, 0.2, -0.1] * 15)), - "C": pd.arrays.SparseArray(np.random.permutation([True, False] * 30))}) + X_test = pd.DataFrame( + { + "A": pd.arrays.SparseArray(np.random.permutation([0, 2] * 30)), + "B": pd.arrays.SparseArray(np.random.permutation([0.0, 0.1, 0.2, -0.1] * 15)), + "C": pd.arrays.SparseArray(np.random.permutation([True, False] * 30)), + } + ) for dtype in pd.concat([X.dtypes, X_test.dtypes, pd.Series(y.dtypes)]): assert pd.api.types.is_sparse(dtype) gbm = lgb.sklearn.LGBMClassifier(n_estimators=10).fit(X, y) pred_sparse = gbm.predict(X_test, raw_score=True) - if hasattr(X_test, 'sparse'): + if hasattr(X_test, "sparse"): pred_dense = gbm.predict(X_test.sparse.to_dense(), raw_score=True) else: pred_dense = gbm.predict(X_test.to_dense(), raw_score=True) @@ -669,13 +651,9 @@ def test_pandas_sparse(): def test_predict(): # With default params iris = load_iris(return_X_y=False) - X_train, X_test, y_train, _ = train_test_split(iris.data, iris.target, - test_size=0.2, random_state=42) + X_train, X_test, y_train, _ = train_test_split(iris.data, iris.target, test_size=0.2, random_state=42) - gbm = lgb.train({'objective': 'multiclass', - 'num_class': 3, - 'verbose': -1}, - lgb.Dataset(X_train, y_train)) + gbm = lgb.train({"objective": "multiclass", "num_class": 3, "verbose": -1}, lgb.Dataset(X_train, y_train)) clf = lgb.LGBMClassifier(verbose=-1).fit(X_train, y_train) # Tests same probabilities @@ -705,9 +683,7 @@ def test_predict(): # Tests other parameters for the prediction works res_engine = gbm.predict(X_test) - res_sklearn_params = clf.predict_proba(X_test, - pred_early_stop=True, - pred_early_stop_margin=1.0) + res_sklearn_params = clf.predict_proba(X_test, pred_early_stop=True, pred_early_stop_margin=1.0) with pytest.raises(AssertionError): np.testing.assert_allclose(res_engine, res_sklearn_params) @@ -739,9 +715,7 @@ def test_predict(): # Tests other parameters for the prediction works, starting from iteration 10 res_engine = gbm.predict(X_test, start_iteration=10) - res_sklearn_params = clf.predict_proba(X_test, - pred_early_stop=True, - pred_early_stop_margin=1.0, start_iteration=10) + res_sklearn_params = clf.predict_proba(X_test, pred_early_stop=True, pred_early_stop_margin=1.0, start_iteration=10) with pytest.raises(AssertionError): np.testing.assert_allclose(res_engine, res_sklearn_params) @@ -750,34 +724,43 @@ def test_predict_with_params_from_init(): X, y = load_iris(return_X_y=True) X_train, X_test, y_train, _ = train_test_split(X, y, test_size=0.2, random_state=42) - predict_params = { - 'pred_early_stop': True, - 
'pred_early_stop_margin': 1.0 - } + predict_params = {"pred_early_stop": True, "pred_early_stop_margin": 1.0} - y_preds_no_params = lgb.LGBMClassifier(verbose=-1).fit(X_train, y_train).predict( - X_test, raw_score=True) + y_preds_no_params = lgb.LGBMClassifier(verbose=-1).fit(X_train, y_train).predict(X_test, raw_score=True) - y_preds_params_in_predict = lgb.LGBMClassifier(verbose=-1).fit(X_train, y_train).predict( - X_test, raw_score=True, **predict_params) + y_preds_params_in_predict = ( + lgb.LGBMClassifier(verbose=-1).fit(X_train, y_train).predict(X_test, raw_score=True, **predict_params) + ) with pytest.raises(AssertionError): np.testing.assert_allclose(y_preds_no_params, y_preds_params_in_predict) - y_preds_params_in_set_params_before_fit = lgb.LGBMClassifier(verbose=-1).set_params( - **predict_params).fit(X_train, y_train).predict(X_test, raw_score=True) + y_preds_params_in_set_params_before_fit = ( + lgb.LGBMClassifier(verbose=-1) + .set_params(**predict_params) + .fit(X_train, y_train) + .predict(X_test, raw_score=True) + ) np.testing.assert_allclose(y_preds_params_in_predict, y_preds_params_in_set_params_before_fit) - y_preds_params_in_set_params_after_fit = lgb.LGBMClassifier(verbose=-1).fit(X_train, y_train).set_params( - **predict_params).predict(X_test, raw_score=True) + y_preds_params_in_set_params_after_fit = ( + lgb.LGBMClassifier(verbose=-1) + .fit(X_train, y_train) + .set_params(**predict_params) + .predict(X_test, raw_score=True) + ) np.testing.assert_allclose(y_preds_params_in_predict, y_preds_params_in_set_params_after_fit) - y_preds_params_in_init = lgb.LGBMClassifier(verbose=-1, **predict_params).fit(X_train, y_train).predict( - X_test, raw_score=True) + y_preds_params_in_init = ( + lgb.LGBMClassifier(verbose=-1, **predict_params).fit(X_train, y_train).predict(X_test, raw_score=True) + ) np.testing.assert_allclose(y_preds_params_in_predict, y_preds_params_in_init) # test that params passed in predict have higher priority - y_preds_params_overwritten = lgb.LGBMClassifier(verbose=-1, **predict_params).fit(X_train, y_train).predict( - X_test, raw_score=True, pred_early_stop=False) + y_preds_params_overwritten = ( + lgb.LGBMClassifier(verbose=-1, **predict_params) + .fit(X_train, y_train) + .predict(X_test, raw_score=True, pred_early_stop=False) + ) np.testing.assert_allclose(y_preds_no_params, y_preds_params_overwritten) @@ -787,315 +770,307 @@ def test_evaluate_train_set(): gbm = lgb.LGBMRegressor(n_estimators=10, verbose=-1) gbm.fit(X_train, y_train, eval_set=[(X_train, y_train), (X_test, y_test)]) assert len(gbm.evals_result_) == 2 - assert 'training' in gbm.evals_result_ - assert len(gbm.evals_result_['training']) == 1 - assert 'l2' in gbm.evals_result_['training'] - assert 'valid_1' in gbm.evals_result_ - assert len(gbm.evals_result_['valid_1']) == 1 - assert 'l2' in gbm.evals_result_['valid_1'] + assert "training" in gbm.evals_result_ + assert len(gbm.evals_result_["training"]) == 1 + assert "l2" in gbm.evals_result_["training"] + assert "valid_1" in gbm.evals_result_ + assert len(gbm.evals_result_["valid_1"]) == 1 + assert "l2" in gbm.evals_result_["valid_1"] def test_metrics(): X, y = make_synthetic_regression() y = abs(y) - params = {'n_estimators': 2, 'verbose': -1} - params_fit = {'X': X, 'y': y, 'eval_set': (X, y)} + params = {"n_estimators": 2, "verbose": -1} + params_fit = {"X": X, "y": y, "eval_set": (X, y)} # no custom objective, no custom metric # default metric gbm = lgb.LGBMRegressor(**params).fit(**params_fit) - assert 
len(gbm.evals_result_['training']) == 1 - assert 'l2' in gbm.evals_result_['training'] + assert len(gbm.evals_result_["training"]) == 1 + assert "l2" in gbm.evals_result_["training"] # non-default metric - gbm = lgb.LGBMRegressor(metric='mape', **params).fit(**params_fit) - assert len(gbm.evals_result_['training']) == 1 - assert 'mape' in gbm.evals_result_['training'] + gbm = lgb.LGBMRegressor(metric="mape", **params).fit(**params_fit) + assert len(gbm.evals_result_["training"]) == 1 + assert "mape" in gbm.evals_result_["training"] # no metric - gbm = lgb.LGBMRegressor(metric='None', **params).fit(**params_fit) + gbm = lgb.LGBMRegressor(metric="None", **params).fit(**params_fit) assert gbm.evals_result_ == {} # non-default metric in eval_metric - gbm = lgb.LGBMRegressor(**params).fit(eval_metric='mape', **params_fit) - assert len(gbm.evals_result_['training']) == 2 - assert 'l2' in gbm.evals_result_['training'] - assert 'mape' in gbm.evals_result_['training'] + gbm = lgb.LGBMRegressor(**params).fit(eval_metric="mape", **params_fit) + assert len(gbm.evals_result_["training"]) == 2 + assert "l2" in gbm.evals_result_["training"] + assert "mape" in gbm.evals_result_["training"] # non-default metric with non-default metric in eval_metric - gbm = lgb.LGBMRegressor(metric='gamma', **params).fit(eval_metric='mape', **params_fit) - assert len(gbm.evals_result_['training']) == 2 - assert 'gamma' in gbm.evals_result_['training'] - assert 'mape' in gbm.evals_result_['training'] + gbm = lgb.LGBMRegressor(metric="gamma", **params).fit(eval_metric="mape", **params_fit) + assert len(gbm.evals_result_["training"]) == 2 + assert "gamma" in gbm.evals_result_["training"] + assert "mape" in gbm.evals_result_["training"] # non-default metric with multiple metrics in eval_metric - gbm = lgb.LGBMRegressor(metric='gamma', - **params).fit(eval_metric=['l2', 'mape'], **params_fit) - assert len(gbm.evals_result_['training']) == 3 - assert 'gamma' in gbm.evals_result_['training'] - assert 'l2' in gbm.evals_result_['training'] - assert 'mape' in gbm.evals_result_['training'] + gbm = lgb.LGBMRegressor(metric="gamma", **params).fit(eval_metric=["l2", "mape"], **params_fit) + assert len(gbm.evals_result_["training"]) == 3 + assert "gamma" in gbm.evals_result_["training"] + assert "l2" in gbm.evals_result_["training"] + assert "mape" in gbm.evals_result_["training"] # non-default metric with multiple metrics in eval_metric for LGBMClassifier X_classification, y_classification = load_breast_cancer(return_X_y=True) - params_classification = {'n_estimators': 2, 'verbose': -1, - 'objective': 'binary', 'metric': 'binary_logloss'} - params_fit_classification = {'X': X_classification, 'y': y_classification, - 'eval_set': (X_classification, y_classification)} - gbm = lgb.LGBMClassifier(**params_classification).fit(eval_metric=['fair', 'error'], - **params_fit_classification) - assert len(gbm.evals_result_['training']) == 3 - assert 'fair' in gbm.evals_result_['training'] - assert 'binary_error' in gbm.evals_result_['training'] - assert 'binary_logloss' in gbm.evals_result_['training'] + params_classification = {"n_estimators": 2, "verbose": -1, "objective": "binary", "metric": "binary_logloss"} + params_fit_classification = { + "X": X_classification, + "y": y_classification, + "eval_set": (X_classification, y_classification), + } + gbm = lgb.LGBMClassifier(**params_classification).fit(eval_metric=["fair", "error"], **params_fit_classification) + assert len(gbm.evals_result_["training"]) == 3 + assert "fair" in 
gbm.evals_result_["training"] + assert "binary_error" in gbm.evals_result_["training"] + assert "binary_logloss" in gbm.evals_result_["training"] # default metric for non-default objective - gbm = lgb.LGBMRegressor(objective='regression_l1', **params).fit(**params_fit) - assert len(gbm.evals_result_['training']) == 1 - assert 'l1' in gbm.evals_result_['training'] + gbm = lgb.LGBMRegressor(objective="regression_l1", **params).fit(**params_fit) + assert len(gbm.evals_result_["training"]) == 1 + assert "l1" in gbm.evals_result_["training"] # non-default metric for non-default objective - gbm = lgb.LGBMRegressor(objective='regression_l1', metric='mape', - **params).fit(**params_fit) - assert len(gbm.evals_result_['training']) == 1 - assert 'mape' in gbm.evals_result_['training'] + gbm = lgb.LGBMRegressor(objective="regression_l1", metric="mape", **params).fit(**params_fit) + assert len(gbm.evals_result_["training"]) == 1 + assert "mape" in gbm.evals_result_["training"] # no metric - gbm = lgb.LGBMRegressor(objective='regression_l1', metric='None', - **params).fit(**params_fit) + gbm = lgb.LGBMRegressor(objective="regression_l1", metric="None", **params).fit(**params_fit) assert gbm.evals_result_ == {} # non-default metric in eval_metric for non-default objective - gbm = lgb.LGBMRegressor(objective='regression_l1', - **params).fit(eval_metric='mape', **params_fit) - assert len(gbm.evals_result_['training']) == 2 - assert 'l1' in gbm.evals_result_['training'] - assert 'mape' in gbm.evals_result_['training'] + gbm = lgb.LGBMRegressor(objective="regression_l1", **params).fit(eval_metric="mape", **params_fit) + assert len(gbm.evals_result_["training"]) == 2 + assert "l1" in gbm.evals_result_["training"] + assert "mape" in gbm.evals_result_["training"] # non-default metric with non-default metric in eval_metric for non-default objective - gbm = lgb.LGBMRegressor(objective='regression_l1', metric='gamma', - **params).fit(eval_metric='mape', **params_fit) - assert len(gbm.evals_result_['training']) == 2 - assert 'gamma' in gbm.evals_result_['training'] - assert 'mape' in gbm.evals_result_['training'] + gbm = lgb.LGBMRegressor(objective="regression_l1", metric="gamma", **params).fit(eval_metric="mape", **params_fit) + assert len(gbm.evals_result_["training"]) == 2 + assert "gamma" in gbm.evals_result_["training"] + assert "mape" in gbm.evals_result_["training"] # non-default metric with multiple metrics in eval_metric for non-default objective - gbm = lgb.LGBMRegressor(objective='regression_l1', metric='gamma', - **params).fit(eval_metric=['l2', 'mape'], **params_fit) - assert len(gbm.evals_result_['training']) == 3 - assert 'gamma' in gbm.evals_result_['training'] - assert 'l2' in gbm.evals_result_['training'] - assert 'mape' in gbm.evals_result_['training'] + gbm = lgb.LGBMRegressor(objective="regression_l1", metric="gamma", **params).fit( + eval_metric=["l2", "mape"], **params_fit + ) + assert len(gbm.evals_result_["training"]) == 3 + assert "gamma" in gbm.evals_result_["training"] + assert "l2" in gbm.evals_result_["training"] + assert "mape" in gbm.evals_result_["training"] # custom objective, no custom metric # default regression metric for custom objective gbm = lgb.LGBMRegressor(objective=custom_dummy_obj, **params).fit(**params_fit) - assert len(gbm.evals_result_['training']) == 1 - assert 'l2' in gbm.evals_result_['training'] + assert len(gbm.evals_result_["training"]) == 1 + assert "l2" in gbm.evals_result_["training"] # non-default regression metric for custom objective - gbm = 
lgb.LGBMRegressor(objective=custom_dummy_obj, metric='mape', **params).fit(**params_fit) - assert len(gbm.evals_result_['training']) == 1 - assert 'mape' in gbm.evals_result_['training'] + gbm = lgb.LGBMRegressor(objective=custom_dummy_obj, metric="mape", **params).fit(**params_fit) + assert len(gbm.evals_result_["training"]) == 1 + assert "mape" in gbm.evals_result_["training"] # multiple regression metrics for custom objective - gbm = lgb.LGBMRegressor(objective=custom_dummy_obj, metric=['l1', 'gamma'], - **params).fit(**params_fit) - assert len(gbm.evals_result_['training']) == 2 - assert 'l1' in gbm.evals_result_['training'] - assert 'gamma' in gbm.evals_result_['training'] + gbm = lgb.LGBMRegressor(objective=custom_dummy_obj, metric=["l1", "gamma"], **params).fit(**params_fit) + assert len(gbm.evals_result_["training"]) == 2 + assert "l1" in gbm.evals_result_["training"] + assert "gamma" in gbm.evals_result_["training"] # no metric - gbm = lgb.LGBMRegressor(objective=custom_dummy_obj, metric='None', - **params).fit(**params_fit) + gbm = lgb.LGBMRegressor(objective=custom_dummy_obj, metric="None", **params).fit(**params_fit) assert gbm.evals_result_ == {} # default regression metric with non-default metric in eval_metric for custom objective - gbm = lgb.LGBMRegressor(objective=custom_dummy_obj, - **params).fit(eval_metric='mape', **params_fit) - assert len(gbm.evals_result_['training']) == 2 - assert 'l2' in gbm.evals_result_['training'] - assert 'mape' in gbm.evals_result_['training'] + gbm = lgb.LGBMRegressor(objective=custom_dummy_obj, **params).fit(eval_metric="mape", **params_fit) + assert len(gbm.evals_result_["training"]) == 2 + assert "l2" in gbm.evals_result_["training"] + assert "mape" in gbm.evals_result_["training"] # non-default regression metric with metric in eval_metric for custom objective - gbm = lgb.LGBMRegressor(objective=custom_dummy_obj, metric='mape', - **params).fit(eval_metric='gamma', **params_fit) - assert len(gbm.evals_result_['training']) == 2 - assert 'mape' in gbm.evals_result_['training'] - assert 'gamma' in gbm.evals_result_['training'] + gbm = lgb.LGBMRegressor(objective=custom_dummy_obj, metric="mape", **params).fit(eval_metric="gamma", **params_fit) + assert len(gbm.evals_result_["training"]) == 2 + assert "mape" in gbm.evals_result_["training"] + assert "gamma" in gbm.evals_result_["training"] # multiple regression metrics with metric in eval_metric for custom objective - gbm = lgb.LGBMRegressor(objective=custom_dummy_obj, metric=['l1', 'gamma'], - **params).fit(eval_metric='l2', **params_fit) - assert len(gbm.evals_result_['training']) == 3 - assert 'l1' in gbm.evals_result_['training'] - assert 'gamma' in gbm.evals_result_['training'] - assert 'l2' in gbm.evals_result_['training'] + gbm = lgb.LGBMRegressor(objective=custom_dummy_obj, metric=["l1", "gamma"], **params).fit( + eval_metric="l2", **params_fit + ) + assert len(gbm.evals_result_["training"]) == 3 + assert "l1" in gbm.evals_result_["training"] + assert "gamma" in gbm.evals_result_["training"] + assert "l2" in gbm.evals_result_["training"] # multiple regression metrics with multiple metrics in eval_metric for custom objective - gbm = lgb.LGBMRegressor(objective=custom_dummy_obj, metric=['l1', 'gamma'], - **params).fit(eval_metric=['l2', 'mape'], **params_fit) - assert len(gbm.evals_result_['training']) == 4 - assert 'l1' in gbm.evals_result_['training'] - assert 'gamma' in gbm.evals_result_['training'] - assert 'l2' in gbm.evals_result_['training'] - assert 'mape' in 
gbm.evals_result_['training'] + gbm = lgb.LGBMRegressor(objective=custom_dummy_obj, metric=["l1", "gamma"], **params).fit( + eval_metric=["l2", "mape"], **params_fit + ) + assert len(gbm.evals_result_["training"]) == 4 + assert "l1" in gbm.evals_result_["training"] + assert "gamma" in gbm.evals_result_["training"] + assert "l2" in gbm.evals_result_["training"] + assert "mape" in gbm.evals_result_["training"] # no custom objective, custom metric # default metric with custom metric gbm = lgb.LGBMRegressor(**params).fit(eval_metric=constant_metric, **params_fit) - assert len(gbm.evals_result_['training']) == 2 - assert 'l2' in gbm.evals_result_['training'] - assert 'error' in gbm.evals_result_['training'] + assert len(gbm.evals_result_["training"]) == 2 + assert "l2" in gbm.evals_result_["training"] + assert "error" in gbm.evals_result_["training"] # non-default metric with custom metric - gbm = lgb.LGBMRegressor(metric='mape', - **params).fit(eval_metric=constant_metric, **params_fit) - assert len(gbm.evals_result_['training']) == 2 - assert 'mape' in gbm.evals_result_['training'] - assert 'error' in gbm.evals_result_['training'] + gbm = lgb.LGBMRegressor(metric="mape", **params).fit(eval_metric=constant_metric, **params_fit) + assert len(gbm.evals_result_["training"]) == 2 + assert "mape" in gbm.evals_result_["training"] + assert "error" in gbm.evals_result_["training"] # multiple metrics with custom metric - gbm = lgb.LGBMRegressor(metric=['l1', 'gamma'], - **params).fit(eval_metric=constant_metric, **params_fit) - assert len(gbm.evals_result_['training']) == 3 - assert 'l1' in gbm.evals_result_['training'] - assert 'gamma' in gbm.evals_result_['training'] - assert 'error' in gbm.evals_result_['training'] + gbm = lgb.LGBMRegressor(metric=["l1", "gamma"], **params).fit(eval_metric=constant_metric, **params_fit) + assert len(gbm.evals_result_["training"]) == 3 + assert "l1" in gbm.evals_result_["training"] + assert "gamma" in gbm.evals_result_["training"] + assert "error" in gbm.evals_result_["training"] # custom metric (disable default metric) - gbm = lgb.LGBMRegressor(metric='None', - **params).fit(eval_metric=constant_metric, **params_fit) - assert len(gbm.evals_result_['training']) == 1 - assert 'error' in gbm.evals_result_['training'] + gbm = lgb.LGBMRegressor(metric="None", **params).fit(eval_metric=constant_metric, **params_fit) + assert len(gbm.evals_result_["training"]) == 1 + assert "error" in gbm.evals_result_["training"] # default metric for non-default objective with custom metric - gbm = lgb.LGBMRegressor(objective='regression_l1', - **params).fit(eval_metric=constant_metric, **params_fit) - assert len(gbm.evals_result_['training']) == 2 - assert 'l1' in gbm.evals_result_['training'] - assert 'error' in gbm.evals_result_['training'] + gbm = lgb.LGBMRegressor(objective="regression_l1", **params).fit(eval_metric=constant_metric, **params_fit) + assert len(gbm.evals_result_["training"]) == 2 + assert "l1" in gbm.evals_result_["training"] + assert "error" in gbm.evals_result_["training"] # non-default metric for non-default objective with custom metric - gbm = lgb.LGBMRegressor(objective='regression_l1', metric='mape', - **params).fit(eval_metric=constant_metric, **params_fit) - assert len(gbm.evals_result_['training']) == 2 - assert 'mape' in gbm.evals_result_['training'] - assert 'error' in gbm.evals_result_['training'] + gbm = lgb.LGBMRegressor(objective="regression_l1", metric="mape", **params).fit( + eval_metric=constant_metric, **params_fit + ) + assert 
len(gbm.evals_result_["training"]) == 2 + assert "mape" in gbm.evals_result_["training"] + assert "error" in gbm.evals_result_["training"] # multiple metrics for non-default objective with custom metric - gbm = lgb.LGBMRegressor(objective='regression_l1', metric=['l1', 'gamma'], - **params).fit(eval_metric=constant_metric, **params_fit) - assert len(gbm.evals_result_['training']) == 3 - assert 'l1' in gbm.evals_result_['training'] - assert 'gamma' in gbm.evals_result_['training'] - assert 'error' in gbm.evals_result_['training'] + gbm = lgb.LGBMRegressor(objective="regression_l1", metric=["l1", "gamma"], **params).fit( + eval_metric=constant_metric, **params_fit + ) + assert len(gbm.evals_result_["training"]) == 3 + assert "l1" in gbm.evals_result_["training"] + assert "gamma" in gbm.evals_result_["training"] + assert "error" in gbm.evals_result_["training"] # custom metric (disable default metric for non-default objective) - gbm = lgb.LGBMRegressor(objective='regression_l1', metric='None', - **params).fit(eval_metric=constant_metric, **params_fit) - assert len(gbm.evals_result_['training']) == 1 - assert 'error' in gbm.evals_result_['training'] + gbm = lgb.LGBMRegressor(objective="regression_l1", metric="None", **params).fit( + eval_metric=constant_metric, **params_fit + ) + assert len(gbm.evals_result_["training"]) == 1 + assert "error" in gbm.evals_result_["training"] # custom objective, custom metric # custom metric for custom objective - gbm = lgb.LGBMRegressor(objective=custom_dummy_obj, - **params).fit(eval_metric=constant_metric, **params_fit) - assert len(gbm.evals_result_['training']) == 2 - assert 'error' in gbm.evals_result_['training'] + gbm = lgb.LGBMRegressor(objective=custom_dummy_obj, **params).fit(eval_metric=constant_metric, **params_fit) + assert len(gbm.evals_result_["training"]) == 2 + assert "error" in gbm.evals_result_["training"] # non-default regression metric with custom metric for custom objective - gbm = lgb.LGBMRegressor(objective=custom_dummy_obj, metric='mape', - **params).fit(eval_metric=constant_metric, **params_fit) - assert len(gbm.evals_result_['training']) == 2 - assert 'mape' in gbm.evals_result_['training'] - assert 'error' in gbm.evals_result_['training'] + gbm = lgb.LGBMRegressor(objective=custom_dummy_obj, metric="mape", **params).fit( + eval_metric=constant_metric, **params_fit + ) + assert len(gbm.evals_result_["training"]) == 2 + assert "mape" in gbm.evals_result_["training"] + assert "error" in gbm.evals_result_["training"] # multiple regression metrics with custom metric for custom objective - gbm = lgb.LGBMRegressor(objective=custom_dummy_obj, metric=['l2', 'mape'], - **params).fit(eval_metric=constant_metric, **params_fit) - assert len(gbm.evals_result_['training']) == 3 - assert 'l2' in gbm.evals_result_['training'] - assert 'mape' in gbm.evals_result_['training'] - assert 'error' in gbm.evals_result_['training'] + gbm = lgb.LGBMRegressor(objective=custom_dummy_obj, metric=["l2", "mape"], **params).fit( + eval_metric=constant_metric, **params_fit + ) + assert len(gbm.evals_result_["training"]) == 3 + assert "l2" in gbm.evals_result_["training"] + assert "mape" in gbm.evals_result_["training"] + assert "error" in gbm.evals_result_["training"] X, y = load_digits(n_class=3, return_X_y=True) - params_fit = {'X': X, 'y': y, 'eval_set': (X, y)} + params_fit = {"X": X, "y": y, "eval_set": (X, y)} # default metric and invalid binary metric is replaced with multiclass alternative - gbm = lgb.LGBMClassifier(**params).fit(eval_metric='binary_error', 
**params_fit) - assert len(gbm.evals_result_['training']) == 2 - assert 'multi_logloss' in gbm.evals_result_['training'] - assert 'multi_error' in gbm.evals_result_['training'] + gbm = lgb.LGBMClassifier(**params).fit(eval_metric="binary_error", **params_fit) + assert len(gbm.evals_result_["training"]) == 2 + assert "multi_logloss" in gbm.evals_result_["training"] + assert "multi_error" in gbm.evals_result_["training"] # invalid binary metric is replaced with multiclass alternative - gbm = lgb.LGBMClassifier(**params).fit(eval_metric='binary_error', **params_fit) - assert gbm.objective_ == 'multiclass' - assert len(gbm.evals_result_['training']) == 2 - assert 'multi_logloss' in gbm.evals_result_['training'] - assert 'multi_error' in gbm.evals_result_['training'] + gbm = lgb.LGBMClassifier(**params).fit(eval_metric="binary_error", **params_fit) + assert gbm.objective_ == "multiclass" + assert len(gbm.evals_result_["training"]) == 2 + assert "multi_logloss" in gbm.evals_result_["training"] + assert "multi_error" in gbm.evals_result_["training"] # default metric for non-default multiclass objective # and invalid binary metric is replaced with multiclass alternative - gbm = lgb.LGBMClassifier(objective='ovr', - **params).fit(eval_metric='binary_error', **params_fit) - assert gbm.objective_ == 'ovr' - assert len(gbm.evals_result_['training']) == 2 - assert 'multi_logloss' in gbm.evals_result_['training'] - assert 'multi_error' in gbm.evals_result_['training'] + gbm = lgb.LGBMClassifier(objective="ovr", **params).fit(eval_metric="binary_error", **params_fit) + assert gbm.objective_ == "ovr" + assert len(gbm.evals_result_["training"]) == 2 + assert "multi_logloss" in gbm.evals_result_["training"] + assert "multi_error" in gbm.evals_result_["training"] X, y = load_digits(n_class=2, return_X_y=True) - params_fit = {'X': X, 'y': y, 'eval_set': (X, y)} + params_fit = {"X": X, "y": y, "eval_set": (X, y)} # default metric and invalid multiclass metric is replaced with binary alternative - gbm = lgb.LGBMClassifier(**params).fit(eval_metric='multi_error', **params_fit) - assert len(gbm.evals_result_['training']) == 2 - assert 'binary_logloss' in gbm.evals_result_['training'] - assert 'binary_error' in gbm.evals_result_['training'] + gbm = lgb.LGBMClassifier(**params).fit(eval_metric="multi_error", **params_fit) + assert len(gbm.evals_result_["training"]) == 2 + assert "binary_logloss" in gbm.evals_result_["training"] + assert "binary_error" in gbm.evals_result_["training"] # invalid multiclass metric is replaced with binary alternative for custom objective - gbm = lgb.LGBMClassifier(objective=custom_dummy_obj, - **params).fit(eval_metric='multi_logloss', **params_fit) - assert len(gbm.evals_result_['training']) == 1 - assert 'binary_logloss' in gbm.evals_result_['training'] + gbm = lgb.LGBMClassifier(objective=custom_dummy_obj, **params).fit(eval_metric="multi_logloss", **params_fit) + assert len(gbm.evals_result_["training"]) == 1 + assert "binary_logloss" in gbm.evals_result_["training"] def test_multiple_eval_metrics(): - X, y = load_breast_cancer(return_X_y=True) - params = {'n_estimators': 2, 'verbose': -1, 'objective': 'binary', 'metric': 'binary_logloss'} - params_fit = {'X': X, 'y': y, 'eval_set': (X, y)} + params = {"n_estimators": 2, "verbose": -1, "objective": "binary", "metric": "binary_logloss"} + params_fit = {"X": X, "y": y, "eval_set": (X, y)} # Verify that can receive a list of metrics, only callable gbm = lgb.LGBMClassifier(**params).fit(eval_metric=[constant_metric, decreasing_metric], 
**params_fit) - assert len(gbm.evals_result_['training']) == 3 - assert 'error' in gbm.evals_result_['training'] - assert 'decreasing_metric' in gbm.evals_result_['training'] - assert 'binary_logloss' in gbm.evals_result_['training'] + assert len(gbm.evals_result_["training"]) == 3 + assert "error" in gbm.evals_result_["training"] + assert "decreasing_metric" in gbm.evals_result_["training"] + assert "binary_logloss" in gbm.evals_result_["training"] # Verify that can receive a list of custom and built-in metrics - gbm = lgb.LGBMClassifier(**params).fit(eval_metric=[constant_metric, decreasing_metric, 'fair'], **params_fit) - assert len(gbm.evals_result_['training']) == 4 - assert 'error' in gbm.evals_result_['training'] - assert 'decreasing_metric' in gbm.evals_result_['training'] - assert 'binary_logloss' in gbm.evals_result_['training'] - assert 'fair' in gbm.evals_result_['training'] + gbm = lgb.LGBMClassifier(**params).fit(eval_metric=[constant_metric, decreasing_metric, "fair"], **params_fit) + assert len(gbm.evals_result_["training"]) == 4 + assert "error" in gbm.evals_result_["training"] + assert "decreasing_metric" in gbm.evals_result_["training"] + assert "binary_logloss" in gbm.evals_result_["training"] + assert "fair" in gbm.evals_result_["training"] # Verify that works as expected when eval_metric is empty gbm = lgb.LGBMClassifier(**params).fit(eval_metric=[], **params_fit) - assert len(gbm.evals_result_['training']) == 1 - assert 'binary_logloss' in gbm.evals_result_['training'] + assert len(gbm.evals_result_["training"]) == 1 + assert "binary_logloss" in gbm.evals_result_["training"] # Verify that can receive a list of metrics, only built-in - gbm = lgb.LGBMClassifier(**params).fit(eval_metric=['fair', 'error'], **params_fit) - assert len(gbm.evals_result_['training']) == 3 - assert 'binary_logloss' in gbm.evals_result_['training'] + gbm = lgb.LGBMClassifier(**params).fit(eval_metric=["fair", "error"], **params_fit) + assert len(gbm.evals_result_["training"]) == 3 + assert "binary_logloss" in gbm.evals_result_["training"] # Verify that eval_metric is robust to receiving a list with None - gbm = lgb.LGBMClassifier(**params).fit(eval_metric=['fair', 'error', None], **params_fit) - assert len(gbm.evals_result_['training']) == 3 - assert 'binary_logloss' in gbm.evals_result_['training'] + gbm = lgb.LGBMClassifier(**params).fit(eval_metric=["fair", "error", None], **params_fit) + assert len(gbm.evals_result_["training"]) == 3 + assert "binary_logloss" in gbm.evals_result_["training"] def test_nan_handle(): @@ -1104,18 +1079,18 @@ def test_nan_handle(): X = np.random.randn(nrows, ncols) y = np.random.randn(nrows) + np.full(nrows, 1e30) weight = np.zeros(nrows) - params = {'n_estimators': 20, 'verbose': -1} - params_fit = {'X': X, 'y': y, 'sample_weight': weight, 'eval_set': (X, y), - 'callbacks': [lgb.early_stopping(5)]} + params = {"n_estimators": 20, "verbose": -1} + params_fit = {"X": X, "y": y, "sample_weight": weight, "eval_set": (X, y), "callbacks": [lgb.early_stopping(5)]} gbm = lgb.LGBMRegressor(**params).fit(**params_fit) - np.testing.assert_allclose(gbm.evals_result_['training']['l2'], np.nan) + np.testing.assert_allclose(gbm.evals_result_["training"]["l2"], np.nan) -@pytest.mark.skipif(getenv('TASK', '') == 'cuda', reason='Skip due to differences in implementation details of CUDA version') +@pytest.mark.skipif( + getenv("TASK", "") == "cuda", reason="Skip due to differences in implementation details of CUDA version" +) def test_first_metric_only(): - def 
fit_and_check(eval_set_names, metric_names, assumed_iteration, first_metric_only): - params['first_metric_only'] = first_metric_only + params["first_metric_only"] = first_metric_only gbm = lgb.LGBMRegressor(**params).fit(**params_fit) assert len(gbm.evals_result_) == len(eval_set_names) for eval_set_name in eval_set_names: @@ -1125,11 +1100,13 @@ def test_first_metric_only(): assert metric_name in gbm.evals_result_[eval_set_name] actual = len(gbm.evals_result_[eval_set_name][metric_name]) - expected = assumed_iteration + (params['early_stopping_rounds'] - if eval_set_name != 'training' - and assumed_iteration != gbm.n_estimators else 0) + expected = assumed_iteration + ( + params["early_stopping_rounds"] + if eval_set_name != "training" and assumed_iteration != gbm.n_estimators + else 0 + ) assert expected == actual - if eval_set_name != 'training': + if eval_set_name != "training": assert assumed_iteration == gbm.best_iteration_ else: assert gbm.n_estimators == gbm.best_iteration_ @@ -1137,14 +1114,15 @@ def test_first_metric_only(): X, y = make_synthetic_regression(n_samples=300) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) X_test1, X_test2, y_test1, y_test2 = train_test_split(X_test, y_test, test_size=0.5, random_state=72) - params = {'n_estimators': 30, - 'learning_rate': 0.8, - 'num_leaves': 15, - 'verbose': -1, - 'seed': 123, - 'early_stopping_rounds': 5} # early stop should be supported via global LightGBM parameter - params_fit = {'X': X_train, - 'y': y_train} + params = { + "n_estimators": 30, + "learning_rate": 0.8, + "num_leaves": 15, + "verbose": -1, + "seed": 123, + "early_stopping_rounds": 5, + } # early stop should be supported via global LightGBM parameter + params_fit = {"X": X_train, "y": y_train} iter_valid1_l1 = 4 iter_valid1_l2 = 4 @@ -1157,100 +1135,116 @@ def test_first_metric_only(): iter_min_valid1 = min([iter_valid1_l1, iter_valid1_l2]) # feval - params['metric'] = 'None' - params_fit['eval_metric'] = lambda preds, train_data: [decreasing_metric(preds, train_data), - constant_metric(preds, train_data)] - params_fit['eval_set'] = (X_test1, y_test1) - fit_and_check(['valid_0'], ['decreasing_metric', 'error'], 1, False) - fit_and_check(['valid_0'], ['decreasing_metric', 'error'], 30, True) - params_fit['eval_metric'] = lambda preds, train_data: [constant_metric(preds, train_data), - decreasing_metric(preds, train_data)] - fit_and_check(['valid_0'], ['decreasing_metric', 'error'], 1, True) + params["metric"] = "None" + params_fit["eval_metric"] = lambda preds, train_data: [ + decreasing_metric(preds, train_data), + constant_metric(preds, train_data), + ] + params_fit["eval_set"] = (X_test1, y_test1) + fit_and_check(["valid_0"], ["decreasing_metric", "error"], 1, False) + fit_and_check(["valid_0"], ["decreasing_metric", "error"], 30, True) + params_fit["eval_metric"] = lambda preds, train_data: [ + constant_metric(preds, train_data), + decreasing_metric(preds, train_data), + ] + fit_and_check(["valid_0"], ["decreasing_metric", "error"], 1, True) # single eval_set - params.pop('metric') - params_fit.pop('eval_metric') - fit_and_check(['valid_0'], ['l2'], iter_valid1_l2, False) - fit_and_check(['valid_0'], ['l2'], iter_valid1_l2, True) + params.pop("metric") + params_fit.pop("eval_metric") + fit_and_check(["valid_0"], ["l2"], iter_valid1_l2, False) + fit_and_check(["valid_0"], ["l2"], iter_valid1_l2, True) - params_fit['eval_metric'] = "l2" - fit_and_check(['valid_0'], ['l2'], iter_valid1_l2, False) - 
fit_and_check(['valid_0'], ['l2'], iter_valid1_l2, True) + params_fit["eval_metric"] = "l2" + fit_and_check(["valid_0"], ["l2"], iter_valid1_l2, False) + fit_and_check(["valid_0"], ["l2"], iter_valid1_l2, True) - params_fit['eval_metric'] = "l1" - fit_and_check(['valid_0'], ['l1', 'l2'], iter_min_valid1, False) - fit_and_check(['valid_0'], ['l1', 'l2'], iter_valid1_l1, True) + params_fit["eval_metric"] = "l1" + fit_and_check(["valid_0"], ["l1", "l2"], iter_min_valid1, False) + fit_and_check(["valid_0"], ["l1", "l2"], iter_valid1_l1, True) - params_fit['eval_metric'] = ["l1", "l2"] - fit_and_check(['valid_0'], ['l1', 'l2'], iter_min_valid1, False) - fit_and_check(['valid_0'], ['l1', 'l2'], iter_valid1_l1, True) + params_fit["eval_metric"] = ["l1", "l2"] + fit_and_check(["valid_0"], ["l1", "l2"], iter_min_valid1, False) + fit_and_check(["valid_0"], ["l1", "l2"], iter_valid1_l1, True) - params_fit['eval_metric'] = ["l2", "l1"] - fit_and_check(['valid_0'], ['l1', 'l2'], iter_min_valid1, False) - fit_and_check(['valid_0'], ['l1', 'l2'], iter_valid1_l2, True) + params_fit["eval_metric"] = ["l2", "l1"] + fit_and_check(["valid_0"], ["l1", "l2"], iter_min_valid1, False) + fit_and_check(["valid_0"], ["l1", "l2"], iter_valid1_l2, True) - params_fit['eval_metric'] = ["l2", "regression", "mse"] # test aliases - fit_and_check(['valid_0'], ['l2'], iter_valid1_l2, False) - fit_and_check(['valid_0'], ['l2'], iter_valid1_l2, True) + params_fit["eval_metric"] = ["l2", "regression", "mse"] # test aliases + fit_and_check(["valid_0"], ["l2"], iter_valid1_l2, False) + fit_and_check(["valid_0"], ["l2"], iter_valid1_l2, True) # two eval_set - params_fit['eval_set'] = [(X_test1, y_test1), (X_test2, y_test2)] - params_fit['eval_metric'] = ["l1", "l2"] - fit_and_check(['valid_0', 'valid_1'], ['l1', 'l2'], iter_min_l1, True) - params_fit['eval_metric'] = ["l2", "l1"] - fit_and_check(['valid_0', 'valid_1'], ['l1', 'l2'], iter_min_l2, True) + params_fit["eval_set"] = [(X_test1, y_test1), (X_test2, y_test2)] + params_fit["eval_metric"] = ["l1", "l2"] + fit_and_check(["valid_0", "valid_1"], ["l1", "l2"], iter_min_l1, True) + params_fit["eval_metric"] = ["l2", "l1"] + fit_and_check(["valid_0", "valid_1"], ["l1", "l2"], iter_min_l2, True) - params_fit['eval_set'] = [(X_test2, y_test2), (X_test1, y_test1)] - params_fit['eval_metric'] = ["l1", "l2"] - fit_and_check(['valid_0', 'valid_1'], ['l1', 'l2'], iter_min, False) - fit_and_check(['valid_0', 'valid_1'], ['l1', 'l2'], iter_min_l1, True) - params_fit['eval_metric'] = ["l2", "l1"] - fit_and_check(['valid_0', 'valid_1'], ['l1', 'l2'], iter_min, False) - fit_and_check(['valid_0', 'valid_1'], ['l1', 'l2'], iter_min_l2, True) + params_fit["eval_set"] = [(X_test2, y_test2), (X_test1, y_test1)] + params_fit["eval_metric"] = ["l1", "l2"] + fit_and_check(["valid_0", "valid_1"], ["l1", "l2"], iter_min, False) + fit_and_check(["valid_0", "valid_1"], ["l1", "l2"], iter_min_l1, True) + params_fit["eval_metric"] = ["l2", "l1"] + fit_and_check(["valid_0", "valid_1"], ["l1", "l2"], iter_min, False) + fit_and_check(["valid_0", "valid_1"], ["l1", "l2"], iter_min_l2, True) def test_class_weight(): X, y = load_digits(n_class=10, return_X_y=True) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) - y_train_str = y_train.astype('str') - y_test_str = y_test.astype('str') - gbm = lgb.LGBMClassifier(n_estimators=10, class_weight='balanced', verbose=-1) - gbm.fit(X_train, y_train, - eval_set=[(X_train, y_train), (X_test, y_test), (X_test, y_test), - (X_test, 
y_test), (X_test, y_test)], - eval_class_weight=['balanced', None, 'balanced', {1: 10, 4: 20}, {5: 30, 2: 40}]) + y_train_str = y_train.astype("str") + y_test_str = y_test.astype("str") + gbm = lgb.LGBMClassifier(n_estimators=10, class_weight="balanced", verbose=-1) + gbm.fit( + X_train, + y_train, + eval_set=[(X_train, y_train), (X_test, y_test), (X_test, y_test), (X_test, y_test), (X_test, y_test)], + eval_class_weight=["balanced", None, "balanced", {1: 10, 4: 20}, {5: 30, 2: 40}], + ) for eval_set1, eval_set2 in itertools.combinations(gbm.evals_result_.keys(), 2): for metric in gbm.evals_result_[eval_set1]: - np.testing.assert_raises(AssertionError, - np.testing.assert_allclose, - gbm.evals_result_[eval_set1][metric], - gbm.evals_result_[eval_set2][metric]) - gbm_str = lgb.LGBMClassifier(n_estimators=10, class_weight='balanced', verbose=-1) - gbm_str.fit(X_train, y_train_str, - eval_set=[(X_train, y_train_str), (X_test, y_test_str), - (X_test, y_test_str), (X_test, y_test_str), (X_test, y_test_str)], - eval_class_weight=['balanced', None, 'balanced', {'1': 10, '4': 20}, {'5': 30, '2': 40}]) + np.testing.assert_raises( + AssertionError, + np.testing.assert_allclose, + gbm.evals_result_[eval_set1][metric], + gbm.evals_result_[eval_set2][metric], + ) + gbm_str = lgb.LGBMClassifier(n_estimators=10, class_weight="balanced", verbose=-1) + gbm_str.fit( + X_train, + y_train_str, + eval_set=[ + (X_train, y_train_str), + (X_test, y_test_str), + (X_test, y_test_str), + (X_test, y_test_str), + (X_test, y_test_str), + ], + eval_class_weight=["balanced", None, "balanced", {"1": 10, "4": 20}, {"5": 30, "2": 40}], + ) for eval_set1, eval_set2 in itertools.combinations(gbm_str.evals_result_.keys(), 2): for metric in gbm_str.evals_result_[eval_set1]: - np.testing.assert_raises(AssertionError, - np.testing.assert_allclose, - gbm_str.evals_result_[eval_set1][metric], - gbm_str.evals_result_[eval_set2][metric]) + np.testing.assert_raises( + AssertionError, + np.testing.assert_allclose, + gbm_str.evals_result_[eval_set1][metric], + gbm_str.evals_result_[eval_set2][metric], + ) for eval_set in gbm.evals_result_: for metric in gbm.evals_result_[eval_set]: - np.testing.assert_allclose(gbm.evals_result_[eval_set][metric], - gbm_str.evals_result_[eval_set][metric]) + np.testing.assert_allclose(gbm.evals_result_[eval_set][metric], gbm_str.evals_result_[eval_set][metric]) def test_continue_training_with_model(): X, y = load_digits(n_class=3, return_X_y=True) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) init_gbm = lgb.LGBMClassifier(n_estimators=5).fit(X_train, y_train, eval_set=(X_test, y_test)) - gbm = lgb.LGBMClassifier(n_estimators=5).fit(X_train, y_train, eval_set=(X_test, y_test), - init_model=init_gbm) - assert len(init_gbm.evals_result_['valid_0']['multi_logloss']) == len(gbm.evals_result_['valid_0']['multi_logloss']) - assert len(init_gbm.evals_result_['valid_0']['multi_logloss']) == 5 - assert gbm.evals_result_['valid_0']['multi_logloss'][-1] < init_gbm.evals_result_['valid_0']['multi_logloss'][-1] + gbm = lgb.LGBMClassifier(n_estimators=5).fit(X_train, y_train, eval_set=(X_test, y_test), init_model=init_gbm) + assert len(init_gbm.evals_result_["valid_0"]["multi_logloss"]) == len(gbm.evals_result_["valid_0"]["multi_logloss"]) + assert len(init_gbm.evals_result_["valid_0"]["multi_logloss"]) == 5 + assert gbm.evals_result_["valid_0"]["multi_logloss"][-1] < init_gbm.evals_result_["valid_0"]["multi_logloss"][-1] def test_actual_number_of_trees(): @@ -1288,20 
+1282,16 @@ def test_sklearn_integration(estimator, check): check(estimator) -@pytest.mark.parametrize('task', ['binary-classification', 'multiclass-classification', 'ranking', 'regression']) +@pytest.mark.parametrize("task", ["binary-classification", "multiclass-classification", "ranking", "regression"]) def test_training_succeeds_when_data_is_dataframe_and_label_is_column_array(task): pd = pytest.importorskip("pandas") X, y, g = _create_data(task) X = pd.DataFrame(X) y_col_array = y.reshape(-1, 1) - params = { - 'n_estimators': 1, - 'num_leaves': 3, - 'random_state': 0 - } + params = {"n_estimators": 1, "num_leaves": 3, "random_state": 0} model_factory = task_to_model_factory[task] - with pytest.warns(UserWarning, match='column-vector'): - if task == 'ranking': + with pytest.warns(UserWarning, match="column-vector"): + if task == "ranking": model_1d = model_factory(**params).fit(X, y, group=g) model_2d = model_factory(**params).fit(X, y_col_array, group=g) else: @@ -1313,12 +1303,12 @@ def test_training_succeeds_when_data_is_dataframe_and_label_is_column_array(task np.testing.assert_array_equal(preds_1d, preds_2d) -@pytest.mark.parametrize('use_weight', [True, False]) +@pytest.mark.parametrize("use_weight", [True, False]) def test_multiclass_custom_objective(use_weight): centers = [[-4, -4], [4, 4], [-4, 4]] X, y = make_blobs(n_samples=1_000, centers=centers, random_state=42) weight = np.full_like(y, 2) if use_weight else None - params = {'n_estimators': 10, 'num_leaves': 7} + params = {"n_estimators": 10, "num_leaves": 7} builtin_obj_model = lgb.LGBMClassifier(**params) builtin_obj_model.fit(X, y, sample_weight=weight) builtin_obj_preds = builtin_obj_model.predict_proba(X) @@ -1332,11 +1322,11 @@ def test_multiclass_custom_objective(use_weight): assert callable(custom_obj_model.objective_) -@pytest.mark.parametrize('use_weight', [True, False]) +@pytest.mark.parametrize("use_weight", [True, False]) def test_multiclass_custom_eval(use_weight): def custom_eval(y_true, y_pred, weight): loss = log_loss(y_true, y_pred, sample_weight=weight) - return 'custom_logloss', loss, False + return "custom_logloss", loss, False centers = [[-4, -4], [4, 4], [-4, 4]] X, y = make_blobs(n_samples=1_000, centers=centers, random_state=42) @@ -1348,27 +1338,25 @@ def test_multiclass_custom_eval(use_weight): else: weight_train = None weight_valid = None - params = {'objective': 'multiclass', 'num_class': 3, 'num_leaves': 7} + params = {"objective": "multiclass", "num_class": 3, "num_leaves": 7} model = lgb.LGBMClassifier(**params) model.fit( X_train, y_train, sample_weight=weight_train, eval_set=[(X_train, y_train), (X_valid, y_valid)], - eval_names=['train', 'valid'], + eval_names=["train", "valid"], eval_sample_weight=[weight_train, weight_valid], eval_metric=custom_eval, ) eval_result = model.evals_result_ train_ds = (X_train, y_train, weight_train) valid_ds = (X_valid, y_valid, weight_valid) - for key, (X, y_true, weight) in zip(['train', 'valid'], [train_ds, valid_ds]): - np.testing.assert_allclose( - eval_result[key]['multi_logloss'], eval_result[key]['custom_logloss'] - ) + for key, (X, y_true, weight) in zip(["train", "valid"], [train_ds, valid_ds]): + np.testing.assert_allclose(eval_result[key]["multi_logloss"], eval_result[key]["custom_logloss"]) y_pred = model.predict_proba(X) _, metric_value, _ = custom_eval(y_true, y_pred, weight) - np.testing.assert_allclose(metric_value, eval_result[key]['custom_logloss'][-1]) + np.testing.assert_allclose(metric_value, eval_result[key]["custom_logloss"][-1]) def 
test_negative_n_jobs(tmp_path): @@ -1397,21 +1385,21 @@ def test_default_n_jobs(tmp_path): assert bool(re.search(rf"\[num_threads: {n_cores}\]", model_txt)) -@pytest.mark.skipif(not PANDAS_INSTALLED, reason='pandas is not installed') -@pytest.mark.parametrize('task', ['binary-classification', 'multiclass-classification', 'ranking', 'regression']) +@pytest.mark.skipif(not PANDAS_INSTALLED, reason="pandas is not installed") +@pytest.mark.parametrize("task", ["binary-classification", "multiclass-classification", "ranking", "regression"]) def test_validate_features(task): X, y, g = _create_data(task, n_features=4) - features = ['x1', 'x2', 'x3', 'x4'] + features = ["x1", "x2", "x3", "x4"] df = pd_DataFrame(X, columns=features) model = task_to_model_factory[task](n_estimators=10, num_leaves=15, verbose=-1) - if task == 'ranking': + if task == "ranking": model.fit(df, y, group=g) else: model.fit(df, y) assert model.feature_name_ == features # try to predict with a different feature - df2 = df.rename(columns={'x2': 'z'}) + df2 = df.rename(columns={"x2": "z"}) with pytest.raises(lgb.basic.LightGBMError, match="Expected 'x2' at position 1 but found 'z'"): model.predict(df2, validate_features=True) @@ -1419,59 +1407,59 @@ def test_validate_features(task): model.predict(df2, validate_features=False) -@pytest.mark.parametrize('X_type', ['dt_DataTable', 'list2d', 'numpy', 'scipy_csc', 'scipy_csr', 'pd_DataFrame']) -@pytest.mark.parametrize('y_type', ['list1d', 'numpy', 'pd_Series', 'pd_DataFrame']) -@pytest.mark.parametrize('task', ['binary-classification', 'multiclass-classification', 'regression']) +@pytest.mark.parametrize("X_type", ["dt_DataTable", "list2d", "numpy", "scipy_csc", "scipy_csr", "pd_DataFrame"]) +@pytest.mark.parametrize("y_type", ["list1d", "numpy", "pd_Series", "pd_DataFrame"]) +@pytest.mark.parametrize("task", ["binary-classification", "multiclass-classification", "regression"]) def test_classification_and_regression_minimally_work_with_all_all_accepted_data_types(X_type, y_type, task): if any(t.startswith("pd_") for t in [X_type, y_type]) and not PANDAS_INSTALLED: - pytest.skip('pandas is not installed') + pytest.skip("pandas is not installed") if any(t.startswith("dt_") for t in [X_type, y_type]) and not DATATABLE_INSTALLED: - pytest.skip('datatable is not installed') + pytest.skip("datatable is not installed") X, y, g = _create_data(task, n_samples=2_000) weights = np.abs(np.random.randn(y.shape[0])) - if task == 'binary-classification' or task == 'regression': + if task == "binary-classification" or task == "regression": init_score = np.full_like(y, np.mean(y)) - elif task == 'multiclass-classification': + elif task == "multiclass-classification": init_score = np.outer(y, np.array([0.1, 0.2, 0.7])) else: raise ValueError(f"Unrecognized task '{task}'") X_valid = X * 2 - if X_type == 'dt_DataTable': + if X_type == "dt_DataTable": X = dt_DataTable(X) - elif X_type == 'list2d': + elif X_type == "list2d": X = X.tolist() - elif X_type == 'scipy_csc': + elif X_type == "scipy_csc": X = scipy.sparse.csc_matrix(X) - elif X_type == 'scipy_csr': + elif X_type == "scipy_csr": X = scipy.sparse.csr_matrix(X) - elif X_type == 'pd_DataFrame': + elif X_type == "pd_DataFrame": X = pd_DataFrame(X) - elif X_type != 'numpy': + elif X_type != "numpy": raise ValueError(f"Unrecognized X_type: '{X_type}'") # make weights and init_score same types as y, just to avoid # a huge number of combinations and therefore test cases - if y_type == 'list1d': + if y_type == "list1d": y = y.tolist() weights = 
weights.tolist() init_score = init_score.tolist() - elif y_type == 'pd_DataFrame': + elif y_type == "pd_DataFrame": y = pd_DataFrame(y) weights = pd_Series(weights) - if task == 'multiclass-classification': + if task == "multiclass-classification": init_score = pd_DataFrame(init_score) else: init_score = pd_Series(init_score) - elif y_type == 'pd_Series': + elif y_type == "pd_Series": y = pd_Series(y) weights = pd_Series(weights) - if task == 'multiclass-classification': + if task == "multiclass-classification": init_score = pd_DataFrame(init_score) else: init_score = pd_Series(init_score) - elif y_type != 'numpy': + elif y_type != "numpy": raise ValueError(f"Unrecognized y_type: '{y_type}'") model = task_to_model_factory[task](n_estimators=10, verbose=-1) @@ -1482,73 +1470,73 @@ def test_classification_and_regression_minimally_work_with_all_all_accepted_data init_score=init_score, eval_set=[(X_valid, y)], eval_sample_weight=[weights], - eval_init_score=[init_score] + eval_init_score=[init_score], ) preds = model.predict(X) - if task == 'binary-classification': + if task == "binary-classification": assert accuracy_score(y, preds) >= 0.99 - elif task == 'multiclass-classification': + elif task == "multiclass-classification": assert accuracy_score(y, preds) >= 0.99 - elif task == 'regression': + elif task == "regression": assert r2_score(y, preds) > 0.86 else: raise ValueError(f"Unrecognized task: '{task}'") -@pytest.mark.parametrize('X_type', ['dt_DataTable', 'list2d', 'numpy', 'scipy_csc', 'scipy_csr', 'pd_DataFrame']) -@pytest.mark.parametrize('y_type', ['list1d', 'numpy', 'pd_DataFrame', 'pd_Series']) -@pytest.mark.parametrize('g_type', ['list1d_float', 'list1d_int', 'numpy', 'pd_Series']) +@pytest.mark.parametrize("X_type", ["dt_DataTable", "list2d", "numpy", "scipy_csc", "scipy_csr", "pd_DataFrame"]) +@pytest.mark.parametrize("y_type", ["list1d", "numpy", "pd_DataFrame", "pd_Series"]) +@pytest.mark.parametrize("g_type", ["list1d_float", "list1d_int", "numpy", "pd_Series"]) def test_ranking_minimally_works_with_all_all_accepted_data_types(X_type, y_type, g_type): if any(t.startswith("pd_") for t in [X_type, y_type, g_type]) and not PANDAS_INSTALLED: - pytest.skip('pandas is not installed') + pytest.skip("pandas is not installed") if any(t.startswith("dt_") for t in [X_type, y_type, g_type]) and not DATATABLE_INSTALLED: - pytest.skip('datatable is not installed') - X, y, g = _create_data(task='ranking', n_samples=1_000) + pytest.skip("datatable is not installed") + X, y, g = _create_data(task="ranking", n_samples=1_000) weights = np.abs(np.random.randn(y.shape[0])) init_score = np.full_like(y, np.mean(y)) X_valid = X * 2 - if X_type == 'dt_DataTable': + if X_type == "dt_DataTable": X = dt_DataTable(X) - elif X_type == 'list2d': + elif X_type == "list2d": X = X.tolist() - elif X_type == 'scipy_csc': + elif X_type == "scipy_csc": X = scipy.sparse.csc_matrix(X) - elif X_type == 'scipy_csr': + elif X_type == "scipy_csr": X = scipy.sparse.csr_matrix(X) - elif X_type == 'pd_DataFrame': + elif X_type == "pd_DataFrame": X = pd_DataFrame(X) - elif X_type != 'numpy': + elif X_type != "numpy": raise ValueError(f"Unrecognized X_type: '{X_type}'") # make weights and init_score same types as y, just to avoid # a huge number of combinations and therefore test cases - if y_type == 'list1d': + if y_type == "list1d": y = y.tolist() weights = weights.tolist() init_score = init_score.tolist() - elif y_type == 'pd_DataFrame': + elif y_type == "pd_DataFrame": y = pd_DataFrame(y) weights = pd_Series(weights) 
init_score = pd_Series(init_score) - elif y_type == 'pd_Series': + elif y_type == "pd_Series": y = pd_Series(y) weights = pd_Series(weights) init_score = pd_Series(init_score) - elif y_type != 'numpy': + elif y_type != "numpy": raise ValueError(f"Unrecognized y_type: '{y_type}'") - if g_type == 'list1d_float': + if g_type == "list1d_float": g = g.astype("float").tolist() - elif g_type == 'list1d_int': + elif g_type == "list1d_int": g = g.astype("int").tolist() - elif g_type == 'pd_Series': + elif g_type == "pd_Series": g = pd_Series(g) - elif g_type != 'numpy': + elif g_type != "numpy": raise ValueError(f"Unrecognized g_type: '{g_type}'") - model = task_to_model_factory['ranking'](n_estimators=10, verbose=-1) + model = task_to_model_factory["ranking"](n_estimators=10, verbose=-1) model.fit( X=X, y=y, @@ -1558,7 +1546,7 @@ def test_ranking_minimally_works_with_all_all_accepted_data_types(X_type, y_type eval_set=[(X_valid, y)], eval_sample_weight=[weights], eval_init_score=[init_score], - eval_group=[g] + eval_group=[g], ) preds = model.predict(X) assert spearmanr(preds, y).correlation >= 0.99 @@ -1570,7 +1558,7 @@ def test_classifier_fit_detects_classes_every_time(): ncols = 20 X = rng.standard_normal(size=(nrows, ncols)) - y_bin = (rng.random(size=nrows) <= .3).astype(np.float64) + y_bin = (rng.random(size=nrows) <= 0.3).astype(np.float64) y_multi = rng.integers(4, size=nrows) model = lgb.LGBMClassifier(verbose=-1) diff --git a/tests/python_package_test/test_utilities.py b/tests/python_package_test/test_utilities.py index cfd5b133b..08208ccfb 100644 --- a/tests/python_package_test/test_utilities.py +++ b/tests/python_package_test/test_utilities.py @@ -10,7 +10,7 @@ import lightgbm as lgb def test_register_logger(tmp_path): logger = logging.getLogger("LightGBM") logger.setLevel(logging.DEBUG) - formatter = logging.Formatter('%(levelname)s | %(message)s') + formatter = logging.Formatter("%(levelname)s | %(message)s") log_filename = tmp_path / "LightGBM_test_logger.log" file_handler = logging.FileHandler(log_filename, mode="w", encoding="utf-8") file_handler.setLevel(logging.DEBUG) @@ -18,29 +18,27 @@ def test_register_logger(tmp_path): logger.addHandler(file_handler) def dummy_metric(_, __): - logger.debug('In dummy_metric') - return 'dummy_metric', 1, True + logger.debug("In dummy_metric") + return "dummy_metric", 1, True lgb.register_logger(logger) - X = np.array([[1, 2, 3], - [1, 2, 4], - [1, 2, 4], - [1, 2, 3]], - dtype=np.float32) + X = np.array([[1, 2, 3], [1, 2, 4], [1, 2, 4], [1, 2, 3]], dtype=np.float32) y = np.array([0, 1, 1, 0]) lgb_train = lgb.Dataset(X, y) lgb_valid = lgb.Dataset(X, y) # different object for early-stopping eval_records = {} - callbacks = [ - lgb.record_evaluation(eval_records), - lgb.log_evaluation(2), - lgb.early_stopping(10) - ] - lgb.train({'objective': 'binary', 'metric': ['auc', 'binary_error']}, - lgb_train, num_boost_round=10, feval=dummy_metric, - valid_sets=[lgb_valid], categorical_feature=[1], callbacks=callbacks) + callbacks = [lgb.record_evaluation(eval_records), lgb.log_evaluation(2), lgb.early_stopping(10)] + lgb.train( + {"objective": "binary", "metric": ["auc", "binary_error"]}, + lgb_train, + num_boost_round=10, + feval=dummy_metric, + valid_sets=[lgb_valid], + categorical_feature=[1], + callbacks=callbacks, + ) lgb.plot_metric(eval_records) @@ -89,7 +87,7 @@ WARNING | More than one metric available, picking one to plot. 
"INFO | [LightGBM] [Warning] GPU acceleration is disabled because no non-trivial dense features can be found", "INFO | [LightGBM] [Warning] Using sparse features with CUDA is currently not supported.", "INFO | [LightGBM] [Warning] CUDA currently requires double precision calculations.", - "INFO | [LightGBM] [Info] LightGBM using CUDA trainer with DP float!!" + "INFO | [LightGBM] [Info] LightGBM using CUDA trainer with DP float!!", ] cuda_lines = [ "INFO | [LightGBM] [Warning] Metric auc is not implemented in cuda version. Fall back to evaluation on CPU.", @@ -142,11 +140,7 @@ def test_register_custom_logger(): logged_messages.append(msg) custom_logger = CustomLogger() - lgb.register_logger( - custom_logger, - info_method_name="custom_info", - warning_method_name="custom_warning" - ) + lgb.register_logger(custom_logger, info_method_name="custom_info", warning_method_name="custom_warning") lgb.basic._log_info("info message") lgb.basic._log_warning("warning message") @@ -155,18 +149,14 @@ def test_register_custom_logger(): assert logged_messages == expected_log logged_messages = [] - X = np.array([[1, 2, 3], - [1, 2, 4], - [1, 2, 4], - [1, 2, 3]], - dtype=np.float32) + X = np.array([[1, 2, 3], [1, 2, 4], [1, 2, 4], [1, 2, 3]], dtype=np.float32) y = np.array([0, 1, 1, 0]) lgb_data = lgb.Dataset(X, y) lgb.train( - {'objective': 'binary', 'metric': 'auc'}, + {"objective": "binary", "metric": "auc"}, lgb_data, num_boost_round=10, valid_sets=[lgb_data], - categorical_feature=[1] + categorical_feature=[1], ) assert logged_messages, "custom logger was not called" diff --git a/tests/python_package_test/utils.py b/tests/python_package_test/utils.py index 7eae62b14..66298b819 100644 --- a/tests/python_package_test/utils.py +++ b/tests/python_package_test/utils.py @@ -34,8 +34,9 @@ def load_linnerud(**kwargs): return sklearn.datasets.load_linnerud(**kwargs) -def make_ranking(n_samples=100, n_features=20, n_informative=5, gmax=2, - group=None, random_gs=False, avg_gs=10, random_state=0): +def make_ranking( + n_samples=100, n_features=20, n_informative=5, gmax=2, group=None, random_gs=False, avg_gs=10, random_state=0 +): """Generate a learning-to-rank dataset - feature vectors grouped together with integer-valued graded relevance scores. Replace this with a sklearn.datasets function if ranking objective becomes supported in sklearn.datasets module. @@ -81,7 +82,7 @@ def make_ranking(n_samples=100, n_features=20, n_informative=5, gmax=2, relvalues = range(gmax + 1) # build y/target and group-id vectors with user-specified group sizes. 
- if group is not None and hasattr(group, '__len__'): + if group is not None and hasattr(group, "__len__"): n_samples = np.sum(group) for i, gsize in enumerate(group): @@ -116,8 +117,9 @@ def make_ranking(n_samples=100, n_features=20, n_informative=5, gmax=2, @lru_cache(maxsize=None) def make_synthetic_regression(n_samples=100, n_features=4, n_informative=2, random_state=42): - return sklearn.datasets.make_regression(n_samples=n_samples, n_features=n_features, - n_informative=n_informative, random_state=random_state) + return sklearn.datasets.make_regression( + n_samples=n_samples, n_features=n_features, n_informative=n_informative, random_state=random_state + ) def dummy_obj(preds, train_data): @@ -126,7 +128,7 @@ def dummy_obj(preds, train_data): def mse_obj(y_pred, dtrain): y_true = dtrain.get_label() - grad = (y_pred - y_true) + grad = y_pred - y_true hess = np.ones(len(grad)) return grad, hess @@ -157,50 +159,41 @@ def sklearn_multiclass_custom_objective(y_true, y_pred, weight=None): def pickle_obj(obj, filepath, serializer): - if serializer == 'pickle': - with open(filepath, 'wb') as f: + if serializer == "pickle": + with open(filepath, "wb") as f: pickle.dump(obj, f) - elif serializer == 'joblib': + elif serializer == "joblib": joblib.dump(obj, filepath) - elif serializer == 'cloudpickle': - with open(filepath, 'wb') as f: + elif serializer == "cloudpickle": + with open(filepath, "wb") as f: cloudpickle.dump(obj, f) else: - raise ValueError(f'Unrecognized serializer type: {serializer}') + raise ValueError(f"Unrecognized serializer type: {serializer}") def unpickle_obj(filepath, serializer): - if serializer == 'pickle': - with open(filepath, 'rb') as f: + if serializer == "pickle": + with open(filepath, "rb") as f: return pickle.load(f) - elif serializer == 'joblib': + elif serializer == "joblib": return joblib.load(filepath) - elif serializer == 'cloudpickle': - with open(filepath, 'rb') as f: + elif serializer == "cloudpickle": + with open(filepath, "rb") as f: return cloudpickle.load(f) else: - raise ValueError(f'Unrecognized serializer type: {serializer}') + raise ValueError(f"Unrecognized serializer type: {serializer}") def pickle_and_unpickle_object(obj, serializer): with lgb.basic._TempFile() as tmp_file: - pickle_obj( - obj=obj, - filepath=tmp_file.name, - serializer=serializer - ) - obj_from_disk = unpickle_obj( - filepath=tmp_file.name, - serializer=serializer - ) + pickle_obj(obj=obj, filepath=tmp_file.name, serializer=serializer) + obj_from_disk = unpickle_obj(filepath=tmp_file.name, serializer=serializer) return obj_from_disk # noqa: RET504 # doing this here, at import time, to ensure it only runs once_per import # instead of once per assertion -_numpy_testing_supports_strict_kwarg = ( - "strict" in getfullargspec(np.testing.assert_array_equal).kwonlyargs -) +_numpy_testing_supports_strict_kwarg = "strict" in getfullargspec(np.testing.assert_array_equal).kwonlyargs def np_assert_array_equal(*args, **kwargs):
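
Illustrative sketch (an editorial aside, not a hunk of the patch): the reformatted test_first_metric_only above exercises first_metric_only together with early_stopping_rounds passed as plain LightGBM parameters through the scikit-learn wrapper. A minimal standalone version of that pattern, assuming an arbitrary synthetic regression dataset in place of the suite's make_synthetic_regression helper and parameter values mirroring the test, could look like this:

import lightgbm as lgb
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split

# arbitrary synthetic data standing in for make_synthetic_regression() from utils.py
X, y = make_regression(n_samples=300, n_features=4, n_informative=2, random_state=42)
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.2, random_state=42)

gbm = lgb.LGBMRegressor(
    n_estimators=30,
    learning_rate=0.8,
    num_leaves=15,
    verbose=-1,
    seed=123,
    early_stopping_rounds=5,  # early stopping configured via a global LightGBM parameter
    first_metric_only=True,  # only the first metric in eval_metric ("l1") can trigger early stopping
)
gbm.fit(X_train, y_train, eval_set=[(X_valid, y_valid)], eval_metric=["l1", "l2"])

# both metrics are recorded in evals_result_, but best_iteration_ is driven by "l1" alone
print(gbm.best_iteration_, sorted(gbm.evals_result_["valid_0"].keys()))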