diff --git a/.gitignore b/.gitignore
index b1e32ad2..d114daf4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -120,3 +120,5 @@ dmypy.json

 # Project-specific
 sandbox.ipynb
+solaris/nets/weights
+model_weights
diff --git a/.travis.yml b/.travis.yml
index a70f4df1..56a0499f 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,6 +1,7 @@
 language: python
 sudo: required
 dist: xenial
+cache: false
 python:
   - "3.6"
   - "3.7"
@@ -19,20 +20,22 @@ install:
   - export PATH="$HOME/miniconda/bin:$PATH"
   - hash -r
   - conda config --set always_yes yes --set changeps1 no
-  - conda update -q conda # Useful for debugging any issues with conda
+  - conda update conda -c conda-forge
   - conda info -a
   # switch python version spec in environment.yml to match TRAVIS_PYTHON_VERSION
   # annoying workaround to `conda env create python=$TRAVIS_PYTHON_VERSION` not working
   - sed -i -E 's/(python=)(.*)/\1'$TRAVIS_PYTHON_VERSION'/' ./environment.yml
   - conda env create -n solaris -f environment.yml
+  - conda list -n solaris
   - source activate solaris
   - python --version
   - pip install -q -e .[test]
-  - pip install codecov pytest pytest-cov
+  - conda install pytest=4.6.2 -c conda-forge
+  - pip install codecov pytest-cov

 # command to run tests
 script:
-  - pytest --cov=./
+  - python -m pytest --cov=./

 after_success:
   - codecov
diff --git a/docs/_templates/custom_sidebar.html b/docs/_templates/custom_sidebar.html
new file mode 100644
index 00000000..8ff3907c
--- /dev/null
+++ b/docs/_templates/custom_sidebar.html
@@ -0,0 +1 @@
+
diff --git a/docs/api/index.rst b/docs/api/index.rst
index fea8972b..01f82ef6 100644
--- a/docs/api/index.rst
+++ b/docs/api/index.rst
@@ -1,3 +1,5 @@
+.. _api_index:
+
 .. title:: API reference contents

 ###################
diff --git a/docs/conf.py b/docs/conf.py
index e52d9a0d..db96850a 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -24,8 +24,8 @@
 copyright = u'2018-{}, CosmiQ Works: an IQT Lab'.format(time.strftime("%Y"))

 # The full version, including alpha/beta/rc tags
-release = '0.1.1'
-version = '0.1.1'
+release = '0.1.2'
+version = '0.1.2'


 # -- General configuration ---------------------------------------------------
@@ -128,7 +128,7 @@ def setup(app):
 # default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
 # 'searchbox.html']``.
 #
-html_sidebars = {'**': ['localtoc.html', 'sourcelink.html', 'searchbox.html']}
+html_sidebars = {'**': ['custom_sidebar.html', 'sourcelink.html', 'searchbox.html']}


 # -- Options for HTMLHelp output ---------------------------------------------
diff --git a/docs/index.rst b/docs/index.rst
index b7d95d8c..373ecbe7 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -23,6 +23,7 @@ An open source machine learning pipeline for geospatial imagery

    installation
    intro
+   pretrained_models
    api/index
    tutorials/index

@@ -31,13 +32,14 @@ An open source machine learning pipeline for geospatial imagery
 User Guide
 ==========

-* `What is solaris? `_
-* `Installation `_
-* `Tutorials and recipes `_
+* :ref:`What is solaris? <intro>`
+* :ref:`Installation <installation>`
+* :ref:`Pretrained models available in solaris <pretrained_models>`
+* :ref:`Tutorials and recipes <tutorials_index>`

 Reference
 =========

-* `API reference `_
+* :ref:`API reference <api_index>`

 Index
 =====
diff --git a/docs/installation.rst b/docs/installation.rst
index 9dae3810..6340bf2a 100644
--- a/docs/installation.rst
+++ b/docs/installation.rst
@@ -1,3 +1,5 @@
+.. _installation:
+
 ######################
 Installing ``solaris``
 ######################
diff --git a/docs/intro.rst b/docs/intro.rst
index d595cafc..feaa1431 100644
--- a/docs/intro.rst
+++ b/docs/intro.rst
@@ -1,3 +1,5 @@
+.. _intro:
+
 ##############################
 An introduction to ``solaris``
 ##############################
diff --git a/docs/pretrained_models.rst b/docs/pretrained_models.rst
new file mode 100644
index 00000000..b5d7b5a3
--- /dev/null
+++ b/docs/pretrained_models.rst
@@ -0,0 +1,62 @@
+.. _pretrained_models:
+
+##########################################
+Pretrained models available in ``solaris``
+##########################################
+
+``solaris`` provides access to a number of pre-trained models from
+`the SpaceNet challenges `_. See the table below for a summary. The model
+name in the first column should be used as the ``"model_name"`` argument in
+`the config file `_ if you wish to use that model with ``solaris``. Note
+that we re-trained the competitors' models for compatibility with
+``solaris``, so the training parameters, inputs, and performance may vary
+slightly from the original models.
+
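+To use one of these models, set ``model_name`` in your config and load it
+with the config parser. A minimal sketch (the path below is a placeholder;
+``sol.utils.config.parse`` and the ``model_name`` key are the same ones used
+in the inference tutorial):
+
+.. code-block:: python
+
+    import solaris as sol
+
+    # parse a YAML config that names one of the models in the table below
+    config = sol.utils.config.parse('/path/to/xdxd_spacenet4.yml')
+    print(config['model_name'])  # -> 'xdxd_spacenet4'
+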
+ ++------------------------------------+-------------------------+-------------------+---------------+------------------------+-----------------+------------+-----------------+---------------------+ +| Model name | Loss function | Optimizer | Learning Rate | Training input | Training mask | Batch size | Training Epochs | Pre-trained weights | ++====================================+=========================+===================+===============+========================+=================+============+=================+=====================+ +| xdxd_spacenet4 | BCE + | Adam | 1e-4 | SpaceNet 4 | Footprints only | 12 | 60 | None | +| | Jaccard (4:1) | default params | with decay | Pan-sharpened RGB | | | | | ++------------------------------------+-------------------------+-------------------+---------------+------------------------+-----------------+------------+-----------------+---------------------+ +| selimsef_spacenet4_resnet34unet | Focal + Dice | AdamW | 2e-4 | SpaceNet 4 | 3-channel (FP, | 42 | 70 | ImageNet (encoder | +| | (1:1) | 1e-3 weight decay | with decay | Pan-sharpened RGB+NIR | (edge, contact) | | | only) | ++------------------------------------+-------------------------+-------------------+---------------+------------------------+-----------------+------------+-----------------+---------------------+ +| selimsef_spacenet4_densenet121unet | Focal + Dice | AdamW | 2e-4 | SpaceNet 4 | 3-channel (FP, | 32 | 70 | ImageNet (encoder | +| | (1:1) | 1e-3 weight decay | with decay | Pan-sharpened RGB | (edge, contact) | | | only) | ++------------------------------------+-------------------------+-------------------+---------------+------------------------+-----------------+------------+-----------------+---------------------+ +| selimsef_spacenet4_densenet161unet | Focal + Dice | AdamW | 2e-4 | SpaceNet 4 | 3-channel (FP, | 20 | 60 | ImageNet (encoder | +| | (1:1) | 1e-3 weight decay | with decay | Pan-sharpened RGB | (edge, contact) | | | only) | ++------------------------------------+-------------------------+-------------------+---------------+------------------------+-----------------+------------+-----------------+---------------------+ + +.. _XDXDconfig: https://github.com/CosmiQ/solaris/blob/master/solaris/nets/configs/xdxd_spacenet4.yml +.. _ssresnet34config: https://github.com/CosmiQ/solaris/blob/master/solaris/nets/configs/selimsef_resnet34unet_spacenet4.yml +.. _ssdense121config: https://github.com/CosmiQ/solaris/blob/master/solaris/nets/configs/selimsef_densenet121unet_spacenet4.yml +.. _ssdense161config: https://github.com/CosmiQ/solaris/blob/master/solaris/nets/configs/selimsef_densenet161unet_spacenet4.yml +.. _XDXDweights: https://s3.amazonaws.com/spacenet-dataset/spacenet-model-weights/spacenet-4/xdxd_spacenet4_solaris_weights.pth +.. _ssresnet34weights: https://s3.amazonaws.com/spacenet-dataset/spacenet-model-weights/spacenet-4/selimsef_spacenet4_resnet34unet_solaris_weights.pth +.. _ssdense121weights: https://s3.amazonaws.com/spacenet-dataset/spacenet-model-weights/spacenet-4/selimsef_spacenet4_densenet121unet_solaris_weights.pth +.. 
+.. _XDXDconfig: https://github.com/CosmiQ/solaris/blob/master/solaris/nets/configs/xdxd_spacenet4.yml
+.. _ssresnet34config: https://github.com/CosmiQ/solaris/blob/master/solaris/nets/configs/selimsef_resnet34unet_spacenet4.yml
+.. _ssdense121config: https://github.com/CosmiQ/solaris/blob/master/solaris/nets/configs/selimsef_densenet121unet_spacenet4.yml
+.. _ssdense161config: https://github.com/CosmiQ/solaris/blob/master/solaris/nets/configs/selimsef_densenet161unet_spacenet4.yml
+.. _XDXDweights: https://s3.amazonaws.com/spacenet-dataset/spacenet-model-weights/spacenet-4/xdxd_spacenet4_solaris_weights.pth
+.. _ssresnet34weights: https://s3.amazonaws.com/spacenet-dataset/spacenet-model-weights/spacenet-4/selimsef_spacenet4_resnet34unet_solaris_weights.pth
+.. _ssdense121weights: https://s3.amazonaws.com/spacenet-dataset/spacenet-model-weights/spacenet-4/selimsef_spacenet4_densenet121unet_solaris_weights.pth
+.. _ssdense161weights: https://s3.amazonaws.com/spacenet-dataset/spacenet-model-weights/spacenet-4/selimsef_spacenet4_densenet161unet_solaris_weights.pth
diff --git a/docs/tutorials/cli_eval.rst b/docs/tutorials/cli_eval.rst
deleted file mode 100644
index 8af957e0..00000000
--- a/docs/tutorials/cli_eval.rst
+++ /dev/null
@@ -1,13 +0,0 @@
-Evaluating prediction quality on SpaceNet data with the ``solaris`` CLI
-=======================================================================
-
-*Coming soon!*
-
-
--------------
-
-
-Follow us at our blog `The DownlinQ `_ or
-`on Twitter `_ for updates!
-
-`Click here to view solaris on GitHub `_
diff --git a/docs/tutorials/index.rst b/docs/tutorials/index.rst
index 6eeffb8a..07fe565b 100644
--- a/docs/tutorials/index.rst
+++ b/docs/tutorials/index.rst
@@ -1,3 +1,5 @@
+.. _tutorials_index:
+
 ##############################
 Solaris Tutorials and Cookbook
 ##############################
@@ -40,7 +42,7 @@ on creating configuration files and running the CLI can be found below.

 * `Creating reference files to help solaris find your imagery `_
 * `Creating training masks with the solaris CLI `_
 * `Running a full deep learning pipeline using the solaris CLI `_
-* :doc:`Evaluating prediction quality on SpaceNet data with the solaris CLI `
+* `Evaluating prediction quality on SpaceNet data with the solaris CLI `_

 If these relatively narrow use cases don't cover your needs, the ``solaris``
 python API can help!
@@ -62,10 +64,10 @@ the tutorials below.

 * `Tiling imagery `_
 * `Creating training masks `_
-* :doc:`Training a SpaceNet model `
-* :doc:`Inference with a pre-trained SpaceNet model `
-* :doc:`Training a custom model `
-* :doc:`Converting pixel masks to vector labels `
+* `Training a SpaceNet model `_
+* `Inference with a pre-trained SpaceNet model `_
+* `Training a custom model `_
+* `Converting pixel masks to vector labels `_

 * `Scoring your model's performance with the solaris Python API `_
diff --git a/docs/tutorials/notebooks/api_inference_spacenet.ipynb b/docs/tutorials/notebooks/api_inference_spacenet.ipynb
new file mode 100644
index 00000000..2bb9fa0f
--- /dev/null
+++ b/docs/tutorials/notebooks/api_inference_spacenet.ipynb
@@ -0,0 +1,161 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Inferencing with included SpaceNet models and the `solaris` Python API\n",
+    "\n",
+    "We've included a number of SpaceNet models with `solaris`, including pre-trained model weights. You can find more information about your model choices [here](../pretrained_models.html) and the original competitors' code for the models [here](https://github.com/spacenetchallenge/spacenet_off_nadir_solutions).\n",
+    "\n",
+    "For this tutorial we'll walk through running inference with XD_XD's SpaceNet 4 model. We'll use the config file for that model, which you can find [here](https://github.com/CosmiQ/solaris/blob/master/solaris/nets/configs/xdxd_spacenet4.yml).\n",
+    "\n",
+    "You'll also need to [create the image reference files](creating_im_reference_csvs.ipynb) before you start.\n",
+    "\n",
+    "First, we'll load the configuration:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "{'model_name': 'xdxd_spacenet4',\n",
+       " 'model_path': None,\n",
+       " 'train': False,\n",
+       " 'infer': True,\n",
+       " 'pretrained': True,\n",
+       " 'nn_framework': 'torch',\n",
+       " 'batch_size': 12,\n",
+       " 'data_specs': {'width': 512,\n",
+       "  'height': 512,\n",
+       "  'image_type': 'zscore',\n",
+       "  'rescale': False,\n",
+       "  'rescale_minima': 'auto',\n",
+       "  'rescale_maxima': 'auto',\n",
+       "  'channels': 4,\n",
+       "  'label_type': 'mask',\n",
+       "  'is_categorical': False,\n",
+       "  'mask_channels': 1,\n",
+       "  'val_holdout_frac': 0.2,\n",
+       "  'data_workers': None},\n",
+       " 'training_data_csv': '/path/to/training_df.csv',\n",
+       " 'validation_data_csv': None,\n",
+       " 'inference_data_csv': '/path/to/test_df.csv',\n",
+       " 'training_augmentation': {'augmentations': {'DropChannel': {'idx': 3,\n",
+       "    'axis': 2},\n",
+       "   'HorizontalFlip': {'p': 0.5},\n",
+       "   'RandomRotate90': {'p': 0.5},\n",
+       "   'RandomCrop': {'height': 512, 'width': 512, 'p': 1.0},\n",
+       "   'Normalize': {'mean': [0.006479, 0.009328, 0.01123],\n",
+       "    'std': [0.004986, 0.004964, 0.00495],\n",
+       "    'max_pixel_value': 65535.0,\n",
+       "    'p': 1.0}},\n",
+       "  'p': 1.0,\n",
+       "  'shuffle': True},\n",
+       " 'validation_augmentation': {'augmentations': {'DropChannel': {'idx': 3,\n",
+       "    'axis': 2},\n",
+       "   'CenterCrop': {'height': 512, 'width': 512, 'p': 1.0},\n",
+       "   'Normalize': {'mean': [0.006479, 0.009328, 0.01123],\n",
+       "    'std': [0.004986, 0.004964, 0.00495],\n",
+       "    'max_pixel_value': 65535.0,\n",
+       "    'p': 1.0}},\n",
+       "  'p': 1.0},\n",
+       " 'inference_augmentation': {'augmentations': {'DropChannel': {'idx': 3,\n",
+       "    'axis': 2,\n",
+       "    'p': 1.0},\n",
+       "   'Normalize': {'mean': [0.006479, 0.009328, 0.01123],\n",
+       "    'std': [0.004986, 0.004964, 0.00495],\n",
+       "    'max_pixel_value': 65535.0,\n",
+       "    'p': 1.0}},\n",
+       "  'p': 1.0},\n",
+       " 'training': {'epochs': 60,\n",
+       "  'steps_per_epoch': None,\n",
+       "  'optimizer': 'Adam',\n",
+       "  'lr': 0.0001,\n",
+       "  'opt_args': None,\n",
+       "  'loss': {'bcewithlogits': None, 'jaccard': None},\n",
+       "  'loss_weights': {'bcewithlogits': 10, 'jaccard': 2.5},\n",
+       "  'metrics': {'training': None, 'validation': None},\n",
+       "  'checkpoint_frequency': 10,\n",
+       "  'callbacks': {'model_checkpoint': {'filepath': 'xdxd_best.pth',\n",
+       "    'monitor': 'val_loss'}},\n",
+       "  'model_dest_path': 'xdxd.pth',\n",
+       "  'verbose': True},\n",
+       " 'inference': {'window_step_size_x': None,\n",
+       "  'window_step_size_y': None,\n",
+       "  'output_dir': 'inference_out/'}}"
+      ]
+     },
+     "execution_count": 2,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "import solaris as sol\n",
+    "\n",
+    "config = sol.utils.config.parse('/Users/nweir/code/cosmiq_repos/solaris/solaris/nets/configs/xdxd_spacenet4.yml')\n",
+    "config"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As you can see, the YAML gets parsed into a set of nested dictionaries by `solaris`. Relevant pieces of that config then get read during training and inference.\n",
+    "\n",
+    "Inferencing is _very_ similar to [training](api_training_spacenet.ipynb), with one major difference: you'll load in and pass the reference CSV for your inference dataset as an argument to the `Inferer` object. `solaris` is set up this way so that you can quickly and easily iterate through inference on a number of inputs without having to re-instantiate your inferencer each time."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "inferer = sol.nets.infer.Inferer(config)\n",
+    "inference_df = sol.nets.infer.get_infer_df(config)\n",
+    "inferer(inference_df)"
+   ]
+  },
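+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Because the `Inferer` isn't tied to any one dataset, you can re-use it across several reference CSVs rather than rebuilding it each time. A minimal sketch (the CSV paths below are placeholders, and each file is assumed to have the same columns as your `inference_data_csv`):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# run the same inferer over several test sets, one after another\n",
+    "for csv_path in ['/path/to/test_df_a.csv', '/path/to/test_df_b.csv']:\n",
+    "    config['inference_data_csv'] = csv_path\n",
+    "    inference_df = sol.nets.infer.get_infer_df(config)\n",
+    "    inferer(inference_df)"
+   ]
+  },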
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The above commands will create prediction segmentation masks for each input image in the `output_dir` specified in your `config`. You can then [use sol.vector.mask.mask_to_poly_geojson to convert these predicted masks to vector-formatted geometries](api_mask_to_vector.ipynb)."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "solaris",
+   "language": "python",
+   "name": "solaris"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.7"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/docs/tutorials/notebooks/api_mask_to_vector.ipynb b/docs/tutorials/notebooks/api_mask_to_vector.ipynb
new file mode 100644
index 00000000..61d859ab
--- /dev/null
+++ b/docs/tutorials/notebooks/api_mask_to_vector.ipynb
@@ -0,0 +1,303 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Converting model outputs to vector format using the Python API\n",
+    "\n",
+    "To use segmentation masks in a geospatial application, one often needs to convert them to a vector format. This is a non-trivial task: a lot of science goes into finding the best way to convert a pixel mask to vector-formatted outputs. We've provided a basic implementation in `solaris` for users to build from.\n",
+    "\n",
+    "Let's begin with an image showing some predicted building footprints:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       ""
+      ]
+     },
+     "execution_count": 1,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "import solaris as sol\n",
+    "import os\n",
+    "import skimage\n",
+    "import matplotlib.pyplot as plt\n",
+    "\n",
+    "mask_image = skimage.io.imread(os.path.join(sol.data.data_dir, 'sample_fbc_from_df2px.tif'))\n",
+    "\n",
+    "f, ax = plt.subplots(figsize=(10, 8))\n",
+    "plt.imshow(mask_image)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "(900, 900, 3)"
+      ]
+     },
+     "execution_count": 2,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "mask_image.shape"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "This image has the value 255 in the first channel anywhere a building footprint is predicted, 255 in the second channel where an edge is predicted, and 255 in the third channel anywhere two buildings are very near one another:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "image/png": "(base64-encoded PNG rendering of the footprint/edge/contact mask omitted)"
+     },
+     "execution_count": 3,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": []
+  },
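+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To vectorize predictions like these, pass the footprint channel to `sol.vector.mask.mask_to_poly_geojson`. A minimal sketch: we leave the function's optional arguments (for example, thresholding and georegistration settings) at their defaults here, so see the API reference before relying on it:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# vectorize the first (footprint) channel of the mask;\n",
+    "# the result is a GeoDataFrame of building polygons\n",
+    "footprint_polys = sol.vector.mask.mask_to_poly_geojson(mask_image[:, :, 0])\n",
+    "footprint_polys.head()"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "solaris",
+   "language": "python",
+   "name": "solaris"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.7"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}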
PksuS3J3kke720rFpbk9yOsmpJNcvdxY206ZupDd1vqVNlFm6TZIcAP4Y+GbgVuBPq+r9SW4DLq2q/yrJtcCHgOuAlwH/N/DvVdWzuzyvfTeStJnuq6rDkx6YdR/UW4DPVdW/Am4EjnXDjwHv7O7fCNxZVU9X1aPAaUZhJUnS1GYNqCOMWkcAV1TVWYDu9vJu+JXAE2PTnOmGSZI0takDKsmFwDuAX91r1AnDzuvCS3JLkpNJTk5bgyRpc8zSgnob8Mmqeqr7/6kkBwG623Pd8DPA1WPTXQU8uf3JquqOqjq8U9+jJGl6QzwNY5aAejfPde8BnACOdvePAneNDT+S5KIk1wCHgHv3W6gkabNMdR5UkouBtwLfNzb4/cDxJDcDjwPvAqiqB5McBx4CngFu3e0IPkmSJpnpMPOlFeFh5pK0Lz0+iX1hh5lLkhrVQoNjkQwoSVKTDChJUpMMKElSkwwoSVKTDChJUpMMKElSkwwoSVKTDChJGoCenqS7KwNKkgZiaCFlQEmSmmRASZKaZEBJkppkQEmSmmRASZKaZEBJkppkQEmSmmRASZKaZEBJkppkQEmSmmRASZKaZEBJkpo0uICqqnWXIElagMEF1NCu5itJm2pwASVJGgYDSpLUJANKktQkA0qS1CQDauCqyiMbJfWSASVJapIBtSFsRUnqGwNq4MbPCzOkJPWJASVJatJUAZXkvUkeTPJAkg8leWGSy5LcneSR7vbSsfFvT3I6yakk1y+vfE3Dq2s8Zz8HjWxNO/4naXn2DKgkVwI/AByuqtcAB4AjwG3APVV1CLin+58k13aPvxq4AfhAkgPLKV/TSrLxQWWgSP0ybRffBcCLklwAXAw8CdwIHOsePwa8s7t/I3BnVT1dVY8Cp4HrFleytH/zhJUhL63WngFVVX8M/CTwOHAW+FJVfQy4oqrOduOcBS7vJrkSeGLsKc50w6Rm7DdoDCtp+abp4ruUUavoGuBlwCVJbtptkgnDzvu6muSWJCeTnJy2WGk/DBSpX6bp4vsO4NGq+kJVfRX4CPAtwFNJDgJ0t+e68c8AV49NfxWjLsHnqao7qupwVR3ezwxIkoZpmoB6HHhDkosz+gr6FuBh4ARwtBvnKHBXd/8EcCTJRUmuAQ4B9y62bGl/PGBiNSYd+eiRkJrWBXuNUFUfT/Jh4JPAM8CngDuAFwPHk9zMKMTe1Y3/YJLjwEPd+LdW1bNLql9Sw5LsGkJ2u2o3aeEbTJL1F7FA48vUD2BbFvHeVJXvq7Q49+20q8crSUhzaOGLnTR0BpQ0A4NJWh0Dagm8QOtm8L2VlsuAWhL3UbRvPwHjibrS8hlQS+QGrG3zvj++r9JqGFArYFdQO7bCxXCS2mdAzWCeEwu3NmiGVDsMGakfDKg5GDaStHwG1JxmCSlbUZI0OwNqBtu7huYNKYNKkvZmQM1oUfsvDClJ2p0BhWEhSS0yoDrzdNfNM934nyRpZwbUAtgCk6TF2/P3oPpmVT+FsNfv3Eh6Pn8XSrMaXEDtZ0WfNdwMKWl6hpBmZRcffnAkqUUG1DbzXMrIgJOkxTOgOv6GkyS1ZdABNe+FXSVJ51v1l/dBB5SBI0mLs+pt6qADal4GmyStnwG1jeEkSW0woCRJTTKgJElNMqAkSU0yoCRJTTKgJE3NX4TWKhlQkqYySzAZYloEA0rSUtja0n4ZUJKkJhlQ0gxsEUx3MrsXX9YiGFA94Ad8/ca7q3w/puNVWbRfBlTD7MNvx/aN7Sa+LwaOVm1wP/kuLZsbamk1bEFJkppkQPXEJnYpqU2ui1oVA6phdiWpJePBZEhpFVrZB/VF4Cvdbeu+jhXWuc+QWmmt+9CLOrv3ohe1dhZa6zzr4pTTbOwyXbK+1PrynR5IK9+EkpysqsPrrmMvfakT+lNrX+oEa12GvtQJ1rpqdvFJkppkQEmSmtRSQN2x7gKm1Jc6oT+19qVOsNZl6EudYK0r1cw+KEmSxrXUgpIk6a+tPaCS3JDkVJLTSW5roJ6fS3IuyQNjwy5LcneSR7rbS8ceu72r/VSS61dY59VJfifJw0keTPKDDdf6wiT3Jvl0V+uPt1pr99oHknwqyUcbr/OxJJ9Jcn+Sk43X+tIkH07y2W6dfWNrtSZ5Vbcst/6+nOQ9rdU59trv7T5PDyT5UPc5a7LWuW1dkHQdf8AB4HPAK4ELgU8D1665pm8FXg88MDbsHwO3dfdvA/5Rd//aruaLgGu6eTmwojoPAq/v7r8E+MOunhZrDfDi7v4LgI8Db2ix1u71fwj4FeCjrb7/3es/BnzdtmGt1noM+N7u/oXAS1uttavhAPB5RufoNFcncCXwKPCi7v/jwPe0WOu+5nOtLw5vBH5r7P/bgdvXvlDgFTw/oE4BB7v7B4FTk+oFfgt445pqvgt4a+u1AhcDnwS+ucVagauAe4A381xANVdn93qPcX5ANVcr8DXdxjSt1zr2mt8J/F6rdTIKqCeAyxhdcOGjXc3N1bqfv3V38W0t5C1numGtuaKqzgJ0t5d3w5uoP8krgNcxapk0WWvXbXY/cA64u6parfWngR8G/mpsWIt1AhTwsST3JbmlG9Zira8EvgD8fNd1+rNJLmm01i1HgA9195urs6r+GPhJ4HHgLPClqvpYi7Xux7oDatJ1UPp0WOHa60/yYuDXgPdU1Zd3G3XCsJXVWlXPVtVrGbVQrkvyml1GX0utSb4bOFdV9007yYRhq3z/31RVrwfeBtya5Ft3GXedtV7AqNv8Z6rqdYwua7bb/ua1LtckFwLvAH51r1EnDFtJnd2+pRsZdde9DLgkyU27TTJhWBU6ZtoAAAHNSURBVPPb2nUH1Bng6rH/rwKeXFMtu3kqyUGA7vZcN3yt9Sd5AaNw+uWq+kjLtW6pqj8Dfhe4gfZqfRPwjiSPAXcCb07ySw3WCUBVPdndngN+Hbiu0VrPAGe6VjPAhxkFVou1wijwP1lVT3X/t1jndwCPVtUXquqrwEeAb2m01rmtO6A+ARxKck33reUIcGLNNU1yAjja3T/KaH/P1vAjSS5Kcg1wCLh3FQUlCfBB4OGq+qnGa/36JC/t7r+I0Yfrs63VWlW3V9VVVfUKRuvib1fVTa3VCZDkkiQv2brPaP/DAy3WWlWfB55I8qpu0FuAh1qstfNunuve26qntTofB96Q5OJuW/AW4OFGa53funeCAW9ndATa54D3NVDPhxj16X6V0beOm4GvZbTj/JHu9rKx8d/X1X4KeNsK6/xbjJrofwDc3/29vdFavxH4VFfrA8A/6IY3V+vY6387zx0k0VydjPbrfLr7e3Drs9Nird1rvxY42a0DvwFc2mKtjA7i+RPgb4wNa67O7rV/nNEXvQeAX2R0hF6Ttc7755UkJElNWncXnyRJExlQkqQmGVCSpCYZUJKkJhlQkqQmGVCSpCYZUJKkJhlQkqQm/f/BJbnO8HidowAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "f, axarr = plt.subplots(3, 1, figsize=(8, 24))\n", + "\n", + "axarr[0].imshow(mask_image[:, :, 0], cmap='gray')\n", + "axarr[0].set_title('footprints', fontsize=16)\n", + "axarr[1].imshow(mask_image[:, :, 1], cmap='gray')\n", + "axarr[1].set_title('edges', fontsize=16)\n", + "axarr[2].imshow(mask_image[:, :, 2], cmap='gray')\n", + "axarr[2].set_title('contacts', fontsize=16);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "First, we'll work with just the first channel for the simplest case of going from footprints to polygons. We'll use the `solaris.vector.mask.mask_to_poly_geojson()` function. This function has a number of arguments for customizing function:\n", + "\n", + "- pred_arr: The prediction array (in this case, `mask_image`)\n", + "- channel_scaling: Scaling factors to use if using a multi-channel mask; see the next example.\n", + "- reference_im: A georeferenced image that has the same extent as `mask_image` to use for georeferencing polygons. This is optional.\n", + "- output_path: The path to the file to save. If not provided, the geometries are returned in a geopandas `GeoDataFrame`, but no file is saved.\n", + "- output_type: Should the saved file be a `'csv'` or a `'geojson'`?\n", + "- min_area: Use this argument to set a minimum area for geometries to be retained. This can be useful to eliminate speckling or very small, erroneous predictions.\n", + "- bg_threshold: The value to set to separate background from foreground pixels in the mask. In this example, we'll use `1` because anything >0 is foreground.\n", + "- simplify: A boolean to indicate whether or not you'd like to use the Douglas-Peucker algorithm to simplify geometries. This can _dramatically_ accelerate processing of geometries later, and can also make your geometries look nicer!\n", + "- tolerance: The tolerance parameter for the Douglas-Peucker simplification algorithm. Only has an effect if `simplify=True`.\n", + "\n", + "Let's convert the first channel of the above mask to georegistered polygons." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
geometryvalue
0POLYGON ((542 0, 542 1, 545 1, 545 2, 546 2, 5...255.0
1POLYGON ((202 52, 202 53, 201 53, 190 53, 190 ...255.0
2POLYGON ((339 54, 339 72, 340 72, 340 78, 352 ...255.0
3POLYGON ((548 55, 548 56, 547 56, 542 56, 542 ...255.0
4POLYGON ((261 59, 261 60, 260 60, 248 60, 248 ...255.0
\n", + "
" + ], + "text/plain": [ + " geometry value\n", + "0 POLYGON ((542 0, 542 1, 545 1, 545 2, 546 2, 5... 255.0\n", + "1 POLYGON ((202 52, 202 53, 201 53, 190 53, 190 ... 255.0\n", + "2 POLYGON ((339 54, 339 72, 340 72, 340 78, 352 ... 255.0\n", + "3 POLYGON ((548 55, 548 56, 547 56, 542 56, 542 ... 255.0\n", + "4 POLYGON ((261 59, 261 60, 260 60, 248 60, 248 ... 255.0" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "geoms = sol.vector.mask.mask_to_poly_geojson(mask_image[:, :, 0])\n", + "geoms.head()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "There's the output! We'll use a shapely convenience function to visualize them:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from shapely.ops import cascaded_union\n", + "cascaded_union(geoms['geometry'])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And there the geometries are! Jus like the input mask (flipped vertically because they count up instead of down; if you georeference your outputs, this won't matter.)\n", + "\n", + "What if we want to use some complicated logic around a multi-channel mask to generate predictions? For example, what if we want to predict where edges and contact points are, then subtract those values to make sure we separate buildings well (a common challenge for building footprint extraction algorithms!) To do so, we'll use the `channel_scaling` argument, which allows you to specify the following operation:" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "$$mask(x, y) = \\sum_{c}^{ } mask[x, y, c]\\times channel\\_scaling[c]$$" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Where c is the channel index. So, in this example, let's say we want to subtract the edges and contact layers from the footprint - we will set `channel_scaling=[1, -1, -1]`:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "geoms = sol.vector.mask.mask_to_poly_geojson(mask_image, channel_scaling=[1, -1, -1])\n", + "cascaded_union(geoms['geometry'])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Though not readily apparent in this particular example, this can be extremely useful with imperfect predictions." 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "solaris", + "language": "python", + "name": "solaris" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.7" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/docs/tutorials/notebooks/api_mask_to_vector.rst b/docs/tutorials/notebooks/api_mask_to_vector.rst deleted file mode 100644 index 0e74061b..00000000 --- a/docs/tutorials/notebooks/api_mask_to_vector.rst +++ /dev/null @@ -1,12 +0,0 @@ -Creating vector labels from a predicted mask -============================================ - -*Tutorial coming soon!* - -------------- - - -Follow us at our blog `The DownlinQ `_ or -`on Twitter `_ for updates! - -`Click here to view solaris on GitHub `_ diff --git a/docs/tutorials/notebooks/api_training_custom.ipynb b/docs/tutorials/notebooks/api_training_custom.ipynb new file mode 100644 index 00000000..d256bcca --- /dev/null +++ b/docs/tutorials/notebooks/api_training_custom.ipynb @@ -0,0 +1,185 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Training your own custom model using `solaris`\n", + "\n", + "If you want to go beyond using [the pretrained models in solaris](../../pretrained_models.html), you can train your own. Here's a primer on how to do so; we'll walk through training the [SpaceNet 4 Baseline model](https://github.com/cosmiq/cosmiq_sn4_baseline) from scratch. If you want to use one of the existing models in `solaris`, [check out this tutorial](api_training_spacenet.ipynb).\n", + "\n", + "First, you'll need to [create a YAML config file](creating_the_yaml_config_file.ipynb) for your model. This config should differ from a pre-trained model's config in a couple of key places:\n", + "\n", + "- model_name: Don't use one of the model names for a pre-trained model in solaris; give it another name.\n", + "- model_path: If you have pre-trained weights to load in, put the path to those weights here; otherwise, leave it blank.\n", + "\n", + "Fill out all of the model-specific parameters (width/height of inputs, mask channels, the neural network framework, optimizer, learning rate, etc.) according to the model you plan to use.\n", + "\n", + "Next, you'll need to create your model. See below for the SpaceNet 4 Baseline example:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, Conv2DTranspose\n", + "from tensorflow.keras.layers import concatenate, BatchNormalization, Dropout\n", + "from tensorflow.keras import Model\n", + "\n", + "def cosmiq_sn4_baseline(input_shape=(512, 512, 3), base_depth=64):\n", + " \"\"\"Keras implementation of untrained TernausNet model architecture.\n", + "\n", + " Arguments:\n", + " ----------\n", + " input_shape (3-tuple): a tuple defining the shape of the input image.\n", + " base_depth (int): the base convolution filter depth for the first layer\n", + " of the model. Must be divisible by two, as the final layer uses\n", + " base_depth/2 filters.
The default value, 64, corresponds to the\n", + " original TernausNetV1 depth.\n", + "\n", + " Returns:\n", + " --------\n", + " An uncompiled Keras Model instance with TernausNetV1 architecture.\n", + "\n", + " \"\"\"\n", + " inputs = Input(input_shape)\n", + " conv1 = Conv2D(base_depth, 3, activation='relu', padding='same')(inputs)\n", + " pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n", + "\n", + " conv2_1 = Conv2D(base_depth*2, 3, activation='relu',\n", + " padding='same')(pool1)\n", + " pool2 = MaxPooling2D(pool_size=(2, 2))(conv2_1)\n", + "\n", + " conv3_1 = Conv2D(base_depth*4, 3, activation='relu',\n", + " padding='same')(pool2)\n", + " conv3_2 = Conv2D(base_depth*4, 3, activation='relu',\n", + " padding='same')(conv3_1)\n", + " pool3 = MaxPooling2D(pool_size=(2, 2))(conv3_2)\n", + "\n", + " conv4_1 = Conv2D(base_depth*8, 3, activation='relu',\n", + " padding='same')(pool3)\n", + " conv4_2 = Conv2D(base_depth*8, 3, activation='relu',\n", + " padding='same')(conv4_1)\n", + " pool4 = MaxPooling2D(pool_size=(2, 2))(conv4_2)\n", + "\n", + " conv5_1 = Conv2D(base_depth*8, 3, activation='relu',\n", + " padding='same')(pool4)\n", + " conv5_2 = Conv2D(base_depth*8, 3, activation='relu',\n", + " padding='same')(conv5_1)\n", + " pool5 = MaxPooling2D(pool_size=(2, 2))(conv5_2)\n", + "\n", + " conv6_1 = Conv2D(base_depth*8, 3, activation='relu',\n", + " padding='same')(pool5)\n", + "\n", + " up7 = Conv2DTranspose(base_depth*4, 2, strides=(2, 2), activation='relu',\n", + " padding='same')(conv6_1)\n", + " concat7 = concatenate([up7, conv5_2])\n", + " conv7_1 = Conv2D(base_depth*8, 3, activation='relu',\n", + " padding='same')(concat7)\n", + "\n", + " up8 = Conv2DTranspose(base_depth*4, 2, strides=(2, 2), activation='relu',\n", + " padding='same')(conv7_1)\n", + " concat8 = concatenate([up8, conv4_2])\n", + " conv8_1 = Conv2D(base_depth*8, 3, activation='relu',\n", + " padding='same')(concat8)\n", + "\n", + " up9 = Conv2DTranspose(base_depth*2, 2, strides=(2, 2), activation='relu',\n", + " padding='same')(conv8_1)\n", + " concat9 = concatenate([up9, conv3_2])\n", + " conv9_1 = Conv2D(base_depth*4, 3, activation='relu',\n", + " padding='same')(concat9)\n", + "\n", + " up10 = Conv2DTranspose(base_depth, 2, strides=(2, 2), activation='relu',\n", + " padding='same')(conv9_1)\n", + " concat10 = concatenate([up10, conv2_1])\n", + " conv10_1 = Conv2D(base_depth*2, 3, activation='relu',\n", + " padding='same')(concat10)\n", + "\n", + " up11 = Conv2DTranspose(int(base_depth/2), 2, strides=(2, 2),\n", + " activation='relu', padding='same')(conv10_1)\n", + " concat11 = concatenate([up11, conv1])\n", + "\n", + " out = Conv2D(1, 1, activation='sigmoid', padding='same')(concat11)\n", + "\n", + " return Model(inputs=inputs, outputs=out)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, you'll pass that model to a custom model dictionary for the `solaris` model trainer." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "custom_model_dict = {'model_name': 'cosmiq_sn4_baseline',\n", + " 'weight_path': None,\n", + " 'weight_url': None,\n", + " 'arch': cosmiq_sn4_baseline}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now you can follow roughly the same process as for a pre-trained model: load in the config file, then create your trainer. 
The major difference here is that you'll pass an additional argument to the trainer, `custom_model_dict`, which provides the model architecture:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import solaris as sol\n", + "\n", + "config = sol.utils.config.parse('/Users/nweir/code/cosmiq_repos/solaris/cosmiq_sn4_baseline.yml')\n", + "trainer = sol.nets.train.Trainer(config, custom_model_dict=custom_model_dict)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "At this point, you can treat training as you would for a pre-trained model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "trainer.train()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "solaris", + "language": "python", + "name": "solaris" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.7" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/docs/tutorials/notebooks/api_training_custom.rst b/docs/tutorials/notebooks/api_training_custom.rst deleted file mode 100644 index dbee7221..00000000 --- a/docs/tutorials/notebooks/api_training_custom.rst +++ /dev/null @@ -1,14 +0,0 @@ -Training a custom model with the `solaris` python API -===================================================== - -*Tutorial coming soon!* - -In the meantime, you can check out our `tutorial on running a model using the CLI `_ - -------------- - - -Follow us at our blog `The DownlinQ `_ or -`on Twitter `_ for updates! - -`Click here to view solaris on GitHub `_ diff --git a/docs/tutorials/notebooks/api_training_spacenet.ipynb b/docs/tutorials/notebooks/api_training_spacenet.ipynb new file mode 100644 index 00000000..ac8f0b45 --- /dev/null +++ b/docs/tutorials/notebooks/api_training_spacenet.ipynb @@ -0,0 +1,155 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Training included SpaceNet models with the `solaris` Python API\n", + "\n", + "We've included a number of SpaceNet models with `solaris`, including pre-trained model weights. You can find more information about your model choices [here](../../pretrained_models.html) and the original competitors' code for the models [here](https://github.com/spacenetchallenge/spacenet_off_nadir_solutions).\n", + "\n", + "For this tutorial we'll walk through training a model using XD_XD's SpaceNet 4 model. We'll use the config file for that model, which you can find [here](https://github.com/CosmiQ/solaris/blob/master/solaris/nets/configs/xdxd_spacenet4.yml).\n", + "\n", + "You'll also need to [create training masks](api_masks_tutorial.ipynb) and [create the image reference files](creating_im_reference_csvs.ipynb) before you start.\n", + "\n", + "Once you've completed those steps, you can get down to model training! 
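One aside before we do: the config you're about to load points at image reference CSVs via its 'training_data_csv' and 'inference_data_csv' entries. As a minimal sketch, assuming the two-column image/label layout described in the image reference tutorial linked above (all paths here are hypothetical), such a file could be built like this:\n", + "\n", + "```python\n", + "import pandas as pd\n", + "\n", + "# hypothetical paths; each row pairs a training image with its mask label\n", + "train_df = pd.DataFrame({\n", + "    'image': ['/path/to/images/tile_001.tif'],\n", + "    'label': ['/path/to/masks/tile_001.tif']})\n", + "train_df.to_csv('/path/to/training_df.csv', index=False)\n", + "```\n", + "\n", + "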
First, let's load in the configuration:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'model_name': 'xdxd_spacenet4',\n", + " 'model_path': None,\n", + " 'train': False,\n", + " 'infer': True,\n", + " 'pretrained': True,\n", + " 'nn_framework': 'torch',\n", + " 'batch_size': 12,\n", + " 'data_specs': {'width': 512,\n", + " 'height': 512,\n", + " 'image_type': 'zscore',\n", + " 'rescale': False,\n", + " 'rescale_minima': 'auto',\n", + " 'rescale_maxima': 'auto',\n", + " 'channels': 4,\n", + " 'label_type': 'mask',\n", + " 'is_categorical': False,\n", + " 'mask_channels': 1,\n", + " 'val_holdout_frac': 0.2,\n", + " 'data_workers': None},\n", + " 'training_data_csv': '/path/to/training_df.csv',\n", + " 'validation_data_csv': None,\n", + " 'inference_data_csv': '/path/to/test_df.csv',\n", + " 'training_augmentation': {'augmentations': {'DropChannel': {'idx': 3,\n", + " 'axis': 2},\n", + " 'HorizontalFlip': {'p': 0.5},\n", + " 'RandomRotate90': {'p': 0.5},\n", + " 'RandomCrop': {'height': 512, 'width': 512, 'p': 1.0},\n", + " 'Normalize': {'mean': [0.006479, 0.009328, 0.01123],\n", + " 'std': [0.004986, 0.004964, 0.00495],\n", + " 'max_pixel_value': 65535.0,\n", + " 'p': 1.0}},\n", + " 'p': 1.0,\n", + " 'shuffle': True},\n", + " 'validation_augmentation': {'augmentations': {'DropChannel': {'idx': 3,\n", + " 'axis': 2},\n", + " 'CenterCrop': {'height': 512, 'width': 512, 'p': 1.0},\n", + " 'Normalize': {'mean': [0.006479, 0.009328, 0.01123],\n", + " 'std': [0.004986, 0.004964, 0.00495],\n", + " 'max_pixel_value': 65535.0,\n", + " 'p': 1.0}},\n", + " 'p': 1.0},\n", + " 'inference_augmentation': {'augmentations': {'DropChannel': {'idx': 3,\n", + " 'axis': 2,\n", + " 'p': 1.0},\n", + " 'Normalize': {'mean': [0.006479, 0.009328, 0.01123],\n", + " 'std': [0.004986, 0.004964, 0.00495],\n", + " 'max_pixel_value': 65535.0,\n", + " 'p': 1.0}},\n", + " 'p': 1.0},\n", + " 'training': {'epochs': 60,\n", + " 'steps_per_epoch': None,\n", + " 'optimizer': 'Adam',\n", + " 'lr': 0.0001,\n", + " 'opt_args': None,\n", + " 'loss': {'bcewithlogits': None, 'jaccard': None},\n", + " 'loss_weights': {'bcewithlogits': 10, 'jaccard': 2.5},\n", + " 'metrics': {'training': None, 'validation': None},\n", + " 'checkpoint_frequency': 10,\n", + " 'callbacks': {'model_checkpoint': {'filepath': 'xdxd_best.pth',\n", + " 'monitor': 'val_loss'}},\n", + " 'model_dest_path': 'xdxd.pth',\n", + " 'verbose': True},\n", + " 'inference': {'window_step_size_x': None,\n", + " 'window_step_size_y': None,\n", + " 'output_dir': 'inference_out/'}}" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import solaris as sol\n", + "\n", + "config = sol.utils.config.parse('/path/to/xdxd_spacenet4.yml')\n", + "config" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As you can see, the YAML gets parsed into a set of nested dictionaries by `solaris`. Relevant pieces of that config then get read during training.\n", + "\n", + "Let's assume that all of the paths in that config are correct. Now, to run training, you can run the following line:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "trainer = sol.nets.train.Trainer(config)\n", + "trainer.train()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "That's all there is to it! 
With just four lines of Python code, you can train a model for geospatial ML!\n", + "\n", + "If you wish to use a custom ML model not provided by `solaris`, check out [the custom model training tutorial](api_training_custom.ipynb)." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "solaris", + "language": "python", + "name": "solaris" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.7" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/docs/tutorials/notebooks/api_training_spacenet.rst b/docs/tutorials/notebooks/api_training_spacenet.rst deleted file mode 100644 index a0619206..00000000 --- a/docs/tutorials/notebooks/api_training_spacenet.rst +++ /dev/null @@ -1,14 +0,0 @@ -Training a model with SpaceNet data -=================================== - -*Tutorial coming soon!* - -In the meantime, you can check out our `tutorial on running a model using the CLI `_ - -------------- - - -Follow us at our blog `The DownlinQ `_ or -`on Twitter `_ for updates! - -`Click here to view solaris on GitHub `_ diff --git a/docs/tutorials/notebooks/cli_evaluation_tutorial.rst b/docs/tutorials/notebooks/cli_evaluation_tutorial.rst deleted file mode 100644 index 43a589da..00000000 --- a/docs/tutorials/notebooks/cli_evaluation_tutorial.rst +++ /dev/null @@ -1,14 +0,0 @@ -Evaluating model performance with the `solaris` CLI -=================================================== - -*Coming soon!* - -In the meantime, you can check out our `tutorial on evaluating model performance using the Python API `_ - -------------- - - -Follow us at our blog `The DownlinQ `_ or -`on Twitter `_ for updates! - -`Click here to view solaris on GitHub `_ diff --git a/docs/tutorials/notebooks/cli_spacenet_evaluation.ipynb b/docs/tutorials/notebooks/cli_spacenet_evaluation.ipynb new file mode 100644 index 00000000..a283972f --- /dev/null +++ b/docs/tutorials/notebooks/cli_spacenet_evaluation.ipynb @@ -0,0 +1,488 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Using the `solaris` CLI to score model performance\n", + "\n", + "Once you have [generated your model predictions](cli_ml_pipeline.ipynb) and [converted predictions to vector format](api_mask_to_vector.ipynb), you'll be ready to score your predictions! Let's go through a test case for some \"predictions\" from the SpaceNet 4 dataset. Just to show you what those look like:\n", + "\n", + "## Ground truth and prediction data formats\n", + "\n", + "#### Predictions" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
ImageIdBuildingIdPolygonWKT_PixConfidence
0Atlanta_nadir8_catid_10300100023BC100_743501_3...0POLYGON ((0.00 712.83, 158.37 710.28, 160.59 6...1
1Atlanta_nadir8_catid_10300100023BC100_743501_3...1POLYGON ((665.82 0.00, 676.56 1.50, 591.36 603...1
2Atlanta_nadir8_catid_10300100023BC100_743501_3...0POLYGON ((182.62 324.15, 194.25 323.52, 197.97...1
3Atlanta_nadir8_catid_10300100023BC100_743501_3...1POLYGON ((92.99 96.94, 117.20 99.64, 114.72 12...1
4Atlanta_nadir8_catid_10300100023BC100_743501_3...2POLYGON ((0.82 29.96, 3.48 40.71, 2.80 51.00, ...1
\n", + "
" + ], + "text/plain": [ + " ImageId BuildingId \\\n", + "0 Atlanta_nadir8_catid_10300100023BC100_743501_3... 0 \n", + "1 Atlanta_nadir8_catid_10300100023BC100_743501_3... 1 \n", + "2 Atlanta_nadir8_catid_10300100023BC100_743501_3... 0 \n", + "3 Atlanta_nadir8_catid_10300100023BC100_743501_3... 1 \n", + "4 Atlanta_nadir8_catid_10300100023BC100_743501_3... 2 \n", + "\n", + " PolygonWKT_Pix Confidence \n", + "0 POLYGON ((0.00 712.83, 158.37 710.28, 160.59 6... 1 \n", + "1 POLYGON ((665.82 0.00, 676.56 1.50, 591.36 603... 1 \n", + "2 POLYGON ((182.62 324.15, 194.25 323.52, 197.97... 1 \n", + "3 POLYGON ((92.99 96.94, 117.20 99.64, 114.72 12... 1 \n", + "4 POLYGON ((0.82 29.96, 3.48 40.71, 2.80 51.00, ... 1 " + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import pandas as pd\n", + "import solaris as sol\n", + "import os\n", + "\n", + "preds = pd.read_csv(os.path.join(sol.data.data_dir, 'sample_preds_competition.csv'))\n", + "preds.head()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The file shows the image ID, the polygon geometry in WKT format, and a `BuildingId` counter to distinguish between buildings in a single image. The `Confidence` field in this case has no meaning, but can be provided if desired.\n", + "\n", + "#### Ground Truth" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
ImageIdBuildingIdPolygonWKT_PixPolygonWKT_Geo
0Atlanta_nadir8_catid_10300100023BC100_743501_3...0POLYGON ((476.88 884.61, 485.59 877.64, 490.50...1
1Atlanta_nadir8_catid_10300100023BC100_743501_3...1POLYGON ((459.45 858.97, 467.41 853.09, 463.37...1
2Atlanta_nadir8_catid_10300100023BC100_743501_3...2POLYGON ((407.34 754.17, 434.90 780.55, 420.27...1
3Atlanta_nadir8_catid_10300100023BC100_743501_3...3POLYGON ((311.00 760.22, 318.38 746.78, 341.02...1
4Atlanta_nadir8_catid_10300100023BC100_743501_3...4POLYGON ((490.49 742.67, 509.81 731.14, 534.12...1
\n", + "
" + ], + "text/plain": [ + " ImageId BuildingId \\\n", + "0 Atlanta_nadir8_catid_10300100023BC100_743501_3... 0 \n", + "1 Atlanta_nadir8_catid_10300100023BC100_743501_3... 1 \n", + "2 Atlanta_nadir8_catid_10300100023BC100_743501_3... 2 \n", + "3 Atlanta_nadir8_catid_10300100023BC100_743501_3... 3 \n", + "4 Atlanta_nadir8_catid_10300100023BC100_743501_3... 4 \n", + "\n", + " PolygonWKT_Pix PolygonWKT_Geo \n", + "0 POLYGON ((476.88 884.61, 485.59 877.64, 490.50... 1 \n", + "1 POLYGON ((459.45 858.97, 467.41 853.09, 463.37... 1 \n", + "2 POLYGON ((407.34 754.17, 434.90 780.55, 420.27... 1 \n", + "3 POLYGON ((311.00 760.22, 318.38 746.78, 341.02... 1 \n", + "4 POLYGON ((490.49 742.67, 509.81 731.14, 534.12... 1 " + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "truth = pd.read_csv(os.path.join(sol.data.data_dir, 'sample_truth_competition.csv'))\n", + "truth.head()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "More or less the same thing. So, how does scoring work?\n", + "\n", + "\n", + "## Scoring functions in the `solaris` CLI\n", + "\n", + "Once you have [installed solaris](../../installation.html), you will have access to the `spacenet_eval` command in your command line prompt. This command has a number of possible arguments to control mask creation, described below. If you need a refresher on these within your command line, you can always run `spacenet_eval -h` for usage instructions.\n", + "\n", + "### `spacenet_eval` arguments\n", + "\n", + "- __--proposal\\_csv__, __-p__: \\[str\\] The full path to a CSV-formatted proposal file containing the same columns shown above.\n", + "- __--truth\\_csv__, __-t__: \\[str\\] The full path to a CSV-formatted ground truth file containing the same columns shown above.\n", + "- __--challenge__, __-c__, \\[str, one of `('off-nadir', 'spacenet-buildings2')` \\] The challenge being scored. Because the SpaceNet Off-Nadir Building Footprint Extraction Challenge was scored slightly differently from previous challenges to accommodate the different look angles, the challenge type must be specified here.\n", + "- __--output\\_file__, __-o__: \\[str\\] The path to the output files to be saved. Two files will be saved: the summary file with the name provided in this argument, and one with `'_full'` added before the `'.csv'` extension, which contains the image-by-image breakdown of scores.\n", + "\n", + "### `spacenet_eval` CLI usage example\n", + "\n", + "Assuming you have the two files shown above as your examples:\n", + "\n", + "\n", + "```console\n", + "$ spacenet_eval --proposal_csv /path/to/sample_preds_competition.csv --truth_csv /path/to/sample_truth_competition.csv --challenge 'off-nadir' --output_file /path/to/outputs.csv\n", + "```\n", + "\n", + "Let's look at what the outputs would look like:\n", + "\n", + "#### Summary" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
F1ScoreFalseNegFalsePosPrecisionRecallTruePos
01.0001.01.02319
\n", + "
" + ], + "text/plain": [ + " F1Score FalseNeg FalsePos Precision Recall TruePos\n", + "0 1.0 0 0 1.0 1.0 2319" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "result_summary = pd.read_csv(os.path.join(sol.data.data_dir, 'competition_test_results.csv'))\n", + "result_summary" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this case, the score is perfect because the predictions and ground truth were literally identical.\n", + "\n", + "Here's the image-by-image breakout:\n", + "\n", + "#### Detailed results" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
F1ScoreFalseNegFalsePosPrecisionRecallTruePosimageIDiou_fieldnadir-category
01.0001.01.080Atlanta_nadir8_catid_10300100023BC100_743501_3...iou_scoreNadir
11.0001.01.0112Atlanta_nadir8_catid_10300100023BC100_743501_3...iou_scoreNadir
21.0001.01.072Atlanta_nadir8_catid_10300100023BC100_743501_3...iou_scoreNadir
31.0001.01.01Atlanta_nadir8_catid_10300100023BC100_743501_3...iou_scoreNadir
41.0001.01.052Atlanta_nadir8_catid_10300100023BC100_743501_3...iou_scoreNadir
\n", + "
" + ], + "text/plain": [ + " F1Score FalseNeg FalsePos Precision Recall TruePos \\\n", + "0 1.0 0 0 1.0 1.0 80 \n", + "1 1.0 0 0 1.0 1.0 112 \n", + "2 1.0 0 0 1.0 1.0 72 \n", + "3 1.0 0 0 1.0 1.0 1 \n", + "4 1.0 0 0 1.0 1.0 52 \n", + "\n", + " imageID iou_field nadir-category \n", + "0 Atlanta_nadir8_catid_10300100023BC100_743501_3... iou_score Nadir \n", + "1 Atlanta_nadir8_catid_10300100023BC100_743501_3... iou_score Nadir \n", + "2 Atlanta_nadir8_catid_10300100023BC100_743501_3... iou_score Nadir \n", + "3 Atlanta_nadir8_catid_10300100023BC100_743501_3... iou_score Nadir \n", + "4 Atlanta_nadir8_catid_10300100023BC100_743501_3... iou_score Nadir " + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "full_result = pd.read_csv(os.path.join(sol.data.data_dir, 'competition_test_results_full.csv'))\n", + "full_result.head()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "These are five rows from the full result file, where each row indicates the scores for a single image chip." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "solaris", + "language": "python", + "name": "solaris" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.7" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/environment-gpu.yml b/environment-gpu.yml index 2fc96fa0..2b8f6027 100644 --- a/environment-gpu.yml +++ b/environment-gpu.yml @@ -18,17 +18,14 @@ dependencies: - rtree=0.8.3 - urllib3=1.24.3 - networkx=2.3 - - rasterio=1.0.22 + - rasterio=1.0.24 - scipy=1.2.1 - scikit-image=0.15.0 - tensorflow-gpu=1.13.1 - - cuda92 - pytorch=1.1.0 + - cuda92 - torchvision=0.3.0 - pyyaml=5.1 - - pyproj=2.1.3 - affine=2.2.2 - - pip: - - albumentations==0.2.3 -# - rio-tiler==1.2.7 - - rio-cogeo==1.0.0 + - albumentations>0.2.3 + - rio-cogeo=1.0.0 diff --git a/environment.yml b/environment.yml index dc56ea7b..e732149c 100644 --- a/environment.yml +++ b/environment.yml @@ -9,7 +9,7 @@ dependencies: - shapely=1.6.4 - fiona=1.8.6 - pandas=0.24.2 - - geopandas=0.5.0 + - geopandas>=0.4.1 - opencv=3.4.4 - numpy=1.16.3 - gdal=2.4.1 @@ -18,16 +18,13 @@ dependencies: - rtree=0.8.3 - urllib3=1.24.3 - networkx=2.3 - - rasterio=1.0.22 + - rasterio=1.0.23 - scipy=1.2.1 - scikit-image=0.15.0 - tensorflow=1.13.1 - pytorch=1.1.0 - torchvision=0.3.0 - pyyaml=5.1 - - pyproj=2.1.3 - affine=2.2.2 - - pip: - - albumentations==0.2.3 -# - rio-tiler==1.2.7 - - rio-cogeo==1.0.0 + - albumentations>0.2.3 + - rio-cogeo=1.0.0 diff --git a/requirements.txt b/requirements.txt index e6dca00f..4a7a1b93 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ shapely==1.6.4.post2 fiona==1.8.6 pandas==0.24.2 -geopandas==0.5.0 +geopandas==0.5.1 opencv-python==4.1.0.25 numpy==1.16.4 tqdm==4.32.2 @@ -15,9 +15,10 @@ scikit-image==0.15.0 tensorflow==1.13.1 torch==1.1.0 torchvision==0.3.0 -affine==2.2.2 -albumentations==0.2.3 +affine==2.2.3 +requests==2.22.0 +albumentations==0.3.0 urllib3==1.24.3 pyyaml==5.1.1 -pyproj==2.2.0 +pyproj==2.2.1 rio-cogeo==1.0.0 diff --git a/setup.py b/setup.py index 9ff5aff6..334755f8 100644 --- a/setup.py +++ b/setup.py @@ -63,24 +63,25 @@ def check_output(cmd): else: inst_reqs = ['shapely>=1.6.4', 'fiona>=1.8.6', - 'pandas>=0.23.4', - 'geopandas>=0.4.0', + 'pandas>=0.24.2', + 'geopandas>=0.4.1', 'opencv-python==4.1.0.25', 
- 'numpy>=1.15.4', + 'numpy>=1.16.4', 'tqdm>=4.32.2', 'GDAL>=2.4.0', 'rtree>=0.8.3', - 'networkx>=2.2', + 'networkx>=2.3', 'rasterio>=1.0.18', - 'rio-cogeo>-1.0.0', - 'scipy>=1.2.0', + 'rio-cogeo>=1.0.0', + 'scipy>=1.3.0', 'urllib3==1.24.3', - 'scikit-image>=0.14.0', + 'scikit-image>=0.15.0', 'tensorflow==1.13.1', 'torch>=1.1.0', - 'matplotlib>=3.1.0', + 'matplotlib>=3.1.1', 'affine>=2.2.2', 'albumentations>=0.2.3', + 'requests>=2.22.0', # 'rio-tiler>=1.2.7', 'pyyaml>=5.1', 'torchvision>=0.3.0'] diff --git a/solaris/__init__.py b/solaris/__init__.py index b63ac150..27b40573 100644 --- a/solaris/__init__.py +++ b/solaris/__init__.py @@ -1,3 +1,3 @@ from . import bin, data, eval, nets, raster, tile, utils, vector -__version__ = "0.1.1" +__version__ = "0.1.2" diff --git a/solaris/data/inference_tiler_test_output.npy b/solaris/data/inference_tiler_test_output.npy index 0a470225..197d41f4 100644 Binary files a/solaris/data/inference_tiler_test_output.npy and b/solaris/data/inference_tiler_test_output.npy differ diff --git a/solaris/data/stitching_first_output.npy b/solaris/data/stitching_first_output.npy index 5f3c5534..63a79e37 100644 Binary files a/solaris/data/stitching_first_output.npy and b/solaris/data/stitching_first_output.npy differ diff --git a/solaris/data/vectortile_test_expected/geoms_733601_3724689.json b/solaris/data/vectortile_test_expected/geoms_733601_3724734.geojson similarity index 100% rename from solaris/data/vectortile_test_expected/geoms_733601_3724689.json rename to solaris/data/vectortile_test_expected/geoms_733601_3724734.geojson diff --git a/solaris/data/vectortile_test_expected/geoms_733601_3724734.json b/solaris/data/vectortile_test_expected/geoms_733601_3724779.geojson similarity index 100% rename from solaris/data/vectortile_test_expected/geoms_733601_3724734.json rename to solaris/data/vectortile_test_expected/geoms_733601_3724779.geojson diff --git a/solaris/data/vectortile_test_expected/geoms_733601_3724779.json b/solaris/data/vectortile_test_expected/geoms_733601_3724824.geojson similarity index 100% rename from solaris/data/vectortile_test_expected/geoms_733601_3724779.json rename to solaris/data/vectortile_test_expected/geoms_733601_3724824.geojson diff --git a/solaris/data/vectortile_test_expected/geoms_733601_3724824.json b/solaris/data/vectortile_test_expected/geoms_733601_3724869.geojson similarity index 100% rename from solaris/data/vectortile_test_expected/geoms_733601_3724824.json rename to solaris/data/vectortile_test_expected/geoms_733601_3724869.geojson diff --git a/solaris/data/vectortile_test_expected/geoms_733601_3724869.json b/solaris/data/vectortile_test_expected/geoms_733601_3724914.geojson similarity index 100% rename from solaris/data/vectortile_test_expected/geoms_733601_3724869.json rename to solaris/data/vectortile_test_expected/geoms_733601_3724914.geojson diff --git a/solaris/data/vectortile_test_expected/geoms_733601_3724914.json b/solaris/data/vectortile_test_expected/geoms_733601_3724959.geojson similarity index 100% rename from solaris/data/vectortile_test_expected/geoms_733601_3724914.json rename to solaris/data/vectortile_test_expected/geoms_733601_3724959.geojson diff --git a/solaris/data/vectortile_test_expected/geoms_733601_3724959.json b/solaris/data/vectortile_test_expected/geoms_733601_3725004.geojson similarity index 100% rename from solaris/data/vectortile_test_expected/geoms_733601_3724959.json rename to solaris/data/vectortile_test_expected/geoms_733601_3725004.geojson diff --git 
a/solaris/data/vectortile_test_expected/geoms_733601_3725004.json b/solaris/data/vectortile_test_expected/geoms_733601_3725049.geojson similarity index 100% rename from solaris/data/vectortile_test_expected/geoms_733601_3725004.json rename to solaris/data/vectortile_test_expected/geoms_733601_3725049.geojson diff --git a/solaris/data/vectortile_test_expected/geoms_733601_3725049.json b/solaris/data/vectortile_test_expected/geoms_733601_3725094.geojson similarity index 100% rename from solaris/data/vectortile_test_expected/geoms_733601_3725049.json rename to solaris/data/vectortile_test_expected/geoms_733601_3725094.geojson diff --git a/solaris/data/vectortile_test_expected/geoms_733601_3725094.json b/solaris/data/vectortile_test_expected/geoms_733601_3725139.geojson similarity index 100% rename from solaris/data/vectortile_test_expected/geoms_733601_3725094.json rename to solaris/data/vectortile_test_expected/geoms_733601_3725139.geojson diff --git a/solaris/data/vectortile_test_expected/geoms_733646_3724689.json b/solaris/data/vectortile_test_expected/geoms_733646_3724734.geojson similarity index 100% rename from solaris/data/vectortile_test_expected/geoms_733646_3724689.json rename to solaris/data/vectortile_test_expected/geoms_733646_3724734.geojson diff --git a/solaris/data/vectortile_test_expected/geoms_733646_3724734.json b/solaris/data/vectortile_test_expected/geoms_733646_3724779.geojson similarity index 100% rename from solaris/data/vectortile_test_expected/geoms_733646_3724734.json rename to solaris/data/vectortile_test_expected/geoms_733646_3724779.geojson diff --git a/solaris/data/vectortile_test_expected/geoms_733646_3724779.json b/solaris/data/vectortile_test_expected/geoms_733646_3724824.geojson similarity index 100% rename from solaris/data/vectortile_test_expected/geoms_733646_3724779.json rename to solaris/data/vectortile_test_expected/geoms_733646_3724824.geojson diff --git a/solaris/data/vectortile_test_expected/geoms_733646_3724824.json b/solaris/data/vectortile_test_expected/geoms_733646_3724869.geojson similarity index 100% rename from solaris/data/vectortile_test_expected/geoms_733646_3724824.json rename to solaris/data/vectortile_test_expected/geoms_733646_3724869.geojson diff --git a/solaris/data/vectortile_test_expected/geoms_733646_3724869.json b/solaris/data/vectortile_test_expected/geoms_733646_3724914.geojson similarity index 100% rename from solaris/data/vectortile_test_expected/geoms_733646_3724869.json rename to solaris/data/vectortile_test_expected/geoms_733646_3724914.geojson diff --git a/solaris/data/vectortile_test_expected/geoms_733646_3724914.json b/solaris/data/vectortile_test_expected/geoms_733646_3724959.geojson similarity index 100% rename from solaris/data/vectortile_test_expected/geoms_733646_3724914.json rename to solaris/data/vectortile_test_expected/geoms_733646_3724959.geojson diff --git a/solaris/data/vectortile_test_expected/geoms_733646_3724959.json b/solaris/data/vectortile_test_expected/geoms_733646_3725004.geojson similarity index 100% rename from solaris/data/vectortile_test_expected/geoms_733646_3724959.json rename to solaris/data/vectortile_test_expected/geoms_733646_3725004.geojson diff --git a/solaris/data/vectortile_test_expected/geoms_733646_3725004.json b/solaris/data/vectortile_test_expected/geoms_733646_3725049.geojson similarity index 100% rename from solaris/data/vectortile_test_expected/geoms_733646_3725004.json rename to solaris/data/vectortile_test_expected/geoms_733646_3725049.geojson diff --git 
a/solaris/data/vectortile_test_expected/geoms_733646_3725049.json b/solaris/data/vectortile_test_expected/geoms_733646_3725094.geojson similarity index 100% rename from solaris/data/vectortile_test_expected/geoms_733646_3725049.json rename to solaris/data/vectortile_test_expected/geoms_733646_3725094.geojson diff --git a/solaris/data/vectortile_test_expected/geoms_733646_3725094.json b/solaris/data/vectortile_test_expected/geoms_733646_3725139.geojson similarity index 100% rename from solaris/data/vectortile_test_expected/geoms_733646_3725094.json rename to solaris/data/vectortile_test_expected/geoms_733646_3725139.geojson diff --git a/solaris/data/vectortile_test_expected/geoms_733691_3724689.json b/solaris/data/vectortile_test_expected/geoms_733691_3724734.geojson similarity index 100% rename from solaris/data/vectortile_test_expected/geoms_733691_3724689.json rename to solaris/data/vectortile_test_expected/geoms_733691_3724734.geojson diff --git a/solaris/data/vectortile_test_expected/geoms_733691_3724734.json b/solaris/data/vectortile_test_expected/geoms_733691_3724779.geojson similarity index 100% rename from solaris/data/vectortile_test_expected/geoms_733691_3724734.json rename to solaris/data/vectortile_test_expected/geoms_733691_3724779.geojson diff --git a/solaris/data/vectortile_test_expected/geoms_733691_3724779.json b/solaris/data/vectortile_test_expected/geoms_733691_3724824.geojson similarity index 100% rename from solaris/data/vectortile_test_expected/geoms_733691_3724779.json rename to solaris/data/vectortile_test_expected/geoms_733691_3724824.geojson diff --git a/solaris/data/vectortile_test_expected/geoms_733691_3724824.json b/solaris/data/vectortile_test_expected/geoms_733691_3724869.geojson similarity index 100% rename from solaris/data/vectortile_test_expected/geoms_733691_3724824.json rename to solaris/data/vectortile_test_expected/geoms_733691_3724869.geojson diff --git a/solaris/data/vectortile_test_expected/geoms_733691_3724869.json b/solaris/data/vectortile_test_expected/geoms_733691_3724914.geojson similarity index 100% rename from solaris/data/vectortile_test_expected/geoms_733691_3724869.json rename to solaris/data/vectortile_test_expected/geoms_733691_3724914.geojson diff --git a/solaris/data/vectortile_test_expected/geoms_733691_3724914.json b/solaris/data/vectortile_test_expected/geoms_733691_3724959.geojson similarity index 100% rename from solaris/data/vectortile_test_expected/geoms_733691_3724914.json rename to solaris/data/vectortile_test_expected/geoms_733691_3724959.geojson diff --git a/solaris/data/vectortile_test_expected/geoms_733691_3724959.json b/solaris/data/vectortile_test_expected/geoms_733691_3725004.geojson similarity index 100% rename from solaris/data/vectortile_test_expected/geoms_733691_3724959.json rename to solaris/data/vectortile_test_expected/geoms_733691_3725004.geojson diff --git a/solaris/data/vectortile_test_expected/geoms_733691_3725004.json b/solaris/data/vectortile_test_expected/geoms_733691_3725049.geojson similarity index 100% rename from solaris/data/vectortile_test_expected/geoms_733691_3725004.json rename to solaris/data/vectortile_test_expected/geoms_733691_3725049.geojson diff --git a/solaris/data/vectortile_test_expected/geoms_733691_3725049.json b/solaris/data/vectortile_test_expected/geoms_733691_3725094.geojson similarity index 100% rename from solaris/data/vectortile_test_expected/geoms_733691_3725049.json rename to solaris/data/vectortile_test_expected/geoms_733691_3725094.geojson diff --git 
a/solaris/data/vectortile_test_expected/geoms_733691_3725094.json b/solaris/data/vectortile_test_expected/geoms_733691_3725139.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733691_3725094.json
rename to solaris/data/vectortile_test_expected/geoms_733691_3725139.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733736_3724689.json b/solaris/data/vectortile_test_expected/geoms_733736_3724734.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733736_3724689.json
rename to solaris/data/vectortile_test_expected/geoms_733736_3724734.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733736_3724734.json b/solaris/data/vectortile_test_expected/geoms_733736_3724779.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733736_3724734.json
rename to solaris/data/vectortile_test_expected/geoms_733736_3724779.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733736_3724779.json b/solaris/data/vectortile_test_expected/geoms_733736_3724824.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733736_3724779.json
rename to solaris/data/vectortile_test_expected/geoms_733736_3724824.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733736_3724824.json b/solaris/data/vectortile_test_expected/geoms_733736_3724869.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733736_3724824.json
rename to solaris/data/vectortile_test_expected/geoms_733736_3724869.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733736_3724869.json b/solaris/data/vectortile_test_expected/geoms_733736_3724914.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733736_3724869.json
rename to solaris/data/vectortile_test_expected/geoms_733736_3724914.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733736_3724914.json b/solaris/data/vectortile_test_expected/geoms_733736_3724959.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733736_3724914.json
rename to solaris/data/vectortile_test_expected/geoms_733736_3724959.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733736_3724959.json b/solaris/data/vectortile_test_expected/geoms_733736_3725004.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733736_3724959.json
rename to solaris/data/vectortile_test_expected/geoms_733736_3725004.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733736_3725004.json b/solaris/data/vectortile_test_expected/geoms_733736_3725049.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733736_3725004.json
rename to solaris/data/vectortile_test_expected/geoms_733736_3725049.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733736_3725049.json b/solaris/data/vectortile_test_expected/geoms_733736_3725094.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733736_3725049.json
rename to solaris/data/vectortile_test_expected/geoms_733736_3725094.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733736_3725094.json b/solaris/data/vectortile_test_expected/geoms_733736_3725139.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733736_3725094.json
rename to solaris/data/vectortile_test_expected/geoms_733736_3725139.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733781_3724689.json b/solaris/data/vectortile_test_expected/geoms_733781_3724734.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733781_3724689.json
rename to solaris/data/vectortile_test_expected/geoms_733781_3724734.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733781_3724734.json b/solaris/data/vectortile_test_expected/geoms_733781_3724779.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733781_3724734.json
rename to solaris/data/vectortile_test_expected/geoms_733781_3724779.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733781_3724779.json b/solaris/data/vectortile_test_expected/geoms_733781_3724824.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733781_3724779.json
rename to solaris/data/vectortile_test_expected/geoms_733781_3724824.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733781_3724824.json b/solaris/data/vectortile_test_expected/geoms_733781_3724869.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733781_3724824.json
rename to solaris/data/vectortile_test_expected/geoms_733781_3724869.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733781_3724869.json b/solaris/data/vectortile_test_expected/geoms_733781_3724914.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733781_3724869.json
rename to solaris/data/vectortile_test_expected/geoms_733781_3724914.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733781_3724914.json b/solaris/data/vectortile_test_expected/geoms_733781_3724959.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733781_3724914.json
rename to solaris/data/vectortile_test_expected/geoms_733781_3724959.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733781_3724959.json b/solaris/data/vectortile_test_expected/geoms_733781_3725004.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733781_3724959.json
rename to solaris/data/vectortile_test_expected/geoms_733781_3725004.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733781_3725004.json b/solaris/data/vectortile_test_expected/geoms_733781_3725049.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733781_3725004.json
rename to solaris/data/vectortile_test_expected/geoms_733781_3725049.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733781_3725049.json b/solaris/data/vectortile_test_expected/geoms_733781_3725094.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733781_3725049.json
rename to solaris/data/vectortile_test_expected/geoms_733781_3725094.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733781_3725094.json b/solaris/data/vectortile_test_expected/geoms_733781_3725139.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733781_3725094.json
rename to solaris/data/vectortile_test_expected/geoms_733781_3725139.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733826_3724689.json b/solaris/data/vectortile_test_expected/geoms_733826_3724734.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733826_3724689.json
rename to solaris/data/vectortile_test_expected/geoms_733826_3724734.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733826_3724734.json b/solaris/data/vectortile_test_expected/geoms_733826_3724779.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733826_3724734.json
rename to solaris/data/vectortile_test_expected/geoms_733826_3724779.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733826_3724779.json b/solaris/data/vectortile_test_expected/geoms_733826_3724824.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733826_3724779.json
rename to solaris/data/vectortile_test_expected/geoms_733826_3724824.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733826_3724824.json b/solaris/data/vectortile_test_expected/geoms_733826_3724869.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733826_3724824.json
rename to solaris/data/vectortile_test_expected/geoms_733826_3724869.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733826_3724869.json b/solaris/data/vectortile_test_expected/geoms_733826_3724914.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733826_3724869.json
rename to solaris/data/vectortile_test_expected/geoms_733826_3724914.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733826_3724914.json b/solaris/data/vectortile_test_expected/geoms_733826_3724959.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733826_3724914.json
rename to solaris/data/vectortile_test_expected/geoms_733826_3724959.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733826_3724959.json b/solaris/data/vectortile_test_expected/geoms_733826_3725004.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733826_3724959.json
rename to solaris/data/vectortile_test_expected/geoms_733826_3725004.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733826_3725004.json b/solaris/data/vectortile_test_expected/geoms_733826_3725049.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733826_3725004.json
rename to solaris/data/vectortile_test_expected/geoms_733826_3725049.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733826_3725049.json b/solaris/data/vectortile_test_expected/geoms_733826_3725094.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733826_3725049.json
rename to solaris/data/vectortile_test_expected/geoms_733826_3725094.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733826_3725094.json b/solaris/data/vectortile_test_expected/geoms_733826_3725139.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733826_3725094.json
rename to solaris/data/vectortile_test_expected/geoms_733826_3725139.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733871_3724689.json b/solaris/data/vectortile_test_expected/geoms_733871_3724734.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733871_3724689.json
rename to solaris/data/vectortile_test_expected/geoms_733871_3724734.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733871_3724734.json b/solaris/data/vectortile_test_expected/geoms_733871_3724779.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733871_3724734.json
rename to solaris/data/vectortile_test_expected/geoms_733871_3724779.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733871_3724779.json b/solaris/data/vectortile_test_expected/geoms_733871_3724824.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733871_3724779.json
rename to solaris/data/vectortile_test_expected/geoms_733871_3724824.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733871_3724824.json b/solaris/data/vectortile_test_expected/geoms_733871_3724869.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733871_3724824.json
rename to solaris/data/vectortile_test_expected/geoms_733871_3724869.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733871_3724869.json b/solaris/data/vectortile_test_expected/geoms_733871_3724914.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733871_3724869.json
rename to solaris/data/vectortile_test_expected/geoms_733871_3724914.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733871_3724914.json b/solaris/data/vectortile_test_expected/geoms_733871_3724959.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733871_3724914.json
rename to solaris/data/vectortile_test_expected/geoms_733871_3724959.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733871_3724959.json b/solaris/data/vectortile_test_expected/geoms_733871_3725004.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733871_3724959.json
rename to solaris/data/vectortile_test_expected/geoms_733871_3725004.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733871_3725004.json b/solaris/data/vectortile_test_expected/geoms_733871_3725049.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733871_3725004.json
rename to solaris/data/vectortile_test_expected/geoms_733871_3725049.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733871_3725049.json b/solaris/data/vectortile_test_expected/geoms_733871_3725094.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733871_3725049.json
rename to solaris/data/vectortile_test_expected/geoms_733871_3725094.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733871_3725094.json b/solaris/data/vectortile_test_expected/geoms_733871_3725139.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733871_3725094.json
rename to solaris/data/vectortile_test_expected/geoms_733871_3725139.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733916_3724689.json b/solaris/data/vectortile_test_expected/geoms_733916_3724734.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733916_3724689.json
rename to solaris/data/vectortile_test_expected/geoms_733916_3724734.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733916_3724734.json b/solaris/data/vectortile_test_expected/geoms_733916_3724779.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733916_3724734.json
rename to solaris/data/vectortile_test_expected/geoms_733916_3724779.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733916_3724779.json b/solaris/data/vectortile_test_expected/geoms_733916_3724824.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733916_3724779.json
rename to solaris/data/vectortile_test_expected/geoms_733916_3724824.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733916_3724824.json b/solaris/data/vectortile_test_expected/geoms_733916_3724869.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733916_3724824.json
rename to solaris/data/vectortile_test_expected/geoms_733916_3724869.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733916_3724869.json b/solaris/data/vectortile_test_expected/geoms_733916_3724914.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733916_3724869.json
rename to solaris/data/vectortile_test_expected/geoms_733916_3724914.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733916_3724914.json b/solaris/data/vectortile_test_expected/geoms_733916_3724959.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733916_3724914.json
rename to solaris/data/vectortile_test_expected/geoms_733916_3724959.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733916_3724959.json b/solaris/data/vectortile_test_expected/geoms_733916_3725004.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733916_3724959.json
rename to solaris/data/vectortile_test_expected/geoms_733916_3725004.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733916_3725004.json b/solaris/data/vectortile_test_expected/geoms_733916_3725049.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733916_3725004.json
rename to solaris/data/vectortile_test_expected/geoms_733916_3725049.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733916_3725049.json b/solaris/data/vectortile_test_expected/geoms_733916_3725094.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733916_3725049.json
rename to solaris/data/vectortile_test_expected/geoms_733916_3725094.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733916_3725094.json b/solaris/data/vectortile_test_expected/geoms_733916_3725139.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733916_3725094.json
rename to solaris/data/vectortile_test_expected/geoms_733916_3725139.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733961_3724689.json b/solaris/data/vectortile_test_expected/geoms_733961_3724734.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733961_3724689.json
rename to solaris/data/vectortile_test_expected/geoms_733961_3724734.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733961_3724734.json b/solaris/data/vectortile_test_expected/geoms_733961_3724779.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733961_3724734.json
rename to solaris/data/vectortile_test_expected/geoms_733961_3724779.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733961_3724779.json b/solaris/data/vectortile_test_expected/geoms_733961_3724824.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733961_3724779.json
rename to solaris/data/vectortile_test_expected/geoms_733961_3724824.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733961_3724824.json b/solaris/data/vectortile_test_expected/geoms_733961_3724869.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733961_3724824.json
rename to solaris/data/vectortile_test_expected/geoms_733961_3724869.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733961_3724869.json b/solaris/data/vectortile_test_expected/geoms_733961_3724914.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733961_3724869.json
rename to solaris/data/vectortile_test_expected/geoms_733961_3724914.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733961_3724914.json b/solaris/data/vectortile_test_expected/geoms_733961_3724959.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733961_3724914.json
rename to solaris/data/vectortile_test_expected/geoms_733961_3724959.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733961_3724959.json b/solaris/data/vectortile_test_expected/geoms_733961_3725004.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733961_3724959.json
rename to solaris/data/vectortile_test_expected/geoms_733961_3725004.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733961_3725004.json b/solaris/data/vectortile_test_expected/geoms_733961_3725049.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733961_3725004.json
rename to solaris/data/vectortile_test_expected/geoms_733961_3725049.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733961_3725049.json b/solaris/data/vectortile_test_expected/geoms_733961_3725094.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733961_3725049.json
rename to solaris/data/vectortile_test_expected/geoms_733961_3725094.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_733961_3725094.json b/solaris/data/vectortile_test_expected/geoms_733961_3725139.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_733961_3725094.json
rename to solaris/data/vectortile_test_expected/geoms_733961_3725139.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_734006_3724689.json b/solaris/data/vectortile_test_expected/geoms_734006_3724734.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_734006_3724689.json
rename to solaris/data/vectortile_test_expected/geoms_734006_3724734.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_734006_3724734.json b/solaris/data/vectortile_test_expected/geoms_734006_3724779.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_734006_3724734.json
rename to solaris/data/vectortile_test_expected/geoms_734006_3724779.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_734006_3724779.json b/solaris/data/vectortile_test_expected/geoms_734006_3724824.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_734006_3724779.json
rename to solaris/data/vectortile_test_expected/geoms_734006_3724824.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_734006_3724824.json b/solaris/data/vectortile_test_expected/geoms_734006_3724869.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_734006_3724824.json
rename to solaris/data/vectortile_test_expected/geoms_734006_3724869.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_734006_3724869.json b/solaris/data/vectortile_test_expected/geoms_734006_3724914.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_734006_3724869.json
rename to solaris/data/vectortile_test_expected/geoms_734006_3724914.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_734006_3724914.json b/solaris/data/vectortile_test_expected/geoms_734006_3724959.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_734006_3724914.json
rename to solaris/data/vectortile_test_expected/geoms_734006_3724959.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_734006_3724959.json b/solaris/data/vectortile_test_expected/geoms_734006_3725004.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_734006_3724959.json
rename to solaris/data/vectortile_test_expected/geoms_734006_3725004.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_734006_3725004.json b/solaris/data/vectortile_test_expected/geoms_734006_3725049.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_734006_3725004.json
rename to solaris/data/vectortile_test_expected/geoms_734006_3725049.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_734006_3725049.json b/solaris/data/vectortile_test_expected/geoms_734006_3725094.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_734006_3725049.json
rename to solaris/data/vectortile_test_expected/geoms_734006_3725094.geojson
diff --git a/solaris/data/vectortile_test_expected/geoms_734006_3725094.json b/solaris/data/vectortile_test_expected/geoms_734006_3725139.geojson
similarity index 100%
rename from solaris/data/vectortile_test_expected/geoms_734006_3725094.json
rename to solaris/data/vectortile_test_expected/geoms_734006_3725139.geojson
diff --git a/solaris/nets/__init__.py b/solaris/nets/__init__.py
index 03958b29..63f614e7 100644
--- a/solaris/nets/__init__.py
+++ b/solaris/nets/__init__.py
@@ -1,2 +1,11 @@
+import os
+
+weights_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)),
+                           'weights')
+
 from . import callbacks, datagen, infer, losses, metrics, model_io
 from . import optimizers, train, transform, zoo
+
+
+if not os.path.isdir(weights_dir):
+    os.mkdir(weights_dir)
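An aside on the `__init__.py` change above: `weights_dir` resolves to a `weights` folder inside the installed package and is created on first import, so downloaded model weights persist between sessions. A minimal sketch of the resulting behavior (the printed path is illustrative):

    import os
    import solaris.nets

    # resolves to <site-packages>/solaris/nets/weights (or the repo checkout)
    print(solaris.nets.weights_dir)
    assert os.path.isdir(solaris.nets.weights_dir)  # created at import time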
diff --git a/solaris/nets/_torch_losses.py b/solaris/nets/_torch_losses.py
index ba20fd02..a816c3f0 100644
--- a/solaris/nets/_torch_losses.py
+++ b/solaris/nets/_torch_losses.py
@@ -9,6 +9,17 @@
 from itertools import filterfalse as ifilterfalse
 
 
+class TorchDiceLoss(nn.Module):
+    def __init__(self, weight=None, size_average=True, per_image=False):
+        super().__init__()
+        self.size_average = size_average
+        self.register_buffer('weight', weight)
+        self.per_image = per_image
+
+    def forward(self, input, target):
+        return soft_dice_loss(input, target, per_image=self.per_image)
+
+
 class TorchFocalLoss(nn.Module):
     """Implementation of Focal Loss[1]_ modified from Catalyst [2]_ .
@@ -267,6 +278,25 @@ def mean(l, ignore_nan=False, empty=0):
     return acc / n
 
 
+def dice_round(preds, trues):
+    preds = preds.float()
+    return soft_dice_loss(preds, trues)
+
+
+def soft_dice_loss(outputs, targets, per_image=False):
+    batch_size = outputs.size()[0]
+    eps = 1e-5
+    if not per_image:
+        batch_size = 1
+    dice_target = targets.contiguous().view(batch_size, -1).float()
+    dice_output = outputs.contiguous().view(batch_size, -1)
+    intersection = torch.sum(dice_output * dice_target, dim=1)
+    union = torch.sum(dice_output, dim=1) + torch.sum(dice_target, dim=1) + eps
+    loss = (1 - (2 * intersection + eps) / union).mean()
+
+    return loss
+
+
 torch_losses = {
     'l1loss': nn.L1Loss,
     'l1': nn.L1Loss,
@@ -309,5 +339,7 @@
     'focalloss': TorchFocalLoss,
     'focal': TorchFocalLoss,
     'jaccard': TorchJaccardLoss,
-    'jaccardloss': TorchJaccardLoss
+    'jaccardloss': TorchJaccardLoss,
+    'dice': TorchDiceLoss,
+    'diceloss': TorchDiceLoss
 }
diff --git a/solaris/nets/configs/config_skeleton.yml b/solaris/nets/configs/config_skeleton.yml
index 7b00af00..427802e6 100644
--- a/solaris/nets/configs/config_skeleton.yml
+++ b/solaris/nets/configs/config_skeleton.yml
@@ -8,7 +8,7 @@
 model_name:  # include the name of the model to be used here. See the docs
              # for options.
-model_src_path:  # leave this blank unless you're using a custom model not
+model_path:  # leave this blank unless you're using a custom model not
              # native to solaris. solaris will automatically find your
              # model.
 train: true  # set to false for inference only
@@ -119,3 +119,8 @@
                        # set to the same size as the input image size for zero
                        # overlap; to average predictions across multiple images,
                        # use a smaller step size.
+  stitching_method:  # the method to use to stitch together tiles used during
+                     # inference. defaults to average if not provided. see
+                     # the documentation for sol.raster.image.stitch_images()
+                     # for more.
+  output_dir: inference_out  # the path to save inference outputs to.
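For intuition, the new soft_dice_loss computes 1 - (2*intersection + eps) / (|output| + |target| + eps) over flattened masks, so identical prediction and target give a loss near zero and a fully inverted prediction gives a loss near one. A standalone sanity check (an editor's sketch, not part of the patch):

    import torch

    preds = torch.tensor([[1., 1., 0., 0.]])   # toy "probability" mask
    target = torch.tensor([[1., 1., 0., 0.]])  # identical ground truth

    eps = 1e-5
    intersection = (preds * target).sum(dim=1)
    union = preds.sum(dim=1) + target.sum(dim=1) + eps
    loss = (1 - (2 * intersection + eps) / union).mean()
    print(loss.item())  # ~0.0; swapping target for 1 - target gives ~1.0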
diff --git a/solaris/nets/configs/selimsef_densenet121unet_spacenet4.yml b/solaris/nets/configs/selimsef_densenet121unet_spacenet4.yml
new file mode 100644
index 00000000..730c6429
--- /dev/null
+++ b/solaris/nets/configs/selimsef_densenet121unet_spacenet4.yml
@@ -0,0 +1,139 @@
+model_name: selimsef_spacenet4_densenet121unet
+
+model_path:
+train: true
+infer: false
+
+pretrained: true
+nn_framework: torch
+batch_size: 32
+
+data_specs:
+  width: 384
+  height: 384
+  image_type: zscore
+  rescale: false
+  rescale_minima: auto
+  rescale_maxima: auto
+  additional_inputs:
+  channels: 4
+  label_type: mask
+  is_categorical: false
+  mask_channels: 3
+  val_holdout_frac: 0.2
+  data_workers:
+
+training_data_csv: '/path/to/training_df.csv'
+validation_data_csv:
+inference_data_csv: '/path/to/test_df.csv'
+
+training_augmentation:
+  augmentations:
+    RandomScale:
+      scale_limit:
+        - 0.5
+        - 1.5
+      interpolation: nearest
+    Rotate:
+      limit:
+        - 5
+        - 6
+      border_mode: constant
+      p: 0.3
+    RandomCrop:
+      height: 416
+      width: 416
+      always_apply: true
+      p: 1.0
+    Normalize:
+      mean:
+        - 0.006479
+        - 0.009328
+        - 0.01123
+        - 0.02082
+      std:
+        - 0.004986
+        - 0.004964
+        - 0.004950
+        - 0.004878
+      max_pixel_value: 65535.0
+      p: 1.0
+  p: 1.0
+  shuffle: true
+
+validation_augmentation:
+  augmentations:
+    CenterCrop:
+      height: 384
+      width: 384
+      p: 1.0
+    Normalize:
+      mean:
+        - 0.006479
+        - 0.009328
+        - 0.01123
+        - 0.02082
+      std:
+        - 0.004986
+        - 0.004964
+        - 0.004950
+        - 0.004878
+      max_pixel_value: 65535.0
+      p: 1.0
+  p: 1.0
+
+inference_augmentation:
+  augmentations:
+    Normalize:
+      mean:
+        - 0.006479
+        - 0.009328
+        - 0.01123
+        - 0.02082
+      std:
+        - 0.004986
+        - 0.004964
+        - 0.004950
+        - 0.004878
+      max_pixel_value: 65535.0
+      p: 1.0
+  p: 1.0
+training:
+  epochs: 70
+  steps_per_epoch:
+  optimizer: AdamW
+  lr: 2e-4
+  opt_args:
+    weight_decay: 0.0001
+  loss:
+    focal:
+    dice:
+  loss_weights:
+    focal: 1
+    dice: 1
+  metrics:
+    training:
+    validation:
+  checkpoint_frequency: 10
+  callbacks:
+    lr_schedule:
+      schedule_type: 'arbitrary'
+      schedule_dict:
+        milestones:
+          - 1
+          - 5
+          - 15
+          - 30
+          - 50
+          - 60
+        gamma: 0.5
+    model_checkpoint:
+      filepath: 'selimsef_densenet121_best.pth'
+      monitor: val_loss
+  model_dest_path: 'selimsef_densenet121.pth'
+  verbose: true
+
+inference:
+  window_step_size_x:
+  window_step_size_y:
+  output_dir: 'inference_out'
diff --git a/solaris/nets/configs/selimsef_densenet161unet_spacenet4.yml b/solaris/nets/configs/selimsef_densenet161unet_spacenet4.yml
new file mode 100644
index 00000000..6d7f74b4
--- /dev/null
+++ b/solaris/nets/configs/selimsef_densenet161unet_spacenet4.yml
@@ -0,0 +1,139 @@
+model_name: selimsef_spacenet4_densenet161unet
+
+model_path:
+train: true
+infer: false
+
+pretrained: true
+nn_framework: torch
+batch_size: 20
+
+data_specs:
+  width: 384
+  height: 384
+  image_type: zscore
+  rescale: false
+  rescale_minima: auto
+  rescale_maxima: auto
+  additional_inputs:
+  channels: 4
+  label_type: mask
+  is_categorical: false
+  mask_channels: 3
+  val_holdout_frac: 0.2
+  data_workers:
+
+training_data_csv: '/path/to/training_df.csv'
+validation_data_csv:
+inference_data_csv: '/path/to/test_df.csv'
+
+training_augmentation:
+  augmentations:
+    RandomScale:
+      scale_limit:
+        - 0.5
+        - 1.5
+      interpolation: nearest
+    Rotate:
+      limit:
+        - 5
+        - 6
+      border_mode: constant
+      p: 0.3
+    RandomCrop:
+      height: 416
+      width: 416
+      always_apply: true
+      p: 1.0
+    Normalize:
+      mean:
+        - 0.006479
+        - 0.009328
+        - 0.01123
+        - 0.02082
+      std:
+        - 0.004986
+        - 0.004964
+        - 0.004950
+        - 0.004878
+      max_pixel_value: 65535.0
+      p: 1.0
+  p: 1.0
+  shuffle: true
+
+validation_augmentation:
+  augmentations:
+    CenterCrop:
+      height: 384
+      width: 384
+      p: 1.0
+    Normalize:
+      mean:
+        - 0.006479
+        - 0.009328
+        - 0.01123
+        - 0.02082
+      std:
+        - 0.004986
+        - 0.004964
+        - 0.004950
+        - 0.004878
+      max_pixel_value: 65535.0
+      p: 1.0
+  p: 1.0
+
+inference_augmentation:
+  augmentations:
+    Normalize:
+      mean:
+        - 0.006479
+        - 0.009328
+        - 0.01123
+        - 0.02082
+      std:
+        - 0.004986
+        - 0.004964
+        - 0.004950
+        - 0.004878
+      max_pixel_value: 65535.0
+      p: 1.0
+  p: 1.0
+training:
+  epochs: 60
+  steps_per_epoch:
+  optimizer: AdamW
+  lr: 2e-4
+  opt_args:
+    weight_decay: 0.0001
+  loss:
+    focal:
+    dice:
+  loss_weights:
+    focal: 1
+    dice: 1
+  metrics:
+    training:
+    validation:
+  checkpoint_frequency: 10
+  callbacks:
+    lr_schedule:
+      schedule_type: 'arbitrary'
+      schedule_dict:
+        milestones:
+          - 1
+          - 5
+          - 15
+          - 30
+          - 45
+          - 55
+        gamma: 0.5
+    model_checkpoint:
+      filepath: 'selimsef_densenet161_best.pth'
+      monitor: val_loss
+  model_dest_path: 'selimsef_densenet161.pth'
+  verbose: true
+
+inference:
+  window_step_size_x:
+  window_step_size_y:
+  output_dir: 'inference_out'
diff --git a/solaris/nets/configs/selimsef_resnet34unet_spacenet4.yml b/solaris/nets/configs/selimsef_resnet34unet_spacenet4.yml
new file mode 100644
index 00000000..728baf64
--- /dev/null
+++ b/solaris/nets/configs/selimsef_resnet34unet_spacenet4.yml
@@ -0,0 +1,139 @@
+model_name: selimsef_spacenet4_resnet34unet
+
+model_path:
+train: false
+infer: true
+
+pretrained: true
+nn_framework: torch
+batch_size: 42
+
+data_specs:
+  width: 384
+  height: 384
+  image_type: zscore
+  rescale: false
+  rescale_minima: auto
+  rescale_maxima: auto
+  additional_inputs:
+  channels: 4
+  label_type: mask
+  is_categorical: false
+  mask_channels: 3
+  val_holdout_frac: 0.2
+  data_workers: 12
+
+training_data_csv: '/path/to/training_df.csv'
+validation_data_csv:
+inference_data_csv: '/path/to/test_df.csv'
+
+training_augmentation:
+  augmentations:
+    RandomScale:
+      scale_limit:
+        - 0.5
+        - 1.5
+      interpolation: nearest
+    Rotate:
+      limit:
+        - 5
+        - 6
+      border_mode: constant
+      p: 0.3
+    RandomCrop:
+      height: 416
+      width: 416
+      always_apply: true
+      p: 1.0
+    Normalize:
+      mean:
+        - 0.006479
+        - 0.009328
+        - 0.01123
+        - 0.02082
+      std:
+        - 0.004986
+        - 0.004964
+        - 0.004950
+        - 0.004878
+      max_pixel_value: 65535.0
+      p: 1.0
+  p: 1.0
+  shuffle: true
+
+validation_augmentation:
+  augmentations:
+    CenterCrop:
+      height: 384
+      width: 384
+      p: 1.0
+    Normalize:
+      mean:
+        - 0.006479
+        - 0.009328
+        - 0.01123
+        - 0.02082
+      std:
+        - 0.004986
+        - 0.004964
+        - 0.004950
+        - 0.004878
+      max_pixel_value: 65535.0
+      p: 1.0
+  p: 1.0
+
+inference_augmentation:
+  augmentations:
+    Normalize:
+      mean:
+        - 0.006479
+        - 0.009328
+        - 0.01123
+        - 0.02082
+      std:
+        - 0.004986
+        - 0.004964
+        - 0.004950
+        - 0.004878
+      max_pixel_value: 65535.0
+      p: 1.0
+  p: 1.0
+training:
+  epochs: 70
+  steps_per_epoch:
+  optimizer: AdamW
+  lr: 2e-4
+  opt_args:
+    weight_decay: 0.0001
+  loss:
+    focal:
+    dice:
+  loss_weights:
+    focal: 1
+    dice: 1
+  metrics:
+    training:
+    validation:
+  checkpoint_frequency: 10
+  callbacks:
+    lr_schedule:
+      schedule_type: 'arbitrary'
+      schedule_dict:
+        milestones:
+          - 1
+          - 5
+          - 15
+          - 30
+          - 50
+          - 60
+        gamma: 0.5
+    model_checkpoint:
+      filepath: 'selimsef_resnet34_best.pth'
+      monitor: val_loss
+  model_dest_path: 'selimsef_resnet34.pth'
+  verbose: true
+
+inference:
+  window_step_size_x:
+  window_step_size_y:
+  output_dir: 'inference_out/'
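As a usage note: once one of these YAML files is saved locally and the placeholder CSV paths are filled in, the intended entry point is solaris's config parser plus the Trainer class updated later in this diff. A hedged sketch (the file name is one of the configs above):

    import solaris as sol

    # train: true in the densenet121 config, so this starts training;
    # per the model_io changes below, pretrained weights are downloaded
    # automatically if they are not already cached in weights_dir
    config = sol.utils.config.parse('selimsef_densenet121unet_spacenet4.yml')
    trainer = sol.nets.train.Trainer(config)
    trainer.train()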
diff --git a/solaris/nets/configs/selimsef_scse50unet_spacenet4.yml b/solaris/nets/configs/selimsef_scse50unet_spacenet4.yml
new file mode 100644
index 00000000..6f70c763
--- /dev/null
+++ b/solaris/nets/configs/selimsef_scse50unet_spacenet4.yml
@@ -0,0 +1,198 @@
+################################################################################
+################# SOLARIS MODEL CONFIGURATION SKELETON #########################
+################################################################################
+
+# This skeleton lays out the required instructions for running a model using
+# solaris. See the full documentation at [INCLUDE DOC LINK HERE] for details on
+# options, required arguments, and sample usage.
+
+model_name: selimsef_scse50unet_spacenet4
+
+model_path:  # leave this blank unless you're using a custom model not
+             # native to solaris. solaris will automatically find your
+             # model.
+train: true  # set to false for inference only
+infer: false  # set to false for training only
+
+pretrained: false  # use pretrained weights associated with the model?
+nn_framework: torch
+batch_size: 8
+
+data_specs:
+  width: 384
+  height: 384
+  image_type: zscore  # format of images read into the neural net. options
+                      # are 'normalized', 'zscore', '8bit', '16bit'.
+  rescale: false  # should image pixel values be rescaled before pre-processing?
+                  # If so, the image will be rescaled to the pixel range defined
+                  # by rescale_min and rescale_max below.
+  rescale_minima: auto  # the minimum values to use in rescaling (if
+                        # rescale=true). If 'auto', the minimum pixel intensity
+                        # in each channel of the image will be subtracted. If
+                        # a single value is provided, that value will be set to
+                        # zero for each channel in the input image.
+                        # if a list of values are provided, then minima in the
+                        # separate channels (in that order) will be set to that
+                        # value PRIOR to any augmentation.
+  rescale_maxima: auto  # same as rescale_minima, but for the maximum value for
+                        # each channel in the image.
+  additional_inputs:  # a list of additional columns in the training CSV (and
+    - angle           # validation CSV if applicable) that will be passed to
+                      # the model. Those values will not be augmented.
+                      # This list MUST be in the same order as the additional
+                      # input values are expected by the model.
+  channels: 4  # number of channels in the input imagery.
+  label_type: mask  # one of ['mask', 'bbox']
+  is_categorical: false  # are the labels binary (default) or categorical?
+  mask_channels: 3  # number of channels in the training mask
+  val_holdout_frac: 0.2  # if empty, assumes that separate data ref files define the
+                         # training and validation dataset. If a float between 0 and
+                         # 1, indicates the fraction of training data that's held
+                         # out for validation (and validation_data_csv will be
+                         # ignored)
+  data_workers:  # number of cpu threads to use for loading and preprocessing
+                 # input images.
+# other_inputs:  # this can provide a list of additional inputs to pass to the
+                 # neural net for training. These inputs should be specified in
+                 # extra columns of the csv files (denoted below), either as
+                 # filepaths to additional data to load or as values to include.
+                 # NOTE: This is not currently implemented.
+
+training_data_csv: '/path/to/training_df.csv'
+validation_data_csv:
+inference_data_csv: '/path/to/test_df.csv'  # TODO # path to the reference csv that defines inference data.
+                                            # see the documentation for the specs of this file.
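The training/validation/inference CSVs referenced above are plain manifests of file paths; because this config declares `angle` under additional_inputs, each row also carries that column. A hypothetical way to build one (column names follow the config comments and the datagen code later in this diff):

    import pandas as pd

    # 'image' and 'label' hold file paths; 'angle' is the extra input column
    # declared under additional_inputs in this config (values illustrative)
    df = pd.DataFrame({
        'image': ['/data/SN4/train/img_001.tif'],
        'label': ['/data/SN4/train/mask_001.tif'],
        'angle': [12],
    })
    df.to_csv('/path/to/training_df.csv', index=False)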
+
+training_augmentation:  # augmentations for use with training data
+  augmentations:
+    RandomScale:
+      scale_limit:
+        - 0.5
+        - 1.5
+      interpolation: nearest
+    RandomCrop:
+      height: 384
+      width: 384
+      p: 1.0
+    Rotate:
+      limit:
+        - 5
+        - 6
+      border_mode: constant
+      p: 0.3
+    Normalize:
+      mean:
+        - 0.006479
+        - 0.009328
+        - 0.01123
+        - 0.02082
+      std:
+        - 0.004986
+        - 0.004964
+        - 0.004950
+        - 0.004878
+      max_pixel_value: 65535.0
+      p: 1.0
+    # include augmentations here. See the documentation for options and
+    # required arguments.
+  p: 1.0  # probability of applying the entire training augmentation pipeline.
+  shuffle: true  # should the image order be shuffled in each epoch.
+
+validation_augmentation:  # augmentations for use with validation data
+  augmentations:
+    CenterCrop:
+      height: 384
+      width: 384
+      p: 1.0
+    Normalize:
+      mean:
+        - 0.006479
+        - 0.009328
+        - 0.01123
+        - 0.02082
+      std:
+        - 0.004986
+        - 0.004964
+        - 0.004950
+        - 0.004878
+      max_pixel_value: 65535.0
+      p: 1.0
+  p: 1.0
+
+inference_augmentation:
+  augmentations:
+    Normalize:
+      mean:
+        - 0.006479
+        - 0.009328
+        - 0.01123
+        - 0.02082
+      std:
+        - 0.004986
+        - 0.004964
+        - 0.004950
+        - 0.004878
+      max_pixel_value: 65535.0
+      p: 1.0
+  p: 1.0
+training:
+  epochs: 52  # number of epochs. A list can also be provided here indicating
+              # distinct sets of epochs at different learning rates, etc; if so,
+              # a list of equal length must be provided in the parameter that varies
+              # with the values for each set of epochs.
+  steps_per_epoch:  # optional argument defining # steps/epoch. If not provided,
+                    # each epoch will include the number of steps needed to go
+                    # through the entire training dataset.
+  optimizer: AdamW  # optimizer function name. see docs for options.
+  lr: 2e-4  # learning rate.
+  opt_args:  # dictionary of values (e.g. alpha, gamma, momentum) specific to
+    weight_decay: 0.0001
+  loss:
+    focal:  # loss function(s). See docs for options. This should be a list of loss
+    dice:   # names with sublists of loss function hyperparameters (if applicable).
+            # See the docs for more details and usage examples.
+  loss_weights:
+    focal: 1  # (optional) weights to use for each loss function if using
+    dice: 1   # loss: composite. This must be a set of key:value pairs where
+              # defining the weight for each sub-loss within the composite.
+              # If using a composite and a value isn't provided here, all
+              # losses will be weighted equally.
+  metrics:  # metrics to monitor on the training and validation sets.
+    training:  # training set metrics.
+    validation:  # validation set metrics.
+  checkpoint_frequency: 10  # how frequently should checkpoints be saved?
+                            # this can be an int, in which case every n epochs
+                            # a checkpoint will be made, or a list, in which case
+                            # checkpoints will be saved on those specific epochs.
+                            # if not provided, only the final model is saved.
+  callbacks:
+    lr_schedule:
+      schedule_type: 'arbitrary'
+      schedule_dict:
+        milestones:
+          - 1
+          - 5
+          - 15
+          - 30
+          - 40
+          - 50
+        gamma: 0.5
+    model_checkpoint:
+      filepath: 'selimsef_best.pth'
+      monitor: val_loss
+  model_dest_path: 'selimsef.pth'  # path to save the trained model output and checkpoint(s)
+                                   # to. Should be a filename ending in .h5, .hdf5 for keras
+                                   # or .pth, .pt for torch. Epoch numbers will be appended
+                                   # for checkpoints.
+  verbose: true  # verbose text output during training
+
+inference:
+  window_step_size_x:  # size of each step for the sliding window for inference.
+                       # set to the same size as the input image size for zero
+                       # overlap; to average predictions across multiple images,
+                       # use a smaller step size.
+  window_step_size_y:  # size of each step for the sliding window for inference.
+                       # set to the same size as the input image size for zero
+                       # overlap; to average predictions across multiple images,
+                       # use a smaller step size.
+  output_dir: 'inference_out/'
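The 'arbitrary' lr_schedule used in these configs (a milestones list plus a gamma) behaves like PyTorch's MultiStepLR: the learning rate is multiplied by gamma at each milestone epoch. A minimal equivalent sketch, assuming that mapping:

    import torch

    model = torch.nn.Linear(4, 1)  # stand-in for the real network
    opt = torch.optim.Adam(model.parameters(), lr=2e-4)
    sched = torch.optim.lr_scheduler.MultiStepLR(
        opt, milestones=[1, 5, 15, 30, 40, 50], gamma=0.5)
    # by epoch 50 the lr has been halved six times: 2e-4 * 0.5**6 ~= 3.1e-6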
diff --git a/solaris/nets/configs/xdxd_spacenet4.yml b/solaris/nets/configs/xdxd_spacenet4.yml
index 9d632c03..04076b4d 100644
--- a/solaris/nets/configs/xdxd_spacenet4.yml
+++ b/solaris/nets/configs/xdxd_spacenet4.yml
@@ -1,66 +1,32 @@
-################################################################################
-################# SOLARIS MODEL CONFIGURATION SKELETON #########################
-################################################################################
-
-# This skeleton lays out the required instructions for running a model using
-# solaris. See the full documentation at [INCLUDE DOC LINK HERE] for details on
-# options, required arguments, and sample usage.
-
 model_name: xdxd_spacenet4
 
-model_src_path:  # leave this blank unless you're using a custom model not
-                 # native to solaris. solaris will automatically find your
-                 # model.
-train: true  # set to false for inference only
-infer: false  # set to false for training only
+model_path:
+train: false
+infer: true
 
-pretrained: false  # use pretrained weights associated with the model?
+pretrained: true
 nn_framework: torch
-batch_size: 4
+batch_size: 12
 
 data_specs:
   width: 512
   height: 512
-  image_type: zscore  # format of images read into the neural net. options
-                      # are 'normalized', 'zscore', '8bit', '16bit'.
-  rescale: false  # should image pixel values be rescaled before pre-processing?
-                  # If so, the image will be rescaled to the pixel range defined
-                  # by rescale_min and rescale_max below.
-  rescale_minima: auto  # the minimum values to use in rescaling (if
-                        # rescale=true). If 'auto', the minimum pixel intensity
-                        # in each channel of the image will be subtracted. If
-                        # a single value is provided, that value will be set to
-                        # zero for each channel in the input image.
-                        # if a list of values are provided, then minima in the
-                        # separate channels (in that order) will be set to that
-                        # value PRIOR to any augmentation.
-  rescale_maxima: auto  # same as rescale_minima, but for the maximum value for
-                        # each channel in the image.
-  channels: 4  # number of channels in the input imagery.
-  label_type: mask  # one of ['mask', 'bbox']
-  is_categorical: false  # are the labels binary (default) or categorical?
-  mask_channels: 1  # number of channels in the training mask
-  val_holdout_frac:  # if empty, assumes that separate data ref files define the
-                     # training and validation dataset. If a float between 0 and
-                     # 1, indicates the fraction of training data that's held
-                     # out for validation (and validation_data_csv will be
-                     # ignored)
-  data_workers:  # number of cpu threads to use for loading and preprocessing
-                 # input images.
-# other_inputs:  # this can provide a list of additional inputs to pass to the
-                 # neural net for training. These inputs should be specified in
-                 # extra columns of the csv files (denoted below), either as
-                 # filepaths to additional data to load or as values to include.
-                 # NOTE: This is not currently implemented.
+  image_type: zscore
+  rescale: false
+  rescale_minima: auto
+  rescale_maxima: auto
+  channels: 4
+  label_type: mask
+  is_categorical: false
+  mask_channels: 1
+  val_holdout_frac: 0.2
+  data_workers:
 
-training_data_csv:  # TODO # path to the reference csv that defines training data.
-                    # see the documentation for the specifications of this file.
-validation_data_csv:  # TODO # path to the validation ref csv. See the docs. If
-                      # val_holdout_frac is specified (under data_specs), then
-                      # this argument will be ignored.
-inference_data_csv:  # TODO # path to the reference csv that defines inference data.
-                    # see the documentation for the specs of this file.
+training_data_csv: '/path/to/training_df.csv'
+validation_data_csv:
+inference_data_csv: '/path/to/test_df.csv'
 
-training_augmentation:  # augmentations for use with training data
+training_augmentation:
   augmentations:
     DropChannel:
       idx: 3
@@ -74,84 +40,81 @@
       width: 512
       p: 1.0
     Normalize:
-      mean:  # mean values after dividing by max pixel value
+      mean:
         - 0.006479
         - 0.009328
         - 0.01123
-      std:  # std after dividing by max pixel value
+      std:
         - 0.004986
         - 0.004964
         - 0.004950
-      p: 1.0
       max_pixel_value: 65535.0
-  p: 1.0  # probability of applying the entire training augmentation pipeline.
-  shuffle: true  # should the image order be shuffled in each epoch.
-validation_augmentation:  # augmentations for use with validation data
+      p: 1.0
+  p: 1.0
+  shuffle: true
+validation_augmentation:
   augmentations:
+    DropChannel:
+      idx: 3
+      axis: 2
     CenterCrop:
       height: 512
       width: 512
       p: 1.0
     Normalize:
+      mean:
+        - 0.006479
+        - 0.009328
+        - 0.01123
+      std:
+        - 0.004986
+        - 0.004964
+        - 0.004950
+      max_pixel_value: 65535.0
       p: 1.0
-    # include augmentations here
-  p: 1.0  # probability of applying the full validation augmentation pipeline.
-inference_augmentation:  # this is optional. If not provided,
-                         # validation_augmentation will be used instead.
-
+  p: 1.0
+inference_augmentation:
+  augmentations:
+    DropChannel:
+      idx: 3
+      axis: 2
+      p: 1.0
+    Normalize:
+      mean:
+        - 0.006479
+        - 0.009328
+        - 0.01123
+      std:
+        - 0.004986
+        - 0.004964
+        - 0.004950
+      max_pixel_value: 65535.0
+      p: 1.0
+  p: 1.0
 training:
-  epochs: 50  # number of epochs. A list can also be provided here indicating
-              # distinct sets of epochs at different learning rates, etc; if so,
-              # a list of equal length must be provided in the parameter that varies
-              # with the values for each set of epochs.
-  steps_per_epoch:  # optional argument defining # steps/epoch. If not provided,
-                    # each epoch will include the number of steps needed to go
-                    # through the entire training dataset.
-  optimizer: Adam  # optimizer function name. see docs for options.
-  lr: 1e-4  # learning rate.
-  opt_args:  # dictionary of values (e.g. alpha, gamma, momentum) specific to
-             # the optimizer.
+  epochs: 60
+  steps_per_epoch:
+  optimizer: Adam
+  lr: 1e-4
+  opt_args:
   loss:
     bcewithlogits:
     jaccard:
   loss_weights:
-    - bcewithlogits: 1  # (optional) weights to use for each loss function if using
-    - jaccard: 0.25  # loss: composite. This must be a set of key:value pairs where
-                     # defining the weight for each sub-loss within the composite.
-                     # If using a composite and a value isn't provided here, all
-                     # losses will be weighted equally.
-  metrics:  # metrics to monitor on the training and validation sets.
-    training:  # training set metrics.
-      - f1_score
-      - recall
-      - precision
-
-    validation:  # validation set metrics.
-      - f1_score
-      - recall
-      - precision
-  checkpoint_frequency: 10  # how frequently should checkpoints be saved?
-                            # this can be an int, in which case every n epochs
-                            # a checkpoint will be made, or a list, in which case
-                            # checkpoints will be saved on those specific epochs.
-                            # if not provided, only the final model is saved.
+    bcewithlogits: 10
+    jaccard: 2.5
+  metrics:
+    training:
+    validation:
+  checkpoint_frequency: 10
   callbacks:
     model_checkpoint:
-      filepath: 'xdxd_sn4_best.pth'
+      filepath: 'xdxd_best.pth'
       monitor: val_loss
-  model_dest_path: 'xdxd_sn4.pth'  # path to save the trained model output and checkpoint(s)
-                                   # to. Should be a filename ending in .h5, .hdf5 for keras
-                                   # or .pth, .pt for torch. Epoch numbers will be appended
-                                   # for checkpoints.
-  verbose: true  # verbose text output during training
+  model_dest_path: 'xdxd.pth'
+  verbose: true
 
 inference:
-  window_step_size_x:  # size of each step for the sliding window for inference.
-                       # set to the same size as the input image size for zero
-                       # overlap; to average predictions across multiple images,
-                       # use a smaller step size.
-  window_step_size_y:  # size of each step for the sliding window for inference.
-                       # set to the same size as the input image size for zero
-                       # overlap; to average predictions across multiple images,
-                       # use a smaller step size.
+  window_step_size_x:
+  window_step_size_y:
+  output_dir: 'inference_out/'
diff --git a/solaris/nets/datagen.py b/solaris/nets/datagen.py
index 5bbf9724..249d930a 100644
--- a/solaris/nets/datagen.py
+++ b/solaris/nets/datagen.py
@@ -199,10 +199,20 @@ def __getitem__(self, idx):
         if len(mask.shape) == 2:
             mask = mask[:, :, np.newaxis]
         sample = {'image': image, 'mask': mask}
+
         if self.aug:
             sample = self.aug(**sample)
-        sample['image'] = _check_channel_order(sample['image'], 'torch').astype(np.float32)
-        sample['mask'] = _check_channel_order(sample['mask'], 'torch').astype(np.float32)
+        # add in additional inputs (if applicable)
+        additional_inputs = self.config['data_specs'].get('additional_inputs',
+                                                          None)
+        if additional_inputs is not None:
+            for input in additional_inputs:
+                sample[input] = self.df[input].iloc[idx]
+
+        sample['image'] = _check_channel_order(sample['image'],
+                                               'torch').astype(np.float32)
+        sample['mask'] = _check_channel_order(sample['mask'],
+                                              'torch').astype(np.float32)
         return sample
@@ -248,19 +258,16 @@ def __call__(self, im):
         """
         # read in the image if it's a path
         if isinstance(im, str):
-            im = imread(im, make_8bit=True)
+            im = imread(im)
         # determine how many samples will be generated with the sliding window
         src_im_height = im.shape[0]
         src_im_width = im.shape[1]
         y_steps = int(1+np.ceil((src_im_height-self.height)/self.y_step))
         x_steps = int(1+np.ceil((src_im_width-self.width)/self.x_step))
-        n_chips = ((y_steps)*(x_steps))
         if len(im.shape) == 2:  # if there's no channel axis
             im = im[:, :, np.newaxis]  # create one - will be needed for model
-        output_arr = np.empty(shape=(n_chips,
-                                     self.height, self.width,
-                                     im.shape[2]), dtype=im.dtype)
         top_left_corner_idxs = []
+        output_arr = []
         for y in range(y_steps):
             if self.y_step*y + self.height > im.shape[0]:
                 y_min = im.shape[0] - self.height
@@ -277,9 +284,10 @@
                               x_min:x_min + self.width, :]
 
             if self.aug is not None:
-                subarr = self.aug(image=subarr)
-            output_arr[len(top_left_corner_idxs), :, :, :] = subarr
+                subarr = self.aug(image=subarr)['image']
+            output_arr.append(subarr)
             top_left_corner_idxs.append((y_min, x_min))
+        output_arr = np.stack(output_arr).astype(np.float32)
         if self.framework in ['torch', 'pytorch']:
             output_arr = np.moveaxis(output_arr, 3, 1)
         return output_arr, top_left_corner_idxs, (src_im_height, src_im_width)
diff --git a/solaris/nets/infer.py b/solaris/nets/infer.py
index 2b0bab97..d72f4200 100644
--- a/solaris/nets/infer.py
+++ b/solaris/nets/infer.py
@@ -11,7 +11,7 @@
 class Inferer(object):
     """Object for training `solaris` models using PyTorch or Keras."""
 
-    def __init__(self, config):
+    def __init__(self, config, custom_model_dict=None):
         self.config = config
         self.batch_size = self.config['batch_size']
         self.framework = self.config['nn_framework']
@@ -21,11 +21,14 @@
         if self.config['train']:
             self.model_path = self.config['training']['model_dest_path']
         else:
-            self.model_path = self.config['model_path']
+            self.model_path = self.config.get('model_path', None)
         self.model = get_model(self.model_name, self.framework,
-                               self.model_path)
-        self.window_step_x = self.config['inference'].get('window_step_size_x')
-        self.window_step_y = self.config['inference'].get('window_step_size_y')
+                               self.model_path, pretrained=True,
+                               custom_model_dict=custom_model_dict)
+        self.window_step_x = self.config['inference'].get('window_step_size_x',
+                                                          None)
+        self.window_step_y = self.config['inference'].get('window_step_size_y',
+                                                          None)
         if self.window_step_x is None:
             self.window_step_x = self.config['data_specs']['width']
         if self.window_step_y is None:
@@ -33,6 +36,8 @@
         self.stitching_method = self.config['inference'].get(
             'stitching_method', 'average')
         self.output_dir = self.config['inference']['output_dir']
+        if not os.path.isdir(self.output_dir):
+            os.makedirs(self.output_dir)
 
     def __call__(self, infer_df):
         """Run inference.
@@ -52,9 +57,9 @@
             x_step=self.window_step_x,
             y_step=self.window_step_y,
             augmentations=process_aug_dict(
-                self.config['inference']['inference_augmentation'])
+                self.config['inference_augmentation'])
             )
-        for im_path in infer_df['image']:
+        for idx, im_path in enumerate(infer_df['image']):
             inf_input, idx_refs, (
                 src_im_height, src_im_width) = inf_tiler(im_path)
 
@@ -63,12 +68,22 @@
                                                    batch_size=self.batch_size)
 
             elif self.framework in ['torch', 'pytorch']:
+                with torch.no_grad():
+                    self.model.eval()
                 if torch.cuda.is_available():
                     device = torch.device('cuda')
                     self.model = self.model.cuda()
                 else:
                     device = torch.device('cpu')
                 inf_input = torch.from_numpy(inf_input).float().to(device)
+                # add additional input data, if applicable
+                if self.config['data_specs'].get('additional_inputs',
+                                                 None) is not None:
+                    inf_input = [inf_input]
+                    for i in self.config['data_specs']['additional_inputs']:
+                        inf_input.append(
+                            infer_df[i].iloc[idx].to(device))
+
                 subarr_preds = self.model(inf_input)
                 subarr_preds = subarr_preds.cpu().data.numpy()
             stitched_result = stitch_images(subarr_preds,
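Put together, the updated Inferer reads its window geometry from the config, tiles each scene, runs the model without gradient tracking, and stitches the per-window predictions back together with the configured stitching_method. A hedged end-to-end sketch (paths are placeholders):

    import pandas as pd
    import solaris as sol

    config = sol.utils.config.parse('xdxd_spacenet4.yml')
    inferer = sol.nets.infer.Inferer(config)

    # a one-column manifest naming the images to run on
    inf_df = pd.DataFrame({'image': ['/path/to/test_scene.tif']})
    inferer(inf_df)  # writes stitched predictions into the config's output_dir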
diff --git a/solaris/nets/metrics.py b/solaris/nets/metrics.py
index 79b60187..1f9dfa20 100644
--- a/solaris/nets/metrics.py
+++ b/solaris/nets/metrics.py
@@ -11,10 +11,16 @@ def get_metrics(framework, config):
     # TODO: enable passing kwargs to these metrics. This will require
     # writing a wrapper function that'll receive the inputs from the model
     # and pass them along with the kwarg to the metric function.
-    for m in config['training']['metrics']['training']:
-        training_metrics.append(metric_dict[m])
-    for m in config['training']['metrics']['validation']:
-        validation_metrics.append(metric_dict[m])
+    if config['training']['metrics'].get('training', []) is None:
+        training_metrics = []
+    else:
+        for m in config['training']['metrics'].get('training', []):
+            training_metrics.append(metric_dict[m])
+    if config['training']['metrics'].get('validation', []) is None:
+        validation_metrics = []
+    else:
+        for m in config['training']['metrics'].get('validation', []):
+            validation_metrics.append(metric_dict[m])
 
     return {'train': training_metrics, 'val': validation_metrics}
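The reason for the explicit `is None` checks above: when a YAML key such as `training:` is present but left empty, it parses to None, and dict.get() only substitutes its default when the key is entirely absent. In miniature:

    metrics = {'training': None}  # key present but empty in the YAML

    print(metrics.get('training', []))    # None, not [] -- key exists
    print(metrics.get('validation', []))  # []   -- key truly missing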
diff --git a/solaris/nets/model_io.py b/solaris/nets/model_io.py
index 7bb08736..a528728a 100644
--- a/solaris/nets/model_io.py
+++ b/solaris/nets/model_io.py
@@ -1,35 +1,37 @@
 import os
 from tensorflow import keras
 import torch
+from warnings import warn
+import requests
+import numpy as np
+from tqdm import tqdm
+from ..nets import weights_dir
 from .zoo import model_dict
 
-# below dictionary lists models compatible with solaris. alternatively, your
-# own model can be used by using the path to the model as the value for
-# model_name in the config file.
-
-
 def get_model(model_name, framework, model_path=None, pretrained=False,
               custom_model_dict=None):
     """Load a model from a file based on its name."""
-
-    md = model_dict.get(model_name, None)
-    if md is None:  # if the model's not provided by solaris
-        if custom_model_dict is None:
+    if custom_model_dict is not None:
+        md = custom_model_dict
+    else:
+        md = model_dict.get(model_name, None)
+        if md is None:  # if the model's not provided by solaris
             raise ValueError(f"{model_name} can't be found in solaris and no "
                              "custom_model_dict was provided. Check your "
                              "model_name in the config file and/or provide a "
                              "custom_model_dict argument to Trainer().")
-        else:
-            md = custom_model_dict
-    if model_path is None:
+    if model_path is None or custom_model_dict is not None:
         model_path = md.get('weight_path')
     model = md.get('arch')()
     if model is not None and pretrained:
         try:
             model = _load_model_weights(model, model_path, framework)
         except (OSError, FileNotFoundError):
-            pass  # TODO: IMPLEMENT MODEL DOWNLOAD FROM STORAGE HERE
+            warn(f'The model weights file {model_path} was not found.'
+                 ' Attempting to download from the SpaceNet repository.')
+            weight_path = _download_weights(md)
+            model = _load_model_weights(model, weight_path, framework)
     return model
@@ -46,9 +48,13 @@
     elif framework.lower() in ['torch', 'pytorch']:
         # pytorch already throws the right error on failed load, so no need
         # to fix exception
-        model.load_state_dict(path)
+        loaded = torch.load(path)
+        if isinstance(loaded, torch.nn.Module):  # if it's a full model already
+            model.load_state_dict(loaded.state_dict())
+        else:
+            model.load_state_dict(loaded)
 
-        return model
+    return model
@@ -82,3 +88,27 @@
     if isinstance(torch_layer, torch.nn.Conv2d) or \
             isinstance(torch_layer, torch.nn.Linear):
         torch_layer.reset_parameters()
+
+
+def _download_weights(model_dict):
+    """Download pretrained weights for a model."""
+    weight_url = model_dict.get('weight_url', None)
+    weight_dest_path = model_dict.get('weight_path', os.path.join(
+        weights_dir, weight_url.split('/')[-1]))
+    if weight_url is None:
+        raise KeyError("Can't find the weights file.")
+    else:
+        r = requests.get(weight_url, stream=True)
+        if r.status_code != 200:
+            raise ValueError('The file could not be downloaded. Check the URL'
+                             ' and network connections.')
+        total_size = int(r.headers.get('content-length', 0))
+        block_size = 1024
+        with open(weight_dest_path, 'wb') as f:
+            for chunk in tqdm(r.iter_content(block_size),
+                              total=np.ceil(total_size / block_size),
+                              unit='KB', unit_scale=False):
+                if chunk:
+                    f.write(chunk)
+
+    return weight_dest_path
diff --git a/solaris/nets/optimizers.py b/solaris/nets/optimizers.py
index 1af1c3cd..ec7921d9 100644
--- a/solaris/nets/optimizers.py
+++ b/solaris/nets/optimizers.py
@@ -1,30 +1,9 @@
 """Wrappers for training optimizers."""
-
+import math
 import torch
 from tensorflow import keras
 
-torch_optimizers = {
-    'adadelta': torch.optim.Adadelta,
-    'adam': torch.optim.Adam,
-    'sparseadam': torch.optim.SparseAdam,
-    'adamax': torch.optim.Adamax,
-    'asgd': torch.optim.ASGD,
-    'rmsprop': torch.optim.RMSprop,
-    'sgd': torch.optim.SGD,
-}
-
-keras_optimizers = {
-    'adadelta': keras.optimizers.Adadelta,
-    'adagrad': keras.optimizers.Adagrad,
-    'adam': keras.optimizers.Adam,
-    'adamax': keras.optimizers.Adamax,
-    'nadam': keras.optimizers.Nadam,
-    'rmsprop': keras.optimizers.RMSprop,
-    'sgd': keras.optimizers.SGD
-}
-
-
 def get_optimizer(framework, config):
     """Get the optimizer specified in config for model training.
 
@@ -49,3 +28,136 @@
         return torch_optimizers.get(config['training']['optimizer'].lower())
     elif framework == 'keras':
         return keras_optimizers.get(config['training']['optimizer'].lower())
+
+
+class TorchAdamW(torch.optim.Optimizer):
+    """AdamW algorithm as implemented in `Torch_AdamW`_.
+
+    The original Adam algorithm was proposed in
+    `Adam: A Method for Stochastic Optimization`_. The AdamW variant was
+    proposed in `Decoupled Weight Decay Regularization`_.
+
+    Arguments:
+        params (iterable): iterable of parameters to optimize or dicts defining
+            parameter groups
+        lr (float, optional): learning rate (default: 1e-3)
+        betas (Tuple[float, float], optional): coefficients used for computing
+            running averages of gradient and its square (default: (0.9, 0.999))
+        eps (float, optional): term added to the denominator to improve
+            numerical stability (default: 1e-8)
+        weight_decay (float, optional): weight decay coefficient (default: 1e-2)
+        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
+            algorithm from the paper `On the Convergence of Adam and Beyond`_
+            (default: False)
+
+    .. _Torch_AdamW: https://github.com/pytorch/pytorch/pull/3740
+    .. _Adam\: A Method for Stochastic Optimization:
+        https://arxiv.org/abs/1412.6980
+    .. _Decoupled Weight Decay Regularization:
+        https://arxiv.org/abs/1711.05101
+    .. _On the Convergence of Adam and Beyond:
+        https://openreview.net/forum?id=ryQu7f-RZ
+    """
+
+    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
+                 weight_decay=1e-2, amsgrad=False):
+        if not 0.0 <= lr:
+            raise ValueError("Invalid learning rate: {}".format(lr))
+        if not 0.0 <= eps:
+            raise ValueError("Invalid epsilon value: {}".format(eps))
+        if not 0.0 <= betas[0] < 1.0:
+            raise ValueError(
+                "Invalid beta parameter at index 0: {}".format(betas[0]))
+        if not 0.0 <= betas[1] < 1.0:
+            raise ValueError(
+                "Invalid beta parameter at index 1: {}".format(betas[1]))
+        defaults = dict(lr=lr, betas=betas, eps=eps,
+                        weight_decay=weight_decay, amsgrad=amsgrad)
+        super(TorchAdamW, self).__init__(params, defaults)
+
+    def __setstate__(self, state):
+        super(TorchAdamW, self).__setstate__(state)
+        for group in self.param_groups:
+            group.setdefault('amsgrad', False)
+
+    def step(self, closure=None):
+        """Performs a single optimization step.
+
+        Arguments:
+            closure (callable, optional): A closure that reevaluates the model
+                and returns the loss.
+        """
+        loss = None
+        if closure is not None:
+            loss = closure()
+
+        for group in self.param_groups:
+            for p in group['params']:
+                if p.grad is None:
+                    continue
+
+                # Perform stepweight decay
+                p.data.mul_(1 - group['lr'] * group['weight_decay'])
+
+                # Perform optimization step
+                grad = p.grad.data
+                if grad.is_sparse:
+                    raise RuntimeError('Adam does not support sparse '
+                                       'gradients, please consider SparseAdam'
+                                       ' instead')
+                amsgrad = group['amsgrad']
+
+                state = self.state[p]
+
+                # State initialization
+                if len(state) == 0:
+                    state['step'] = 0
+                    # Exponential moving average of gradient values
+                    state['exp_avg'] = torch.zeros_like(p.data)
+                    # Exponential moving average of squared gradient values
+                    state['exp_avg_sq'] = torch.zeros_like(p.data)
+                    if amsgrad:
+                        # Maintains max of all exp. moving avg. of sq. grad. values
+                        state['max_exp_avg_sq'] = torch.zeros_like(p.data)
+
+                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
+                if amsgrad:
+                    max_exp_avg_sq = state['max_exp_avg_sq']
+                beta1, beta2 = group['betas']
+
+                state['step'] += 1
+
+                # Decay the first and second moment running average coefficient
+                exp_avg.mul_(beta1).add_(1 - beta1, grad)
+                exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
+                if amsgrad:
+                    # Maintains the maximum of all 2nd moment running avg. till now
+                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
+                    # Use the max. for normalizing running avg. of gradient
+                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])
+                else:
+                    denom = exp_avg_sq.sqrt().add_(group['eps'])
+
+                bias_correction1 = 1 - beta1 ** state['step']
+                bias_correction2 = 1 - beta2 ** state['step']
+                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
+
+                p.data.addcdiv_(-step_size, exp_avg, denom)
+
+        return loss
+
+
+torch_optimizers = {
+    'adadelta': torch.optim.Adadelta,
+    'adam': torch.optim.Adam,
+    'adamw': TorchAdamW,
+    'sparseadam': torch.optim.SparseAdam,
+    'adamax': torch.optim.Adamax,
+    'asgd': torch.optim.ASGD,
+    'rmsprop': torch.optim.RMSprop,
+    'sgd': torch.optim.SGD,
+}
+
+keras_optimizers = {
+    'adadelta': keras.optimizers.Adadelta,
+    'adagrad': keras.optimizers.Adagrad,
+    'adam': keras.optimizers.Adam,
+    'adamax': keras.optimizers.Adamax,
+    'nadam': keras.optimizers.Nadam,
+    'rmsprop': keras.optimizers.RMSprop,
+    'sgd': keras.optimizers.SGD
+}
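The defining difference in the TorchAdamW step above is decoupled weight decay: the parameters are shrunk multiplicatively before the Adam moment update, rather than having weight_decay * p folded into the gradient. Boiled down to the decay step alone:

    import torch

    p = torch.ones(3)  # stand-in parameter tensor
    lr, weight_decay = 2e-4, 1e-4

    # decoupled decay, as in step() above...
    p.mul_(1 - lr * weight_decay)
    # ...followed by the ordinary Adam update using the running moments
    print(p)  # each weight shrunk by a factor of (1 - 2e-8)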
""" - save_name = os.path.splitext(self.filepath)[0] + '_{}+{}'.format( + save_name = os.path.splitext(self.filepath)[0] + '_epoch{}_{}'.format( self.epoch, np.round(self.last_saved_value, 3)) save_name = save_name + os.path.splitext(self.filepath)[1] + if isinstance(model, torch.nn.DataParallel): + to_save = model.module + else: + to_save = model if weights_only: - torch.save(model.state_dict(), save_name) + torch.save(to_save.state_dict(), save_name) else: - torch.save(model, save_name) + torch.save(to_save, save_name) torch_callback_dict = { diff --git a/solaris/nets/train.py b/solaris/nets/train.py index 2f3ff9be..e0a7c1e4 100644 --- a/solaris/nets/train.py +++ b/solaris/nets/train.py @@ -1,6 +1,7 @@ """Training code for `solaris` models.""" import numpy as np +import pandas as pd from .model_io import get_model, reset_weights from .datagen import make_data_generator from .losses import get_loss @@ -9,7 +10,6 @@ from .torch_callbacks import TorchEarlyStopping, TorchTerminateOnNaN from .torch_callbacks import TorchModelCheckpoint from .metrics import get_metrics -from ..utils.core import get_data_paths import torch from torch.optim.lr_scheduler import _LRScheduler import tensorflow as tf @@ -26,7 +26,8 @@ def __init__(self, config, custom_model_dict=None): self.model_name = self.config['model_name'] self.model_path = self.config.get('model_path', None) self.model = get_model(self.model_name, self.framework, - self.model_path, custom_model_dict) + self.model_path, self.pretrained, + custom_model_dict) self.train_df, self.val_df = get_train_val_dfs(self.config) self.train_datagen = make_data_generator(self.framework, self.config, self.train_df, stage='train') @@ -78,7 +79,7 @@ def initialize_model(self): self.optimizer = self.optimizer( self.model.parameters(), lr=self.lr ) - # wrap in lr_scheduler if one was created + # wrap in lr_scheduler if one was created for cb in self.callbacks: if isinstance(cb, _LRScheduler): self.optimizer = cb( @@ -110,7 +111,14 @@ def train(self): # TRAINING self.model.train() for batch_idx, batch in enumerate(self.train_datagen): - data = batch['image'].cuda() + if self.config['data_specs'].get('additional_inputs', + None) is not None: + data = [] + for i in ['image'] + self.config[ + 'data_specs']['additional_inputs']: + data.append(torch.Tensor(batch[i]).cuda()) + else: + data = batch['image'].cuda() target = batch['mask'].cuda().float() self.optimizer.zero_grad() output = self.model(data) @@ -121,7 +129,7 @@ def train(self): if self.verbose and batch_idx % 10 == 0: print(' loss at batch {}: {}'.format( - batch_idx, loss)) + batch_idx, loss), flush=True) # calculate metrics # for metric in self.metrics['train']: # with tf_sess.as_default(): @@ -133,7 +141,14 @@ def train(self): torch.cuda.empty_cache() val_loss = [] for batch_idx, batch in enumerate(self.val_datagen): - data = batch['image'].cuda() + if self.config['data_specs'].get('additional_inputs', + None) is not None: + data = [] + for i in ['image'] + self.config[ + 'data_specs']['additional_inputs']: + data.append(torch.Tensor(batch[i]).cuda()) + else: + data = batch['image'].cuda() target = batch['mask'].cuda().float() val_output = self.model(data) val_loss.append(self.loss(val_output, target)) @@ -189,7 +204,8 @@ def save_model(self): if self.framework == 'keras': self.model.save(self.config['training']['model_dest_path']) elif self.framework == 'torch': - torch.save(self.model, self.config['training']['model_dest_path']) + if isinstance(self.model, nn.DataParallel): + 
diff --git a/solaris/nets/zoo/selim_sef_sn4.py b/solaris/nets/zoo/selim_sef_sn4.py
new file mode 100644
index 00000000..6424aa6e
--- /dev/null
+++ b/solaris/nets/zoo/selim_sef_sn4.py
@@ -0,0 +1,580 @@
+import torch
+from torch import nn
+import torch.nn.functional as F
+from torch.utils import model_zoo
+from functools import partial
+from collections import OrderedDict
+import os
+import math
+import re
+
+model_urls = {
+    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
+    'densenet121': 'https://download.pytorch.org/models/densenet121-a639ec97.pth',
+    'densenet161': 'https://download.pytorch.org/models/densenet161-8d451a50.pth'
+}
+
+
+def conv3x3(in_planes, out_planes, stride=1):
+    "3x3 convolution with padding"
+    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
+                     padding=1, bias=False)
+
+
+class BasicBlock(nn.Module):
+    expansion = 1
+
+    def __init__(self, inplanes, planes, stride=1, downsample=None):
+        super(BasicBlock, self).__init__()
+        self.conv1 = conv3x3(inplanes, planes, stride)
+        self.bn1 = nn.BatchNorm2d(planes)
+        self.relu = 
nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = nn.BatchNorm2d(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(Bottleneck, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, + padding=1, bias=False) + self.bn2 = nn.BatchNorm2d(planes) + self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(planes * 4) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class ConvBottleneck(nn.Module): + def __init__(self, in_channels, out_channels): + super().__init__() + self.seq = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 3, padding=1), + nn.ReLU(inplace=True) + ) + + def forward(self, dec, enc): + x = torch.cat([dec, enc], dim=1) + return self.seq(x) + + +class UnetDecoderBlock(nn.Module): + def __init__(self, in_channels, middle_channels, out_channels): + super().__init__() + self.layer = nn.Sequential( + nn.Upsample(scale_factor=2), + nn.Conv2d(in_channels, out_channels, 3, padding=1), + nn.ReLU(inplace=True) + ) + + def forward(self, x): + return self.layer(x) + + +class ResNet(nn.Module): + def __init__(self, block, layers, in_channels=3): + self.inplanes = 64 + super(ResNet, self).__init__() + self.conv1 = nn.Conv2d(in_channels, 64, kernel_size=7, stride=2, + padding=3, bias=False) + self.bn1 = nn.BatchNorm2d(64) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, 64, layers[0]) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2) + self.layer3 = self._make_layer(block, 256, layers[2], stride=2) + self.layer4 = self._make_layer(block, 512, layers[3], stride=2) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + m.weight.data.normal_(0, math.sqrt(2. 
/ n))
+            elif isinstance(m, nn.BatchNorm2d):
+                m.weight.data.fill_(1)
+                m.bias.data.zero_()
+
+    def _make_layer(self, block, planes, blocks, stride=1):
+        downsample = None
+        if stride != 1 or self.inplanes != planes * block.expansion:
+            downsample = nn.Sequential(
+                nn.Conv2d(self.inplanes, planes * block.expansion,
+                          kernel_size=1, stride=stride, bias=False),
+                nn.BatchNorm2d(planes * block.expansion),
+            )
+
+        layers = []
+        layers.append(block(self.inplanes, planes, stride, downsample))
+        self.inplanes = planes * block.expansion
+        for i in range(1, blocks):
+            layers.append(block(self.inplanes, planes))
+
+        return nn.Sequential(*layers)
+
+    def forward(self, x):
+        x = self.conv1(x)
+        x = self.bn1(x)
+        x = self.relu(x)
+        x = self.maxpool(x)
+
+        x = self.layer1(x)
+        x = self.layer2(x)
+        x = self.layer3(x)
+        x = self.layer4(x)
+
+        return x
+
+
+def resnet34(**kwargs):
+    """Constructs a ResNet-34 model.
+
+    Args:
+        **kwargs: keyword arguments passed through to :class:`ResNet` (e.g.
+            ``in_channels``). Note that, unlike the torchvision version, this
+            constructor takes no ``pretrained`` argument.
+    """
+    model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
+    return model
+
+
+def densenet121(pretrained=True, **kwargs):
+    r"""Densenet-121 model from
+    `"Densely Connected Convolutional Networks" <https://arxiv.org/abs/1608.06993>`_
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+    """
+    model = DenseNet(num_init_features=64, growth_rate=32,
+                     block_config=(6, 12, 24, 16), **kwargs)
+    if pretrained:
+        # '.'s are no longer allowed in module names, but previous _DenseLayer
+        # has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'.
+        # They are also in the checkpoints in model_urls. This pattern is used
+        # to find such keys.
+        pattern = re.compile(
+            r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
+        state_dict = model_zoo.load_url(model_urls['densenet121'])
+        for key in list(state_dict.keys()):
+            res = pattern.match(key)
+            if res:
+                new_key = res.group(1) + res.group(2)
+                state_dict[new_key] = state_dict[key]
+                del state_dict[key]
+        model.state_dict()['features.conv0.weight'][:, :3, ...] = state_dict['features.conv0.weight'].data
+
+        pretrained_dict = {k: v for k, v in state_dict.items()
+                           if k != 'features.conv0.weight'}
+        model.load_state_dict(pretrained_dict, strict=False)
+    return model
+
+
+def densenet161(pretrained=True, **kwargs):
+    r"""Densenet-161 model from
+    `"Densely Connected Convolutional Networks" <https://arxiv.org/abs/1608.06993>`_
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+    """
+    model = DenseNet(num_init_features=96, growth_rate=48,
+                     block_config=(6, 12, 36, 24), **kwargs)
+    if pretrained:
+        # '.'s are no longer allowed in module names, but previous _DenseLayer
+        # has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'.
+        # They are also in the checkpoints in model_urls. This pattern is used
+        # to find such keys.
+        pattern = re.compile(
+            r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
+        state_dict = model_zoo.load_url(model_urls['densenet161'])
+        for key in list(state_dict.keys()):
+            res = pattern.match(key)
+            if res:
+                new_key = res.group(1) + res.group(2)
+                state_dict[new_key] = state_dict[key]
+                del state_dict[key]
+        model.state_dict()['features.conv0.weight'][:, :3, ...] 
= state_dict['features.conv0.weight'].data + + pretrained_dict = {k: v for k, v in state_dict.items() if k != 'features.conv0.weight'} + model.load_state_dict(pretrained_dict, strict=False) + + return model + + +encoder_params = { + 'resnet34': { + 'filters': [64, 64, 128, 256, 512], + 'decoder_filters': [64, 128, 256, 512], + 'last_upsample': 64, + 'init_op': partial(resnet34, in_channels=4) + }, + 'densenet161': + {'filters': [96, 384, 768, 2112, 2208], + 'decoder_filters': [64, 128, 256, 256], + 'last_upsample': 64, + 'url': None, + 'init_op': densenet161 + }, + 'densenet121': + {'filters': [64, 256, 512, 1024, 1024], + 'decoder_filters': [64, 128, 256, 256], + 'last_upsample': 64, + 'url': None, + 'init_op': densenet121 + } + } + + +class AbstractModel(nn.Module): + def _initialize_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d): + m.weight.data = nn.init.kaiming_normal_(m.weight.data) + if m.bias is not None: + m.bias.data.zero_() + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + + @property + def first_layer_params_name(self): + return 'conv1' + + +class EncoderDecoder(AbstractModel): + def __init__(self, num_classes, num_channels=3, encoder_name='resnet34'): + if not hasattr(self, 'first_layer_stride_two'): + self.first_layer_stride_two = False + if not hasattr(self, 'decoder_block'): + self.decoder_block = UnetDecoderBlock + if not hasattr(self, 'bottleneck_type'): + self.bottleneck_type = ConvBottleneck + + self.filters = encoder_params[encoder_name]['filters'] + self.decoder_filters = encoder_params[encoder_name].get( + 'decoder_filters', self.filters[:-1]) + self.last_upsample_filters = encoder_params[encoder_name].get( + 'last_upsample', self.decoder_filters[0]//2) + + super().__init__() + + self.num_channels = num_channels + self.num_classes = num_classes + + self.bottlenecks = nn.ModuleList([ + self.bottleneck_type(self.filters[-i - 2] + f, f) for i, f in + enumerate(reversed(self.decoder_filters[:]))]) + + self.decoder_stages = nn.ModuleList([ + self.get_decoder(idx) for idx in + range(0, len(self.decoder_filters))]) + + if self.first_layer_stride_two: + self.last_upsample = self.decoder_block(self.decoder_filters[0], + self.last_upsample_filters, + self.last_upsample_filters) + + self.final = self.make_final_classifier( + self.last_upsample_filters if self.first_layer_stride_two else self.decoder_filters[0], num_classes) + + self._initialize_weights() + + encoder = encoder_params[encoder_name]['init_op']() + self.encoder_stages = nn.ModuleList([self.get_encoder(encoder, idx) + for idx in + range(len(self.filters))]) + + # noinspection PyCallingNonCallable + def forward(self, x): + enc_results = [] + for stage in self.encoder_stages: + x = stage(x) + enc_results.append(torch.cat(x, dim=1) if isinstance(x, tuple) else x.clone()) + last_dec_out = enc_results[-1] + x = last_dec_out + for idx, bottleneck in enumerate(self.bottlenecks): + rev_idx = - (idx + 1) + x = self.decoder_stages[rev_idx](x) + x = bottleneck(x, enc_results[rev_idx - 1]) + + if self.first_layer_stride_two: + x = self.last_upsample(x) + + f = self.final(x) + + return f + + def get_decoder(self, layer): + in_channels = self.filters[layer + 1] if layer + 1 == len( + self.decoder_filters + ) else self.decoder_filters[layer + 1] + return self.decoder_block(in_channels, + self.decoder_filters[layer], + self.decoder_filters[max(layer, 0)]) + + def make_final_classifier(self, in_filters, num_classes): + return 
nn.Sequential( + nn.Conv2d(in_filters, num_classes, 1, padding=0) + ) + + def get_encoder(self, encoder, layer): + raise NotImplementedError + + @property + def first_layer_params(self): + return _get_layers_params([self.encoder_stages[0]]) + + @property + def layers_except_first_params(self): + layers = get_slice(self.encoder_stages, 1, -1) + [self.bottlenecks, + self.decoder_stages, + self.final] + return _get_layers_params(layers) + + +class _DenseLayer(nn.Sequential): + def __init__(self, num_input_features, growth_rate, bn_size, drop_rate): + super(_DenseLayer, self).__init__() + self.add_module('norm1', nn.BatchNorm2d(num_input_features)), + self.add_module('relu1', nn.ReLU(inplace=True)), + self.add_module('conv1', nn.Conv2d(num_input_features, + bn_size * growth_rate, + kernel_size=1, stride=1, + bias=False)), + self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate)), + self.add_module('relu2', nn.ReLU(inplace=True)), + self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate, + kernel_size=3, stride=1, padding=1, + bias=False)), + self.drop_rate = drop_rate + + def forward(self, x): + new_features = super(_DenseLayer, self).forward(x) + if self.drop_rate > 0: + new_features = F.dropout(new_features, p=self.drop_rate, + training=self.training) + return torch.cat([x, new_features], 1) + + +class _DenseBlock(nn.Sequential): + def __init__(self, num_layers, num_input_features, bn_size, + growth_rate, drop_rate): + super(_DenseBlock, self).__init__() + for i in range(num_layers): + layer = _DenseLayer(num_input_features + i * growth_rate, + growth_rate, bn_size, drop_rate) + self.add_module('denselayer%d' % (i + 1), layer) + + +class _Transition(nn.Sequential): + def __init__(self, num_input_features, num_output_features): + super(_Transition, self).__init__() + self.add_module('norm', nn.BatchNorm2d(num_input_features)) + self.add_module('relu', nn.ReLU(inplace=True)) + self.add_module('conv', nn.Conv2d(num_input_features, num_output_features, + kernel_size=1, stride=1, bias=False)) + self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2)) + + +class DenseNet(nn.Module): + r"""Densenet-BC model class, based on + `"Densely Connected Convolutional Networks" `_ + + Args: + growth_rate (int) - how many filters to add each layer (`k` in paper) + block_config (list of 4 ints) - how many layers in each pooling block + num_init_features (int) - the number of filters to learn in the first convolution layer + bn_size (int) - multiplicative factor for number of bottle neck layers + (i.e. 
bn_size * k features in the bottleneck layer) + drop_rate (float) - dropout rate after each dense layer + num_classes (int) - number of classification classes + """ + + def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16), + num_init_features=64, bn_size=4, drop_rate=0, + num_classes=1000): + + super(DenseNet, self).__init__() + + # First convolution + self.features = nn.Sequential(OrderedDict([ + ('conv0', nn.Conv2d(4, num_init_features, kernel_size=7, stride=2, + padding=3, bias=False)), + ('norm0', nn.BatchNorm2d(num_init_features)), + ('relu0', nn.ReLU(inplace=True)), + ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)), + ])) + + # Each denseblock + num_features = num_init_features + for i, num_layers in enumerate(block_config): + block = _DenseBlock(num_layers=num_layers, + num_input_features=num_features, + bn_size=bn_size, + growth_rate=growth_rate, + drop_rate=drop_rate) + self.features.add_module('denseblock%d' % (i + 1), block) + num_features = num_features + num_layers * growth_rate + if i != len(block_config) - 1: + trans = _Transition(num_input_features=num_features, + num_output_features=num_features // 2) + self.features.add_module('transition%d' % (i + 1), trans) + num_features = num_features // 2 + + # Final batch norm + self.features.add_module('norm5', nn.BatchNorm2d(num_features)) + + # Linear layer + self.classifier = nn.Linear(num_features, num_classes) + + # Official init from torch repo. + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal(m.weight.data) + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + elif isinstance(m, nn.Linear): + m.bias.data.zero_() + + def forward(self, x): + features = self.features(x) + out = F.relu(features, inplace=True) + out = F.avg_pool2d(out, kernel_size=7, stride=1).view(features.size(0), + -1) + out = self.classifier(out) + return out + + +class SelimSef_SpaceNet4_ResNet34UNet(EncoderDecoder): + def __init__(self): + self.first_layer_stride_two = True + super().__init__(3, 4, 'resnet34') + + def get_encoder(self, encoder, layer): + if layer == 0: + return nn.Sequential( + encoder.conv1, + encoder.bn1, + encoder.relu) + elif layer == 1: + return nn.Sequential( + encoder.maxpool, + encoder.layer1) + elif layer == 2: + return encoder.layer2 + elif layer == 3: + return encoder.layer3 + elif layer == 4: + return encoder.layer4 + + +class SelimSef_SpaceNet4_DenseNet121Unet(EncoderDecoder): + def __init__(self): + self.first_layer_stride_two = True + super().__init__(3, 3, 'densenet121') + + def get_encoder(self, encoder, layer): + if layer == 0: + return nn.Sequential( + encoder.features.conv0, # conv + encoder.features.norm0, # bn + encoder.features.relu0 # relu + ) + elif layer == 1: + return nn.Sequential(encoder.features.pool0, + encoder.features.denseblock1) + elif layer == 2: + return nn.Sequential(encoder.features.transition1, + encoder.features.denseblock2) + elif layer == 3: + return nn.Sequential(encoder.features.transition2, + encoder.features.denseblock3) + elif layer == 4: + return nn.Sequential(encoder.features.transition3, + encoder.features.denseblock4, + encoder.features.norm5, + nn.ReLU()) + + +class SelimSef_SpaceNet4_DenseNet161Unet(EncoderDecoder): + def __init__(self): + self.first_layer_stride_two = True + super().__init__(3, 3, 'densenet161') + + def get_encoder(self, encoder, layer): + if layer == 0: + return nn.Sequential( + encoder.features.conv0, # conv + encoder.features.norm0, # bn + encoder.features.relu0 # relu + ) + 
elif layer == 1:
+            return nn.Sequential(encoder.features.pool0,
+                                 encoder.features.denseblock1)
+        elif layer == 2:
+            return nn.Sequential(encoder.features.transition1,
+                                 encoder.features.denseblock2)
+        elif layer == 3:
+            return nn.Sequential(encoder.features.transition2,
+                                 encoder.features.denseblock3)
+        elif layer == 4:
+            return nn.Sequential(encoder.features.transition3,
+                                 encoder.features.denseblock4,
+                                 encoder.features.norm5,
+                                 nn.ReLU())
+
+
+def _get_layers_params(layers):
+    return sum((list(l.parameters()) for l in layers), [])
+
+
+def get_slice(features, start, end):
+    if end == -1:
+        end = len(features)
+    return [features[i] for i in range(start, end)]
+
+
+SelimSef_SpaceNet4_DenseNet161UNet = SelimSef_SpaceNet4_DenseNet161Unet
+SelimSef_SpaceNet4_DenseNet121UNet = SelimSef_SpaceNet4_DenseNet121Unet
diff --git a/solaris/nets/zoo.py b/solaris/nets/zoo/xdxd_sn4.py
similarity index 94%
rename from solaris/nets/zoo.py
rename to solaris/nets/zoo/xdxd_sn4.py
index fd7f5a2f..96310b5f 100644
--- a/solaris/nets/zoo.py
+++ b/solaris/nets/zoo/xdxd_sn4.py
@@ -1,3 +1,4 @@
+import os
 import torch
 from torch import nn
 from torchvision.models import vgg16
@@ -78,7 +79,6 @@ def __init__(self, in_channels, middle_channels, out_channels):

     def forward(self, x):
         return self.block(x)
-
-model_dict = {'xdxd_spacenet4': {'weight_path': None,
-                                 'arch': XDXD_SpaceNet4_UNetVGG16}
-              }
+# the dictionary listing models compatible with solaris now lives in
+# solaris/nets/zoo/__init__.py. Alternatively, your own model can be used by
+# providing its path as the value for model_name in the config file.
diff --git a/solaris/tile/vector_tile.py b/solaris/tile/vector_tile.py
index c40dd75c..f61834de 100644
--- a/solaris/tile/vector_tile.py
+++ b/solaris/tile/vector_tile.py
@@ -5,6 +5,7 @@
 from ..utils.core import _check_gdf_load, _check_crs
 from ..utils.tile import save_empty_geojson
 from ..utils.geo import gdf_get_projection_unit, split_multi_geometries
+from ..utils.geo import reproject_geometry
 from tqdm import tqdm


@@ -20,8 +21,8 @@ class VectorTiler(object):
     """

     def __init__(self, dest_dir=None, dest_crs=None, output_format='GeoJSON',
-                 verbose=False):
-        if verbose:
+                 verbose=False, super_verbose=False):
+        if verbose or super_verbose:
             print('Preparing the tiler...')
         self.dest_dir = dest_dir
         if not os.path.isdir(self.dest_dir):
@@ -30,12 +31,14 @@ def __init__(self, dest_dir=None, dest_crs=None, output_format='GeoJSON',
         self.dest_crs = _check_crs(dest_crs)
         self.output_format = output_format
         self.verbose = verbose
-        if self.verbose:
+        self.super_verbose = super_verbose
+        if self.verbose or self.super_verbose:
             print('Initialization done.')

-    def tile(self, src, tile_bounds, geom_type='Polygon',
+    def tile(self, src, tile_bounds, tile_bounds_crs=None, geom_type='Polygon',
              split_multi_geoms=True, min_partial_perc=0.0,
-             dest_fname_base='geoms', obj_id_col=None):
+             dest_fname_base='geoms', obj_id_col=None,
+             output_ext='.geojson'):
         """Tile `src` into vector data tiles bounded by `tile_bounds`.

         Arguments
@@ -47,6 +50,11 @@ def tile(self, src, tile_bounds, geom_type='Polygon',
             A :class:`list` made up of ``[left, bottom, right, top]`` sublists
             (this can be extracted from
             :class:`solaris.tile.raster_tile.RasterTiler` after tiling imagery)
+        tile_bounds_crs : int, optional
+            The EPSG code for the CRS that the tile bounds are in. If not
+            provided, it's assumed that the CRS is the same as in `src`. This
+            argument must be provided if the bound coordinates and `src` are
+            not in the same CRS, otherwise tiling will not occur correctly.
         geom_type : str, optional (default: "Polygon")
             The type of geometries contained within `src`. Defaults to
             ``"Polygon"``, can also be ``"LineString"``.
@@ -69,30 +77,34 @@ def tile(self, src, tile_bounds, geom_type='Polygon',
             a unique identifier for each geometry (e.g. the ``"BuildingId"``
             column in many SpaceNet datasets.) See
             :func:`solaris.utils.geo.split_multi_geometries` for more.
+        output_ext : str, optional (default: ``'.geojson'``)
+            Extension of the output files; can be ``'.geojson'`` or ``'.json'``.
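+
+        Example (editor's sketch; assumes ``tiler`` is a configured
+        :class:`VectorTiler` and ``bounds_list`` was produced by a raster
+        tiler)::
+
+            tiler.tile('labels.geojson', bounds_list, tile_bounds_crs=4326)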
         """
-        tile_gen = self.tile_generator(src, tile_bounds, geom_type,
-                                       split_multi_geoms,
+        tile_gen = self.tile_generator(src, tile_bounds, tile_bounds_crs,
+                                       geom_type, split_multi_geoms,
                                        min_partial_perc,
                                        obj_id_col=obj_id_col)

         for tile_gdf, tb in tqdm(tile_gen):
             if self.proj_unit not in ['meter', 'metre']:
                 out_path = os.path.join(
-                    self.dest_dir, '{}_{}_{}.json'.format(dest_fname_base,
-                                                          np.round(tb[0], 3),
-                                                          np.round(tb[1], 3)))
+                    self.dest_dir, '{}_{}_{}{}'.format(dest_fname_base,
+                                                       np.round(tb[0], 3),
+                                                       np.round(tb[3], 3),
+                                                       output_ext))
             else:
                 out_path = os.path.join(
-                    self.dest_dir, '{}_{}_{}.json'.format(dest_fname_base,
-                                                          int(tb[0]),
-                                                          int(tb[1])))
+                    self.dest_dir, '{}_{}_{}{}'.format(dest_fname_base,
+                                                       int(tb[0]),
+                                                       int(tb[3]),
+                                                       output_ext))
             if len(tile_gdf) > 0:
                 tile_gdf.to_file(out_path, driver='GeoJSON')
             else:
                 save_empty_geojson(out_path, self.dest_crs)

-    def tile_generator(self, src, tile_bounds, geom_type='Polygon',
-                       split_multi_geoms=True, min_partial_perc=0.0,
-                       obj_id_col=None):
+    def tile_generator(self, src, tile_bounds, tile_bounds_crs=None,
+                       geom_type='Polygon', split_multi_geoms=True,
+                       min_partial_perc=0.0, obj_id_col=None):
         """Generate `src` vector data tiles bounded by `tile_bounds`.

         Arguments
@@ -104,6 +116,11 @@ def tile_generator(self, src, tile_bounds, geom_type='Polygon',
             A :class:`list` made up of ``[left, bottom, right, top]`` sublists
             (this can be extracted from
             :class:`solaris.tile.raster_tile.RasterTiler` after tiling imagery)
+        tile_bounds_crs : int, optional
+            The EPSG code for the CRS that the tile bounds are in. If not
+            provided, it's assumed that the CRS is the same as in `src`. This
+            argument must be provided if the bound coordinates and `src` are
+            not in the same CRS, otherwise tiling will not occur correctly.
         geom_type : str, optional (default: "Polygon")
             The type of geometries contained within `src`. Defaults to
             ``"Polygon"``, can also be ``"LineString"``.
@@ -132,14 +149,37 @@ def tile_generator(self, src, tile_bounds, geom_type='Polygon',
             boundaries contained by `tile_gdf`.
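+
+        Example (editor's sketch; yields one ``(tile_gdf, tb)`` pair per
+        bounding box in `tile_bounds`)::
+
+            for tile_gdf, tb in tiler.tile_generator('labels.geojson',
+                                                     bounds_list):
+                print(len(tile_gdf), 'geometries within', tb)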
""" self.src = _check_gdf_load(src) + if self.verbose: + print("Num tiles:", len(tile_bounds)) + self.src_crs = _check_crs(self.src.crs) + # check if the tile bounds and vector are in the same crs + if tile_bounds_crs is not None: + tile_bounds_crs = _check_crs(tile_bounds_crs) + else: + tile_bounds_crs = self.src_crs + if self.src_crs != tile_bounds_crs: + reproject_bounds = True # used to transform tb for clip_gdf() + else: + reproject_bounds = False + self.proj_unit = gdf_get_projection_unit( self.src).strip('"').strip("'") if getattr(self, 'dest_crs', None) is None: self.dest_crs = self.src_crs - for tb in tile_bounds: - tile_gdf = clip_gdf(self.src, tb, min_partial_perc, - geom_type) + for i, tb in enumerate(tile_bounds): + if self.super_verbose: + print("\n", i, "/", len(tile_bounds)) + if reproject_bounds: + tile_gdf = clip_gdf(self.src, + reproject_geometry(box(*tb), + tile_bounds_crs, + self.src_crs), + min_partial_perc, + geom_type, verbose=self.super_verbose) + else: + tile_gdf = clip_gdf(self.src, tb, min_partial_perc, geom_type, + verbose=self.super_verbose) if self.src_crs != self.dest_crs: tile_gdf = tile_gdf.to_crs(epsg=self.dest_crs) if split_multi_geoms: @@ -178,7 +218,7 @@ def search_gdf_polygon(gdf, tile_polygon): def clip_gdf(gdf, tile_bounds, min_partial_perc=0.0, geom_type="Polygon", - use_sindex=True): + use_sindex=True, verbose=False): """Clip GDF to a provided polygon. Clips objects within `gdf` to the region defined by @@ -215,6 +255,8 @@ def clip_gdf(gdf, tile_bounds, min_partial_perc=0.0, geom_type="Polygon", use_sindex : bool, optional Use the `gdf` sindex be used for searching. Improves efficiency but requires `libspatialindex `__ . + verbose : bool, optional + Switch to print relevant values. Returns ------- @@ -227,7 +269,7 @@ def clip_gdf(gdf, tile_bounds, min_partial_perc=0.0, geom_type="Polygon", tb = box(*tile_bounds) elif isinstance(tile_bounds, Polygon): tb = tile_bounds - if use_sindex: + if use_sindex and (geom_type == "Polygon"): gdf = search_gdf_polygon(gdf, tb) # if geom_type == "LineString": @@ -238,6 +280,7 @@ def clip_gdf(gdf, tile_bounds, min_partial_perc=0.0, geom_type="Polygon", gdf['origarea'] = 0 else: gdf['origarea'] = gdf.area + if 'origlen' in gdf.columns: pass else: @@ -256,9 +299,17 @@ def clip_gdf(gdf, tile_bounds, min_partial_perc=0.0, geom_type="Polygon", cut_gdf = cut_gdf.loc[cut_gdf['partialDec'] > min_partial_perc, :] cut_gdf['truncated'] = (cut_gdf['partialDec'] != 1.0).astype(int) else: - cut_gdf = cut_gdf[cut_gdf.geom_type != "GeometryCollection"] + # assume linestrings + # remove null + cut_gdf = cut_gdf[cut_gdf['geometry'].notnull()] cut_gdf['partialDec'] = 1 cut_gdf['truncated'] = 0 + # cut_gdf = cut_gdf[cut_gdf.geom_type != "GeometryCollection"] + if len(cut_gdf) > 0 and verbose: + print("clip_gdf() - gdf.iloc[0]:", gdf.iloc[0]) + print("clip_gdf() - tb:", tb) + print("clip_gdf() - gdf_cut:", cut_gdf) + # TODO: IMPLEMENT TRUNCATION MEASUREMENT FOR LINESTRINGS return cut_gdf diff --git a/solaris/utils/config.py b/solaris/utils/config.py index 8090470e..6db07395 100644 --- a/solaris/utils/config.py +++ b/solaris/utils/config.py @@ -27,6 +27,11 @@ def parse(path): raise ValueError('"inference_data_csv" must be provided if "infer".') if config['training']['lr'] is not None: config['training']['lr'] = float(config['training']['lr']) + # TODO: IMPLEMENT UPDATING VALUES BASED ON EMPTY ELEMENTS HERE! 
+ if config['validation_augmentation'] is not None \ + and config['inference_augmentation'] is None: + config['inference_augmentation'] = config['validation_augmentation'] + return config diff --git a/solaris/utils/geo.py b/solaris/utils/geo.py index 18b4744f..a09e55aa 100644 --- a/solaris/utils/geo.py +++ b/solaris/utils/geo.py @@ -15,6 +15,7 @@ from shapely.wkt import loads from shapely.geometry import Point, Polygon, LineString from shapely.geometry import MultiLineString, MultiPolygon, mapping, shape +from shapely.geometry.collection import GeometryCollection from shapely.ops import cascaded_union from fiona.transform import transform import osr @@ -462,6 +463,8 @@ def geometries_internal_intersection(polygons): # first, filter down to the ones that have _some_ intersection with others intersect_lists = intersect_lists[ intersect_lists.apply(lambda x: len(x) > 1)] + if len(intersect_lists) == 0: # if there are no real intersections + return GeometryCollection() # same result as failed union below # the below is a royal pain to follow. what it does is create a dataframe # with two columns: 'gs_idx' and 'intersectors'. 'gs_idx' corresponds to # a polygon's original index in gs, and 'intersectors' gives a list of diff --git a/solaris/vector/mask.py b/solaris/vector/mask.py index 813de68b..7f7d8c38 100644 --- a/solaris/vector/mask.py +++ b/solaris/vector/mask.py @@ -395,23 +395,33 @@ def contact_mask(df, contact_spacing=10, meters=False, out_file=None, buffered_geoms = buffered_geoms[geom_col] # create a single multipolygon that covers all of the intersections intersect_poly = geometries_internal_intersection(buffered_geoms) - # create a small df containing the intersections to make a footprint from - df_for_footprint = pd.DataFrame({'shape_name': ['overlap'], - 'geometry': [intersect_poly]}) - # use `footprint_mask` to create the overlap mask - contact_msk = footprint_mask( - df_for_footprint, reference_im=reference_im, geom_col='geometry', - do_transform=do_transform, affine_obj=affine_obj, shape=shape, - out_type=out_type, burn_value=burn_value - ) - footprint_msk = footprint_mask( - df, reference_im=reference_im, geom_col=geom_col, - do_transform=do_transform, affine_obj=affine_obj, shape=shape, - out_type=out_type, burn_value=burn_value - ) - contact_msk[footprint_msk > 0] = 0 - contact_msk = contact_msk > 0 - output_arr = contact_msk.astype('uint8')*burn_value + + # handle case where there's no intersection + if intersect_poly.is_empty: + output_arr = np.zeros(shape=shape, dtype='uint8') + + else: + # create a df containing the intersections to make footprints from + df_for_footprint = pd.DataFrame({'shape_name': ['overlap'], + 'geometry': [intersect_poly]}) + # catch bowties + df_for_footprint['geometry'] = df_for_footprint['geometry'].apply( + lambda x: x.buffer(0) + ) + # use `footprint_mask` to create the overlap mask + contact_msk = footprint_mask( + df_for_footprint, reference_im=reference_im, geom_col='geometry', + do_transform=do_transform, affine_obj=affine_obj, shape=shape, + out_type=out_type, burn_value=burn_value + ) + footprint_msk = footprint_mask( + df, reference_im=reference_im, geom_col=geom_col, + do_transform=do_transform, affine_obj=affine_obj, shape=shape, + out_type=out_type, burn_value=burn_value + ) + contact_msk[footprint_msk > 0] = 0 + contact_msk = contact_msk > 0 + output_arr = contact_msk.astype('uint8')*burn_value if out_file: meta = reference_im.meta.copy() @@ -646,18 +656,78 @@ def buffer_df_geoms(df, buffer, meters=False, reference_im=None, return 
buffered_df


-def mask_to_poly_geojson(mask_arr, reference_im=None, output_path=None,
-                         output_type='csv', min_area=40, bg_value=0,
-                         do_transform=None, simplify=False,
+def preds_to_binary(pred_arr, channel_scaling=None, bg_threshold=0):
+    """Convert a set of predictions from a neural net to a binary mask.
+
+    Arguments
+    ---------
+    pred_arr : :class:`numpy.ndarray`
+        A set of predictions generated by a neural net (generally in ``float``
+        dtype). This can be a 2D array or a 3D array, in which case it will
+        be converted to a 2D mask output with optional channel scaling (see
+        the `channel_scaling` argument). If a filename is provided instead of
+        an array, the image will be loaded using scikit-image.
+    channel_scaling : `list`-like of `float`s, optional
+        If `pred_arr` is a 3D array, this argument defines how each channel
+        will be combined to generate a binary output. channel_scaling should
+        be a `list`-like of length equal to the number of channels in
+        `pred_arr`. The following operation will be performed to convert the
+        multi-channel prediction to a 2D output::
+
+            sum(pred_arr[channel]*channel_scaling[channel])
+
+        If not provided, no scaling will be performed and channels will be
+        summed.
+
+    bg_threshold : `int` or `float`, optional
+        The cutoff to set to distinguish between background and foreground
+        pixels in the final binary mask. Binarization takes place *after*
+        channel scaling and summation (if applicable). Defaults to 0.
+
+    Returns
+    -------
+    mask_arr : :class:`numpy.ndarray`
+        A 2D boolean ``numpy`` array with ``True`` for foreground pixels and
+        ``False`` for background.
+    """
+    pred_arr = _check_skimage_im_load(pred_arr).copy()
+
+    if len(pred_arr.shape) == 3:
+        if pred_arr.shape[0] < pred_arr.shape[-1]:
+            pred_arr = np.moveaxis(pred_arr, 0, -1)
+        if channel_scaling is None:  # if scale values weren't provided
+            channel_scaling = np.ones(shape=(pred_arr.shape[-1]),
+                                      dtype='float')
+        pred_arr = np.sum(pred_arr*np.array(channel_scaling), axis=-1)
+
+    mask_arr = (pred_arr > bg_threshold).astype('uint8')
+
+    return mask_arr*255
+
+
+def mask_to_poly_geojson(pred_arr, channel_scaling=None, reference_im=None,
+                         output_path=None, output_type='csv', min_area=40,
+                         bg_threshold=0, do_transform=None, simplify=False,
                          tolerance=0.5, **kwargs):
     """Get polygons from an image mask.

     Arguments
     ---------
-    mask_arr : :class:`numpy.ndarray` of ints
-        A 2D array of integers. Multi-channel masks are not supported, and must
-        be simplified before passing to this function. Can also pass an image
-        file path here.
+    pred_arr : :class:`numpy.ndarray`
+        A 2D or 3D array of predictions. If 3D, `channel_scaling` defines how
+        the channels are flattened to a single 2D mask before polygonization
+        (see :func:`preds_to_binary`). Can also pass an image file path here.
+    channel_scaling : :class:`list`-like, optional
+        If `pred_arr` is a 3D array, this argument defines how each channel
+        will be combined to generate a binary output. channel_scaling should
+        be a `list`-like of length equal to the number of channels in
+        `pred_arr`. The following operation will be performed to convert the
+        multi-channel prediction to a 2D output::
+
+            sum(pred_arr[channel]*channel_scaling[channel])
+
+        If not provided, no scaling will be performed and channels will be
+        summed.
     reference_im : str, optional
         The path to a reference geotiff to use for georeferencing the
         polygons in the mask. Required if saving to a GeoJSON (see the
         ``output_type``
@@ -672,15 +742,16 @@ def mask_to_poly_geojson(mask_arr, reference_im=None, output_path=None,
         The minimum area of a polygon to retain. Filtering is done AFTER
         any coordinate transformation, and therefore will be in destination
         units.
- bg_value : int, optional - The value in ``mask_arr`` that denotes background (non-object). + bg_threshold : int, optional + The cutoff in ``mask_arr`` that denotes background (non-object). Defaults to ``0``. simplify : bool, optional If ``True``, will use the Douglas-Peucker algorithm to simplify edges, saving memory and processing time later. Defaults to ``False``. tolerance : float, optional The tolerance value to use for simplification with the Douglas-Peucker - algorithm. Defaults to 0.5. Only has an effect if ``simplify=True``. + algorithm. Defaults to ``0.5``. Only has an effect if + ``simplify=True``. Returns ------- @@ -688,7 +759,9 @@ def mask_to_poly_geojson(mask_arr, reference_im=None, output_path=None, A GeoDataFrame of polygons. """ - mask_arr = _check_skimage_im_load(mask_arr) + + mask_arr = preds_to_binary(pred_arr, channel_scaling, bg_threshold) + if do_transform and reference_im is None: raise ValueError( 'Coordinate transformation requires a reference image.') @@ -702,7 +775,7 @@ def mask_to_poly_geojson(mask_arr, reference_im=None, output_path=None, transform = Affine(1, 0, 0, 0, 1, 0) # identity transform crs = None - mask = mask_arr != bg_value + mask = mask_arr > bg_threshold mask = mask.astype('uint8') polygon_generator = features.shapes(mask_arr, diff --git a/tests/test_raster/test_image.py b/tests/test_raster/test_image.py index ade0ccb2..370d318e 100644 --- a/tests/test_raster/test_image.py +++ b/tests/test_raster/test_image.py @@ -38,9 +38,9 @@ def test_stitch_InferenceTiler_output(self): restored_im = stitch_images(tiles, idx_refs=tile_inds, out_width=900, out_height=900) expected_result = sol.utils.io.imread( - os.path.join(data_dir, 'sample_geotiff.tif'), make_8bit=True) + os.path.join(data_dir, 'sample_geotiff.tif')) - assert np.array_equal(restored_im, expected_result) + assert np.array_equal(restored_im[:, :, 0], expected_result) def test_stitch_firstval(self): inf_tiler = sol.nets.datagen.InferenceTiler('keras', diff --git a/tests/test_vector/test_mask.py b/tests/test_vector/test_mask.py index 920a306d..35fe8c8b 100644 --- a/tests/test_vector/test_mask.py +++ b/tests/test_vector/test_mask.py @@ -1,14 +1,11 @@ import os import numpy as np -import pandas as pd import geopandas as gpd -from affine import Affine -from shapely.geometry import Polygon import skimage -import rasterio from solaris.data import data_dir from solaris.vector.mask import footprint_mask, boundary_mask, \ - contact_mask, df_to_px_mask, mask_to_poly_geojson, road_mask + contact_mask, df_to_px_mask, mask_to_poly_geojson, road_mask, \ + preds_to_binary class TestFootprintMask(object): @@ -237,6 +234,27 @@ def test_mask_to_gdf_geoxform_simplified(self): 'gdf_from_mask_2.geojson')) assert truth_gdf[['geometry', 'value']].equals(gdf) + def test_flatten_multichannel_mask(self): + anarr = np.array([[[0, 0, 0, 1], + [0, 0, 1, 0], + [0, 0, 0, 1], + [0, 0, 0, 0]], + [[1, 1, 0, 0], + [1, 1, 1, 0], + [0, 0, 0, 0], + [0, 0, 0, 1]], + [[1, 0, 0, 1], + [0, 1, 0, 1], + [0, 1, 1, 0], + [0, 0, 0, 0]]], dtype='float') + scaling_vector = [0.25, 1., 2.] + result = preds_to_binary(anarr, scaling_vector, bg_threshold=0.5) + assert np.array_equal(result, + np.array([[255, 255, 0, 255], + [255, 255, 255, 255], + [0, 255, 255, 0], + [0, 0, 0, 255]], dtype='uint8')) + class TestRoadMask(object): """Test(s) for solaris.vector.mask.road_mask."""
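+
+    # Editor's sketch (not part of the test suite): end to end, a multichannel
+    # prediction like `anarr` above can be vectorized directly, e.g.:
+    #
+    #     gdf = mask_to_poly_geojson(anarr, channel_scaling=[0.25, 1., 2.],
+    #                                bg_threshold=0.5, min_area=0)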