From 45a51d90094e637d4290284bfafee5971bc7f982 Mon Sep 17 00:00:00 2001
From: Ethan Tang
Date: Mon, 10 Jul 2017 17:10:45 -0700
Subject: [PATCH] Preparing Tensorflow branch for upstream merge
---
.gitignore | 12 +
.travis.yml | 8 +-
README.md | 4 +-
digits-lint | 4 +-
digits/config/tensorflow.py | 2 +-
.../view/rawData/header_template.html | 7 +
digits/model/forms.py | 8 +-
.../model/images/classification/test_views.py | 15 ++
digits/model/views.py | 10 +-
.../standard-networks/tensorflow/alexnet.py | 7 +-
.../tensorflow/alexnet_slim.py | 7 +-
.../tensorflow/binary_segmentation.py | 23 --
.../standard-networks/tensorflow/googlenet.py | 201 +++++++++++++++
digits/standard-networks/tensorflow/lenet.py | 7 +-
.../tensorflow/lenet_slim.py | 7 +-
.../standard-networks/tensorflow/rnn_mnist.py | 53 ----
.../standard-networks/tensorflow/siamese.py | 38 ---
.../tensorflow/siamese_simple.py | 38 ---
digits/standard-networks/tensorflow/vgg16.py | 2 +-
.../custom_network_explanation.html | 7 +
.../models/images/classification/new.html | 26 ++
.../generic/custom_network_explanation.html | 7 +
.../models/images/generic/large_graph.html | 39 +++
.../templates/models/images/generic/new.html | 26 ++
digits/tools/tensorflow/gan_grid.py | 28 +-
digits/tools/tensorflow/gandisplay.py | 70 ++---
digits/tools/tensorflow/main.py | 4 +-
digits/tools/tensorflow/model.py | 75 +++---
digits/tools/tensorflow/tf_data.py | 12 +-
digits/tools/tensorflow/utils.py | 8 +-
docs/BuildDigits.md | 4 +
docs/BuildTensorflow.md | 51 +++-
docs/BuildTorch.md | 2 +-
docs/DevelopmentSetup.md | 36 +++
docs/GettingStartedTensorflow.md | 244 ++++++++++++++++++
docs/GettingStartedTorch.md | 3 -
docs/ModelStore.md | 16 ++
docs/images/Select_TensorFlow.png | Bin 0 -> 36045 bytes
docs/images/TensorBoard.png | Bin 0 -> 56199 bytes
docs/images/job-dir.png | Bin 0 -> 29935 bytes
docs/images/visualize-btn.png | Bin 0 -> 46115 bytes
docs/images/visualize_button.png | Bin 0 -> 15731 bytes
examples/autoencoder/README.md | 47 +++-
.../autoencoder/autoencoder-TF.py | 0
examples/binary-segmentation/README.md | 12 +-
.../binary_segmentation-TF.py | 22 ++
.../segmentation-model.lua | 2 +-
examples/fine-tuning/README.md | 6 +
examples/fine-tuning/lenet-fine-tune-tf.py | 84 ++++++
examples/fine-tuning/lenet-fine-tune.lua | 8 +-
examples/gan/README.md | 4 +-
examples/gan/gan_embeddings.py | 4 +-
examples/gan/network-celebA-encoder.py | 45 ++--
examples/gan/network-celebA.py | 58 ++---
examples/gan/network-mnist-encoder.py | 33 ++-
examples/gan/network-mnist.py | 44 ++--
examples/question-answering/memn2n.py | 14 +-
examples/regression/README.md | 41 ++-
examples/regression/regression_mnist-TF.py | 33 +++
examples/siamese/README.md | 4 +
examples/siamese/siamese-TF.py | 40 +++
packaging/deb/build.sh | 2 +
packaging/deb/templates/control | 4 +-
plugins/view/gan/digitsViewPluginGan/forms.py | 7 +-
plugins/view/gan/digitsViewPluginGan/view.py | 17 +-
requirements.txt | 2 +-
scripts/travis/bust-cache.sh | 1 -
scripts/travis/install-caffe.sh | 1 -
scripts/travis/install-openblas.sh | 28 --
scripts/travis/install-tensorflow.sh | 3 +-
scripts/travis/install-torch.sh | 1 -
scripts/travis/ppa-upload.sh | 2 +
scripts/travis/pypi-upload.sh | 2 +
73 files changed, 1216 insertions(+), 466 deletions(-)
create mode 100644 digits/extensions/view/rawData/header_template.html
delete mode 100644 digits/standard-networks/tensorflow/binary_segmentation.py
create mode 100644 digits/standard-networks/tensorflow/googlenet.py
delete mode 100644 digits/standard-networks/tensorflow/rnn_mnist.py
delete mode 100644 digits/standard-networks/tensorflow/siamese.py
delete mode 100644 digits/standard-networks/tensorflow/siamese_simple.py
create mode 100644 digits/templates/models/images/generic/large_graph.html
create mode 100644 docs/DevelopmentSetup.md
create mode 100644 docs/GettingStartedTensorflow.md
create mode 100644 docs/images/Select_TensorFlow.png
create mode 100644 docs/images/TensorBoard.png
create mode 100644 docs/images/job-dir.png
create mode 100644 docs/images/visualize-btn.png
create mode 100644 docs/images/visualize_button.png
rename digits/standard-networks/tensorflow/autoencoder.py => examples/autoencoder/autoencoder-TF.py (100%)
create mode 100644 examples/binary-segmentation/binary_segmentation-TF.py
create mode 100644 examples/fine-tuning/lenet-fine-tune-tf.py
create mode 100644 examples/regression/regression_mnist-TF.py
create mode 100644 examples/siamese/siamese-TF.py
delete mode 100755 scripts/travis/install-openblas.sh
diff --git a/.gitignore b/.gitignore
index ed945921c..3aa64db37 100644
--- a/.gitignore
+++ b/.gitignore
@@ -17,4 +17,16 @@ TAGS
/build/
/dist/
*.egg-info/
+
+#Intellij files
+.idea/
+
+#vscode
+.vscode/
+
+#.project
+.project
/.project
+
+#.tb
+.tb/
\ No newline at end of file
diff --git a/.travis.yml b/.travis.yml
index 8b4a505ee..898c3c3e6 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -11,6 +11,8 @@ env:
- OPENBLAS_ROOT=~/openblas
- CAFFE_ROOT=~/caffe
- TORCH_ROOT=~/torch
+ - OMP_NUM_THREADS=1
+ - OPENBLAS_MAIN_FREE=1
- secure: "WSqrE+PQm76DdoRLRGKTK6fRWfXZjIb0BWCZm3IgHgFO7OE6fcK2tBnpDNNw4XQjmo27FFWlEhxN32g18P84n5PvErHaH65IuS9Nv6FkLlPXZlVqGNxbPmEA4oTkD/6Y6kZyZWZtLh2+/1ijuzQAPnIy/4BEuL8pdO+PsoJ9hYM="
matrix:
- DIGITS_TEST_FRAMEWORK=caffe CAFFE_FORK=NVIDIA
@@ -73,7 +75,6 @@ matrix:
cache:
apt: true
directories:
- - $OPENBLAS_ROOT
- $CAFFE_ROOT
- $TORCH_ROOT
@@ -95,6 +96,7 @@ addons:
- libhdf5-serial-dev
- libleveldb-dev
- liblmdb-dev
+ - libopenblas-dev
- libopencv-dev
- libprotobuf-dev
- libsnappy-dev
@@ -128,15 +130,13 @@ before_install:
install:
- mkdir -p ~/.config/matplotlib
- echo "backend:agg" > ~/.config/matplotlib/matplotlibrc
- - ./scripts/travis/install-openblas.sh $OPENBLAS_ROOT
- ./scripts/travis/install-caffe.sh $CAFFE_ROOT
- if [ "$DIGITS_TEST_FRAMEWORK" == "torch" ]; then travis_wait ./scripts/travis/install-torch.sh $TORCH_ROOT; else unset TORCH_ROOT; fi
+ - pip install -r ./requirements.txt --force-reinstall
- if [ "$DIGITS_TEST_FRAMEWORK" == "tensorflow" ]; then travis_wait ./scripts/travis/install-tensorflow.sh; fi
- - pip install -r ./requirements.txt
- pip install -r ./requirements_test.txt
- pip install -e .
- if [ "$WITH_PLUGINS" != "false" ]; then find ./plugins/*/* -maxdepth 0 -type d | xargs -n1 pip install -e; fi
script:
- ./digits-test -v
-
diff --git a/README.md b/README.md
index 434ccae73..7798f094e 100644
--- a/README.md
+++ b/README.md
@@ -4,6 +4,8 @@
DIGITS (the **D**eep Learning **G**PU **T**raining **S**ystem) is a webapp for training deep learning models.
+The currently supported frameworks are Caffe 1, Torch, and TensorFlow.
+
# Installation
| Installation method | Supported platform[s] | Available versions | Instructions |
@@ -18,6 +20,7 @@ Once you have installed DIGITS, visit [docs/GettingStarted.md](docs/GettingStart
Then, take a look at some of the other documentation at [docs/](docs/) and [examples/](examples/):
+* [Getting started with TensorFlow](docs/GettingStartedTensorflow.md)
* [Getting started with Torch](docs/GettingStartedTorch.md)
* [Fine-tune a pretrained model](examples/fine-tuning/README.md)
* [Train an autoencoder network](examples/autoencoder/README.md)
@@ -44,4 +47,3 @@ Then, take a look at some of the other documentation at [docs/](docs/) and [exam
* Please let us know by [filing a new issue](https://github.com/NVIDIA/DIGITS/issues/new)
* Bonus points if you want to contribute by opening a [pull request](https://help.github.com/articles/using-pull-requests/)!
* You will need to send a signed copy of the [Contributor License Agreement](CLA) to digits@nvidia.com before your change can be accepted.
-
diff --git a/digits-lint b/digits-lint
index b11b50021..fc5f3e892 100755
--- a/digits-lint
+++ b/digits-lint
@@ -5,9 +5,9 @@ set -e
echo "=== Checking for Python lint ..."
if which flake8 >/dev/null 2>&1; then
- python2 `which flake8` .
+ python2 `which flake8` --exclude ./examples,./digits/standard-networks/tensorflow,./digits/jobs .
else
- python2 -m flake8 .
+ python2 -m flake8 --exclude ./examples,./digits/standard-networks/tensorflow,./digits/jobs .
fi
echo "=== Checking for JavaScript lint ..."
diff --git a/digits/config/tensorflow.py b/digits/config/tensorflow.py
index bbf6f46b7..10a4465fe 100644
--- a/digits/config/tensorflow.py
+++ b/digits/config/tensorflow.py
@@ -34,7 +34,7 @@ def test_tf_import(python_exe):
if not tf_enabled:
print('Tensorflow support disabled.')
-# print('Failed importing Tensorflow with python executable "%s"\n%s' % (tf_python_exe, err))
+# print('Failed importing Tensorflow with python executable "%s"\n%s' % (tf_python_exe, err))
if tf_enabled:
option_list['tensorflow'] = {
diff --git a/digits/extensions/view/rawData/header_template.html b/digits/extensions/view/rawData/header_template.html
new file mode 100644
index 000000000..fcd137d9d
--- /dev/null
+++ b/digits/extensions/view/rawData/header_template.html
@@ -0,0 +1,7 @@
+{# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. #}
+
+{% from "helper.html" import print_flashes %}
+{% from "helper.html" import print_errors %}
+{% from "helper.html" import mark_errors %}
+
+{{data}}
diff --git a/digits/model/forms.py b/digits/model/forms.py
index 34b3ed2ce..dcb3e1a11 100644
--- a/digits/model/forms.py
+++ b/digits/model/forms.py
@@ -313,10 +313,10 @@ def validate_lr_multistep_values(form, field):
def validate_custom_network_snapshot(form, field):
pass
- #if form.method.data == 'custom':
- # for filename in field.data.strip().split(os.path.pathsep):
- # if filename and not os.path.exists(filename):
- # raise validators.ValidationError('File "%s" does not exist' % filename)
+# if form.method.data == 'custom':
+# for filename in field.data.strip().split(os.path.pathsep):
+# if filename and not os.path.exists(filename):
+# raise validators.ValidationError('File "%s" does not exist' % filename)
# Select one of several GPUs
select_gpu = wtforms.RadioField(
diff --git a/digits/model/images/classification/test_views.py b/digits/model/images/classification/test_views.py
index 5f58ce73a..6673ef7cc 100644
--- a/digits/model/images/classification/test_views.py
+++ b/digits/model/images/classification/test_views.py
@@ -174,6 +174,7 @@ class BaseViewsTestWithDataset(BaseViewsTest,
AUG_HSV_H = None
AUG_HSV_S = None
AUG_HSV_V = None
+ OPTIMIZER = None
@classmethod
def setUpClass(cls):
@@ -242,6 +243,8 @@ def create_model(cls, network=None, **kwargs):
data['aug_hsv_s'] = cls.AUG_HSV_S
if cls.AUG_HSV_V is not None:
data['aug_hsv_v'] = cls.AUG_HSV_V
+ if cls.OPTIMIZER is not None:
+ data['solver_type'] = cls.OPTIMIZER
data.update(kwargs)
@@ -1158,6 +1161,10 @@ class TestCaffeLeNet(BaseTestCreated, test_utils.CaffeMixin):
).read()
+class TestCaffeLeNetADAMOptimizer(TestCaffeLeNet):
+ OPTIMIZER = 'ADAM'
+
+
class TestTorchCreatedCropInForm(BaseTestCreatedCropInForm, test_utils.TorchMixin):
pass
@@ -1196,6 +1203,10 @@ def test_inference_while_training(self):
raise unittest.SkipTest('Torch CPU inference on CuDNN-trained model not supported')
+class TestTorchLeNetADAMOptimizer(TestTorchLeNet):
+ OPTIMIZER = 'ADAM'
+
+
class TestTorchLeNetHdf5Shuffle(TestTorchLeNet):
BACKEND = 'hdf5'
SHUFFLE = True
@@ -1366,6 +1377,10 @@ class TestTensorflowLeNet(BaseTestCreated, test_utils.TensorflowMixin):
'lenet.py')).read()
+class TestTensorflowLeNetADAMOptimizer(TestTensorflowLeNet):
+ OPTIMIZER = 'ADAM'
+
+
class TestTensorflowLeNetSlim(BaseTestCreated, test_utils.TensorflowMixin):
IMAGE_WIDTH = 28
IMAGE_HEIGHT = 28
diff --git a/digits/model/views.py b/digits/model/views.py
index fa8aedfc6..28df10231 100644
--- a/digits/model/views.py
+++ b/digits/model/views.py
@@ -295,13 +295,13 @@ def download(job_id, extension):
mode = 'gz'
elif extension in ['tar.bz2']:
mode = 'bz2'
- with tarfile.open(fileobj=b, mode='w:%s' % mode) as tf:
+ with tarfile.open(fileobj=b, mode='w:%s' % mode) as tar:
for path, name in job.download_files(epoch):
- tf.add(path, arcname=name)
- tf_info = tarfile.TarInfo("info.json")
- tf_info.size = len(info_io.getvalue())
+ tar.add(path, arcname=name)
+ tar_info = tarfile.TarInfo("info.json")
+ tar_info.size = len(info_io.getvalue())
info_io.seek(0)
- tf.addfile(tf_info, info_io)
+ tar.addfile(tar_info, info_io)
elif extension in ['zip']:
with zipfile.ZipFile(b, 'w') as zf:
for path, name in job.download_files(epoch):
diff --git a/digits/standard-networks/tensorflow/alexnet.py b/digits/standard-networks/tensorflow/alexnet.py
index 93dc48f50..361ead2d2 100644
--- a/digits/standard-networks/tensorflow/alexnet.py
+++ b/digits/standard-networks/tensorflow/alexnet.py
@@ -87,7 +87,8 @@ def conv_net(x, weights, biases):
@model_property
def loss(self):
- loss = digits.classification_loss(self.inference, self.y)
- accuracy = digits.classification_accuracy(self.inference, self.y)
- self.summaries.append(tf.scalar_summary(accuracy.op.name, accuracy))
+ model = self.inference
+ loss = digits.classification_loss(model, self.y)
+ accuracy = digits.classification_accuracy(model, self.y)
+ self.summaries.append(tf.summary.scalar(accuracy.op.name, accuracy))
return loss
diff --git a/digits/standard-networks/tensorflow/alexnet_slim.py b/digits/standard-networks/tensorflow/alexnet_slim.py
index d51ec5e36..5655b4a1b 100644
--- a/digits/standard-networks/tensorflow/alexnet_slim.py
+++ b/digits/standard-networks/tensorflow/alexnet_slim.py
@@ -24,7 +24,8 @@ def inference(self):
@model_property
def loss(self):
- loss = digits.classification_loss(self.inference, self.y)
- accuracy = digits.classification_accuracy(self.inference, self.y)
- self.summaries.append(tf.scalar_summary(accuracy.op.name, accuracy))
+ model = self.inference
+ loss = digits.classification_loss(model, self.y)
+ accuracy = digits.classification_accuracy(model, self.y)
+ self.summaries.append(tf.summary.scalar(accuracy.op.name, accuracy))
return loss
diff --git a/digits/standard-networks/tensorflow/binary_segmentation.py b/digits/standard-networks/tensorflow/binary_segmentation.py
deleted file mode 100644
index 8b9ff6298..000000000
--- a/digits/standard-networks/tensorflow/binary_segmentation.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# Tensorflow Triangle binary segmentation model using TensorFlow-Slim
-
-def build_model(params):
- _x = tf.reshape(params['x'], shape=[-1, params['input_shape'][0], params['input_shape'][1], params['input_shape'][2]])
- with slim.arg_scope([slim.conv2d, slim.conv2d_transpose, slim.fully_connected],
- weights_initializer=tf.contrib.layers.xavier_initializer(),
- weights_regularizer=slim.l2_regularizer(0.0005) ):
-
- model = slim.conv2d(_x, 32, [3, 3], padding='SAME', scope='conv1') # 1*H*W -> 32*H*W
- model = slim.conv2d(model, 1024, [16, 16], padding='VALID', scope='conv2', stride=16) # 32*H*W -> 1024*H/16*W/16
- model = slim.conv2d_transpose(model, params['input_shape'][2], [16, 16], stride=16, padding='VALID', activation_fn=None, scope='deconv_1')
-
- def loss(y):
- y = tf.reshape(y, shape=[-1, params['input_shape'][0], params['input_shape'][1], params['input_shape'][2]])
- # For a fancy tensorboard summary: put the input, label and model side by side (sbs) for a fancy image summary:
- # sbs = tf.concat(2, [_x, y, model])
- # tf.image_summary(sbs.op.name, sbs, max_images=3, collections=[digits.GraphKeys.SUMMARIES_TRAIN])
- return digits.mse_loss(model, y)
-
- return {
- 'model' : model,
- 'loss' : loss
- }
diff --git a/digits/standard-networks/tensorflow/googlenet.py b/digits/standard-networks/tensorflow/googlenet.py
new file mode 100644
index 000000000..9c8997c50
--- /dev/null
+++ b/digits/standard-networks/tensorflow/googlenet.py
@@ -0,0 +1,201 @@
+# The auxiliary branches, as specified in the original GoogLeNet v1 model, do exist in this
+# implementation of GoogLeNet but are not used. To use them, be sure to check self.is_training
+# so that they are only used during training.
+
+class UserModel(Tower):
+
+ all_inception_settings = {
+ '3a': [[64], [96, 128], [16, 32], [32]],
+ '3b': [[128], [128, 192], [32, 96], [64]],
+ '4a': [[192], [96, 208], [16, 48], [64]],
+ '4b': [[160], [112, 224], [24, 64], [64]],
+ '4c': [[128], [128, 256], [24, 64], [64]],
+ '4d': [[112], [144, 288], [32, 64], [64]],
+ '4e': [[256], [160, 320], [32, 128], [128]],
+ '5a': [[256], [160, 320], [32, 128], [128]],
+ '5b': [[384], [192, 384], [48, 128], [128]]
+ }
+
+ @model_property
+ def inference(self):
+ # rescale to proper form, really we expect 224 x 224 x 1 in HWC form
+ model = tf.reshape(self.x, shape=[-1, self.input_shape[0], self.input_shape[1], self.input_shape[2]])
+
+ conv_7x7_2s_weight, conv_7x7_2s_bias = self.create_conv_vars([7, 7, self.input_shape[2], 64], 'conv_7x7_2s')
+ model = self.conv_layer_with_relu(model, conv_7x7_2s_weight, conv_7x7_2s_bias, 2)
+
+ model = self.max_pool(model, 3, 2)
+
+ model = tf.nn.local_response_normalization(model)
+
+ conv_1x1_vs_weight, conv_1x1_vs_bias = self.create_conv_vars([1, 1, 64, 64], 'conv_1x1_vs')
+ model = self.conv_layer_with_relu(model, conv_1x1_vs_weight, conv_1x1_vs_bias, 1, 'VALID')
+
+ conv_3x3_1s_weight, conv_3x3_1s_bias = self.create_conv_vars([3, 3, 64, 192], 'conv_3x3_1s')
+ model = self.conv_layer_with_relu(model, conv_3x3_1s_weight, conv_3x3_1s_bias, 1)
+
+ model = tf.nn.local_response_normalization(model)
+
+ model = self.max_pool(model, 3, 2)
+
+ inception_settings_3a = InceptionSettings(192, UserModel.all_inception_settings['3a'])
+ model = self.inception(model, inception_settings_3a, '3a')
+
+ inception_settings_3b = InceptionSettings(256, UserModel.all_inception_settings['3b'])
+ model = self.inception(model, inception_settings_3b, '3b')
+
+ model = self.max_pool(model, 3, 2)
+
+ inception_settings_4a = InceptionSettings(480, UserModel.all_inception_settings['4a'])
+ model = self.inception(model, inception_settings_4a, '4a')
+
+ # first auxiliary branch for making training faster
+ aux_branch_1 = self.auxiliary_classifier(model, 512, "aux_1")
+
+ inception_settings_4b = InceptionSettings(512, UserModel.all_inception_settings['4b'])
+ model = self.inception(model, inception_settings_4b, '4b')
+
+ inception_settings_4c = InceptionSettings(512, UserModel.all_inception_settings['4c'])
+ model = self.inception(model, inception_settings_4c, '4c')
+
+ inception_settings_4d = InceptionSettings(512, UserModel.all_inception_settings['4d'])
+ model = self.inception(model, inception_settings_4d, '4d')
+
+ # second auxiliary branch for making training faster
+ aux_branch_2 = self.auxiliary_classifier(model, 528, "aux_2")
+
+ inception_settings_4e = InceptionSettings(528, UserModel.all_inception_settings['4e'])
+ model = self.inception(model, inception_settings_4e, '4e')
+
+ model = self.max_pool(model, 3, 2)
+
+ inception_settings_5a = InceptionSettings(832, UserModel.all_inception_settings['5a'])
+ model = self.inception(model, inception_settings_5a, '5a')
+
+ inception_settings_5b = InceptionSettings(832, UserModel.all_inception_settings['5b'])
+ model = self.inception(model, inception_settings_5b, '5b')
+
+ model = self.avg_pool(model, 7, 1, 'VALID')
+
+ fc_weight, fc_bias = self.create_fc_vars([1024, self.nclasses], 'fc')
+ model = self.fully_connect(model, fc_weight, fc_bias)
+
+ return model
+
+ @model_property
+ def loss(self):
+ model = self.inference
+ loss = digits.classification_loss(model, self.y)
+ accuracy = digits.classification_accuracy(model, self.y)
+ self.summaries.append(tf.summary.scalar(accuracy.op.name, accuracy))
+ return loss
+
+
+ def inception(self, model, inception_setting, layer_name):
+ weights, biases = self.create_inception_variables(inception_setting, layer_name)
+ conv_1x1 = self.conv_layer_with_relu(model, weights['conv_1x1_1'], biases['conv_1x1_1'], 1)
+
+ conv_3x3 = self.conv_layer_with_relu(model, weights['conv_1x1_2'], biases['conv_1x1_2'], 1)
+ conv_3x3 = self.conv_layer_with_relu(conv_3x3, weights['conv_3x3'], biases['conv_3x3'], 1)
+
+ conv_5x5 = self.conv_layer_with_relu(model, weights['conv_1x1_3'], biases['conv_1x1_3'], 1)
+ conv_5x5 = self.conv_layer_with_relu(conv_5x5, weights['conv_5x5'], biases['conv_5x5'], 1)
+
+ conv_pool = self.max_pool(model, 3, 1)
+ conv_pool = self.conv_layer_with_relu(conv_pool, weights['conv_pool'], biases['conv_pool'], 1)
+
+ final_model = tf.concat([conv_1x1, conv_3x3, conv_5x5, conv_pool], 3)
+
+ return final_model
+
+ def create_inception_variables(self, inception_setting, layer_name):
+ model_dim = inception_setting.model_dim
+ conv_1x1_1_weight, conv_1x1_1_bias = self.create_conv_vars([1, 1, model_dim, inception_setting.conv_1x1_1_layers], layer_name + '-conv_1x1_1')
+ conv_1x1_2_weight, conv_1x1_2_bias = self.create_conv_vars([1, 1, model_dim, inception_setting.conv_1x1_2_layers], layer_name + '-conv_1x1_2')
+ conv_1x1_3_weight, conv_1x1_3_bias = self.create_conv_vars([1, 1, model_dim, inception_setting.conv_1x1_3_layers], layer_name + '-conv_1x1_3')
+ conv_3x3_weight, conv_3x3_bias = self.create_conv_vars([3, 3, inception_setting.conv_1x1_2_layers, inception_setting.conv_3x3_layers], layer_name + '-conv_3x3')
+ conv_5x5_weight, conv_5x5_bias = self.create_conv_vars([5, 5, inception_setting.conv_1x1_3_layers, inception_setting.conv_5x5_layers], layer_name + '-conv_5x5')
+ conv_pool_weight, conv_pool_bias = self.create_conv_vars([1, 1, model_dim, inception_setting.conv_pool_layers], layer_name + '-conv_pool')
+
+ weights = {
+ 'conv_1x1_1': conv_1x1_1_weight,
+ 'conv_1x1_2': conv_1x1_2_weight,
+ 'conv_1x1_3': conv_1x1_3_weight,
+ 'conv_3x3': conv_3x3_weight,
+ 'conv_5x5': conv_5x5_weight,
+ 'conv_pool': conv_pool_weight
+ }
+
+ biases = {
+ 'conv_1x1_1': conv_1x1_1_bias,
+ 'conv_1x1_2': conv_1x1_2_bias,
+ 'conv_1x1_3': conv_1x1_3_bias,
+ 'conv_3x3': conv_3x3_bias,
+ 'conv_5x5': conv_5x5_bias,
+ 'conv_pool': conv_pool_bias
+ }
+
+ return weights, biases
+
+ def auxiliary_classifier(self, model, input_size, name):
+ aux_classifier = self.avg_pool(model, 5, 3, 'VALID')
+
+ conv_weight, conv_bias = self.create_conv_vars([1, 1, input_size, input_size], name + '-conv_1x1')
+ aux_classifier = self.conv_layer_with_relu(aux_classifier, conv_weight, conv_bias, 1)
+
+ fc_weight, fc_bias = self.create_fc_vars([4*4*input_size, self.nclasses], name + '-fc')
+ aux_classifier = self.fully_connect(aux_classifier, fc_weight, fc_bias)
+
+ aux_classifier = tf.nn.dropout(aux_classifier, 0.7)
+
+ return aux_classifier
+
+ def conv_layer_with_relu(self, model, weights, biases, stride_size, padding='SAME'):
+ new_model = tf.nn.conv2d(model, weights, strides=[1, stride_size, stride_size, 1], padding=padding)
+ new_model = tf.nn.bias_add(new_model, biases)
+ new_model = tf.nn.relu(new_model)
+ return new_model
+
+ def max_pool(self, model, kernal_size, stride_size, padding='SAME'):
+ new_model = tf.nn.max_pool(model, ksize=[1, kernal_size, kernal_size, 1], strides=[1, stride_size, stride_size, 1], padding=padding)
+ return new_model
+
+ def avg_pool(self, model, kernal_size, stride_size, padding='SAME'):
+ new_model = tf.nn.avg_pool(model, ksize=[1, kernal_size, kernal_size, 1], strides=[1, stride_size, stride_size, 1], padding=padding)
+ return new_model
+
+ def fully_connect(self, model, weights, biases):
+ fc_model = tf.reshape(model, [-1, weights.get_shape().as_list()[0]])
+ fc_model = tf.matmul(fc_model, weights)
+ fc_model = tf.add(fc_model, biases)
+ fc_model = tf.nn.relu(fc_model)
+ return fc_model
+
+ def create_conv_vars(self, size, name):
+ weight = self.create_weight(size, name + '_W')
+ bias = self.create_bias(size[3], name + '_b')
+ return weight, bias
+
+ def create_fc_vars(self, size, name):
+ weight = self.create_weight(size, name + '_W')
+ bias = self.create_bias(size[1], name + '_b')
+ return weight, bias
+
+ def create_weight(self, size, name):
+ weight = tf.get_variable(name, size, initializer=tf.contrib.layers.xavier_initializer())
+ return weight
+
+ def create_bias(self, size, name):
+ bias = tf.get_variable(name, [size], initializer=tf.constant_initializer(0.2))
+ return bias
+
+class InceptionSettings():
+
+ def __init__(self, model_dim, inception_settings):
+ self.model_dim = model_dim
+ self.conv_1x1_1_layers = inception_settings[0][0]
+ self.conv_1x1_2_layers = inception_settings[1][0]
+ self.conv_1x1_3_layers = inception_settings[2][0]
+ self.conv_3x3_layers = inception_settings[1][1]
+ self.conv_5x5_layers = inception_settings[2][1]
+ self.conv_pool_layers = inception_settings[3][0]
\ No newline at end of file
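For reference, one way the auxiliary branches described in the header comment could be consumed is to fold their discounted losses into the main loss during training only. A hedged sketch, not part of the patch itself: it assumes `inference()` stores `aux_branch_1`/`aux_branch_2` on `self`, that `self.is_training` is a plain Python bool, and it borrows the 0.3 discount weight from the original GoogLeNet paper.

```python
@model_property
def loss(self):
    model = self.inference
    loss = digits.classification_loss(model, self.y)
    if self.is_training:
        # Discounted auxiliary losses, wired in only while training.
        loss = loss + 0.3 * digits.classification_loss(self.aux_branch_1, self.y)
        loss = loss + 0.3 * digits.classification_loss(self.aux_branch_2, self.y)
    accuracy = digits.classification_accuracy(model, self.y)
    self.summaries.append(tf.summary.scalar(accuracy.op.name, accuracy))
    return loss
```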
diff --git a/digits/standard-networks/tensorflow/lenet.py b/digits/standard-networks/tensorflow/lenet.py
index 677c905d6..f52a78205 100644
--- a/digits/standard-networks/tensorflow/lenet.py
+++ b/digits/standard-networks/tensorflow/lenet.py
@@ -67,7 +67,8 @@ def conv_net(x, weights, biases):
@model_property
def loss(self):
- loss = digits.classification_loss(self.inference, self.y)
- accuracy = digits.classification_accuracy(self.inference, self.y)
- self.summaries.append(tf.scalar_summary(accuracy.op.name, accuracy))
+ model = self.inference
+ loss = digits.classification_loss(model, self.y)
+ accuracy = digits.classification_accuracy(model, self.y)
+ self.summaries.append(tf.summary.scalar(accuracy.op.name, accuracy))
return loss
\ No newline at end of file
diff --git a/digits/standard-networks/tensorflow/lenet_slim.py b/digits/standard-networks/tensorflow/lenet_slim.py
index 58f64f020..8d3a71b77 100644
--- a/digits/standard-networks/tensorflow/lenet_slim.py
+++ b/digits/standard-networks/tensorflow/lenet_slim.py
@@ -20,7 +20,8 @@ def inference(self):
@model_property
def loss(self):
- loss = digits.classification_loss(self.inference, self.y)
- accuracy = digits.classification_accuracy(self.inference, self.y)
- self.summaries.append(tf.scalar_summary(accuracy.op.name, accuracy))
+ model = self.inference
+ loss = digits.classification_loss(model, self.y)
+ accuracy = digits.classification_accuracy(model, self.y)
+ self.summaries.append(tf.summary.scalar(accuracy.op.name, accuracy))
return loss
diff --git a/digits/standard-networks/tensorflow/rnn_mnist.py b/digits/standard-networks/tensorflow/rnn_mnist.py
deleted file mode 100644
index 73aa0bdba..000000000
--- a/digits/standard-networks/tensorflow/rnn_mnist.py
+++ /dev/null
@@ -1,53 +0,0 @@
-from tensorflow.python.ops import rnn, rnn_cell
-
-def build_model(params):
- n_hidden = 28
- n_classes = params['nclasses']
- n_steps = params['input_shape'][0]
- n_input = params['input_shape'][1]
-
- x = tf.reshape(params['x'], shape=[-1, params['input_shape'][0], params['input_shape'][1], params['input_shape'][2]])
-
- tf.image_summary(x.op.name, x, max_images=1, collections=[digits.GraphKeys.SUMMARIES_TRAIN])
- x = tf.squeeze(x)
-
-
-
- # Define weights
- weights = {
- 'w1': tf.get_variable('w1', [n_hidden, params['nclasses']])
- }
- biases = {
- 'b1': tf.get_variable('b1', [params['nclasses']])
- }
-
- # Prepare data shape to match `rnn` function requirements
- # Current data input shape: (batch_size, n_steps, n_input)
- # Required shape: 'n_steps' tensors list of shape (batch_size, n_input)
-
- # Permuting batch_size and n_steps
- x = tf.transpose(x, [1, 0, 2])
- # Reshaping to (n_steps*batch_size, n_input)
- x = tf.reshape(x, [-1, n_input])
- # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
- x = tf.split(0, n_steps, x)
-
- # Define a lstm cell with tensorflow
- lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
-
- # Get lstm cell output
- outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)
-
- # Linear activation, using rnn inner loop last output
- model = tf.matmul(outputs[-1], weights['w1']) + biases['b1']
-
- def loss(y):
- loss = digits.classification_loss(model, y)
- accuracy = digits.classification_accuracy(model, y)
- tf.scalar_summary(accuracy.op.name, accuracy, collections=[digits.GraphKeys.SUMMARIES_TRAIN])
- return loss
-
- return {
- 'model' : model,
- 'loss' : loss
- }
diff --git a/digits/standard-networks/tensorflow/siamese.py b/digits/standard-networks/tensorflow/siamese.py
deleted file mode 100644
index 2b0fe588e..000000000
--- a/digits/standard-networks/tensorflow/siamese.py
+++ /dev/null
@@ -1,38 +0,0 @@
-def build_model(params):
- _x = tf.reshape(params['x'], shape=[-1, params['input_shape'][0], params['input_shape'][1], params['input_shape'][2]])
- #tf.image_summary(_x.op.name, _x, max_images=10, collections=[digits.GraphKeys.SUMMARIES_TRAIN])
-
- # Split out the color channels
- _, model_g, model_b = tf.split(3, 3, _x, name='split_channels')
- #tf.image_summary(model_g.op.name, model_g, max_images=10, collections=[digits.GraphKeys.SUMMARIES_TRAIN])
- #tf.image_summary(model_b.op.name, model_b, max_images=10, collections=[digits.GraphKeys.SUMMARIES_TRAIN])
-
- with slim.arg_scope([slim.conv2d, slim.fully_connected],
- weights_initializer=tf.contrib.layers.xavier_initializer(),
- weights_regularizer=slim.l2_regularizer(0.0005) ):
- with tf.variable_scope("siamese") as scope:
- def make_tower(net):
- net = slim.conv2d(net, 20, [5, 5], padding='VALID', scope='conv1')
- net = slim.max_pool2d(net, [2, 2], padding='VALID', scope='pool1')
- net = slim.conv2d(net, 50, [5, 5], padding='VALID', scope='conv2')
- net = slim.max_pool2d(net, [2, 2], padding='VALID', scope='pool2')
- net = slim.flatten(net)
- net = slim.fully_connected(net, 500, scope='fc1')
- net = slim.fully_connected(net, 2, activation_fn=None, scope='fc2')
- return net
-
- model_g = make_tower(model_g)
- model_g = tf.reshape(model_g, shape=[-1, 2])
- scope.reuse_variables()
- model_b = make_tower(model_b)
- model_b = tf.reshape(model_b, shape=[-1, 2])
-
- def loss(y):
- y = tf.reshape(y, shape=[-1])
- y = tf.to_float(y)
- return digits.constrastive_loss(model_g, model_b, y)
-
- return {
- 'model' : model_g,
- 'loss' : loss,
- }
diff --git a/digits/standard-networks/tensorflow/siamese_simple.py b/digits/standard-networks/tensorflow/siamese_simple.py
deleted file mode 100644
index bd0cd8d15..000000000
--- a/digits/standard-networks/tensorflow/siamese_simple.py
+++ /dev/null
@@ -1,38 +0,0 @@
-def build_model(params):
- _x = tf.reshape(params['x'], shape=[-1, params['input_shape'][0], params['input_shape'][1], params['input_shape'][2]])
- #tf.image_summary(_x.op.name, _x, max_images=10, collections=[digits.GraphKeys.SUMMARIES_TRAIN])
-
- # Split out the channel in two
- lhs, rhs = tf.split(0, 2, _x, name='split_batch')
-
- with slim.arg_scope([slim.conv2d, slim.fully_connected],
- weights_initializer=tf.contrib.layers.xavier_initializer(),
- weights_regularizer=slim.l2_regularizer(0.0005) ):
- with tf.variable_scope("siamese") as scope:
- def make_tower(net):
- net = slim.conv2d(net, 20, [5, 5], padding='VALID', scope='conv1')
- net = slim.max_pool2d(net, [2, 2], padding='VALID', scope='pool1')
- net = slim.conv2d(net, 50, [5, 5], padding='VALID', scope='conv2')
- net = slim.max_pool2d(net, [2, 2], padding='VALID', scope='pool2')
- net = slim.flatten(net)
- net = slim.fully_connected(net, 500, scope='fc1')
- net = slim.fully_connected(net, 2, activation_fn=None, scope='fc2')
- return net
-
- lhs = make_tower(lhs)
- lhs = tf.reshape(lhs, shape=[-1, 2])
- scope.reuse_variables()
- rhs = make_tower(rhs)
- rhs = tf.reshape(rhs, shape=[-1, 2])
-
- def loss(y):
- y = tf.reshape(y, shape=[-1])
- ylhs, yrhs = tf.split(0, 2, y, name='split_label')
- y = tf.equal(ylhs, yrhs)
- y = tf.to_float(y)
- return digits.constrastive_loss(lhs, rhs, y)
-
- return {
- 'model' : tf.concat(0, [lhs, rhs]),
- 'loss' : loss,
- }
diff --git a/digits/standard-networks/tensorflow/vgg16.py b/digits/standard-networks/tensorflow/vgg16.py
index 3ce89edcc..6efd55bde 100644
--- a/digits/standard-networks/tensorflow/vgg16.py
+++ b/digits/standard-networks/tensorflow/vgg16.py
@@ -28,5 +28,5 @@ def inference(self):
def loss(self):
loss = digits.classification_loss(self.inference, self.y)
accuracy = digits.classification_accuracy(self.inference, self.y)
- self.summaries.append(tf.scalar_summary(accuracy.op.name, accuracy))
+ self.summaries.append(tf.summary.scalar(accuracy.op.name, accuracy))
return loss
diff --git a/digits/templates/models/images/classification/custom_network_explanation.html b/digits/templates/models/images/classification/custom_network_explanation.html
index c18686a89..90aaaf5bb 100644
--- a/digits/templates/models/images/classification/custom_network_explanation.html
+++ b/digits/templates/models/images/classification/custom_network_explanation.html
@@ -105,3 +105,10 @@ Specifying a custom Torch network
Use this field to enter a Torch network using Lua code.
Refer to the documentation for more information.
+
+Specifying a custom TensorFlow network
+
+
+ Use this field to enter a TensorFlow network using Python.
+ Refer to the documentation for more information.
+
\ No newline at end of file
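For context, the network entered in this field follows the same `UserModel` pattern as the standard networks elsewhere in this patch. A minimal sketch; the `Tower` base class, `@model_property`, and the `digits.*`/`self.*` helpers are assumed to be provided by DIGITS' TensorFlow tooling, as in lenet.py above.

```python
class UserModel(Tower):
    """Minimal linear classifier in the UserModel style used by the standard networks."""

    @model_property
    def inference(self):
        # Flatten the input and apply a single fully-connected layer.
        dim = self.input_shape[0] * self.input_shape[1] * self.input_shape[2]
        x = tf.reshape(self.x, shape=[-1, dim])
        w = tf.get_variable('w', [dim, self.nclasses],
                            initializer=tf.contrib.layers.xavier_initializer())
        b = tf.get_variable('b', [self.nclasses],
                            initializer=tf.constant_initializer(0.0))
        return tf.matmul(x, w) + b

    @model_property
    def loss(self):
        model = self.inference
        loss = digits.classification_loss(model, self.y)
        accuracy = digits.classification_accuracy(model, self.y)
        self.summaries.append(tf.summary.scalar(accuracy.op.name, accuracy))
        return loss
```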
diff --git a/digits/templates/models/images/classification/new.html b/digits/templates/models/images/classification/new.html
index e93e61f05..2def43d80 100644
--- a/digits/templates/models/images/classification/new.html
+++ b/digits/templates/models/images/classification/new.html
@@ -624,9 +624,35 @@ Data Augmentations
return the_data;
}
+//copied from https://stackoverflow.com/questions/4565112/javascript-how-to-find-out-if-the-user-browser-is-chrome/13348618#13348618
+function isChrome() {
+ var isChromium = window.chrome,
+ winNav = window.navigator,
+ vendorName = winNav.vendor,
+ isOpera = winNav.userAgent.indexOf("OPR") > -1,
+ isIEedge = winNav.userAgent.indexOf("Edge") > -1,
+ isIOSChrome = winNav.userAgent.match("CriOS");
+
+ if(isIOSChrome){
+ return true;
+ } else if(isChromium !== null && isChromium !== undefined && vendorName === "Google Inc." && isOpera == false && isIEedge == false) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
function visualizeNetwork() {
var framework = $('#framework').val();
var is_tf = framework.includes("ensorflow") // @TODO(tzaman) - dirty
+
+ if (is_tf) {
+ if (!isChrome()) {
+ bootbox.alert({title: "Visualization Error", message: "TensorFlow network visualization is only available for Google Chrome"});
+ return;
+ }
+ }
+
var num_sel_gpus = 0
var sel_gpus = $("#select_gpus").val()
if (sel_gpus) {
diff --git a/digits/templates/models/images/generic/custom_network_explanation.html b/digits/templates/models/images/generic/custom_network_explanation.html
index 7100a1724..93e01f00d 100644
--- a/digits/templates/models/images/generic/custom_network_explanation.html
+++ b/digits/templates/models/images/generic/custom_network_explanation.html
@@ -89,3 +89,10 @@ Specifying a custom Torch network
Use this field to enter a Torch network using Lua code.
Refer to the documentation for more information.
+
+Specifying a custom TensorFlow network
+
+
+ Use this field to enter a TensorFlow network using Python.
+ Refer to the documentation for more information.
+
\ No newline at end of file
diff --git a/digits/templates/models/images/generic/large_graph.html b/digits/templates/models/images/generic/large_graph.html
new file mode 100644
index 000000000..495043caf
--- /dev/null
+++ b/digits/templates/models/images/generic/large_graph.html
@@ -0,0 +1,39 @@
+{# Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved. #}
+
+{% extends "layout.html" %}
+
+{% block title %}
+{{job.name()}} - Large graph
+{% endblock %}
+
+{% block head %}
+
+{% endblock %}
+
+{% block nav %}
+{{job.job_type()}}
+{% endblock %}
+
+{% block content %}
+
+{% set task = job.train_task() %}
+
+
+
+
+ {% set combined_graph_data = job.train_task().combined_graph_data(cull=False) %}
+ {% if combined_graph_data %}
+
+
+ {% else %}
+
No data.
+ {% endif %}
+
+
+
+
+
+{% endblock %}
+
diff --git a/digits/templates/models/images/generic/new.html b/digits/templates/models/images/generic/new.html
index ba9a41405..ea682ff6c 100644
--- a/digits/templates/models/images/generic/new.html
+++ b/digits/templates/models/images/generic/new.html
@@ -593,9 +593,35 @@ Data Augmentations
return the_data;
}
+//copied from https://stackoverflow.com/questions/4565112/javascript-how-to-find-out-if-the-user-browser-is-chrome/13348618#13348618
+function isChrome() {
+ var isChromium = window.chrome,
+ winNav = window.navigator,
+ vendorName = winNav.vendor,
+ isOpera = winNav.userAgent.indexOf("OPR") > -1,
+ isIEedge = winNav.userAgent.indexOf("Edge") > -1,
+ isIOSChrome = winNav.userAgent.match("CriOS");
+
+ if(isIOSChrome){
+ return true;
+ } else if(isChromium !== null && isChromium !== undefined && vendorName === "Google Inc." && isOpera == false && isIEedge == false) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
function visualizeNetwork() {
var framework = $('#framework').val();
var is_tf = framework.includes("ensorflow") // @TODO(tzaman) - dirty
+
+ if (is_tf) {
+ if (!isChrome()) {
+ bootbox.alert({title: "Visualization Error", message: "TensorFlow network visualization is only available for Google Chrome"});
+ return;
+ }
+ }
+
var num_sel_gpus = 0
var sel_gpus = $("#select_gpus").val()
if (sel_gpus) {
diff --git a/digits/tools/tensorflow/gan_grid.py b/digits/tools/tensorflow/gan_grid.py
index ddbe37071..a93e6c35b 100644
--- a/digits/tools/tensorflow/gan_grid.py
+++ b/digits/tools/tensorflow/gan_grid.py
@@ -22,13 +22,11 @@
import datetime
import inspect
-import json
import logging
import math
import numpy as np
import os
import pickle
-import time
from six.moves import xrange # noqa
import tensorflow as tf
@@ -48,14 +46,11 @@
import tf_data
import gandisplay
-
-
# Constants
TF_INTRA_OP_THREADS = 0
TF_INTER_OP_THREADS = 0
MIN_LOGS_PER_TRAIN_EPOCH = 8 # torch default: 8
-
CELEBA_ALL_ATTRIBUTES = """
5_o_Clock_Shadow Arched_Eyebrows Attractive Bags_Under_Eyes Bald Bangs
Big_Lips Big_Nose Black_Hair Blond_Hair Blurry Brown_Hair Bushy_Eyebrows
@@ -67,7 +62,8 @@
""".split()
CELEBA_EDITABLE_ATTRIBUTES = [
- 'Bald', 'Black_Hair', 'Blond_Hair', 'Eyeglasses', 'Male', 'Mustache', 'Smiling', 'Young', 'Attractive', 'Pale_Skin', 'Big_Nose'
+ 'Bald', 'Black_Hair', 'Blond_Hair', 'Eyeglasses', 'Male', 'Mustache',
+ 'Smiling', 'Young', 'Attractive', 'Pale_Skin', 'Big_Nose'
]
CELEBA_EDITABLE_ATTRIBUTES_IDS = [CELEBA_ALL_ATTRIBUTES.index(attr) for attr in CELEBA_EDITABLE_ATTRIBUTES]
@@ -377,29 +373,18 @@ def Inference(sess, model):
with open(FLAGS.attributes_file, 'rb') as f:
attribute_zs = pickle.load(f)
- while not False: # model.queue_coord.should_stop():
-
+ while not False:
+ # model.queue_coord.should_stop():
attributes = app.GetAttributes()
-
z = np.zeros(100)
for idx, attr_scale in enumerate(attributes):
- z += (attr_scale / 25. ) * attribute_zs[CELEBA_EDITABLE_ATTRIBUTES_IDS[idx]]
+ z += (attr_scale / 25.) * attribute_zs[CELEBA_EDITABLE_ATTRIBUTES_IDS[idx]]
feed_dict = {model.time_placeholder: float(t),
model.attribute_placeholder: z}
preds = sess.run(fetches=inference_op, feed_dict=feed_dict)
- if FLAGS.visualize_inf:
- save_weight_visualization(weight_vars, activation_ops, w, a)
-
- # @TODO(tzaman): error on no output?
- #for i in range(len(keys)):
- # # for j in range(len(preds)):
- # # We're allowing multiple predictions per image here. DIGITS doesnt support that iirc
- # logging.info('Predictions for image ' + str(model.dataloader.get_key_index(keys[i])) +
- # ': ' + json.dumps(preds[i].tolist()))
- #logging.info('Predictions shape: %s' % str(preds.shape))
app.DisplayCell(preds)
t += 1e-5 * app.GetSpeed() * FLAGS.batch_size
@@ -445,7 +430,7 @@ def l2_norm(x):
return euclidean_norm
def dot_product(x, y):
- return tf.reduce_sum(tf.mul(x,y))
+ return tf.reduce_sum(tf.multiply(x, y))
def slerp(initial, final, progress):
omega = tf.acos(dot_product(initial / l2_norm(initial), final / l2_norm(final)))
@@ -480,6 +465,7 @@ def slerp(initial, final, progress):
return batch, time_placeholder, attribute_placeholder
+
def main(_):
# Always keep the cpu as default
diff --git a/digits/tools/tensorflow/gandisplay.py b/digits/tools/tensorflow/gandisplay.py
index 9f8d6d8df..3479ffa19 100644
--- a/digits/tools/tensorflow/gandisplay.py
+++ b/digits/tools/tensorflow/gandisplay.py
@@ -1,21 +1,18 @@
-import wxversion
-
-import wx
-import numpy as np
-import random
import time
-
+import numpy as np
+import wx
# This has been set up to optionally use the wx.BufferedDC if
# USE_BUFFERED_DC is True, it will be used. Otherwise, it uses the raw
# wx.Memory DC , etc.
-#USE_BUFFERED_DC = False
+# USE_BUFFERED_DC = False
USE_BUFFERED_DC = True
myEVT = wx.NewEventType()
DISPLAY_GRID_EVT = wx.PyEventBinder(myEVT, 1)
+
class MyEvent(wx.PyCommandEvent):
"""Event to signal that a count value is ready"""
def __init__(self, etype, eid, value=None):
@@ -30,6 +27,7 @@ def GetValue(self):
"""
return self._value
+
class BufferedWindow(wx.Window):
"""
@@ -62,11 +60,10 @@ def __init__(self, *args, **kwargs):
self.paint_count = 0
def Draw(self, dc):
- ## just here as a place holder.
- ## This method should be over-ridden when subclassed
+ # just here as a place holder.
+ # This method should be over-ridden when subclassed
pass
-
def OnPaint(self, event):
# All that is needed here is to draw the buffer to screen
if USE_BUFFERED_DC:
@@ -75,11 +72,11 @@ def OnPaint(self, event):
dc = wx.PaintDC(self)
dc.DrawBitmap(self._Buffer, 0, 0)
- def OnSize(self,event):
+ def OnSize(self, event):
# The Buffer init is done here, to make sure the buffer is always
# the same size as the Window
- #Size = self.GetClientSizeTuple()
- Size = self.ClientSize
+ # Size = self.GetClientSizeTuple()
+ Size = self.ClientSize
# Make new offscreen bitmap: this bitmap will always have the
# current drawing in it, so it can be used to save the image to
@@ -88,9 +85,9 @@ def OnSize(self,event):
self.UpdateDrawing()
def SaveToFile(self, FileName, FileType=wx.BITMAP_TYPE_PNG):
- ## This will save the contents of the buffer
- ## to the specified file. See the wxWindows docs for
- ## wx.Bitmap::SaveFile for the details
+ # This will save the contents of the buffer
+ # to the specified file. See the wxWindows docs for
+ # wx.Bitmap::SaveFile for the details
self._Buffer.SaveFile(FileName, FileType)
def UpdateDrawing(self):
@@ -106,21 +103,21 @@ def UpdateDrawing(self):
dc = wx.MemoryDC()
dc.SelectObject(self._Buffer)
self.Draw(dc)
- del dc # need to get rid of the MemoryDC before Update() is called.
+ del dc # need to get rid of the MemoryDC before Update() is called.
self.Refresh()
self.Update()
+
class DrawWindow(BufferedWindow):
def __init__(self, *args, **kwargs):
- ## Any data the Draw() function needs must be initialized before
- ## calling BufferedWindow.__init__, as it will call the Draw
- ## function.
+ # Any data the Draw() function needs must be initialized before
+ # calling BufferedWindow.__init__, as it will call the Draw function.
self.DrawData = {}
BufferedWindow.__init__(self, *args, **kwargs)
def Draw(self, dc):
- dc.SetBackground( wx.Brush("White") )
- dc.Clear() # make sure you clear the bitmap!
+ dc.SetBackground(wx.Brush("White"))
+ dc.Clear() # make sure you clear the bitmap!
# Here's the actual drawing code.
for key, data in self.DrawData.items():
@@ -131,22 +128,24 @@ def Draw(self, dc):
img_count = data.shape[0]
height = data.shape[1]
width = data.shape[2]
- channels = data.shape[3]
grid_size = int(np.sqrt(img_count))
size = (grid_size * width, grid_size * height)
- if True: # self.size != size:
+
+ if True: # self.size != size:
self.size = size
self.SetSize(size)
- image = wx.EmptyImage(width,height)
+ image = wx.EmptyImage(width, height)
+
for i in xrange(img_count):
x = width * (i // grid_size)
y = height * (i % grid_size)
s = data[i].tostring()
image.SetData(s)
- wxBitmap = image.ConvertToBitmap() #
+
+ wxBitmap = image.ConvertToBitmap()
dc.DrawBitmap(wxBitmap, x=x, y=y)
@@ -158,11 +157,11 @@ class TestFrame(wx.Frame):
def __init__(self, parent=None, grid_size=640, attributes=[]):
wx.Frame.__init__(self, parent,
- size = (grid_size + self.SLIDER_WIDTH + self.SLIDER_BORDER, grid_size + self.STATUS_HEIGHT),
+ size=(grid_size + self.SLIDER_WIDTH + self.SLIDER_BORDER, grid_size + self.STATUS_HEIGHT),
title="GAN Demo",
style=wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER)
- ## Set up the MenuBar
+ # Set up the MenuBar
MenuBar = wx.MenuBar()
file_menu = wx.Menu()
@@ -185,8 +184,10 @@ def __init__(self, parent=None, grid_size=640, attributes=[]):
# Sliders
vbox = wx.BoxSizer(wx.VERTICAL)
- self.speed_slider = wx.Slider(panel, -1, value=5, minValue=0, maxValue=10, pos=wx.DefaultPosition, size=(self.SLIDER_WIDTH, -1),
- style=wx.SL_AUTOTICKS | wx.SL_HORIZONTAL | wx.SL_LABELS)
+ self.speed_slider = wx.Slider(panel, -1, value=5, minValue=0, maxValue=10, pos=wx.DefaultPosition,
+ size=(self.SLIDER_WIDTH, -1),
+ style=wx.SL_AUTOTICKS | wx.SL_HORIZONTAL | wx.SL_LABELS)
+
slider_text = wx.StaticText(panel, label='Speed')
vbox.Add(slider_text, 0, wx.ALIGN_CENTRE)
vbox.Add(self.speed_slider, 0, wx.ALIGN_CENTRE)
@@ -194,8 +195,10 @@ def __init__(self, parent=None, grid_size=640, attributes=[]):
self.attribute_sliders = []
for attribute in attributes:
slider_text = wx.StaticText(panel, label=attribute)
- slider = wx.Slider(panel, -1, value=0, minValue=-100, maxValue=100, pos=wx.DefaultPosition, size=(self.SLIDER_WIDTH, -1),
- style=wx.SL_AUTOTICKS | wx.SL_HORIZONTAL | wx.SL_LABELS)
+ slider = wx.Slider(panel, -1, value=0, minValue=-100, maxValue=100, pos=wx.DefaultPosition,
+ size=(self.SLIDER_WIDTH, -1),
+ style=wx.SL_AUTOTICKS | wx.SL_HORIZONTAL | wx.SL_LABELS)
+
vbox.Add(slider_text, 0, wx.ALIGN_CENTRE)
vbox.Add(slider, 0, wx.ALIGN_CENTRE)
self.attribute_sliders.append(slider)
@@ -221,7 +224,7 @@ def __init__(self, parent=None, grid_size=640, attributes=[]):
self.Bind(DISPLAY_GRID_EVT, self.OnDisplayCell)
- def OnQuit(self,event):
+ def OnQuit(self, event):
self.Close(True)
def OnDisplayCell(self, evt):
@@ -236,6 +239,7 @@ def OnDisplayCell(self, evt):
self.last_fps_update = time.time()
self.last_frame_timestamp = time.time()
+
class DemoApp(wx.App):
def __init__(self, arg, grid_size, attributes):
diff --git a/digits/tools/tensorflow/main.py b/digits/tools/tensorflow/main.py
index 93f0a8e09..5175efec1 100644
--- a/digits/tools/tensorflow/main.py
+++ b/digits/tools/tensorflow/main.py
@@ -493,7 +493,7 @@ def main(_):
if FLAGS.validation_db:
with tf.name_scope(digits.STAGE_VAL) as stage_scope:
- val_model = Model(digits.STAGE_VAL, FLAGS.croplen, nclasses)
+ val_model = Model(digits.STAGE_VAL, FLAGS.croplen, nclasses, reuse_variable=True)
val_model.create_dataloader(FLAGS.validation_db)
val_model.dataloader.setup(FLAGS.validation_labels,
False,
@@ -544,7 +544,7 @@ def main(_):
load_snapshot(sess, FLAGS.weights, tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES))
# Tensorboard: Merge all the summaries and write them out
- writer = tf.train.SummaryWriter(os.path.join(FLAGS.summaries_dir, 'tb'), sess.graph)
+ writer = tf.summary.FileWriter(os.path.join(FLAGS.summaries_dir, 'tb'), sess.graph)
# If we are inferencing, only do that.
if FLAGS.inference_db:
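The writer change above is part of the TF 0.x to 1.x summary migration applied throughout this patch (`tf.scalar_summary` to `tf.summary.scalar`, `tf.merge_summary` to `tf.summary.merge`, `tf.train.SummaryWriter` to `tf.summary.FileWriter`). A minimal standalone sketch of the 1.x pipeline; the log path and the constant loss value are placeholders, not values taken from DIGITS.

```python
import tensorflow as tf

# Build one scalar summary and merge it, mirroring the DIGITS summary list.
loss = tf.constant(0.5, name='loss')
merged = tf.summary.merge([tf.summary.scalar(loss.op.name, loss)])

with tf.Session() as sess:
    # FileWriter replaces the deprecated tf.train.SummaryWriter.
    writer = tf.summary.FileWriter('/tmp/tb-example', sess.graph)
    writer.add_summary(sess.run(merged), global_step=0)
    writer.close()
```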
diff --git a/digits/tools/tensorflow/model.py b/digits/tools/tensorflow/model.py
index ed9ac3bb2..0dc0ba804 100644
--- a/digits/tools/tensorflow/model.py
+++ b/digits/tools/tensorflow/model.py
@@ -54,13 +54,14 @@ def average_gradients(tower_grads):
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
- grad = tf.concat(0, grads)
- grad = tf.reduce_mean(grad, 0)
+ grads_transformed = tf.concat(grads, 0)
+ grads_transformed = tf.reduce_mean(grads_transformed, 0)
+
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
- grad_and_var = (grad, v)
+ grad_and_var = (grads_transformed, v)
average_grads.append(grad_and_var)
return average_grads
@@ -69,7 +70,7 @@ class Model(object):
"""
@TODO(tzaman)
"""
- def __init__(self, stage, croplen, nclasses, optimization=None, momentum=None):
+ def __init__(self, stage, croplen, nclasses, optimization=None, momentum=None, reuse_variable=False):
self.stage = stage
self.croplen = croplen
self.nclasses = nclasses
@@ -82,12 +83,13 @@ def __init__(self, stage, croplen, nclasses, optimization=None, momentum=None):
self.summaries = []
self.towers = []
self._train = None
+ self._reuse = reuse_variable
# Touch to initialize
- if optimization:
- self.learning_rate
- self.global_step
- self.optimizer
+ # if optimization:
+ # self.learning_rate
+ # self.global_step
+ # self.optimizer
def create_dataloader(self, db_path):
self.dataloader = tf_data.LoaderFactory.set_source(db_path, is_inference=(self.stage == digits.STAGE_INF))
@@ -126,9 +128,9 @@ def create_model(self, obj_UserModel, stage_scope, batch_x=None):
else:
with tf.name_scope('parallelize'):
# Split them up
- batch_x_split = tf.split(0, len(available_devices), batch_x, name='split_batch')
+ batch_x_split = tf.split(batch_x, len(available_devices), 0, name='split_batch')
if self.stage != digits.STAGE_INF: # Has no labels
- batch_y_split = tf.split(0, len(available_devices), batch_y, name='split_batch')
+ batch_y_split = tf.split(batch_y, len(available_devices), 0, name='split_batch')
# Run the user model through the build_model function that should be filled in
grad_towers = []
@@ -146,35 +148,35 @@ def create_model(self, obj_UserModel, stage_scope, batch_x=None):
x=batch_x_split[dev_i],
y=None)
- with tf.variable_scope(digits.GraphKeys.MODEL, reuse=dev_i > 0):
+ with tf.variable_scope(digits.GraphKeys.MODEL, reuse=dev_i > 0 or self._reuse):
tower_model.inference # touch to initialize
- if self.stage == digits.STAGE_INF:
- # For inferencing we will only use the inference part of the graph
- continue
+ # Reuse the variables in this scope for the next tower/device
+ tf.get_variable_scope().reuse_variables()
- with tf.name_scope(digits.GraphKeys.LOSS):
- for loss in self.get_tower_losses(tower_model):
- tf.add_to_collection(digits.GraphKeys.LOSSES, loss['loss'])
+ if self.stage == digits.STAGE_INF:
+ # For inferencing we will only use the inference part of the graph
+ continue
- # Assemble all made within this scope so far. The user can add custom
- # losses to the digits.GraphKeys.LOSSES collection
- losses = tf.get_collection(digits.GraphKeys.LOSSES, scope=scope_tower)
- losses += ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES, scope=None)
- tower_loss = tf.add_n(losses, name='loss')
+ with tf.name_scope(digits.GraphKeys.LOSS):
+ for loss in self.get_tower_losses(tower_model):
+ tf.add_to_collection(digits.GraphKeys.LOSSES, loss['loss'])
- self.summaries.append(tf.scalar_summary(tower_loss.op.name, tower_loss))
+ # Assemble all made within this scope so far. The user can add custom
+ # losses to the digits.GraphKeys.LOSSES collection
+ losses = tf.get_collection(digits.GraphKeys.LOSSES, scope=scope_tower)
+ losses += ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES, scope=None)
+ tower_loss = tf.add_n(losses, name='loss')
- # Reuse the variables in this scope for the next tower/device
- tf.get_variable_scope().reuse_variables()
+ self.summaries.append(tf.summary.scalar(tower_loss.op.name, tower_loss))
- if self.stage == digits.STAGE_TRAIN:
- grad_tower_losses = []
- for loss in self.get_tower_losses(tower_model):
- grad_tower_loss = self.optimizer.compute_gradients(loss['loss'], loss['vars'])
- grad_tower_loss = tower_model.gradientUpdate(grad_tower_loss)
- grad_tower_losses.append(grad_tower_loss)
- grad_towers.append(grad_tower_losses)
+ if self.stage == digits.STAGE_TRAIN:
+ grad_tower_losses = []
+ for loss in self.get_tower_losses(tower_model):
+ grad_tower_loss = self.optimizer.compute_gradients(loss['loss'], loss['vars'])
+ grad_tower_loss = tower_model.gradientUpdate(grad_tower_loss)
+ grad_tower_losses.append(grad_tower_loss)
+ grad_towers.append(grad_tower_losses)
# Assemble and average the gradients from all towers
if self.stage == digits.STAGE_TRAIN:
@@ -235,7 +237,7 @@ def summary(self):
if not len(self.summaries):
logging.error("No summaries defined. Please define at least one summary.")
exit(-1)
- return tf.merge_summary(self.summaries)
+ return tf.summary.merge(self.summaries)
@model_property
def global_step(self):
@@ -250,7 +252,7 @@ def learning_rate(self):
# define it entirely in tf ops, instead of a placeholder and feeding.
with tf.device('/cpu:0'):
lr = tf.placeholder(tf.float32, shape=[], name='learning_rate')
- self.summaries.append(tf.scalar_summary('lr', lr))
+ self.summaries.append(tf.summary.scalar('lr', lr))
return lr
@model_property
@@ -283,9 +285,10 @@ def get_tower_losses(self, tower):
"""
Return list of losses
- If user-defined model returns only one loss then this is encapsulated into the expected list of
- dicts structure
+ If user-defined model returns only one loss then this is encapsulated into
+ the expected list of dicts structure
"""
+
if isinstance(tower.loss, list):
return tower.loss
else:
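The docstring above describes the list-of-dicts structure that per-tower losses are normalized into; the `'loss'` and `'vars'` keys are the ones consumed by `optimizer.compute_gradients` earlier in this diff. A small illustrative helper with a hypothetical name:

```python
def normalize_tower_losses(tower_loss, default_vars):
    """Hypothetical helper mirroring get_tower_losses(): wrap a single loss
    tensor in the list-of-dicts structure that the gradient code expects."""
    if isinstance(tower_loss, list):
        # The user model already supplied [{'loss': ..., 'vars': ...}, ...].
        return tower_loss
    return [{'loss': tower_loss, 'vars': default_vars}]
```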
diff --git a/digits/tools/tensorflow/tf_data.py b/digits/tools/tensorflow/tf_data.py
index a2071bd62..f0775ba58 100644
--- a/digits/tools/tensorflow/tf_data.py
+++ b/digits/tools/tensorflow/tf_data.py
@@ -320,9 +320,8 @@ def create_input_pipeline(self):
with tf.name_scope('mean_subtraction'):
single_data = self.mean_loader.subtract_mean_op(single_data)
if LOG_MEAN_FILE:
- self.summaries.append(tf.image_summary('mean_image',
- tf.expand_dims(self.mean_loader.tf_mean_image, 0),
- max_images=1))
+ expanded_data = tf.expand_dims(self.mean_loader.tf_mean_image, 0)
+ self.summaries.append(tf.summary.image('mean_image', expanded_data, max_outputs=1))
# (Random) Cropping
if self.croplen:
@@ -397,8 +396,7 @@ def create_input_pipeline(self):
shapes=[[0], self.get_shape(), []], # Only makes sense is dynamic_pad=False #@TODO(tzaman) - FIXME
min_after_dequeue=5*self.batch_size,
allow_smaller_final_batch=True, # Happens if total%batch_size!=0
- name='batcher'
- )
+ name='batcher')
else:
batch = tf.train.batch(
single_batch,
@@ -409,8 +407,7 @@ def create_input_pipeline(self):
num_threads=NUM_THREADS_DATA_LOADER if not self.is_inference else 1,
capacity=max_queue_capacity, # Max amount that will be loaded and queued
allow_smaller_final_batch=True, # Happens if total%batch_size!=0
- name='batcher',
- )
+ name='batcher')
self.batch_k = batch[0] # Key
self.batch_x = batch[1] # Input
@@ -849,6 +846,7 @@ def __del__(self):
for db in self.h5dbs:
db.close()
+
class GanGridLoader(LoaderFactory):
"""
The GanGridLoader generates data for a GAN.
diff --git a/digits/tools/tensorflow/utils.py b/digits/tools/tensorflow/utils.py
index 495a755d0..3e927e12f 100644
--- a/digits/tools/tensorflow/utils.py
+++ b/digits/tools/tensorflow/utils.py
@@ -43,7 +43,7 @@ def classification_loss(pred, y):
"""
Definition of the loss for regular classification
"""
- ssoftmax = tf.nn.sparse_softmax_cross_entropy_with_logits(pred, y, name='cross_entropy_single')
+ ssoftmax = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=y, name='cross_entropy_single')
return tf.reduce_mean(ssoftmax, name='cross_entropy_batch')
@@ -55,7 +55,7 @@ def constrastive_loss(lhs, rhs, y, margin=1.0):
"""
Contrastive loss confirming to the Caffe definition
"""
- d = tf.reduce_sum(tf.square(tf.sub(lhs, rhs)), 1)
+ d = tf.reduce_sum(tf.square(tf.subtract(lhs, rhs)), 1)
d_sqrt = tf.sqrt(1e-6 + d)
loss = (y * d) + ((1 - y) * tf.square(tf.maximum(margin - d_sqrt, 0)))
return tf.reduce_mean(loss) # Note: constant component removed (/2)
@@ -92,11 +92,11 @@ def chw_to_hwc(x):
def bgr_to_rgb(x):
- return tf.reverse(x, [False, False, True])
+ return tf.reverse(x, [2])
def rgb_to_bgr(x):
- return tf.reverse(x, [False, False, True])
+ return tf.reverse(x, [2])
def get_available_gpus():
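The `bgr_to_rgb`/`rgb_to_bgr` change above relies on `tf.reverse` taking axis indices in TF 1.x. A small standalone check of the channel swap, assuming TensorFlow 1.x and NumPy are available:

```python
import numpy as np
import tensorflow as tf

# Mark the blue plane of a tiny HWC image, then reverse the channel axis.
bgr = np.zeros((2, 2, 3), dtype=np.float32)
bgr[..., 0] = 1.0
with tf.Session() as sess:
    rgb = sess.run(tf.reverse(tf.constant(bgr), [2]))
# The original blue plane should now sit in the last (blue-in-RGB) position.
assert (rgb[..., 2] == 1.0).all()
```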
diff --git a/docs/BuildDigits.md b/docs/BuildDigits.md
index 2bc6c4f8d..e1e12241a 100644
--- a/docs/BuildDigits.md
+++ b/docs/BuildDigits.md
@@ -72,6 +72,10 @@ optional arguments:
Now that you're up and running, check out the [Getting Started Guide](GettingStarted.md).
+## Development
+
+If you are interested in developing for DIGITS or working with its source code, check out the [Development Setup Guide](DevelopmentSetup.md).
+
## Troubleshooting
Most configuration options should have appropriate defaults.
diff --git a/docs/BuildTensorflow.md b/docs/BuildTensorflow.md
index 292a94279..33e22a2d2 100644
--- a/docs/BuildTensorflow.md
+++ b/docs/BuildTensorflow.md
@@ -1,19 +1,48 @@
-# Building Torch
+# Installing TensorFlow
-DIGITS now supports Tensorflow as an optional alternative backend to Caffe or Torch.
+DIGITS now supports TensorFlow as an optional alternative backend to Caffe or Torch.
-> NOTE: Tensorflow support is still experimental!
+> NOTE: TensorFlow support is still experimental!
-@TODO
+We recommend installing TensorFlow in a fixed, separate environment, because TensorFlow support is still in development and stability is not guaranteed.
-Table of Contents
-=================
-* @TODO
+Installation for [Ubuntu](https://www.tensorflow.org/install/install_linux#installing_with_virtualenv)
-We recommend installing Tensorflow in a fixed and separate environment.
+Installation for [Mac](https://www.tensorflow.org/install/install_mac#installing_with_virtualenv)
-Installation per: https://www.tensorflow.org/versions/r0.10/get_started/os_setup.html#virtualenv-installation
+## Requirements
-## Prerequisites
+DIGITS currently targets tensorflow-gpu v1.1.
-@TODO
+TensorFlow for DIGITS requires one or more NVIDIA GPUs with a CUDA Compute Capability of 3.0 or higher. See [the official GPU support list](https://developer.nvidia.com/cuda-gpus) to check whether your GPU is supported.
+
+In addition to a supported GPU, the following should be installed
+
+* One or more NVIDIA GPUs ([details](InstallCuda.md#gpu))
+* An NVIDIA driver ([details and installation instructions](InstallCuda.md#driver))
+* A CUDA toolkit ([details and installation instructions](InstallCuda.md#cuda-toolkit))
+* cuDNN 5.1 ([download page](https://developer.nvidia.com/cudnn))
+
+### A Note About cuDNN and TensorFlow
+Currently TensorFlow v1.1 targets cuDNN 5.1, while the latest cuDNN version is 6. **To run TensorFlow in DIGITS, you must have cuDNN 5.1 installed; cuDNN 6 is currently incompatible with TensorFlow.** To install it, use the following command in a terminal
+
+```
+sudo apt-get install libcudnn5
+```
+
+
+## Installation
+
+These instructions are based on [the official TensorFlow instructions](https://www.tensorflow.org/versions/master/install/)
+
+TensorFlow is distributed through pip. To install it, simply use the command
+```
+pip install tensorflow-gpu==1.2.0
+```
+
+TensorFlow should then install and pull in all of its required dependencies.
+
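+As a quick sanity check (a suggested step, not part of the official instructions), you can verify the install from the same environment with a few lines of Python:
+
+```
+# quick sanity check: run inside the environment where tensorflow-gpu was installed
+import tensorflow as tf
+print(tf.__version__)                 # should match the version you installed
+print(tf.test.is_built_with_cuda())   # True for the GPU build
+```
+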
+## Getting Started With TensorFlow In DIGITS
+
+Follow [these instructions](GettingStartedTensorflow.md) for information on getting started with TensorFlow in DIGITS.
diff --git a/docs/BuildTorch.md b/docs/BuildTorch.md
index d59b8031a..b354b1eae 100644
--- a/docs/BuildTorch.md
+++ b/docs/BuildTorch.md
@@ -16,7 +16,7 @@ Install some dependencies with Deb packages:
sudo apt-get install --no-install-recommends git software-properties-common
```
-## Basic install
+## Basic Installation
These instructions are based on [the official Torch instructions](http://torch.ch/docs/getting-started.html).
```sh
diff --git a/docs/DevelopmentSetup.md b/docs/DevelopmentSetup.md
new file mode 100644
index 000000000..a22b5a2b3
--- /dev/null
+++ b/docs/DevelopmentSetup.md
@@ -0,0 +1,36 @@
+# Development
+
+The source code for DIGITS is available on [github](https://github.com/NVIDIA/DIGITS).
+
+To get the source code onto your local machine, clone the GitHub repository with
+```
+git clone https://github.com/NVIDIA/DIGITS.git
+```
+Alternatively, you may download the source code as a zip file from the GitHub website.
+
+## Running DIGITS in Development
+
+DIGITS comes with a script that runs a development server.
+To start the development server, use
+```
+./digits-devserver
+```
+
+## Running unit tests for DIGITS
+
+To successfully run all the unit tests, the following plugins and test requirements have to be installed
+```
+sudo pip install -e ./plugins/data/imageGradients
+sudo pip install -e ./plugins/view/imageGradients
+sudo pip install -r ./requirements_test.txt
+```
+
+To run all the tests for DIGITS, use
+```
+./digits-test
+```
+
+If you would like verbose output that includes the names of the tests, use
+```
+./digits-test -v
+```
\ No newline at end of file
diff --git a/docs/GettingStartedTensorflow.md b/docs/GettingStartedTensorflow.md
new file mode 100644
index 000000000..3e161b03c
--- /dev/null
+++ b/docs/GettingStartedTensorflow.md
@@ -0,0 +1,244 @@
+# Getting Started with TensorFlow™ in DIGITS
+
+Table of Contents
+=================
+* [Enabling Support For TensorFlow In DIGITS](#enabling-support-for-tensorflow-in-digits)
+* [Selecting TensorFlow When Creating A Model In DIGITS](#selecting-tensorflow-when-creating-a-model-in-digits)
+* [Defining A TensorFlow Model In DIGITS](#defining-a-tensorflow-model-in-digits)
+ * [Provided Properties](#provided-properties)
+ * [Internal Properties](#internal-properties)
+ * [Tensors](#tensors)
+* [Other TensorFlow Tools in DIGITS](#other-tensorflow-tools-in-digits)
+ * [Provided Helpful Functions](#provided-helpful-functions)
+ * [Visualization With TensorBoard](#visualization-with-tensorboard)
+* [Examples](#examples)
+ * [Simple Auto-Encoder Network](#simple-auto-encoder-network)
+ * [Freezing Variables in Pre-Trained Models by Renaming](#freezing-variables-in-pre-trained-models-by-renaming)
+ * [Multi-GPU Training](#multi-gpu-training)
+
+## Enabling Support For TensorFlow In DIGITS
+
+DIGITS will automatically enable support for TensorFlow if it detects that tensorflow-gpu is installed on the system. This is done by a line of Python code that attempts to ```import tensorflow``` and checks whether the import succeeds.
+
+If DIGITS cannot enable TensorFlow, a message will be printed in the console saying: ```TensorFlow support is disabled```
+
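+Conceptually, the check looks like the following minimal sketch (illustrative only, not the exact DIGITS source):
+
+```python
+# import-based detection: TensorFlow support is enabled only if the import succeeds
+try:
+    import tensorflow  # noqa: F401
+    tensorflow_enabled = True
+except ImportError:
+    tensorflow_enabled = False
+    print('TensorFlow support is disabled')
+```
+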
+## Selecting TensorFlow When Creating A Model In DIGITS
+
+Click on the "TensorFlow" tab on the model creation page
+
+![Select TensorFlow](images/Select_TensorFlow.png)
+
+## Defining A TensorFlow Model In DIGITS
+
+To define a TensorFlow model in DIGITS, you need to write a python class that follows this basic template
+
+```python
+class UserModel(Tower):
+
+ @model_property
+ def inference(self):
+ # Your code here
+ return model
+
+ @model_property
+ def loss(self):
+ # Your code here
+ return loss
+```
+
+For example, this is what it looks like for [LeNet-5](http://yann.lecun.com/exdb/lenet/), a model created by Yann LeCun for the classification of handwritten digits:
+
+```python
+class UserModel(Tower):
+
+ @model_property
+ def inference(self):
+ x = tf.reshape(self.x, shape=[-1, self.input_shape[0], self.input_shape[1], self.input_shape[2]])
+ # scale (divide by MNIST std)
+ x = x * 0.0125
+ with slim.arg_scope([slim.conv2d, slim.fully_connected],
+ weights_initializer=tf.contrib.layers.xavier_initializer(),
+ weights_regularizer=slim.l2_regularizer(0.0005) ):
+ model = slim.conv2d(x, 20, [5, 5], padding='VALID', scope='conv1')
+ model = slim.max_pool2d(model, [2, 2], padding='VALID', scope='pool1')
+ model = slim.conv2d(model, 50, [5, 5], padding='VALID', scope='conv2')
+ model = slim.max_pool2d(model, [2, 2], padding='VALID', scope='pool2')
+ model = slim.flatten(model)
+ model = slim.fully_connected(model, 500, scope='fc1')
+ model = slim.dropout(model, 0.5, is_training=self.is_training, scope='do1')
+ model = slim.fully_connected(model, self.nclasses, activation_fn=None, scope='fc2')
+ return model
+
+ @model_property
+ def loss(self):
+ loss = digits.classification_loss(self.inference, self.y)
+ accuracy = digits.classification_accuracy(self.inference, self.y)
+ self.summaries.append(tf.summary.scalar(accuracy.op.name, accuracy))
+ return loss
+```
+
+The properties ```inference``` and ```loss``` must be defined, the class must be named ```UserModel```, and it must inherit from ```Tower```. This is how DIGITS interacts with the Python code.
+
+### Provided Properties
+
+Properties that are accessible through ```self```
+
+Property name | Type | Description
+--------------|-----------|------------
+`nclasses` | number | Number of classes (for classification datasets). For other types of datasets, this is undefined
+`input_shape` | Tensor | Shape (1D Tensor) of the first input Tensor. For image data, this is set to height, width, and channels accessible by [0], [1], and [2] respectively.
+`is_training` | boolean | Whether this is a training graph
+`is_inference` | boolean | Whether this graph is created for inference/testing
+`x` | Tensor | The input node, with the shape of [N, H, W, C]
+`y` | Tensor | The label, [N] for scalar labels, [N, H, W, C] otherwise. Defined only if self.is_training is True
+
+### Internal Properties
+
+These properties are defined in the `UserModel` class written by the user (a minimal sketch of the constructor follows the table)
+
+Property name | Return Type | Description
+--------------|-------------|------------
+`__init__()` | None | The constructor for the `UserModel` class
+`inference()` | Tensor | Called during training and inference
+`loss()` | Tensor | Called during training to determine the loss and variables to train
+
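+A ```UserModel``` only needs an explicit constructor if it wants to set up state shared between ```inference()``` and ```loss()```. A minimal sketch follows; the constructor signature here is an assumption, not a documented API:
+
+```python
+class UserModel(Tower):
+
+    def __init__(self, *args, **kwargs):
+        # forward whatever DIGITS passes to the base Tower class (assumed signature)
+        super(UserModel, self).__init__(*args, **kwargs)
+        # state reused by both inference() and loss()
+        self.hidden_units = 500
+```
+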
+### Tensors
+
+The network is fed with TensorFlow Tensor objects in [N, H, W, C] format.
+
+## Other TensorFlow Tools in DIGITS
+
+DIGITS provides a few useful tools to help with your TensorFlow development.
+
+### Provided Helpful Functions
+
+DIGITS provides a few helper functions for creating the model. These are the functions provided inside the `digits` class; a short usage sketch follows the table.
+
+Function Name | Parameters | Description
+--------------|------------|------------
+`classification_loss` | pred - the images to be classified <br> y - the labels | Used for classification training to calculate the loss of image classification
+`mse_loss` | lhs - left hand tensor <br> rhs - right hand tensor | Used for calculating the mean square loss between 2 tensors
+`constrastive_loss` | lhs - left hand tensor <br> rhs - right hand tensor <br> y - the labels | Calculates the contrastive loss with respect to the Caffe definition
+`classification_accuracy` | pred - the image to be classified <br> y - the labels | Used to measure how accurate the classification task is
+`nhwc_to_nchw` | x - the tensor to transpose | Transposes a tensor that was originally in NHWC format to NCHW. The tensor must have 4 dimensions
+`nchw_to_nhwc` | x - the tensor to transpose | Transposes a tensor that was originally in NCHW format to NHWC. The tensor must have 4 dimensions
+`hwc_to_chw` | x - the tensor to transpose | Transposes a tensor that was originally in HWC format to CHW. The tensor must have 3 dimensions
+`chw_to_hwc` | x - the tensor to transpose | Transposes a tensor that was originally in CHW format to HWC. The tensor must have 3 dimensions
+`bgr_to_rgb` | x - the tensor to transform | Transforms a tensor that was originally in BGR channel order to RGB
+`rgb_to_bgr` | x - the tensor to transform | Transforms a tensor that was originally in RGB channel order to BGR
+
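+As an illustrative example (not from the DIGITS source), the layout and color helpers above could be used inside ```inference()``` like this:
+
+```python
+# DIGITS feeds tensors as [N, H, W, C]
+x = self.x
+x_nchw = digits.nhwc_to_nchw(x)   # channels-first layout for layers that expect NCHW
+x_rgb = digits.bgr_to_rgb(x)      # flip channel order if the dataset was stored as BGR
+```
+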
+### Visualization With TensorBoard
+
+![TensorBoard](images/TensorBoard.png)
+
+TensorBoard is a visualization tool provided by TensorFlow for inspecting the graph of your neural network. DIGITS provides easy access to TensorBoard network visualization for your network while you are creating it. This can be accessed by clicking on the ```Visualize``` button under ```Custom Network```, as seen in the image below.
+
+![Visualize TensorBoard](images/visualize_button.png)
+
+If there is something wrong with the network model, DIGITS will automatically provide you with the stack trace and the error message to help you locate the problem.
+
+You can also spin up the full TensorBoard server while your model is training with the command
+```
+$ tensorboard --logdir <job_dir>/tb/
+```
+where `<job_dir>` is the directory where the model is being trained, which can be found here:
+
+![Job Dir](images/job-dir.png)
+
+Afterwards, you can open the TensorBoard page by going to
+```http://localhost:6006```
+
+Or you can click the ```Tensorboard``` link under Visualization
+
+![Visualize Button](images/visualize-btn.png)
+
+To learn more about how TensorBoard works, see the [official TensorFlow documentation](https://www.tensorflow.org/get_started/summaries_and_tensorboard)
+
+## Examples
+
+### Simple Auto-Encoder Network
+
+The following network is a simple auto-encoder that demonstrates how to structure a TensorFlow model in DIGITS. An auto-encoder is a two-part network that essentially acts as a compression mechanism: the first part tries to compress an image to a size smaller than the original, while the second part tries to decompress the compressed representation created by the compression network.
+
+```python
+class UserModel(Tower):
+
+ @model_property
+ def inference(self):
+
+ # the order for input shape is [0] -> H, [1] -> W, [2] -> C
+ # this is because tensorflow's default order is NHWC
+ model = tf.reshape(self.x, shape=[-1, self.input_shape[0], self.input_shape[1], self.input_shape[2]])
+ image_dim = self.input_shape[0] * self.input_shape[1]
+
+ with slim.arg_scope([slim.fully_connected],
+ weights_initializer=tf.contrib.layers.xavier_initializer(),
+ weights_regularizer=slim.l2_regularizer(0.0005)):
+
+ # flatten each image into a single vector of length image_dim
+ model = tf.reshape(model, shape=[-1, image_dim])
+
+ # encode the image
+ model = slim.fully_connected(model, 300, scope='fc1')
+ model = slim.fully_connected(model, 50, scope='fc2')
+
+ # decode the image
+ model = slim.fully_connected(model, 300, scope='fc3')
+ model = slim.fully_connected(model, image_dim, activation_fn=None, scope='fc4')
+
+ # form it back to the original
+ model = tf.reshape(model, shape=[-1, self.input_shape[0], self.input_shape[1], self.input_shape[2]])
+
+ return model
+
+ @model_property
+ def loss(self):
+
+ # In an autoencoder, we compare the encoded and then decoded image with the original
+ original = tf.reshape(self.x, shape=[-1, self.input_shape[0], self.input_shape[1], self.input_shape[2]])
+
+ # self.inference is called to get the processed image
+ model = self.inference
+ loss = digits.mse_loss(original, model)
+
+ return loss
+```
+
+### Freezing Variables in Pre-Trained Models by Renaming
+
+The following is a demonstration of how to specify which weights we would like to use for training. This works best with a pre-trained model, and is applicable when fine-tuning a model.
+
+When you originally train a model, TensorFlow saves the variables under their specified names. When you reload the model to retrain it, TensorFlow simultaneously reloads all those variables and marks them as trainable if they are specified in the model definition. When you change the name of a variable in the model, TensorFlow knows not to train that variable and thus "freezes" it.
+
+```python
+class UserModel(Tower):
+
+ @model_property
+ def inference(self):
+
+ model = construct_model()
+ """code to construct the network omitted"""
+
+ # assuming the original model has weight2 and bias2 variables,
+ # we rename them here by adding the suffix _not_in_use
+ # this tells TensorFlow that these variables in the pre-trained model should
+ # not be retrained and should be frozen
+ # to freeze a weight, all we have to do is rename it
+ self.weights = {
+ 'weight1': tf.get_variable('weight1', [5, 5, self.input_shape[2], 20], initializer=tf.contrib.layers.xavier_initializer()),
+ 'weight2': tf.get_variable('weight2_not_in_use', [5, 5, 20, 50], initializer=tf.contrib.layers.xavier_initializer())
+ }
+
+ self.biases = {
+ 'bias1': tf.get_variable('bias1', [20], initializer=tf.constant_initializer(0.0)),
+ 'bias2': tf.get_variable('bias2_not_in_use', [50], initializer=tf.constant_initializer(0.0))
+ }
+
+ return model
+
+ @model_property
+ def loss(self):
+ loss = calculate_loss()
+ """code to calculate loss omitted"""
+ return loss
+```
diff --git a/docs/GettingStartedTorch.md b/docs/GettingStartedTorch.md
index b1bb1703b..8eae62386 100644
--- a/docs/GettingStartedTorch.md
+++ b/docs/GettingStartedTorch.md
@@ -14,9 +14,6 @@ Table of Contents
* [Supervised Regression Learning](#supervised-regression-learning)
* [Command Line Inference](#command-line-inference)
* [Multi-GPU training](#multi-gpu-training)
-* [Tutorials](#tutorials)
- * [Training an autoencoder](#training-an-autoencoder)
- * [Training a regression model](#training-a-regression-model)
## Enabling support for Torch7 in DIGITS
diff --git a/docs/ModelStore.md b/docs/ModelStore.md
index 818e72dee..9ecffc023 100644
--- a/docs/ModelStore.md
+++ b/docs/ModelStore.md
@@ -1,6 +1,22 @@
# Model Store
## Introduction
+The Model Store lists models available on user-specified servers.
+Users can import models from a Model Store into DIGITS.
+
+
+## Setting up environment variable
+Configuring the Model Store requires one environment variable, DIGITS_MODEL_STORE_URL, to be set.
+NVIDIA plans to publish one public Model Store at http://developer.download.nvidia.com/compute/machine-learning/modelstore/5.0.
+You can set the environment variable to that URL before launching DIGITS.
+For example, run the following command in your Bash shell.
+``` shell
+export DIGITS_MODEL_STORE_URL='http://developer.download.nvidia.com/compute/machine-learning/modelstore/5.0'
+```
+If multiple model stores are available, specify their URLs separated by commas (,).
+``` shell
+export DIGITS_MODEL_STORE_URL='http://localhost/mymodelstore,http://dlserver/teammodelstore'
+```
DIGITS 5.0 introduces the concept of a "model store," which is a collection of trained models that can be used as pre-trained weights to accelerate training convergence.
A DIGITS server can be configured to connect to one or more model stores to download these trained models from the store to the server.
diff --git a/docs/images/Select_TensorFlow.png b/docs/images/Select_TensorFlow.png
new file mode 100644
index 0000000000000000000000000000000000000000..cd2859a16b360ee26372d7ec383e99917b55fed1
GIT binary patch
literal 36045