From f804ef5ff18ece2dc8ea406dff1b81d702f1a76f Mon Sep 17 00:00:00 2001 From: Aaron Markham Date: Sat, 18 Aug 2018 20:10:14 -0700 Subject: [PATCH 001/160] add new scala build function (#12236) removing unused code --- docs/mxdoc.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/docs/mxdoc.py b/docs/mxdoc.py index 33f64750e816..7092b9ee9eaa 100644 --- a/docs/mxdoc.py +++ b/docs/mxdoc.py @@ -100,11 +100,15 @@ def build_r_docs(app): dest_path = app.builder.outdir + '/api/r/' _run_cmd('mkdir -p ' + dest_path + '; mv ' + pdf_path + ' ' + dest_path) +def build_scala(app): + """build scala for scala docs and clojure docs to use""" + _run_cmd("cd %s/.. && make scalapkg" % app.builder.srcdir) + _run_cmd("cd %s/.. && make scalainstall" % app.builder.srcdir) + def build_scala_docs(app): """build scala doc and then move the outdir""" scala_path = app.builder.srcdir + '/../scala-package' # scaldoc fails on some apis, so exit 0 to pass the check - _run_cmd('cd ..; make scalapkg') _run_cmd('cd ' + scala_path + '; scaladoc `find . -type f -name "*.scala" | egrep \"\/core|\/infer\" | egrep -v \"Suite\"`; exit 0') dest_path = app.builder.outdir + '/api/scala/docs' _run_cmd('rm -rf ' + dest_path) @@ -115,8 +119,6 @@ def build_scala_docs(app): def build_clojure_docs(app): """build clojure doc and then move the outdir""" - _run_cmd("cd %s/.. && make scalapkg" % app.builder.srcdir) - _run_cmd("cd %s/.. && make scalainstall" % app.builder.srcdir) clojure_path = app.builder.srcdir + '/../contrib/clojure-package' _run_cmd('cd ' + clojure_path + '; lein codox') dest_path = app.builder.outdir + '/api/clojure/docs' @@ -411,6 +413,9 @@ def setup(app): if _DOXYGEN_DOCS: print("Building Doxygen!") app.connect("builder-inited", generate_doxygen) + if _SCALA_DOCS or _CLOJURE_DOCS: + print("Building Scala!") + app.connect("builder-inited", build_scala) if _SCALA_DOCS: print("Building Scala Docs!") app.connect("builder-inited", build_scala_docs) From 46437b8102081ff3b18243494f5fc5e3aa4efed7 Mon Sep 17 00:00:00 2001 From: Leonard Lausen Date: Sun, 19 Aug 2018 23:47:42 +0800 Subject: [PATCH 002/160] Don't override global warnings filter (#12245) Currently import mxnet causes warnings.filterwarnings('default', category=DeprecationWarning) to be executed. This is very bad, as there are valid use cases where our users may have decided to filter out DeprecationWarning and we should not overwrite their configuration. --- python/mxnet/base.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/python/mxnet/base.py b/python/mxnet/base.py index 3d8ee0191757..2bfcdd62eda0 100644 --- a/python/mxnet/base.py +++ b/python/mxnet/base.py @@ -24,15 +24,12 @@ import ctypes import os import sys -import warnings import inspect import platform import numpy as np from . 
import libinfo -warnings.filterwarnings('default', category=DeprecationWarning) - __all__ = ['MXNetError'] #---------------------------- # library loading From 338a40b839436fb79eac51709c1323405a795aff Mon Sep 17 00:00:00 2001 From: Kellen Sunderland Date: Sun, 19 Aug 2018 18:44:23 +0200 Subject: [PATCH 003/160] Disable flaky test deformable_psroipooling (#12246) --- tests/python/unittest/test_operator.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/python/unittest/test_operator.py b/tests/python/unittest/test_operator.py index eeaeab8d1667..652bae1fb7cf 100644 --- a/tests/python/unittest/test_operator.py +++ b/tests/python/unittest/test_operator.py @@ -5106,6 +5106,7 @@ def _validate_sample_location(input_rois, input_offset, spatial_scale, pooled_w, return output_offset +@unittest.skip("Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/11713") @with_seed() def test_deformable_psroipooling(): sample_per_part = 4 From 605c569aef6e9beff70c94eb5bd31f99557e22ae Mon Sep 17 00:00:00 2001 From: Anton Chernov Date: Mon, 20 Aug 2018 15:38:03 +0200 Subject: [PATCH 004/160] Disabled flaky test: test_operator_gpu.test_bilinear_sampler (#12249) --- tests/python/unittest/test_operator.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/python/unittest/test_operator.py b/tests/python/unittest/test_operator.py index 652bae1fb7cf..04617de8b19f 100644 --- a/tests/python/unittest/test_operator.py +++ b/tests/python/unittest/test_operator.py @@ -3940,6 +3940,7 @@ def test_grid_generator(): assert_almost_equal(exe_add.grad_dict['flow'].asnumpy(), grad_est + flow_grad_npy, rtol=1e-3, atol=1e-5) +@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/12248") def test_bilinear_sampler(): from math import floor From b03227d9dbd97f3cdd4c0ba8496e05797847a631 Mon Sep 17 00:00:00 2001 From: Sebastian Bodenstein Date: Mon, 20 Aug 2018 16:40:38 +0200 Subject: [PATCH 005/160] Pad Operator Type Support (#12035) * fix no data type inference for pad * add support for int types * add tests for all types * fix gpu type switch * remove integer support * fix python op test style issues * fix type bug in python tests --- src/operator/pad-inl.h | 11 +++++++++++ tests/python/unittest/test_operator.py | 24 ++++++++++++++---------- 2 files changed, 25 insertions(+), 10 deletions(-) diff --git a/src/operator/pad-inl.h b/src/operator/pad-inl.h index 520cd124c49a..0b43e2d0cfd2 100644 --- a/src/operator/pad-inl.h +++ b/src/operator/pad-inl.h @@ -189,6 +189,17 @@ class PadProp : public OperatorProperty { return param_.__DICT__(); } + bool InferType(std::vector *in_type, + std::vector *out_type, + std::vector *aux_type) const override { + int dtype = (*in_type)[0]; + type_assign(&dtype, (*out_type)[0]); + + TYPE_ASSIGN_CHECK(*in_type, 0, dtype); + TYPE_ASSIGN_CHECK(*out_type, 0, dtype); + return dtype != -1; + } + bool InferShape(std::vector *in_shape, std::vector *out_shape, std::vector *aux_shape) const override { using namespace mshadow; diff --git a/tests/python/unittest/test_operator.py b/tests/python/unittest/test_operator.py index 04617de8b19f..125666ba832c 100644 --- a/tests/python/unittest/test_operator.py +++ b/tests/python/unittest/test_operator.py @@ -3008,16 +3008,16 @@ def test_roipooling(): numeric_eps=1e-4, rtol=1e-1, atol=1E-4) -def check_pad_with_shape(shape, xpu, pad_width, mode): +def check_pad_with_shape(shape, xpu, pad_width, mode, dtype="float64"): # bind with label - X = mx.symbol.Variable('X') + X = mx.symbol.Variable('X', dtype=dtype) Y = 
mx.symbol.Pad(data=X, mode=mode, pad_width=pad_width) - x = mx.random.uniform(-1, 1, shape, ctx=mx.cpu()).copyto(xpu) + x = mx.random.uniform(-1, 1, shape, ctx=mx.cpu(), dtype=dtype).copyto(xpu) # numpy result pad_grouped = list(zip(*[iter(list(pad_width))] * 2)) np_out = np.pad(x.asnumpy(), pad_grouped, mode) # mxnet result - grad = mx.nd.empty(shape, ctx = xpu) + grad = mx.nd.empty(shape, ctx = xpu, dtype=dtype) exec1 = Y.bind(xpu, args = [x], args_grad = {'X': grad}) exec1.forward(is_train=True) out = exec1.outputs[0].asnumpy() @@ -3029,16 +3029,20 @@ def check_pad_with_shape(shape, xpu, pad_width, mode): @with_seed() def test_pad(): + ctx = default_context() shape1 = (2, 3, 3, 5) pad1 = (0, 0, 0, 0, 1, 2, 3, 4) shape2 = (2, 3, 3, 5, 4) pad2 = (0, 0, 0, 0, 1, 2, 3, 4, 3, 1) - check_pad_with_shape(shape1, default_context(), pad1, 'constant') - check_pad_with_shape(shape1, default_context(), pad1, 'edge') - check_pad_with_shape(shape2, default_context(), pad2, 'constant') - check_pad_with_shape(shape2, default_context(), pad2, 'edge') - check_pad_with_shape(shape1, default_context(), pad1, 'reflect') - check_pad_with_shape(shape2, default_context(), pad2, 'reflect') + # note: this op doesn't support ints yet. Add tests when supported + dtypes = ["float16", "float32", "float64"] + for dtype in dtypes: + check_pad_with_shape(shape1, ctx, pad1, 'constant', dtype) + check_pad_with_shape(shape1, ctx, pad1, 'edge', dtype) + check_pad_with_shape(shape2, ctx, pad2, 'constant', dtype) + check_pad_with_shape(shape2, ctx, pad2, 'edge', dtype) + check_pad_with_shape(shape1, ctx, pad1, 'reflect', dtype) + check_pad_with_shape(shape2, ctx, pad2, 'reflect', dtype) def np_instance_norm(data, weight, bias, eps): From 7f22b782fcaa199a9c40deaab52930bb0a60f27a Mon Sep 17 00:00:00 2001 From: Kellen Sunderland Date: Mon, 20 Aug 2018 18:43:45 +0200 Subject: [PATCH 006/160] Fix typo in graph_executor (#12252) --- src/executor/graph_executor.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/executor/graph_executor.cc b/src/executor/graph_executor.cc index 0e8070670904..32b14b8e9637 100644 --- a/src/executor/graph_executor.cc +++ b/src/executor/graph_executor.cc @@ -1111,7 +1111,7 @@ void GraphExecutor::InitCachedOps() { } } } - // Note that this modifies the requirment of kWriteInplace + // Note that this modifies the requirement of kWriteInplace for (size_t j = num_forward_outputs_; j < idx.outputs().size(); ++j) { auto& e = idx.outputs()[j]; op_nodes_[e.node_id].exec->req[e.index] = From 14b1e48e78fef8aa6743a76779489dce4424c161 Mon Sep 17 00:00:00 2001 From: Kellen Sunderland Date: Mon, 20 Aug 2018 18:45:26 +0200 Subject: [PATCH 007/160] Tweak ExecType descriptions (#12251) --- include/mxnet/op_attr_types.h | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/include/mxnet/op_attr_types.h b/include/mxnet/op_attr_types.h index 2bb2462d4869..aa5d4e6de784 100644 --- a/include/mxnet/op_attr_types.h +++ b/include/mxnet/op_attr_types.h @@ -87,18 +87,19 @@ struct OpContext { /*! \brief the execution type of the operator */ enum class ExecType { - /*! \brief Forward/Backward are synchronize calls */ + /*! \brief Forward/Backward are synchronous calls */ kSync, /*! - * \brief Forward/Backward are asynchronize, + * \brief Forward/Backward are asynchronous, * will call OpContext.async_on_complete when operation finishes. */ kAsync, /*! 
- * \brief Cross device copy operation, this is a special operator - * That indicates copy across devices, the input and output can sit on different device. - * In current implementation, copy operator is specially handled by executor. - * This flag is used for special case treatment and future extension of different copy ops. + * \brief Cross device copy operation, this is a special operator that indicates it will copy + * across devices. For example the input and output for this type of operator can potentially + * reside on different devices. In the current implementation, a copy operator is specially + * handled by an executor. This flag is used for special case treatment and future extension of + * different copy ops. */ kCrossDeviceCopy, /*! From aec7d2c0a7288d785279eea0148a5c6b88a277cf Mon Sep 17 00:00:00 2001 From: cclauss Date: Mon, 20 Aug 2018 18:47:46 +0200 Subject: [PATCH 008/160] Python fixes for PyLint test "consider-using-in" (#12214) * PyLint fixes for consider-using-in * Add missing 'in' --- python/mxnet/autograd.py | 2 +- .../mxnet/contrib/onnx/mx2onnx/_op_translations.py | 2 +- python/mxnet/contrib/quantization.py | 2 +- python/mxnet/gluon/nn/conv_layers.py | 12 ++++-------- python/mxnet/gluon/parameter.py | 4 ++-- python/mxnet/gluon/rnn/rnn_layer.py | 2 +- python/mxnet/ndarray/sparse.py | 8 ++++---- python/mxnet/operator.py | 2 +- python/mxnet/test_utils.py | 12 ++++++------ python/mxnet/visualization.py | 4 ++-- tools/caffe_converter/convert_model.py | 10 ++++------ 11 files changed, 27 insertions(+), 33 deletions(-) diff --git a/python/mxnet/autograd.py b/python/mxnet/autograd.py index e5ddaf6a15cd..b3acee27caab 100644 --- a/python/mxnet/autograd.py +++ b/python/mxnet/autograd.py @@ -465,7 +465,7 @@ def backward_entry(num_ograds, num_igrads, ptrs, reqs, is_train, _): "autograd.Function.backward must return NDArrays, not %s"%type(ret) if req == 0: # null return True - elif req == 1 or req == 2: # write or inplace + elif req in (1, 2): # write or inplace igrad[:] = ret elif req == 'add': igrad[:] += ret diff --git a/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py b/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py index b2c93670bb41..af7fedb33cb9 100644 --- a/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py +++ b/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py @@ -122,7 +122,7 @@ def convert_string_to_list(string_val): val = val.replace("L", "") val = val.replace("[", "") val = val.replace("]", "") - if val != "" and val != "None": + if val not in ("", "None"): result_list.append(int(val)) return result_list diff --git a/python/mxnet/contrib/quantization.py b/python/mxnet/contrib/quantization.py index 62be40fd210f..8df923908fec 100644 --- a/python/mxnet/contrib/quantization.py +++ b/python/mxnet/contrib/quantization.py @@ -489,7 +489,7 @@ def quantize_model(sym, arg_params, aux_params, excluded_syms.append(nodes[idx]) logger.info('Quantizing symbol') - if quantized_dtype != 'int8' and quantized_dtype != 'uint8': + if quantized_dtype not in ('int8', 'uint8'): raise ValueError('unknown quantized_dtype %s received,' ' expected `int8` or `uint8`' % quantized_dtype) qsym = _quantize_symbol(sym, excluded_symbols=excluded_syms, diff --git a/python/mxnet/gluon/nn/conv_layers.py b/python/mxnet/gluon/nn/conv_layers.py index e1f9b9fd05a0..96ecc21c81b3 100644 --- a/python/mxnet/gluon/nn/conv_layers.py +++ b/python/mxnet/gluon/nn/conv_layers.py @@ -309,8 +309,7 @@ def __init__(self, channels, kernel_size, strides=(1, 1), padding=(0, 0), dilation=(1, 1), 
groups=1, layout='NCHW', activation=None, use_bias=True, weight_initializer=None, bias_initializer='zeros', in_channels=0, **kwargs): - assert layout == 'NCHW' or layout == 'NHWC', \ - "Only supports 'NCHW' and 'NHWC' layout for now" + assert layout in ('NCHW', 'NHWC'), "Only supports 'NCHW' and 'NHWC' layout for now" if isinstance(kernel_size, numeric_types): kernel_size = (kernel_size,)*2 assert len(kernel_size) == 2, "kernel_size must be a number or a list of 2 ints" @@ -391,8 +390,7 @@ def __init__(self, channels, kernel_size, strides=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), groups=1, layout='NCDHW', activation=None, use_bias=True, weight_initializer=None, bias_initializer='zeros', in_channels=0, **kwargs): - assert layout == 'NCDHW' or layout == 'NDHWC', \ - "Only supports 'NCDHW' and 'NDHWC' layout for now" + assert layout in ('NCDHW', 'NDHWC'), "Only supports 'NCDHW' and 'NDHWC' layout for now" if isinstance(kernel_size, numeric_types): kernel_size = (kernel_size,)*3 assert len(kernel_size) == 3, "kernel_size must be a number or a list of 3 ints" @@ -564,8 +562,7 @@ def __init__(self, channels, kernel_size, strides=(1, 1), padding=(0, 0), output_padding=(0, 0), dilation=(1, 1), groups=1, layout='NCHW', activation=None, use_bias=True, weight_initializer=None, bias_initializer='zeros', in_channels=0, **kwargs): - assert layout == 'NCHW' or layout == 'NHWC', \ - "Only supports 'NCHW' and 'NHWC' layout for now" + assert layout in ('NCHW', 'NHWC'), "Only supports 'NCHW' and 'NHWC' layout for now" if isinstance(kernel_size, numeric_types): kernel_size = (kernel_size,)*2 if isinstance(output_padding, numeric_types): @@ -657,8 +654,7 @@ def __init__(self, channels, kernel_size, strides=(1, 1, 1), padding=(0, 0, 0), output_padding=(0, 0, 0), dilation=(1, 1, 1), groups=1, layout='NCDHW', activation=None, use_bias=True, weight_initializer=None, bias_initializer='zeros', in_channels=0, **kwargs): - assert layout == 'NCDHW' or layout == 'NDHWC', \ - "Only supports 'NCDHW' and 'NDHWC' layout for now" + assert layout in ('NCDHW', 'NDHWC'), "Only supports 'NCDHW' and 'NDHWC' layout for now" if isinstance(kernel_size, numeric_types): kernel_size = (kernel_size,)*3 if isinstance(output_padding, numeric_types): diff --git a/python/mxnet/gluon/parameter.py b/python/mxnet/gluon/parameter.py index 1f6b86c978c6..24c86f4e0fa7 100644 --- a/python/mxnet/gluon/parameter.py +++ b/python/mxnet/gluon/parameter.py @@ -165,7 +165,7 @@ def shape(self, new_shape): return assert len(self._shape) == len(new_shape) and \ - all(j == 0 or i == j for i, j in zip(new_shape, self._shape)), \ + all(j in (0, i) for i, j in zip(new_shape, self._shape)), \ "Expected shape %s is incompatible with given shape %s."%( str(new_shape), str(self._shape)) @@ -231,7 +231,7 @@ def _load_init(self, data, ctx): """(Re)initializes by loading from data.""" if self.shape: for self_dim, data_dim in zip(self.shape, data.shape): - assert self_dim == 0 or self_dim == data_dim, \ + assert self_dim in (0, data_dim), \ "Failed loading Parameter '%s' from saved params: " \ "shape incompatible expected %s vs saved %s"%( self.name, str(self.shape), str(data.shape)) diff --git a/python/mxnet/gluon/rnn/rnn_layer.py b/python/mxnet/gluon/rnn/rnn_layer.py index d2c6ac9d9f2f..daf8ecbf5631 100644 --- a/python/mxnet/gluon/rnn/rnn_layer.py +++ b/python/mxnet/gluon/rnn/rnn_layer.py @@ -37,7 +37,7 @@ def __init__(self, hidden_size, num_layers, layout, i2h_bias_initializer, h2h_bias_initializer, mode, **kwargs): super(_RNNLayer, self).__init__(**kwargs) - 
assert layout == 'TNC' or layout == 'NTC', \ + assert layout in ('TNC', 'NTC'), \ "Invalid layout %s; must be one of ['TNC' or 'NTC']"%layout self._hidden_size = hidden_size self._num_layers = num_layers diff --git a/python/mxnet/ndarray/sparse.py b/python/mxnet/ndarray/sparse.py index 9c02b8e2cf27..88f5eae0722a 100644 --- a/python/mxnet/ndarray/sparse.py +++ b/python/mxnet/ndarray/sparse.py @@ -527,7 +527,7 @@ def copyto(self, other): return super(CSRNDArray, self).copyto(other) elif isinstance(other, NDArray): stype = other.stype - if stype == 'default' or stype == 'csr': + if stype in ('default', 'csr'): return super(CSRNDArray, self).copyto(other) else: raise TypeError('copyto does not support destination NDArray stype ' + str(stype)) @@ -774,7 +774,7 @@ def copyto(self, other): return super(RowSparseNDArray, self).copyto(other) elif isinstance(other, NDArray): stype = other.stype - if stype == 'default' or stype == 'row_sparse': + if stype in ('default', 'row_sparse'): return super(RowSparseNDArray, self).copyto(other) else: raise TypeError('copyto does not support destination NDArray stype ' + str(stype)) @@ -1531,7 +1531,7 @@ def zeros(stype, shape, ctx=None, dtype=None, **kwargs): if ctx is None: ctx = current_context() dtype = mx_real_t if dtype is None else dtype - if stype == 'row_sparse' or stype == 'csr': + if stype in ('row_sparse', 'csr'): aux_types = _STORAGE_AUX_TYPES[stype] else: raise ValueError("unknown storage type" + stype) @@ -1566,7 +1566,7 @@ def empty(stype, shape, ctx=None, dtype=None): if dtype is None: dtype = mx_real_t assert(stype is not None) - if stype == 'csr' or stype == 'row_sparse': + if stype in ('csr', 'row_sparse'): return zeros(stype, shape, ctx=ctx, dtype=dtype) else: raise Exception("unknown stype : " + str(stype)) diff --git a/python/mxnet/operator.py b/python/mxnet/operator.py index 1da6628e68da..e8fa571d44db 100644 --- a/python/mxnet/operator.py +++ b/python/mxnet/operator.py @@ -464,7 +464,7 @@ def assign(self, dst, req, src): """Helper function for assigning into dst depending on requirements.""" if req == 'null': return - elif req == 'write' or req == 'inplace': + elif req in ('write', 'inplace'): dst[:] = src elif req == 'add': dst[:] += src diff --git a/python/mxnet/test_utils.py b/python/mxnet/test_utils.py index 69d916ef85e3..63b75cf2a23c 100644 --- a/python/mxnet/test_utils.py +++ b/python/mxnet/test_utils.py @@ -639,7 +639,7 @@ def _parse_location(sym, location, ctx, dtype=default_dtype()): ValueError: Symbol arguments and keys of the given location do not match. """ assert isinstance(location, (dict, list, tuple)) - assert dtype == np.float16 or dtype == np.float32 or dtype == np.float64 + assert dtype in (np.float16, np.float32, np.float64) if isinstance(location, dict): if set(location.keys()) != set(sym.list_arguments()): raise ValueError("Symbol arguments and keys of the given location do not match." @@ -698,7 +698,7 @@ def _parse_aux_states(sym, aux_states, ctx, dtype=default_dtype()): >>> _parse_aux_states(fc2, {'batchnorm0_moving_var': mean_states}, None) ValueError: Symbol aux_states names and given aux_states do not match. 
""" - assert dtype == np.float16 or dtype == np.float32 or dtype == np.float64 + assert dtype in (np.float16, np.float32, np.float64) if aux_states is not None: if isinstance(aux_states, dict): if set(aux_states.keys()) != set(sym.list_auxiliary_states()): @@ -745,7 +745,7 @@ def numeric_grad(executor, location, aux_states=None, eps=1e-4, def as_stype(var, stype, dtype): return mx.nd.cast_storage(mx.nd.array(var, dtype=dtype), stype=stype) - assert dtype == np.float16 or dtype == np.float32 or dtype == np.float64 + assert dtype in (np.float16, np.float32, np.float64) approx_grads = {k: np.zeros(v.shape, dtype=dtype) for k, v in location.items()} for k, v in location.items(): @@ -827,7 +827,7 @@ def check_numeric_gradient(sym, location, aux_states=None, numeric_eps=1e-3, rto --------- ..[1] https://github.com/Theano/Theano/blob/master/theano/gradient.py """ - assert dtype == np.float16 or dtype == np.float32 or dtype == np.float64 + assert dtype in (np.float16, np.float32, np.float64) if ctx is None: ctx = default_context() @@ -970,7 +970,7 @@ def check_symbolic_forward(sym, location, expected, rtol=1E-4, atol=None, >>> ret_expected = np.array([[19, 22], [43, 50]]) >>> check_symbolic_forward(sym_dot, [mat1, mat2], [ret_expected]) """ - assert dtype == np.float16 or dtype == np.float32 or dtype == np.float64 + assert dtype in (np.float16, np.float32, np.float64) if ctx is None: ctx = default_context() @@ -1055,7 +1055,7 @@ def check_symbolic_backward(sym, location, out_grads, expected, rtol=1e-5, atol= >>> grad_expected = ograd.copy().asnumpy() >>> check_symbolic_backward(sym_add, [mat1, mat2], [ograd], [grad_expected, grad_expected]) """ - assert dtype == np.float16 or dtype == np.float32 or dtype == np.float64 + assert dtype in (np.float16, np.float32, np.float64) if ctx is None: ctx = default_context() diff --git a/python/mxnet/visualization.py b/python/mxnet/visualization.py index fc6db1ddcb31..82946204847a 100644 --- a/python/mxnet/visualization.py +++ b/python/mxnet/visualization.py @@ -309,7 +309,7 @@ def looks_like_weight(name): attr["fillcolor"] = cm[1] elif op == "BatchNorm": attr["fillcolor"] = cm[3] - elif op == "Activation" or op == "LeakyReLU": + elif op in ('Activation', 'LeakyReLU'): label = r"%s\n%s" % (op, node["attrs"]["act_type"]) attr["fillcolor"] = cm[2] elif op == "Pooling": @@ -318,7 +318,7 @@ def looks_like_weight(name): "x".join(_str2tuple(node["attrs"]["stride"])) if "stride" in node["attrs"] else "1") attr["fillcolor"] = cm[4] - elif op == "Concat" or op == "Flatten" or op == "Reshape": + elif op in ("Concat", "Flatten", "Reshape"): attr["fillcolor"] = cm[5] elif op == "Softmax": attr["fillcolor"] = cm[6] diff --git a/tools/caffe_converter/convert_model.py b/tools/caffe_converter/convert_model.py index d5c069b57066..5c2a11e4b88b 100644 --- a/tools/caffe_converter/convert_model.py +++ b/tools/caffe_converter/convert_model.py @@ -77,9 +77,8 @@ def convert_model(prototxt_fname, caffemodel_fname, output_prefix=None): layers_proto = caffe_parser.get_layers(caffe_parser.read_prototxt(prototxt_fname)) for layer_name, layer_type, layer_blobs in layer_iter: - if layer_type == 'Convolution' or layer_type == 'InnerProduct' \ - or layer_type == 4 or layer_type == 14 or layer_type == 'PReLU' \ - or layer_type == 'Deconvolution' or layer_type == 39: + if layer_type in ('Convolution', 'InnerProduct', 4, 14, 'PReLU', 'Deconvolution', + 39): if layer_type == 'PReLU': assert (len(layer_blobs) == 1) weight_name = layer_name + '_gamma' @@ -99,7 +98,7 @@ def 
convert_model(prototxt_fname, caffemodel_fname, output_prefix=None): wmat = np.array(layer_blobs[0].data).reshape(wmat_dim) channels = wmat_dim[1] - if channels == 3 or channels == 4: # RGB or RGBA + if channels in (3, 4): # RGB or RGBA if first_conv: # Swapping BGR of caffe into RGB in mxnet wmat[:, [0, 2], :, :] = wmat[:, [2, 0], :, :] @@ -133,8 +132,7 @@ def convert_model(prototxt_fname, caffemodel_fname, output_prefix=None): arg_params[weight_name] = mx.nd.zeros(wmat.shape) arg_params[weight_name][:] = wmat - - if first_conv and (layer_type == 'Convolution' or layer_type == 4): + if first_conv and layer_type in ('Convolution', 4): first_conv = False elif layer_type == 'Scale': From 383a2d0e4791a3899272c4ecf4ff2535f558fb53 Mon Sep 17 00:00:00 2001 From: Junru Shao Date: Tue, 21 Aug 2018 03:20:41 +0800 Subject: [PATCH 009/160] Fix flaky tests in control flow (#12192) * Fix flaky tests in control flow * Trigger CI * Trigger CI * Trigger CI --- .../unittest/test_contrib_control_flow.py | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/tests/python/unittest/test_contrib_control_flow.py b/tests/python/unittest/test_contrib_control_flow.py index a4b794c95951..76d0218775b4 100644 --- a/tests/python/unittest/test_contrib_control_flow.py +++ b/tests/python/unittest/test_contrib_control_flow.py @@ -237,11 +237,11 @@ def _zeros_like_dict(name_list): for imp_out, sym_out in zip(imp_outs, sym_outs): if imp_out is None or sym_out is None: continue - assert_almost_equal(imp_out, sym_out, rtol=1e-4, atol=1e-4) + assert_almost_equal(imp_out, sym_out, rtol=1e-3, atol=1e-3) for imp_grad, sym_grad in zip(imp_grads, sym_grads): if imp_grad is None or sym_grad is None: continue - assert_almost_equal(imp_grad, sym_grad, rtol=1e-4, atol=1e-4) + assert_almost_equal(imp_grad, sym_grad, rtol=1e-3, atol=1e-3) @with_seed() @@ -888,9 +888,9 @@ def _get_sym_result(is_train, args, args_grad, out_grad): assert len(imp_out) == len(sym_out) assert len(imp_grad) == len(sym_grad) for x, y in zip(imp_out, sym_out): - assert_almost_equal(x, y, rtol=1e-4, atol=1e-4) + assert_almost_equal(x, y, rtol=1e-3, atol=1e-3) for x, y in zip(imp_grad, sym_grad): - assert_almost_equal(x, y, rtol=1e-4, atol=1e-4) + assert_almost_equal(x, y, rtol=1e-3, atol=1e-3) @with_seed() @@ -966,14 +966,14 @@ def _func(*states): for x, y in zip(e_1.outputs, e_2.outputs): x = x.asnumpy() y = y.asnumpy() - assert_almost_equal(x, y, rtol=1e-4, atol=1e-4) + assert_almost_equal(x, y, rtol=1e-3, atol=1e-3) grad_keys = list(e_2.grad_dict.keys()) e_1_grad = [e_1.grad_dict[x] for x in grad_keys] e_2_grad = [e_2.grad_dict[x] for x in grad_keys] for x, y in zip(e_1_grad, e_2_grad): x = x.asnumpy() y = y.asnumpy() - assert_almost_equal(x, y, rtol=1e-4, atol=1e-4) + assert_almost_equal(x, y, rtol=1e-3, atol=1e-3) def _verify_cond(cond_func, then_func, else_func, input_var_shapes, free_var_shapes, is_train): @@ -1053,11 +1053,11 @@ def _get_symbolic_result(out_grads): for imp_out, sym_out in zip(imp_outs, sym_outs): if imp_out is None or sym_out is None: continue - assert_almost_equal(imp_out, sym_out, rtol=1e-5, atol=1e-5) + assert_almost_equal(imp_out, sym_out, rtol=1e-3, atol=1e-3) for imp_grad, sym_grad in zip(imp_grads, sym_grads): if imp_grad is None or sym_grad is None: continue - assert_almost_equal(imp_grad, sym_grad, rtol=1e-5, atol=1e-5) + assert_almost_equal(imp_grad, sym_grad, rtol=1e-3, atol=1e-3) @with_seed() @@ -1174,7 +1174,7 @@ def check_contrib_rnn(cell_type, num_states): trainer = gluon.Trainer(params2, 
'sgd', {'learning_rate' : 0.03}) with mx.autograd.record(): res2 = layer(rnn_data, states) - assert_almost_equal(res1.asnumpy(), res2.asnumpy(), rtol=0.001, atol=0.0001) + assert_almost_equal(res1.asnumpy(), res2.asnumpy(), rtol=1e-3, atol=1e-3) res2.backward() trainer.step(batch_size) @@ -1182,7 +1182,7 @@ def check_contrib_rnn(cell_type, num_states): weight1 = val.data() weight2 = params2[key].data() assert_almost_equal(weight1.asnumpy(), weight2.asnumpy(), - rtol=0.001, atol=0.0001) + rtol=1e-3, atol=1e-3) @with_seed() @@ -1294,7 +1294,7 @@ def verify_foreach(step, in_syms, state_syms, free_syms, for i in range(len(outs)): assert e.outputs[i].shape == outs[i].shape assert_almost_equal(e.outputs[i].asnumpy(), outs[i].asnumpy(), - rtol=0.001, atol=0.0001) + rtol=1e-3, atol=1e-3) if (is_train): all_ins = _as_list(in_arrs)[:] all_ins.extend(init_states) @@ -1303,7 +1303,7 @@ def verify_foreach(step, in_syms, state_syms, free_syms, for i in range(size): assert_almost_equal(all_ins[i].grad.asnumpy(), e.grad_arrays[i].asnumpy(), - rtol=0.001, atol=0.0001) + rtol=1e-3, atol=1e-3) # Test cases: # * graph inputs are stored in different orders. @@ -1559,11 +1559,11 @@ def step_nd(in1, states): assert isinstance(states, list) assert len(states) == 1 res = mx.nd.broadcast_add(out, states[0]) - assert_almost_equal(res.asnumpy(), e.outputs[0].asnumpy(), rtol=0.001, atol=0.0001) + assert_almost_equal(res.asnumpy(), e.outputs[0].asnumpy(), rtol=1e-3, atol=1e-3) res.backward(out_grads[0]) - assert_almost_equal(data.grad.asnumpy(), data_grad.asnumpy()) - assert_almost_equal(state.grad.asnumpy(), state_grad.asnumpy()) + assert_almost_equal(data.grad.asnumpy(), data_grad.asnumpy(), rtol=1e-3, atol=1e-3) + assert_almost_equal(state.grad.asnumpy(), state_grad.asnumpy(), rtol=1e-3, atol=1e-3) def check_foreach_rnn(cell_type, num_states): @@ -1649,12 +1649,12 @@ def sym_group(out): for i in range(len(outputs2)): assert_almost_equal(outputs1[i].asnumpy(), outputs2[i].asnumpy(), - rtol=0.001, atol=0.0001) + rtol=1e-3, atol=1e-3) input_names = out.list_inputs() for i in range(len(e1.grad_arrays)): name = input_names[i] assert_almost_equal(args_grad1[name].asnumpy(), args_grad2[name].asnumpy(), - rtol=0.001, atol=0.0001) + rtol=1e-3, atol=1e-3) @with_seed() @@ -1696,7 +1696,7 @@ def step2(data, states): with mx.autograd.record(): res2 = layer(data, [states]) - assert_almost_equal(res1.asnumpy(), res2.asnumpy(), rtol=0.001, atol=0.0001) + assert_almost_equal(res1.asnumpy(), res2.asnumpy(), rtol=1e-3, atol=1e-3) @with_seed() @@ -1730,7 +1730,7 @@ def hybrid_forward(self, F, data): res2 = layer(data) with mx.autograd.record(): res2 = layer(data) - assert_almost_equal(res1.asnumpy(), res2.asnumpy(), rtol=0.001, atol=0.0001) + assert_almost_equal(res1.asnumpy(), res2.asnumpy(), rtol=1e-3, atol=1e-3) @with_seed() @@ -1762,7 +1762,7 @@ def hybrid_forward(self, F, data): res2 = layer(data) with mx.autograd.record(): res2 = layer(data) - assert_almost_equal(res1.asnumpy(), res2.asnumpy(), rtol=0.001, atol=0.0001) + assert_almost_equal(res1.asnumpy(), res2.asnumpy(), rtol=1e-3, atol=1e-3) if __name__ == '__main__': From e3ff1671f3dd4090ffede873d77113fe164365fc Mon Sep 17 00:00:00 2001 From: Kellen Sunderland Date: Mon, 20 Aug 2018 21:37:36 +0200 Subject: [PATCH 010/160] Tweaked comments for OpExecutor. 
(#12253) --- src/executor/exec_pass.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/executor/exec_pass.h b/src/executor/exec_pass.h index 8c483e9b2b8e..cd1db0ac1944 100644 --- a/src/executor/exec_pass.h +++ b/src/executor/exec_pass.h @@ -66,13 +66,13 @@ class OpExecutor { virtual ~OpExecutor() {} /*! * \brief Setup the executor for given NDArray member - * this can be called multiple times if NDArray changed during reshape. - * It is safe to call it via asynchronize engine lambda + * This can be called multiple times if NDArray changed during reshape. + * It is safe to call it via an asynchronous engine lambda. */ virtual void Setup() = 0; /*! * \brief run the operator given runtime context on device. - * This function call do not synchronize the stream. + * This function call does not synchronize the stream. * \param rctx The runtime context passed in by environment. */ virtual void Run(RunContext rctx, bool is_gpu) = 0; From c479eb24eaab8857dca254ea76c1179b0f6fe36f Mon Sep 17 00:00:00 2001 From: "Joshua Z. Zhang" Date: Mon, 20 Aug 2018 14:11:01 -0700 Subject: [PATCH 011/160] fix potential floating number overflow, enable float16 (#12118) * fix potential floating number overflow, enable float16 * fix cuda impl * fix cuda imple * fix template substitution for windows * half_f substantiate operand + fix * remove ambiguous operand + for mshadow half_T * fix con't * use int32_t as indices * use overload * try remove ambiguous function overloading * thrust version limit * change sizeof cast from floor to ceil when allocating buffers * cleaner * fix alignment of pointers --- src/operator/contrib/bounding_box-inl.cuh | 4 +- src/operator/contrib/bounding_box-inl.h | 86 ++++++----- src/operator/tensor/sort_op-inl.cuh | 135 ++++++++++++++++-- .../python/unittest/test_contrib_operator.py | 25 ++-- 4 files changed, 184 insertions(+), 66 deletions(-) diff --git a/src/operator/contrib/bounding_box-inl.cuh b/src/operator/contrib/bounding_box-inl.cuh index fb1dacc11f4f..fd5e30b25b2d 100644 --- a/src/operator/contrib/bounding_box-inl.cuh +++ b/src/operator/contrib/bounding_box-inl.cuh @@ -45,9 +45,9 @@ struct valid_score { template int FilterScores(mshadow::Tensor out_scores, - mshadow::Tensor out_sorted_index, + mshadow::Tensor out_sorted_index, mshadow::Tensor scores, - mshadow::Tensor sorted_index, + mshadow::Tensor sorted_index, float valid_thresh) { valid_score pred(static_cast(valid_thresh)); DType * end_scores = thrust::copy_if(thrust::device, scores.dptr_, scores.dptr_ + scores.MSize(), diff --git a/src/operator/contrib/bounding_box-inl.h b/src/operator/contrib/bounding_box-inl.h index f739dbc8a52c..8e963461ec06 100644 --- a/src/operator/contrib/bounding_box-inl.h +++ b/src/operator/contrib/bounding_box-inl.h @@ -150,9 +150,9 @@ inline uint32_t BoxNMSNumVisibleOutputs(const NodeAttrs& attrs) { template int FilterScores(mshadow::Tensor out_scores, - mshadow::Tensor out_sorted_index, + mshadow::Tensor out_sorted_index, mshadow::Tensor scores, - mshadow::Tensor sorted_index, + mshadow::Tensor sorted_index, float valid_thresh) { index_t j = 0; for (index_t i = 0; i < scores.size(0); i++) { @@ -230,7 +230,7 @@ MSHADOW_XINLINE DType BoxArea(const DType *box, int encode) { /*! 
* \brief compute areas specialized for nms to reduce computation - * + * * \param i the launched thread index (total thread num_batch * topk) * \param out 1d array for areas (size num_batch * num_elem) * \param in 1st coordinate of 1st box (buffer + coord_start) @@ -243,7 +243,7 @@ MSHADOW_XINLINE DType BoxArea(const DType *box, int encode) { struct compute_area { template MSHADOW_XINLINE static void Map(int i, DType *out, const DType *in, - const DType *indices, const DType *batch_start, + const int32_t *indices, const int32_t *batch_start, int topk, int num_elem, int stride, int encode) { int b = i / topk; int k = i % topk; @@ -302,7 +302,7 @@ MSHADOW_XINLINE DType Intersect(const DType *a, const DType *b, int encode) { */ struct nms_impl { template - MSHADOW_XINLINE static void Map(int i, DType *index, const DType *batch_start, + MSHADOW_XINLINE static void Map(int i, int32_t *index, const int32_t *batch_start, const DType *input, const DType *areas, int k, int ref, int num, int stride, int offset_box, int offset_id, @@ -326,8 +326,7 @@ struct nms_impl { intersect *= Intersect(input + ref_offset + 1, input + pos_offset + 1, encode); int ref_area_offset = static_cast(index[ref]); int pos_area_offset = static_cast(index[pos]); - DType iou = intersect / (areas[ref_area_offset] + areas[pos_area_offset] - - intersect); + DType iou = intersect / (areas[ref_area_offset] + areas[pos_area_offset] - intersect); if (iou > thresh) { index[pos] = -1; } @@ -336,7 +335,7 @@ struct nms_impl { /*! * \brief Assign output of nms by indexing input - * + * * \param i the launched thread index (total num_batch) * \param out output array [cls, conf, b0, b1, b2, b3] * \param record book keeping the selected index for backward @@ -349,7 +348,7 @@ struct nms_impl { struct nms_assign { template MSHADOW_XINLINE static void Map(int i, DType *out, DType *record, const DType *input, - const DType *index, const DType *batch_start, + const int32_t *index, const int32_t *batch_start, int k, int num, int stride) { int count = 0; for (int j = 0; j < k; ++j) { @@ -404,7 +403,7 @@ void BoxNMSForward(const nnvm::NodeAttrs& attrs, int num_batch = indim <= 2? 
1 : in_shape.ProdShape(0, indim - 2); int num_elem = in_shape[indim - 2]; int width_elem = in_shape[indim - 1]; - MSHADOW_SGL_DBL_TYPE_SWITCH(outputs[0].type_flag_, DType, { + MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, { Tensor data = inputs[box_nms_enum::kData] .get_with_shape(Shape3(num_batch, num_elem, width_elem), s); Tensor out = outputs[box_nms_enum::kOut] @@ -415,25 +414,33 @@ void BoxNMSForward(const nnvm::NodeAttrs& attrs, // prepare workspace Shape<1> sort_index_shape = Shape1(num_batch * num_elem); Shape<3> buffer_shape = Shape3(num_batch, num_elem, width_elem); - index_t workspace_size = 4 * sort_index_shape.Size(); Shape<1> batch_start_shape = Shape1(num_batch + 1); - workspace_size += batch_start_shape.Size(); + + // index + index_t int32_size = sort_index_shape.Size() * 3 + batch_start_shape.Size(); + index_t dtype_size = sort_index_shape.Size() * 2; if (req[0] == kWriteInplace) { - workspace_size += buffer_shape.Size(); + dtype_size += buffer_shape.Size(); } + // ceil up when sizeof(DType) is larger than sizeof(DType) + index_t int32_offset = (int32_size * sizeof(int32_t) - 1) / sizeof(DType) + 1; + index_t workspace_size = int32_offset + dtype_size; Tensor workspace = ctx.requested[box_nms_enum::kTempSpace] .get_space_typed(Shape1(workspace_size), s); - Tensor sorted_index(workspace.dptr_, sort_index_shape, s); - Tensor scores(sorted_index.dptr_ + sorted_index.MSize(), + Tensor sorted_index( + reinterpret_cast(workspace.dptr_), sort_index_shape, s); + Tensor all_sorted_index(sorted_index.dptr_ + sorted_index.MSize(), sort_index_shape, s); - Tensor batch_id(scores.dptr_ + scores.MSize(), sort_index_shape, - s); - Tensor areas(batch_id.dptr_ + batch_id.MSize(), sort_index_shape, s); - Tensor batch_start(areas.dptr_ + areas.MSize(), batch_start_shape, s); + Tensor batch_id( + all_sorted_index.dptr_ + all_sorted_index.MSize(), sort_index_shape, s); + Tensor batch_start(batch_id.dptr_ + batch_id.MSize(), batch_start_shape, s); + Tensor scores(workspace.dptr_ + int32_offset, + sort_index_shape, s); + Tensor areas(scores.dptr_ + scores.MSize(), sort_index_shape, s); Tensor buffer = data; if (req[0] == kWriteInplace) { // make copy - buffer = Tensor(batch_start.dptr_ + batch_start.MSize(), buffer_shape, s); + buffer = Tensor(areas.dptr_ + areas.MSize(), buffer_shape, s); buffer = F(data); } @@ -451,10 +458,10 @@ void BoxNMSForward(const nnvm::NodeAttrs& attrs, } // use batch_id and areas as temporary storage - Tensor all_scores = batch_id; - Tensor all_sorted_index = areas; + Tensor all_scores = areas; + // Tensor all_sorted_index = areas; all_scores = reshape(slice<2>(buffer, score_index, score_index + 1), all_scores.shape_); - all_sorted_index = range(0, num_batch * num_elem); + all_sorted_index = range(0, num_batch * num_elem); // filter scores but keep original sorted_index value // move valid score and index to the front, return valid size @@ -474,19 +481,19 @@ void BoxNMSForward(const nnvm::NodeAttrs& attrs, // only sort the valid scores and batch_id Shape<1> valid_score_shape = Shape1(num_valid); Tensor valid_scores(scores.dptr_, valid_score_shape, s); - Tensor valid_sorted_index(sorted_index.dptr_, valid_score_shape, s); - Tensor valid_batch_id(batch_id.dptr_, valid_score_shape, s); + Tensor valid_sorted_index(sorted_index.dptr_, valid_score_shape, s); + Tensor valid_batch_id(batch_id.dptr_, valid_score_shape, s); // sort index by batch_id then score (stable sort) mxnet::op::SortByKey(valid_scores, valid_sorted_index, false); - valid_batch_id = 
F(valid_sorted_index / ScalarExp(num_elem)); + valid_batch_id = (valid_sorted_index / ScalarExp(num_elem)); mxnet::op::SortByKey(valid_batch_id, valid_sorted_index, true); // calculate batch_start: accumulated sum to denote 1st sorted_index for a given batch_index - valid_batch_id = F(valid_sorted_index / ScalarExp(num_elem)); + valid_batch_id = (valid_sorted_index / ScalarExp(num_elem)); for (int b = 0; b < num_batch + 1; b++) { slice<0>(batch_start, b, b + 1) = reduce_keepdim( - F(valid_batch_id, ScalarExp(b)), 0); + F(valid_batch_id, ScalarExp(b)), 0); } // pre-compute areas of candidates @@ -721,11 +728,11 @@ inline bool MatchingShape(const nnvm::NodeAttrs& attrs, struct bipartite_matching { template MSHADOW_XINLINE static void Map(int i, DType *row_marker, DType *col_marker, - const DType *scores, const DType *sorted_index, + const DType *scores, const int32_t *sorted_index, int num_batch, int num_row, int num_col, float threshold, bool is_ascend, int topk) { int stride = num_row * num_col; - const DType *index = sorted_index + i * stride; + const int32_t *index = sorted_index + i * stride; const DType *score = scores + i * stride; DType *rmarker = row_marker + i * num_row; DType *cmarker = col_marker + i * num_col; @@ -769,7 +776,7 @@ void BipartiteMatchingForward(const nnvm::NodeAttrs& attrs, int row = dshape[dshape.ndim() - 2]; int col = dshape[dshape.ndim() - 1]; int batch_size = dshape.Size() / row / col; - MSHADOW_SGL_DBL_TYPE_SWITCH(outputs[0].type_flag_, DType, { + MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, { Tensor scores = inputs[0] .get_with_shape(Shape1(dshape.Size()), s); Tensor row_marker = outputs[0] @@ -777,23 +784,24 @@ void BipartiteMatchingForward(const nnvm::NodeAttrs& attrs, Tensor col_marker = outputs[1] .get_with_shape(Shape2(batch_size, col), s); Shape<1> sort_index_shape = Shape1(dshape.Size()); - index_t workspace_size = sort_index_shape.Size() * 3; + index_t workspace_size = sort_index_shape.Size(); + workspace_size += ((sort_index_shape.Size() * sizeof(int32_t) - 1) / sizeof(DType)) * 2; Tensor workspace = ctx.requested[0] .get_space_typed(Shape1(workspace_size), s); - Tensor sorted_index(workspace.dptr_, - sort_index_shape, s); - Tensor batch_id(sorted_index.dptr_ + sorted_index.MSize(), + Tensor scores_copy(workspace.dptr_, sort_index_shape, s); - Tensor scores_copy(batch_id.dptr_ + batch_id.MSize(), + Tensor sorted_index(reinterpret_cast( + scores_copy.dptr_ + scores_copy.MSize()), sort_index_shape, s); + Tensor batch_id(sorted_index.dptr_ + sorted_index.MSize(), sort_index_shape, s); // sort according to score scores_copy = F(scores); - sorted_index = range(0, dshape.Size()); + sorted_index = range(0, dshape.Size()); mxnet::op::SortByKey(scores_copy, sorted_index, param.is_ascend); - batch_id = F(sorted_index / ScalarExp(row * col)); + batch_id = (sorted_index / ScalarExp(row * col)); mxnet::op::SortByKey(batch_id, scores_copy, true); - batch_id = F(sorted_index / ScalarExp(row * col)); + batch_id = (sorted_index / ScalarExp(row * col)); mxnet::op::SortByKey(batch_id, sorted_index, true); // bipartite matching, parallelization is limited to batch_size diff --git a/src/operator/tensor/sort_op-inl.cuh b/src/operator/tensor/sort_op-inl.cuh index 5ad31053f92e..1a8e2325ef4a 100644 --- a/src/operator/tensor/sort_op-inl.cuh +++ b/src/operator/tensor/sort_op-inl.cuh @@ -24,6 +24,7 @@ */ #ifndef MXNET_OPERATOR_TENSOR_SORT_OP_INL_CUH_ #define MXNET_OPERATOR_TENSOR_SORT_OP_INL_CUH_ +#include #include #include #if defined(_MSC_VER) && 
__CUDACC_VER_MAJOR__ == 8 && __CUDACC_VER_BUILD__ != 44 @@ -40,6 +41,29 @@ namespace mxnet { namespace op { +namespace cuda { +template +struct less_half +{ + typedef T first_argument_type; + typedef T second_argument_type; + typedef bool result_type; + __host__ __device__ bool operator()(const T &lhs, const T &rhs) const { + return static_cast(lhs) < static_cast(rhs); + } +}; + +template +struct greater_half +{ + typedef T first_argument_type; + typedef T second_argument_type; + typedef bool result_type; + __host__ __device__ bool operator()(const T &lhs, const T &rhs) const { + return static_cast(lhs) < static_cast(rhs); + } +}; +} template inline typename std::enable_if::value, size_t>::type @@ -57,9 +81,12 @@ SortByKeyWorkspaceSize(const size_t num_keys) { } template -inline void SortByKey(mshadow::Tensor keys, mshadow::Tensor values, - bool is_ascend, mshadow::Tensor* workspace, - const int begin_bit, const int end_bit) { +inline typename std::enable_if::value || + std::is_same::value), void>::type +SortByKeyImpl(mshadow::Tensor keys, + mshadow::Tensor values, bool is_ascend, + mshadow::Tensor* workspace, + const int begin_bit, const int end_bit) { CHECK_EQ(keys.CheckContiguous(), true); CHECK_EQ(values.CheckContiguous(), true); #if CUDA_VERSION >= 7000 @@ -128,18 +155,100 @@ inline void SortByKey(mshadow::Tensor keys, mshadow::Tensor -inline void SortByKey(mshadow::Tensor keys, - mshadow::Tensor values, bool is_ascend, - mshadow::Tensor* workspace, const int begin_bit, const int end_bit) { - LOG(FATAL) << "SortByKey for half_t is not implemented!"; +template +inline typename std::enable_if<((!std::is_same::value) && + std::is_same::value), void>::type +SortByKeyImpl(mshadow::Tensor keys, + mshadow::Tensor values, bool is_ascend, + mshadow::Tensor* workspace, + const int begin_bit, const int end_bit) { + CHECK_EQ(keys.CheckContiguous(), true); + CHECK_EQ(values.CheckContiguous(), true); +#if CUDA_VERSION >= 9000 + cudaStream_t stream = mshadow::Stream::GetStream(keys.stream_); + thrust::device_ptr key_iter = thrust::device_pointer_cast(keys.dptr_); + thrust::device_ptr value_iter = thrust::device_pointer_cast( + reinterpret_cast(values.dptr_)); + if (is_ascend) { + thrust::stable_sort_by_key( + thrust::cuda::par.on(stream), + key_iter.get(), key_iter.get() + (keys.size(0)), value_iter.get(), thrust::less()); + } else { + thrust::stable_sort_by_key( + thrust::cuda::par.on(stream), + key_iter.get(), key_iter.get() + (keys.size(0)), value_iter.get(), thrust::greater()); + } + MSHADOW_CUDA_POST_KERNEL_CHECK(SortByKey); +#else + LOG(FATAL) << "SortByKey with fp16 values is only supported for CUDA version >= 9.0"; +#endif +} + +template +inline typename std::enable_if<(std::is_same::value && + (!std::is_same::value)), void>::type +SortByKeyImpl(mshadow::Tensor keys, + mshadow::Tensor values, bool is_ascend, + mshadow::Tensor* workspace, + const int begin_bit, const int end_bit) { + CHECK_EQ(keys.CheckContiguous(), true); + CHECK_EQ(values.CheckContiguous(), true); +#if CUDA_VERSION >= 9000 + cudaStream_t stream = mshadow::Stream::GetStream(keys.stream_); + thrust::device_ptr key_iter = thrust::device_pointer_cast( + reinterpret_cast(keys.dptr_)); + thrust::device_ptr value_iter = thrust::device_pointer_cast(values.dptr_); + if (is_ascend) { + thrust::stable_sort_by_key( + thrust::cuda::par.on(stream), + key_iter, key_iter + (keys.size(0)), value_iter, cuda::less_half()); + } else { + thrust::stable_sort_by_key( + thrust::cuda::par.on(stream), + key_iter, key_iter + (keys.size(0)), 
value_iter, cuda::greater_half()); + } + MSHADOW_CUDA_POST_KERNEL_CHECK(SortByKey); +#else + LOG(FATAL) << "SortByKey with fp16 keys is only supported for CUDA version >= 9.0"; +#endif +} + +// use thrust sorting when keys or values are half_t +template +inline typename std::enable_if<(std::is_same::value && + std::is_same::value), void>::type +SortByKeyImpl(mshadow::Tensor keys, + mshadow::Tensor values, bool is_ascend, + mshadow::Tensor* workspace, + const int begin_bit, const int end_bit) { + CHECK_EQ(keys.CheckContiguous(), true); + CHECK_EQ(values.CheckContiguous(), true); +#if CUDA_VERSION >= 9000 + cudaStream_t stream = mshadow::Stream::GetStream(keys.stream_); + thrust::device_ptr key_iter = thrust::device_pointer_cast( + reinterpret_cast(keys.dptr_)); + thrust::device_ptr value_iter = thrust::device_pointer_cast( + reinterpret_cast(values.dptr_)); + if (is_ascend) { + thrust::stable_sort_by_key( + thrust::cuda::par.on(stream), + key_iter, key_iter + (keys.size(0)), value_iter, cuda::less_half()); + } else { + thrust::stable_sort_by_key( + thrust::cuda::par.on(stream), + key_iter, key_iter + (keys.size(0)), value_iter, cuda::greater_half()); + } + MSHADOW_CUDA_POST_KERNEL_CHECK(SortByKey); +#else + LOG(FATAL) << "SortByKey with fp16 keys and values is only supported for CUDA version >= 9.0"; +#endif } -template -inline void SortByKey(mshadow::Tensor keys, - mshadow::Tensor values, bool is_ascend, - mshadow::Tensor* workspace, const int begin_bit, const int end_bit) { - LOG(FATAL) << "SortByKey for half_t is not implemented!"; +template +inline void SortByKey(mshadow::Tensor keys, mshadow::Tensor values, + bool is_ascend, mshadow::Tensor* workspace, + const int begin_bit, const int end_bit) { + SortByKeyImpl(keys, values, is_ascend, workspace, begin_bit, end_bit); } } // namespace op diff --git a/tests/python/unittest/test_contrib_operator.py b/tests/python/unittest/test_contrib_operator.py index a220f08d20d4..fc6c1be9c3a1 100644 --- a/tests/python/unittest/test_contrib_operator.py +++ b/tests/python/unittest/test_contrib_operator.py @@ -28,11 +28,12 @@ def test_box_nms_op(): def test_box_nms_forward(data, expected, thresh=0.5, valid=0, topk=-1, coord=2, score=1, cid=0, force=False, in_format='corner', out_format='corner'): - data = mx.nd.array(data) - out = mx.contrib.nd.box_nms(data, overlap_thresh=thresh, valid_thresh=valid, topk=topk, - coord_start=coord, score_index=score, id_index=cid, - force_suppress=force, in_format=in_format, out_format=out_format) - assert_almost_equal(out.asnumpy(), expected) + for dtype in ['float16', 'float32', 'float64']: + data = mx.nd.array(data, dtype=dtype) + out = mx.contrib.nd.box_nms(data, overlap_thresh=thresh, valid_thresh=valid, topk=topk, + coord_start=coord, score_index=score, id_index=cid, + force_suppress=force, in_format=in_format, out_format=out_format) + assert_almost_equal(out.asnumpy(), expected.astype(dtype), rtol=1e-3, atol=1e-3) def test_box_nms_backward(data, grad, expected, thresh=0.5, valid=0, topk=-1, coord=2, score=1, cid=0, force=False, in_format='corner', out_format='corner'): @@ -233,13 +234,13 @@ def generate_boxes(dims): def test_bipartite_matching_op(): def assert_match(inputs, x, y, threshold, is_ascend=False): - inputs = mx.nd.array(inputs) - x = np.array(x) - y = np.array(y) - a, b = mx.nd.contrib.bipartite_matching(inputs, threshold=threshold, is_ascend=is_ascend) - print(a, b) - assert_array_equal(a.asnumpy().astype('int64'), x.astype('int64')) - assert_array_equal(b.asnumpy().astype('int64'), y.astype('int64')) + 
for dtype in ['float16', 'float32', 'float64']: + inputs = mx.nd.array(inputs, dtype=dtype) + x = np.array(x, dtype=dtype) + y = np.array(y, dtype=dtype) + a, b = mx.nd.contrib.bipartite_matching(inputs, threshold=threshold, is_ascend=is_ascend) + assert_array_equal(a.asnumpy().astype('int64'), x.astype('int64')) + assert_array_equal(b.asnumpy().astype('int64'), y.astype('int64')) assert_match([[0.5, 0.6], [0.1, 0.2], [0.3, 0.4]], [1, -1, 0], [2, 0], 1e-12, False) assert_match([[0.5, 0.6], [0.1, 0.2], [0.3, 0.4]], [-1, 0, 1], [1, 2], 100, True) From 2899715921612ef4dd147004292b5b5d0f83320b Mon Sep 17 00:00:00 2001 From: Sam Skalicky Date: Mon, 20 Aug 2018 16:21:22 -0700 Subject: [PATCH 012/160] [MXNET-792] Fix for issue #9816 with dropout operator and RNG (#12091) * added mshadow op for threshold_eq (theshold currently does <, this will do <=) modified dropout operator to use threshold_eq instead of theshold this will ensure equivalent behavior for the random numbers generated on CPU [0, 1) and GPU (0, 1] removed fixed seed for test_dropout * removed comment about flaky test --- src/operator/mshadow_op.h | 1 + src/operator/nn/dropout-inl.h | 3 ++- tests/python/unittest/test_operator.py | 4 ++-- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/operator/mshadow_op.h b/src/operator/mshadow_op.h index 339719375fdd..06a223dda398 100644 --- a/src/operator/mshadow_op.h +++ b/src/operator/mshadow_op.h @@ -275,6 +275,7 @@ MXNET_UNARY_MATH_OP(square_grad, 2.0f * math::id(a)); /*! \brief used for generate Bernoulli mask */ MXNET_BINARY_MATH_OP_NC(threshold, a < b ? DType(1) : DType(0)); +MXNET_BINARY_MATH_OP_NC(threshold_eq, a <= b ? DType(1) : DType(0)); /*! \brief used for generate element of abs */ MXNET_UNARY_MATH_OP(abs, math::fabs(a)); // NOLINT(*) diff --git a/src/operator/nn/dropout-inl.h b/src/operator/nn/dropout-inl.h index 8e4aac613540..b7c40fbdf52a 100644 --- a/src/operator/nn/dropout-inl.h +++ b/src/operator/nn/dropout-inl.h @@ -206,7 +206,7 @@ class DropoutOp { const real_t pkeep) { RNG_KERNEL_LOOP(xpu, DType, id, gen, N, step, { const real_t rand_num = static_cast(genImpl.uniform()); - mask_out[i] = mshadow_op::threshold::Map(rand_num, pkeep) * (1.0f / pkeep); + mask_out[i] = mshadow_op::threshold_eq::Map(rand_num, pkeep) * (1.0f / pkeep); dropout_out[i] = input_data[i] * mask_out[i]; }); } @@ -258,6 +258,7 @@ class DropoutOp { this->pkeep_); return; } + // initialize the mask LaunchRNG(s, pgen, mask.Size(), mask.dptr(), diff --git a/tests/python/unittest/test_operator.py b/tests/python/unittest/test_operator.py index 125666ba832c..0ff9a106a728 100644 --- a/tests/python/unittest/test_operator.py +++ b/tests/python/unittest/test_operator.py @@ -5722,8 +5722,7 @@ def test_stack(): check_numeric_gradient(out, inputs) -# test fails with seed 990952066: 0 output seen with dropout ratio=0. 
See issue #9816 -@with_seed(1234) +@with_seed() def test_dropout(): def zero_count(array, ratio): zeros = 0 @@ -5775,6 +5774,7 @@ def check_dropout_ratio(ratio, shape): exe.arg_arrays[0][:] = 1 exe.forward(is_train=True) + if not math.isnan(max_value): assert exe.outputs[0].asnumpy().max() > 0 else: From a4aced7123ba8aba13f0525276c909f4a78a04ee Mon Sep 17 00:00:00 2001 From: Kellen Sunderland Date: Tue, 21 Aug 2018 01:53:54 +0200 Subject: [PATCH 013/160] Temporarily disable ARMv7 builds (#12260) --- Jenkinsfile | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 6eaee43df043..6757490c803d 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -363,16 +363,16 @@ core_logic: { } } }, - 'ARMv7':{ - node(NODE_LINUX_CPU) { - ws('workspace/build-ARMv7') { - timeout(time: max_time, unit: 'MINUTES') { - utils.init_git() - utils.docker_run('armv7', 'build_armv7', false) - } - } - } - }, + // 'ARMv7':{ + // node(NODE_LINUX_CPU) { + // ws('workspace/build-ARMv7') { + // timeout(time: max_time, unit: 'MINUTES') { + // utils.init_git() + // utils.docker_run('armv7', 'build_armv7', false) + // } + // } + // } + // }, 'ARMv6':{ node(NODE_LINUX_CPU) { ws('workspace/build-ARMv6') { From d7b39f4c055a46c1b819b0dff4ae468c1983e016 Mon Sep 17 00:00:00 2001 From: Hao Jin Date: Mon, 20 Aug 2018 22:38:08 -0700 Subject: [PATCH 014/160] Add support for kAddTo in softmax backward (#11836) --- src/operator/contrib/ctc_loss-inl.h | 2 +- src/operator/nn/softmax-inl.h | 44 ++++++++++++++------------ tests/python/unittest/test_operator.py | 19 +++++++---- 3 files changed, 37 insertions(+), 28 deletions(-) diff --git a/src/operator/contrib/ctc_loss-inl.h b/src/operator/contrib/ctc_loss-inl.h index 0e7b63e58fb3..72209ae286c6 100644 --- a/src/operator/contrib/ctc_loss-inl.h +++ b/src/operator/contrib/ctc_loss-inl.h @@ -426,7 +426,7 @@ class CTCLossOp : public Operator { workspace_bytes)); if (req_grad) { - mxnet_op::SoftmaxGrad(s, + mxnet_op::SoftmaxGrad(s, prob.dptr_, grad.dptr_, grad.dptr_, data.shape_, 2, 1.0); Assign(grad, mxnet::kWriteInplace, grad * alphabet_size); } diff --git a/src/operator/nn/softmax-inl.h b/src/operator/nn/softmax-inl.h index 64b436e7ea0f..4a19db7c36bc 100644 --- a/src/operator/nn/softmax-inl.h +++ b/src/operator/nn/softmax-inl.h @@ -111,7 +111,7 @@ struct log_softmax_bwd { }; -template +template inline void SoftmaxGrad(Stream *s, DType *out, DType *ograd, DType *igrad, Shape shape, int axis, const DType temperature) { @@ -134,13 +134,16 @@ inline void SoftmaxGrad(Stream *s, DType *out, DType *ograd, // By default temperature is 1.0, and only in reinforcement training // users would set it to other values. 
// Adding a branch here to save the CPU 'divide-by-1' computation at runtime + DType final_result; if (temperature == 1.0) { for (index_t j = 0; j < M; ++j) { - igrad[base + j*sa] = OP2::Map(ograd[base + j*sa], out[base + j*sa], sum); + final_result = OP2::Map(ograd[base + j*sa], out[base + j*sa], sum); + KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result); } } else { for (index_t j = 0; j < M; ++j) { - igrad[base + j*sa] = OP2::Map(ograd[base + j*sa], out[base + j*sa], sum)/temperature; + final_result = OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature; + KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result); } } } @@ -202,7 +205,7 @@ inline void Softmax(Stream *s, DType *in, DType *out, } -template +template __global__ void softmax_gradient_kernel(DType *out, DType *ograd, DType *igrad, index_t M, int axis, Shape sshape, Shape stride, const double temperature) { @@ -222,14 +225,16 @@ __global__ void softmax_gradient_kernel(DType *out, DType *ograd, DType *igrad, DType ssum = smem[0]; __syncthreads(); + DType final_result; for (index_t i = x; i < M; i += x_size) { - igrad[base + i*sa] = OP2::Map(ograd[base + i*sa], out[base + i*sa], ssum)/ - static_cast(temperature); + final_result = + OP2::Map(ograd[base + i*sa], out[base + i*sa], ssum) / static_cast(temperature); + KERNEL_ASSIGN(igrad[base + i*sa], Req, final_result); } } -template +template inline void SoftmaxGrad(Stream *s, DType *out, DType *ograd, DType *igrad, Shape shape, int axis, const double temperature) { @@ -241,7 +246,7 @@ inline void SoftmaxGrad(Stream *s, DType *out, DType *ograd, Shape sshape = shape; sshape[axis] = 1; - softmax_gradient_kernel + softmax_gradient_kernel <<::GetStream(s)>>>( out, ograd, igrad, M, axis, sshape, stride, temperature); MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_gradient_kernel); @@ -298,24 +303,23 @@ void SoftmaxGradCompute(const nnvm::NodeAttrs& attrs, const std::vector& outputs) { using namespace mxnet_op; if (req[0] == kNullOp) return; - CHECK_NE(req[0], kAddTo); const SoftmaxParam& param = nnvm::get(attrs.parsed); int axis = CheckAxis(param.axis, inputs[0].ndim()); const double temperature = param.temperature.has_value() ? 
param.temperature.value() : 1.0; TShape shape = AxisShapeCompact(inputs[0].shape_, &axis, true); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { - if (shape.ndim() == 2) { - SoftmaxGrad(ctx.get_stream(), inputs[1].dptr(), - inputs[0].dptr(), outputs[0].dptr(), - shape.get<2>(), axis, - static_cast(temperature)); - } else { - SoftmaxGrad(ctx.get_stream(), inputs[1].dptr(), - inputs[0].dptr(), outputs[0].dptr(), - shape.get<3>(), axis, - static_cast(temperature)); - } + MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { + if (shape.ndim() == 2) { + SoftmaxGrad(ctx.get_stream(), inputs[1].dptr(), + inputs[0].dptr(), outputs[0].dptr(), + shape.get<2>(), axis, static_cast(temperature)); + } else { + SoftmaxGrad(ctx.get_stream(), inputs[1].dptr(), + inputs[0].dptr(), outputs[0].dptr(), + shape.get<3>(), axis, static_cast(temperature)); + } + }); }); } diff --git a/tests/python/unittest/test_operator.py b/tests/python/unittest/test_operator.py index 0ff9a106a728..e1e5c9e61c26 100644 --- a/tests/python/unittest/test_operator.py +++ b/tests/python/unittest/test_operator.py @@ -4522,13 +4522,18 @@ def test_invalid_shape(): @with_seed() def test_new_softmax(): for ndim in range(1, 5): - for _ in range(5): - shape = np.random.randint(1, 5, size=ndim) - axis = np.random.randint(-ndim, ndim) - data = np.random.uniform(-2, 2, size=shape) - sym = mx.sym.softmax(axis=axis) - check_symbolic_forward(sym, [data], [np_softmax(data, axis=axis)]) - check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3) + shape = np.random.randint(1, 5, size=ndim) + axis = np.random.randint(-ndim, ndim) + data = np.random.uniform(-2, 2, size=shape) + sym = mx.sym.softmax(axis=axis) + expected_fwd = np_softmax(data, axis=axis) + expected_bwd = np.zeros(shape) + check_symbolic_forward(sym, [data], [expected_fwd]) + for req in ['null', 'add', 'write']: + check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd], + rtol=1e-2, atol=1e-3, grad_req=req) + check_numeric_gradient(sym, [data], rtol=1e-2, atol=1e-3) + @with_seed() def test_softmax_with_temperature(): From 1abe632ea8e25355369dc1935441feb4c9319388 Mon Sep 17 00:00:00 2001 From: Kellen Sunderland Date: Tue, 21 Aug 2018 15:58:34 +0200 Subject: [PATCH 015/160] [MXNET-848] Pin dockcross base images in CI (#12270) Pin dockcross images to known working versions to avoid CI PR failures. 
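The pinning rule is easy to check mechanically. The snippet below is only a rough sketch, not part of the patch: it assumes the CI Dockerfiles keep the `ci/docker/Dockerfile.build.*` layout used in this series, treats a base image with no tag or with `:latest` as unpinned, and uses a helper name made up for illustration.

```python
# Illustrative check: flag Dockerfile base images that are not pinned to an
# explicit, non-"latest" tag (e.g. "FROM dockcross/base:latest" would be flagged).
import re
from pathlib import Path

def unpinned_base_images(root="ci/docker"):
    """Return (file name, FROM line) pairs whose base image has no pinned tag."""
    hits = []
    for dockerfile in sorted(Path(root).glob("Dockerfile.build.*")):
        for line in dockerfile.read_text().splitlines():
            match = re.match(r"\s*FROM\s+(\S+)", line, re.IGNORECASE)
            if not match:
                continue
            image = match.group(1)
            # "dockcross/base" and "dockcross/base:latest" both count as unpinned.
            tag = image.rsplit(":", 1)[1] if ":" in image else "latest"
            if tag == "latest":
                hits.append((dockerfile.name, line.strip()))
    return hits

if __name__ == "__main__":
    for name, line in unpinned_base_images():
        print("unpinned base image in {}: {}".format(name, line))
```

Run from the repository root, a check like this would print any `FROM` line that still floats with the upstream image.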
--- ci/docker/Dockerfile.build.android_armv7 | 2 +- ci/docker/Dockerfile.build.android_armv8 | 2 +- ci/docker/Dockerfile.build.armv6 | 2 +- ci/docker/Dockerfile.build.armv8 | 2 -- ci/docker/Dockerfile.build.jetson | 3 --- 5 files changed, 3 insertions(+), 8 deletions(-) diff --git a/ci/docker/Dockerfile.build.android_armv7 b/ci/docker/Dockerfile.build.android_armv7 index f5a8d09b53df..799e29c99127 100755 --- a/ci/docker/Dockerfile.build.android_armv7 +++ b/ci/docker/Dockerfile.build.android_armv7 @@ -18,7 +18,7 @@ # # Dockerfile to build MXNet for Android ARMv7 -FROM dockcross/base:latest +FROM mxnetci/dockcross-linux-base:08212018 MAINTAINER Pedro Larroy "pllarroy@amazon.com" # The cross-compiling emulator diff --git a/ci/docker/Dockerfile.build.android_armv8 b/ci/docker/Dockerfile.build.android_armv8 index d992441eea07..2c2c71c003f0 100755 --- a/ci/docker/Dockerfile.build.android_armv8 +++ b/ci/docker/Dockerfile.build.android_armv8 @@ -18,7 +18,7 @@ # # Dockerfile to build MXNet for Android ARM64/ARMv8 -FROM dockcross/base:latest +FROM mxnetci/dockcross-linux-base:08212018 MAINTAINER Pedro Larroy "pllarroy@amazon.com" RUN apt-get update && apt-get install -y \ diff --git a/ci/docker/Dockerfile.build.armv6 b/ci/docker/Dockerfile.build.armv6 index 156c57da3e1d..78071fa33992 100755 --- a/ci/docker/Dockerfile.build.armv6 +++ b/ci/docker/Dockerfile.build.armv6 @@ -18,7 +18,7 @@ # # Dockerfile to build MXNet for ARMv6 -FROM dockcross/linux-armv6 +FROM mxnetci/dockcross-linux-armv6:08212018 ENV ARCH armv6l ENV HOSTCC gcc diff --git a/ci/docker/Dockerfile.build.armv8 b/ci/docker/Dockerfile.build.armv8 index 458b62ee0946..8818ba4e4a16 100755 --- a/ci/docker/Dockerfile.build.armv8 +++ b/ci/docker/Dockerfile.build.armv8 @@ -18,8 +18,6 @@ # # Dockerfile to build MXNet for ARM64/ARMv8 -# Temporary fix due to https://github.com/apache/incubator-mxnet/issues/10837 -#FROM dockcross/linux-arm64 FROM mxnetci/dockcross-linux-arm64:05082018 ENV ARCH aarch64 diff --git a/ci/docker/Dockerfile.build.jetson b/ci/docker/Dockerfile.build.jetson index cfb5a3fd4dab..4be011af068e 100755 --- a/ci/docker/Dockerfile.build.jetson +++ b/ci/docker/Dockerfile.build.jetson @@ -22,9 +22,6 @@ FROM nvidia/cuda:9.0-cudnn7-devel as cudabuilder - -# Temporary fix due to https://github.com/apache/incubator-mxnet/issues/10837 -# FROM dockcross/linux-arm64 FROM mxnetci/dockcross-linux-arm64:05082018 ENV ARCH aarch64 From 81bc69b710a7173f3961528bd9c480ca37cb8734 Mon Sep 17 00:00:00 2001 From: Alexander Zai Date: Tue, 21 Aug 2018 11:06:44 -0400 Subject: [PATCH 016/160] [MXNET-484] MKLDNN C++ test for LRN operator (#11831) * add GetLRNOp * update boilerplate * fix test name * add ndarrays to inputs * use context as var * filter non 4d * create assert equal func * add kaddto * lrn backwards accepts 3 inputs * increase threshold for assertequal * uncomment kaddto * use ASSERT_FLOAT_EQ * set precision to 6 * init random input for lrn * mkldnn arrays can be init rand * init outputs for kaddto not rand * update req to kaddto * change api to setnewmem * add support for kaddto * support for kaddto to backwards * remove kaddto tests * add todo comment * add filter * update todo comment * increase filter * add array types filter to fixtures * update nsize to 3 * remoev write in place * remove flag * fix lint * fix merge issue * remove kaddto writeinplace from lrn * fix input for concattest * fix lint * add type 4 to input fixtures * fix init type 4 * add todo when lrn supports more than 4 dim * add diff shape to input filter * filter out non 
default mkldnn * update todo * remove mkldnn filter * bound not random input * fix test * reorder lrn in_data * remove unused var * fix if reshaped / mkldnn * remove comment * add jira link to todos * remove unused var --- src/operator/nn/lrn-inl.h | 1 + src/operator/nn/mkldnn/mkldnn_lrn-inl.h | 49 ++-- tests/cpp/operator/mkldnn.cc | 332 ++++++++++++++++++++---- 3 files changed, 307 insertions(+), 75 deletions(-) diff --git a/src/operator/nn/lrn-inl.h b/src/operator/nn/lrn-inl.h index cb441de99273..630449598128 100644 --- a/src/operator/nn/lrn-inl.h +++ b/src/operator/nn/lrn-inl.h @@ -114,6 +114,7 @@ void LRNBackward(const nnvm::NodeAttrs& attrs, const OpContext &ctx, const TBlob &out_grad, const TBlob &in_data, const TBlob &out_norm, const OpReqType &req, const TBlob &in_grad) { + // LRNBackwards does not support kAddTo or kWriteInPlace using namespace mshadow; using namespace mshadow::expr; const LRNParam& param_ = nnvm::get(attrs.parsed); diff --git a/src/operator/nn/mkldnn/mkldnn_lrn-inl.h b/src/operator/nn/mkldnn/mkldnn_lrn-inl.h index adb72a2a9c46..4b179a7fbc98 100644 --- a/src/operator/nn/mkldnn/mkldnn_lrn-inl.h +++ b/src/operator/nn/mkldnn/mkldnn_lrn-inl.h @@ -61,7 +61,7 @@ inline lrn_forward::primitive_desc GetLRNFwdDesc(const LRNParam ¶m, inline mkldnn::lrn_backward::primitive_desc GetLRNBwd(const LRNParam ¶m, - const mkldnn::memory::desc &diff_in_md, + const mkldnn::memory::desc &data_in_md, const mkldnn::memory::desc &diff_md, const lrn_forward::primitive_desc &lrnFwd_desc) { mkldnn::engine &engine = CpuEngine::Get()->get_engine(); @@ -71,7 +71,7 @@ GetLRNBwd(const LRNParam ¶m, const int nsize = param.nsize; const float k = param.knorm; - lrn_backward::desc lrnBwd_desc(alg, diff_in_md, + lrn_backward::desc lrnBwd_desc(alg, data_in_md, diff_md, nsize, alpha, beta, k); return mkldnn::lrn_backward::primitive_desc(lrnBwd_desc, engine, lrnFwd_desc); @@ -92,16 +92,18 @@ class MKLDNNLRNFwd { ~MKLDNNLRNFwd() {} - void SetDataHandle(const NDArray &data, - const NDArray &output); + void SetNewMem(const NDArray &data, + const NDArray &output, + const OpReqType req); - void Execute(); + void Execute(const NDArray &out_data); private: std::shared_ptr fwd; std::shared_ptr in_mem; std::shared_ptr out_mem; std::shared_ptr ws_mem; + mkldnn_output_t output_mem_t; bool is_train; private: @@ -131,17 +133,18 @@ void MKLDNNLRNFwd::_Init(const LRNParam ¶m, } } -void MKLDNNLRNFwd::SetDataHandle(const NDArray &in_data, - const NDArray &out_data) { - const mkldnn::memory *in_data_mem = in_data.GetMKLDNNData(); - mkldnn::memory *out_data_mem = const_cast(out_data).CreateMKLDNNData( - this->out_mem->get_primitive_desc()); +void MKLDNNLRNFwd::SetNewMem(const NDArray &in_data, + const NDArray &out_data, + const OpReqType req) { + const mkldnn::memory *in_data_mem = in_data.GetMKLDNNData(); + output_mem_t = CreateMKLDNNMem(out_data, this->out_mem->get_primitive_desc(), req); this->in_mem->set_data_handle(in_data_mem->get_data_handle()); - this->out_mem->set_data_handle(out_data_mem->get_data_handle()); + this->out_mem->set_data_handle(output_mem_t.second->get_data_handle()); } -void MKLDNNLRNFwd::Execute() { +void MKLDNNLRNFwd::Execute(const NDArray &out_data) { MKLDNNStream::Get()->RegisterPrim(*(this->fwd)); + CommitOutput(out_data, output_mem_t); MKLDNNStream::Get()->Submit(); } // End of LRN Class and its functions @@ -187,9 +190,12 @@ void MKLDNNLRNForward(const OpContext &ctx, const NDArray &in_data, const OpReqType req, const NDArray &out_data) { - MKLDNNLRNFwd fwd = GetLRNFwd(param, ctx, in_data); - 
fwd.SetDataHandle(in_data, out_data); - fwd.Execute(); + auto in_buffer = in_data; + if (in_buffer.IsView() && in_buffer.IsMKLDNNData()) + in_buffer = in_buffer.Reorder2Default(); + MKLDNNLRNFwd fwd = GetLRNFwd(param, ctx, in_buffer); + fwd.SetNewMem(in_buffer, out_data, req); + fwd.Execute(out_data); } void MKLDNNLRNBackward(const OpContext &ctx, const LRNParam ¶m, @@ -200,8 +206,15 @@ void MKLDNNLRNBackward(const OpContext &ctx, const LRNParam ¶m, if (req == kNullOp) { return; } + + // TODO(alex): (MXNET-846) figure out why in_grad output incorrect when in_data is nchw8c + auto in_buffer = in_data; + if (in_buffer.IsMKLDNNData()) { + in_buffer = in_data.Reorder2Default(); + } + // Repeat FW for getting workspace - const mkldnn::memory *data_mem = in_data.GetMKLDNNData(); + const mkldnn::memory *data_mem = in_buffer.GetMKLDNNData(); const mkldnn::memory::desc data_md = data_mem->get_primitive_desc().desc(); const lrn_forward::primitive_desc pdesc_fwd = GetLRNFwdDesc(param, ctx.is_train, data_md); @@ -218,10 +231,9 @@ void MKLDNNLRNBackward(const OpContext &ctx, const LRNParam ¶m, lrn_forward(pdesc_fwd, mkldnn::primitive::at(*data_mem), *ws_mem, *dst_temp)); - const mkldnn::memory::desc data_in_md = pdesc_fwd.src_primitive_desc().desc(); const mkldnn::memory *diff_mem = out_grad.GetMKLDNNData(); const mkldnn::memory::desc diff_md = diff_mem->get_primitive_desc().desc(); - const mkldnn::lrn_backward::primitive_desc pdesc_bwd = GetLRNBwd(param, data_in_md, + const mkldnn::lrn_backward::primitive_desc pdesc_bwd = GetLRNBwd(param, data_md, diff_md, pdesc_fwd); mkldnn_output_t diff_src_mem = CreateMKLDNNMem(in_grad, pdesc_bwd.diff_src_primitive_desc(), req); @@ -229,6 +241,7 @@ void MKLDNNLRNBackward(const OpContext &ctx, const LRNParam ¶m, MKLDNNStream::Get()->RegisterPrim( lrn_backward(pdesc_bwd, mkldnn::primitive::at(*data_mem), mkldnn::primitive::at(*diff_mem), *ws_mem, *diff_src_mem.second)); + CommitOutput(in_grad, diff_src_mem); MKLDNNStream::Get()->Submit(); } } // namespace op diff --git a/tests/cpp/operator/mkldnn.cc b/tests/cpp/operator/mkldnn.cc index 59bd3a547b72..14578bec5610 100644 --- a/tests/cpp/operator/mkldnn.cc +++ b/tests/cpp/operator/mkldnn.cc @@ -105,8 +105,7 @@ static void InitDefaultArray(NDArray *arr, bool is_rand = false) { if (is_rand) { data[i] = (std::rand() % 100) - 50; } else { - int shift = size >> 1; - data[i] = i - shift; + data[i] = i % 100 - 50; } } @@ -127,9 +126,8 @@ static void VerifyDefMem(const mkldnn::memory &mem) { = static_cast(mem.get_data_handle()); size_t size = pd.get_size() / sizeof(mshadow::default_real_t); size_t num_same = 0; - int shift = size >> 1; for (int i = 0; i < size; i++) - num_same += data[i] == static_cast(i - shift); + num_same += data[i] == static_cast(i % 100 - 50); EXPECT_EQ(num_same, size); } @@ -155,6 +153,13 @@ static void VerifyMem(const mkldnn::memory &mem) { } } +static bool IsSameShape(mkldnn::memory::primitive_desc pd, TShape shape) { + if (pd.desc().data.ndims != shape.ndim()) return false; + for (size_t i = 0; i < shape.ndim(); i++) + if (pd.desc().data.dims[i] != shape[i]) return false; + return true; +} + static mkldnn::memory::primitive_desc GetMemPD(const TShape s, int dtype, mkldnn::memory::format format) { mkldnn::memory::dims dims(s.ndim()); @@ -370,6 +375,25 @@ struct OpAttrs { std::set requests; int num_inputs; int num_outputs; + int input_types; + int output_types; +}; + +enum ArrayTypes { + Normal = 1, + MKLDNN = 2, + MKLDNNDiffShape = 4, + MKLDNNDiffDim = 8, + NormalReshaped = 16, + MKLDNNReshaped = 32, + 
MKLDNNReshapedDiffShape = 64, + MKLDNNReshapedDiffDim = 128, + NormalReused = 256, + MKLDNNReused = 512, + MKLDNNReusedDiffDim = 1024, + NormalReshapedReused = 2048, + NormalReusedDiffDtype = 4096, + All = 8191, }; OpAttrs GetCopyOp() { @@ -535,6 +559,38 @@ void PrintVerifyMsg(const NDArrayAttrs &arr1, const NDArrayAttrs &arr2) { t1 << " with " << arr2.desc.c_str() << " " << t2 << "\n"; } +OpAttrs GetLRNOp() { + OpAttrs attrs; + attrs.attrs.op = Op::Get("LRN"); + attrs.num_inputs = 1; + attrs.num_outputs = 2; + attrs.attrs.dict.insert({"nsize" , "3"}); + attrs.attrs.op->attr_parser(&attrs.attrs); + attrs.dispatches.resize(2); + attrs.requests.insert(OpReqType::kWriteTo); + attrs.input_types = ArrayTypes::Normal | + ArrayTypes::MKLDNN | + ArrayTypes::NormalReshaped | + ArrayTypes::MKLDNNReshaped; + attrs.output_types = ArrayTypes::Normal | + ArrayTypes::MKLDNN | + ArrayTypes::NormalReshaped | + ArrayTypes::MKLDNNReshaped; + return attrs; +} + +OpAttrs GetLRNBackwardsOp() { + OpAttrs attrs; + attrs.attrs.op = Op::Get("_backward_LRN"); + attrs.num_inputs = 3; + attrs.num_outputs = 1; + attrs.attrs.dict.insert({"nsize" , "3"}); + attrs.attrs.op->attr_parser(&attrs.attrs); + attrs.dispatches.resize(2); + attrs.requests.insert(OpReqType::kWriteTo); + return attrs; +} + /* * We want to get a few types of NDArrays for testing: * 1. Normal NDArray @@ -557,7 +613,9 @@ void PrintVerifyMsg(const NDArrayAttrs &arr1, const NDArrayAttrs &arr2) { * * num_inputs / dim arguments used to scale shape (used for concat backwards to enlarge input shapes) */ -std::vector GetTestInputArrays(bool rand = false, int num_inputs = 1, int dim = 0) { +std::vector GetTestInputArrays( + int types = ArrayTypes::All, bool rand = false, + int num_inputs = 1, int dim = 0) { TestArrayShapes tas = GetTestArrayShapes(); std::vector shapes = tas.shapes; std::vector pds = tas.pds; @@ -575,8 +633,20 @@ std::vector GetTestInputArrays(bool rand = false, int num_inputs = // Type 1. NDArray arr(shape, Context()); - in_arrs.emplace_back(arr, "Normal NDArray"); - InitDefaultArray(&in_arrs.back().arr, rand); + if (types & ArrayTypes::Normal) { + InitDefaultArray(&arr, rand); + in_arrs.emplace_back(arr, "Normal NDArray"); + } + + // Type 4 + arr = NDArray(shape, Context()); + if (types & ArrayTypes::NormalReshaped) { + InitDefaultArray(&arr, rand); + in_arrs.emplace_back(arr.Slice(slice_amount, arr.shape()[0] - slice_amount), + "Reshaped Normal NDArray"); + } + + for (auto pd : pds) { if (num_inputs > 1) { // preserve if matching layout else just expand on 0 dim @@ -591,27 +661,47 @@ std::vector GetTestInputArrays(bool rand = false, int num_inputs = // Type 2, 3. 
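  // (Per the descriptions assigned below: type 2 is an NDArray backed by MKLDNN memory of
  // the same shape, type 3 one whose MKLDNN memory has a different shape or number of dims.)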
arr = NDArray(shape, Context()); - desc = "MKLDNN NDArray"; - if (shape.ndim() != pd.desc().data.ndims) { + if (shape.ndim() == pd.desc().data.ndims && IsSameShape(pd, shape) + && types & ArrayTypes::MKLDNN) { + desc = "MKLDNN NDArray"; + InitMKLDNNArray(&arr, pd, rand); + in_arrs.emplace_back(arr, desc); + } else if (shape.ndim() == pd.desc().data.ndims && !IsSameShape(pd, shape) + && types & ArrayTypes::MKLDNNDiffShape) { + desc = "MKLDNN NDArray with different shape"; + InitMKLDNNArray(&arr, pd, rand); + in_arrs.emplace_back(arr, desc); + } else if (shape.ndim() != pd.desc().data.ndims && types & ArrayTypes::MKLDNNDiffDim) { std::stringstream ss; - ss << "MKLDNN NDArray with different memory layout " << + ss << "MKLDNN NDArray with different dim " << shape.ndim() << "/" << pd.desc().data.ndims; desc = ss.str(); + InitMKLDNNArray(&arr, pd, rand); + in_arrs.emplace_back(arr, desc); } - InitMKLDNNArray(&arr, pd); - in_arrs.emplace_back(arr, desc); - // Type 4, 5, 6. + + // Type 5, 6. arr = NDArray(shape, Context()); - desc = "Reshaped MKLDNN NDArray"; - if (shape.ndim() != pd.desc().data.ndims) { + if (shape.ndim() == pd.desc().data.ndims && IsSameShape(pd, shape) + && types & ArrayTypes::MKLDNNReshaped) { + desc = "Reshaped MKLDNN NDArray"; + InitMKLDNNArray(&arr, pd, rand); + in_arrs.emplace_back(arr.Slice(slice_amount, arr.shape()[0] - slice_amount), desc); + } else if (shape.ndim() == pd.desc().data.ndims && !IsSameShape(pd, shape) + && types & ArrayTypes::MKLDNNReshapedDiffShape) { + desc = "Reshaped MKLDNN NDArray with different shape"; + InitMKLDNNArray(&arr, pd, rand); + in_arrs.emplace_back(arr.Slice(slice_amount, arr.shape()[0] - slice_amount), desc); + } else if (shape.ndim() != pd.desc().data.ndims + && types & ArrayTypes::MKLDNNReshapedDiffDim) { std::stringstream ss; - ss << "Reshaped MKLDNN NDArray with different memory layout " - << shape.ndim() << "/" << pd.desc().data.ndims; + ss << "MKLDNN NDArray with different dim " << + shape.ndim() << "/" << pd.desc().data.ndims; desc = ss.str(); + InitMKLDNNArray(&arr, pd, rand); + in_arrs.emplace_back(arr.Slice(slice_amount, arr.shape()[0] - slice_amount), desc); } - InitMKLDNNArray(&arr, pd); - in_arrs.emplace_back(arr.Slice(slice_amount, arr.shape()[0] - slice_amount), desc); } } return in_arrs; @@ -640,7 +730,7 @@ std::vector GetTestInputArrays(bool rand = false, int num_inputs = std::vector GetTestOutputArrays( const TShape &shp, const std::vector &pds, - std::vectorscale = {1}) { + std::vectorscale = {1}, bool rand = true, int types = ArrayTypes::All) { TShape shape = shp; for (int dim = 0; dim < scale.size(); dim++) @@ -650,39 +740,50 @@ std::vector GetTestOutputArrays( std::string desc; // Type 1. NDArray arr(shape, Context()); - in_arrs.emplace_back(arr, "Normal NDArray"); - InitDefaultArray(&in_arrs.back().arr, true); - // Type 4. + if (types & ArrayTypes::Normal) { + in_arrs.emplace_back(arr, "Normal NDArray"); + InitDefaultArray(&in_arrs.back().arr, rand); + } + TShape tmp_shape = shape; - tmp_shape[0] = shape[0] * 2; - NDArray arr0(tmp_shape, Context()); - InitDefaultArray(&arr0, true); - in_arrs.emplace_back(arr0.Slice(1, shape[0] + 1), "Reshaped NDArray"); + if (types & ArrayTypes::NormalReshaped) { + // Type 4. + tmp_shape[0] = shape[0] * 2; + NDArray arr0(tmp_shape, Context()); + InitDefaultArray(&arr0, rand); + in_arrs.emplace_back(arr0.Slice(1, shape[0] + 1), "Reshaped NDArray"); + } - // Type 5. - // Get a reused version. 
nnvm::TShape s(1); - s[0] = shape.Size(); - NDArray arr1(s, Context()); - arr1 = arr1.AsArray(shape, arr1.dtype()); - InitDefaultArray(&arr1, true); - in_arrs.emplace_back(arr1, "Reused NDArray"); - - // Type 6. - s[0] = shape.Size() * GetTypeSize(mshadow::default_type_flag); - NDArray arr2(s, Context(), true, mshadow::kUint8); - arr2 = arr2.AsArray(shape, mshadow::default_type_flag); - InitDefaultArray(&arr2, true); - in_arrs.emplace_back(arr2, "Reused NDArray with diff data type"); - - // Type 7 - s[0] = shape.Size() * GetTypeSize(mshadow::default_type_flag) * 2; - NDArray arr3(s, Context(), true, mshadow::kUint8); - tmp_shape[0] = shape[0] * 2; - arr3 = arr3.AsArray(tmp_shape, mshadow::default_type_flag); - InitDefaultArray(&arr3, true); - in_arrs.emplace_back(arr3.Slice(1, shape[0] + 1), "Reused+Reshaped NDArray"); + if (types & ArrayTypes::NormalReused) { + // Type 5. + // Get a reused version. + s[0] = shape.Size(); + NDArray arr1(s, Context()); + arr1 = arr1.AsArray(shape, arr1.dtype()); + InitDefaultArray(&arr1, rand); + in_arrs.emplace_back(arr1, "Reused NDArray"); + } + + if (types & ArrayTypes::NormalReusedDiffDtype) { + // Type 6. + s[0] = shape.Size() * GetTypeSize(mshadow::default_type_flag); + NDArray arr2(s, Context(), true, mshadow::kUint8); + arr2 = arr2.AsArray(shape, mshadow::default_type_flag); + InitDefaultArray(&arr2, rand); + in_arrs.emplace_back(arr2, "Reused NDArray with diff data type"); + } + + if (types & ArrayTypes::NormalReshapedReused) { + // Type 7 + s[0] = shape.Size() * GetTypeSize(mshadow::default_type_flag) * 2; + NDArray arr3(s, Context(), true, mshadow::kUint8); + tmp_shape[0] = shape[0] * 2; + arr3 = arr3.AsArray(tmp_shape, mshadow::default_type_flag); + InitDefaultArray(&arr3, rand); + in_arrs.emplace_back(arr3.Slice(1, shape[0] + 1), "Reused+Reshaped NDArray"); + } for (auto pd : pds) { if (shape.Size() != pd.get_size() / sizeof(mshadow::default_real_t)) @@ -703,8 +804,12 @@ std::vector GetTestOutputArrays( << shape.ndim() << "/" << pd.desc().data.ndims; desc = ss.str(); } - in_arrs.emplace_back(arr, desc); - InitMKLDNNArray(&in_arrs.back().arr, pd, true); + + if ((types & ArrayTypes::MKLDNN && shape.ndim() == pd.desc().data.ndims) || + (types & ArrayTypes::MKLDNNDiffDim && shape.ndim() != pd.desc().data.ndims)) { + in_arrs.emplace_back(arr, desc); + InitMKLDNNArray(&in_arrs.back().arr, pd, rand); + } // Type 8, 9. // Get a reused version. 
@@ -712,7 +817,7 @@ std::vector GetTestOutputArrays( s[0] = shape.Size(); NDArray arr = NDArray(s, Context()); arr = arr.AsArray(shape, arr.dtype()); - InitMKLDNNArray(&arr, pd, true); + InitMKLDNNArray(&arr, pd, rand); desc = "Reused MKLDNN NDArray"; if (shape.ndim() != pd.desc().data.ndims) { std::stringstream ss; @@ -720,7 +825,11 @@ std::vector GetTestOutputArrays( << shape.ndim() << "/" << pd.desc().data.ndims; desc = ss.str(); } - in_arrs.emplace_back(arr, desc); + + if ((types & ArrayTypes::MKLDNNReused && shape.ndim() == pd.desc().data.ndims) || + (types & ArrayTypes::MKLDNNReusedDiffDim && shape.ndim() != pd.desc().data.ndims)) { + in_arrs.emplace_back(arr, desc); + } } return in_arrs; } @@ -729,7 +838,8 @@ TEST(MKLDNN_NDArray, GetTestInputArraysConcat) { auto in_arrs = GetTestInputArrays(); for (int dim = 0; dim < 5; dim++) { for (int num_inputs = 2; num_inputs < 5; num_inputs++) { - std::vector expanded_arrs = GetTestInputArrays(false, num_inputs, dim); + std::vector expanded_arrs = GetTestInputArrays( + ArrayTypes::All, false, num_inputs, dim); int i = 0; for (auto &arr : in_arrs) { if (dim >= arr.arr.shape().ndim()) @@ -781,6 +891,19 @@ void VerifyCopyResult(const std::vector &in_arrs, tmp1.shape().Size() * sizeof(mshadow::default_real_t)), 0); } +void AssertEqual(const std::vector &in_arrs, + const std::vector &out_arrs) { + NDArray tmp1 = in_arrs[0]->Reorder2Default(); + NDArray tmp2 = out_arrs[0]->Reorder2Default(); + EXPECT_EQ(tmp1.shape().Size(), tmp2.shape().Size()); + TBlob blob1 = tmp1.data(); + TBlob blob2 = tmp2.data(); + mshadow::default_real_t *d1 = static_cast(blob1.dptr_); + mshadow::default_real_t *d2 = static_cast(blob2.dptr_); + for (int i = 0; i < tmp1.shape().Size(); i++) + ASSERT_FLOAT_EQ(d1[i], d2[i]); +} + void VerifyActResult(const std::vector &in_arrs, const std::vector &out_arrs) { NDArray tmp1 = in_arrs[0]->Reorder2Default(); @@ -1055,7 +1178,7 @@ void TestConcatOp(const OpAttrs &attrs, VerifyFunc verify_fn, if (backwards) { std::string str_dim = const_cast(attrs).attrs.dict["dim"]; int dim = std::stoi(str_dim); - in_arrs = GetTestInputArrays(false, attrs.num_outputs, dim); + in_arrs = GetTestInputArrays(ArrayTypes::All, false, attrs.num_outputs, dim); } for (auto &in_arr : in_arrs) { @@ -1094,6 +1217,95 @@ void TestConcatOp(const OpAttrs &attrs, VerifyFunc verify_fn, } } +// compares output of fcompute with fcomputex +void TestOpEx(const OpAttrs &forward_attrs, const OpAttrs &backwards_attrs) { + std::vector inputs(forward_attrs.num_inputs); + std::vector outputs(forward_attrs.num_outputs); + std::vector ex_outputs(forward_attrs.num_outputs); + + std::vector backwards_input(backwards_attrs.num_inputs); + std::vector backwards_outputs(backwards_attrs.num_outputs); + std::vector backwards_ex_outputs(backwards_attrs.num_outputs); + + + std::vector req(forward_attrs.num_outputs); + std::vector back_req(backwards_attrs.num_outputs); + + TestArrayShapes tas = GetTestArrayShapes(); + std::vector pds = tas.pds; + + std::vector in_arrs = GetTestInputArrays(forward_attrs.input_types, true); + std::vector> out_arrs(forward_attrs.num_outputs); + std::vector> ex_out_arrs(forward_attrs.num_outputs); + + if (forward_attrs.requests.find(OpReqType::kWriteTo) != forward_attrs.requests.end()) { + for (int i1 = 0; i1 < in_arrs.size(); i1++) { + auto in_arr = in_arrs[i1]; + + // TODO(alex): (MXNET-845) Remove when MKLDNN supports other dims + if (in_arr.arr.shape().ndim() != 4) + continue; + + for (int i = 0; i < forward_attrs.num_outputs; i++) { + out_arrs[i] = + 
GetTestOutputArrays(in_arr.arr.shape(), pds, {1}, forward_attrs.output_types); + ex_out_arrs[i] = + GetTestOutputArrays(in_arr.arr.shape(), pds, {1}, forward_attrs.output_types); + } + + for (int i = 0; i < forward_attrs.num_inputs; i++) + inputs[i] = &in_arr.arr; + + for (size_t output_i = 0; output_i < out_arrs[0].size(); output_i++) { + if (out_arrs[0][output_i].arr.IsMKLDNNData()) + continue; + + for (int i = 0; i < forward_attrs.num_outputs; i++) { + req[i] = kWriteTo; + outputs[i] = &out_arrs[i][output_i].arr; + ex_outputs[i] = &ex_out_arrs[i][output_i].arr; + } + Imperative::Get()->set_is_training(true); + + PrintVerifyMsg(in_arr, out_arrs[0][output_i]); + Imperative::Get()->InvokeOp( + Context(), forward_attrs.attrs, inputs, outputs, req, + DispatchMode::kFCompute, mxnet::OpStatePtr()); + Imperative::Get()->InvokeOp( + Context(), forward_attrs.attrs, inputs, ex_outputs, req, + DispatchMode::kFComputeEx, mxnet::OpStatePtr()); + Engine::Get()->WaitForAll(); + AssertEqual(outputs, ex_outputs); + + // backwards test performed same time since output needed + backwards_input[0] = outputs[0]; // output grad + backwards_input[1] = inputs[0]; // input + backwards_input[2] = outputs[1]; // out norm + + auto tmp_output = GetTestInputArrays(forward_attrs.input_types, true)[i1]; + backwards_outputs[0] = &tmp_output.arr; + + auto tmp_output2 = GetTestInputArrays(forward_attrs.input_types, true)[i1]; + backwards_ex_outputs[0] = &tmp_output2.arr; + + for (int i = 0; i < backwards_attrs.num_outputs; i++) + back_req[i] = kWriteTo; + + std::cout << "Backwards: "; + PrintVerifyMsg(out_arrs[0][output_i], tmp_output); + Imperative::Get()->InvokeOp( + Context(), backwards_attrs.attrs, backwards_input, backwards_outputs, + back_req, DispatchMode::kFCompute, mxnet::OpStatePtr()); + Imperative::Get()->InvokeOp( + Context(), backwards_attrs.attrs, backwards_input, backwards_ex_outputs, + back_req, DispatchMode::kFComputeEx, mxnet::OpStatePtr()); + Engine::Get()->WaitForAll(); + AssertEqual(backwards_outputs, backwards_ex_outputs); + } + } + } +} + int CalculateWidthPoolOutput(int width, int kernel, int padding, int stride) { return (width - kernel + 2 * padding) / stride + 1; } @@ -1252,6 +1464,12 @@ TEST(IMPERATIVE, ConcatBackwardsOp) { } } +TEST(IMPERATIVE, LRNOp) { + OpAttrs forward_attrs = GetLRNOp(); + OpAttrs backwards_attrs = GetLRNBackwardsOp(); + TestOpEx(forward_attrs, backwards_attrs); +} + TEST(IMPERATIVE, PoolingOp) { for (int dim = 2; dim < 4; dim++) { for (int kernel = 1; kernel < 4; kernel++) { @@ -1270,7 +1488,7 @@ TEST(IMPERATIVE, PoolingOp) { TEST(MKLDNN_BASE, MKLDNNSum) { std::vector in_arrs = GetTestInputArrays(); - std::vector in_arrs2 = GetTestInputArrays(true); + std::vector in_arrs2 = GetTestInputArrays(ArrayTypes::All, true); TestArrayShapes tas = GetTestArrayShapes(); std::vector pds = tas.pds; @@ -1320,7 +1538,7 @@ TEST(MKLDNN_BASE, MKLDNNSum) { TEST(MKLDNN_BASE, CreateMKLDNNMem) { std::vector in_arrs = GetTestInputArrays(); - std::vector in_arrs2 = GetTestInputArrays(true); + std::vector in_arrs2 = GetTestInputArrays(ArrayTypes::All, true); TestArrayShapes tas = GetTestArrayShapes(); std::vector pds = tas.pds; MKLDNNStream *stream = MKLDNNStream::Get(); From 09ee17c7c2e06cfd416c2528d867427ae506e853 Mon Sep 17 00:00:00 2001 From: cclauss Date: Tue, 21 Aug 2018 17:31:42 +0200 Subject: [PATCH 017/160] [MXNET-696] 'make pylint' should run a current version of PyLint (#12200) * make pylint should run pylint under both Python 2 and Python 3 @vandanavk * Python 3 first and Python 2 
second * Update Makefile * pythonX -m pylint --version * pythonX -m pip install --upgrade pylint * make likes tabs while Python likes spaces ;-) * pythonX -m pip install --upgrade --user pylint * Comments: pylint v1.8.3 --> v2.1.1 on Python 2.7.12 or 3.5.2 * pylintrc: Add --disable=useless-object-inheritance * pylint --disable= ten new tests * max-line-length=100 --> 120 * Disable 4 more tests for Python 2 * Remove temporary lines: pip install --upgrade pylint * Add undefined-variable to PyLint enable list Power on! * Remove undefined-variable from enabled * Re-remove the undefined-variable test * Revert changes to Makefile * Remove Python 2 related test change from pylintrc * Force a retest * Fix conflict * Re-resolve the conflict * Force a retest because of Onnx troubles * Remove PyLint disable directive "consider-using-in" #12214 has landed which fixes all instances of "consider-using-in" --- ci/docker/install/ubuntu_python.sh | 4 +-- ci/other/pylintrc | 45 ++++++++++++++++++++++++++++-- 2 files changed, 44 insertions(+), 5 deletions(-) diff --git a/ci/docker/install/ubuntu_python.sh b/ci/docker/install/ubuntu_python.sh index e71cac8a3898..0fd91cbf706c 100755 --- a/ci/docker/install/ubuntu_python.sh +++ b/ci/docker/install/ubuntu_python.sh @@ -29,5 +29,5 @@ wget -nv https://bootstrap.pypa.io/get-pip.py python3 get-pip.py python2 get-pip.py -pip2 install nose cpplint==1.3.0 pylint==1.8.3 'numpy<1.15.0,>=1.8.2' nose-timer 'requests<2.19.0,>=2.18.4' h5py==2.8.0rc1 scipy==1.0.1 boto3 -pip3 install nose cpplint==1.3.0 pylint==1.8.3 'numpy<1.15.0,>=1.8.2' nose-timer 'requests<2.19.0,>=2.18.4' h5py==2.8.0rc1 scipy==1.0.1 boto3 +pip2 install nose cpplint==1.3.0 pylint==1.9.3 'numpy<1.15.0,>=1.8.2' nose-timer 'requests<2.19.0,>=2.18.4' h5py==2.8.0rc1 scipy==1.0.1 boto3 +pip3 install nose cpplint==1.3.0 pylint==2.1.1 'numpy<1.15.0,>=1.8.2' nose-timer 'requests<2.19.0,>=2.18.4' h5py==2.8.0rc1 scipy==1.0.1 boto3 diff --git a/ci/other/pylintrc b/ci/other/pylintrc index 082850555f7e..ca31417f321c 100644 --- a/ci/other/pylintrc +++ b/ci/other/pylintrc @@ -71,7 +71,7 @@ confidence= # either give multiple identifier separated by comma (,) or put this option # multiple time (only on the command line, not in the configuration file where # it should appear only once). See also the "--disable" option for examples. -enable=indexing-exception,old-raise-syntax +enable=indexing-exception,old-raise-syntax,undefined-variable # Disable the message, report, category or checker with the given id(s). You # can either give multiple identifiers separated by comma (,) or put this @@ -82,7 +82,46 @@ enable=indexing-exception,old-raise-syntax # --enable=similarities". 
If you want to run only the classes checker, but have # no Warning level messages displayed, use"--disable=all --enable=classes # --disable=W" -disable=design,similarities,no-self-use,attribute-defined-outside-init,locally-disabled,star-args,pointless-except,bad-option-value,global-statement,fixme,suppressed-message,useless-suppression,locally-enabled,no-member,no-name-in-module,import-error,unsubscriptable-object,unbalanced-tuple-unpacking,undefined-variable,protected-access,superfluous-parens,invalid-name,no-else-return,useless-super-delegation,len-as-condition,invalid-unary-operand-type +disable= + design, + similarities, + no-self-use, + attribute-defined-outside-init, + locally-disabled, + star-args, + pointless-except, + bad-option-value, + global-statement, + fixme, + suppressed-message, + useless-suppression, + locally-enabled, + no-member, + no-name-in-module, + import-error, + unsubscriptable-object, + unbalanced-tuple-unpacking, + undefined-variable, + protected-access, + superfluous-parens, + invalid-name, + no-else-return, + useless-super-delegation, + len-as-condition, + invalid-unary-operand-type, + chained-comparison, + consider-using-dict-comprehension, + consider-using-set-comprehension, + invalid-envvar-default, + singleton-comparison, + try-except-raise, + useless-object-inheritance, + useless-return, + c-extension-no-member, + deprecated-lambda, + redefined-builtin, + unexpected-keyword-arg + # disable=unicode-builtin,delslice-method,using-cmp-argument,setslice-method,dict-view-method,parameter-unpacking,range-builtin-not-iterating,print-statement,file-builtin,old-raise-syntax,basestring-builtin,execfile-builtin,indexing-exception,import-star-module-level,coerce-method,long-builtin,old-ne-operator,old-division,no-absolute-import,raw_input-builtin,old-octal-literal,oct-method,xrange-builtin,hex-method,unpacking-in-except,nonzero-method,raising-string,intern-builtin,reload-builtin,metaclass-assignment,cmp-method,filter-builtin-not-iterating,apply-builtin,map-builtin-not-iterating,next-method-called,unichr-builtin,buffer-builtin,dict-iter-method,input-builtin,coerce-builtin,getslice-method,useless-suppression,standarderror-builtin,zip-builtin-not-iterating,suppressed-message,cmp-builtin,backtick,long-suffix,reduce-builtin,round-builtin @@ -117,7 +156,7 @@ evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / stateme [FORMAT] # Maximum number of characters on a single line. -max-line-length=100 +max-line-length=120 # Regexp for a line that is allowed to be longer than the limit. 
ignore-long-lines=^\s*(# )??$ From ba6f4e129b955767526d7539598f91f3ff9147cb Mon Sep 17 00:00:00 2001 From: Anton Chernov Date: Tue, 21 Aug 2018 17:48:40 +0200 Subject: [PATCH 018/160] fix broken links to subscribe to dev mailing list (#12271) --- docs/community/contribute.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/community/contribute.md b/docs/community/contribute.md index f278425b2fcc..3faa4850e5bf 100644 --- a/docs/community/contribute.md +++ b/docs/community/contribute.md @@ -45,10 +45,10 @@ Please join either or both of the MXNet mailing lists: **For MXNet Users, join the USER mailing list**: -- [MXNet Apache USER mailing list](https://lists.apache.org/list.html?dev@mxnet.apache.org) (user@mxnet.apache.org): To subscribe, send an email to user-subscribe@mxnet.apache.org +- [MXNet Apache USER mailing list](https://lists.apache.org/list.html?user@mxnet.apache.org) (user@mxnet.apache.org): To subscribe, send an email to user-subscribe@mxnet.apache.org **For Contributors to MXNet, join the DEV mailing list**: -- [MXNet Apache USER mailing list](https://lists.apache.org/list.html?dev@mxnet.apache.org) (user@mxnet.apache.org): To subscribe, send an email to user-subscribe@mxnet.apache.org +- [MXNet Apache DEV mailing list](https://lists.apache.org/list.html?dev@mxnet.apache.org) (dev@mxnet.apache.org): To subscribe, send an email to dev-subscribe@mxnet.apache.org * [archive](https://lists.apache.org/list.html?dev@mxnet.apache.org) From 8dbbfad36caffedba08f31c6ef420cda62909c8a Mon Sep 17 00:00:00 2001 From: Lanking Date: Tue, 21 Aug 2018 08:49:43 -0700 Subject: [PATCH 019/160] [MXNET-839] Fix the broken link in the Scala package (#12256) * change links to fix the issue * change space problem * add scalastyle escape --- .../infer/imageclassifier/ImageClassifierExample.scala | 6 +++--- .../infer/objectdetector/SSDClassifierExample.scala | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/infer/imageclassifier/ImageClassifierExample.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/infer/imageclassifier/ImageClassifierExample.scala index 3bbd780d39b9..2a0d967a4b4a 100644 --- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/infer/imageclassifier/ImageClassifierExample.scala +++ b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/infer/imageclassifier/ImageClassifierExample.scala @@ -28,13 +28,13 @@ import java.io.File import scala.collection.mutable.ListBuffer +// scalastyle:off /** *

  * Example inference showing usage of the Infer package on a resnet-152 model.
- * @see <a href="...">
- * Instructions to run this example
- * </a>
+ * @see <a href="...">Instructions to run this example</a>
  */
+// scalastyle:on
 object ImageClassifierExample {

   private val logger = LoggerFactory.getLogger(classOf[ImageClassifierExample])

diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/infer/objectdetector/SSDClassifierExample.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/infer/objectdetector/SSDClassifierExample.scala
index c9707cb3ff6f..7c6c7ef12152 100644
--- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/infer/objectdetector/SSDClassifierExample.scala
+++ b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/infer/objectdetector/SSDClassifierExample.scala
@@ -29,14 +29,14 @@ import java.nio.file.{Files, Paths}
 import scala.collection.mutable.ListBuffer

+// scalastyle:off
 /**
  *

  * Example single shot detector (SSD) using the Infer package
  * on a ssd_resnet50_512 model.
- * @see <a href="...">
- * Instructions to run this example
- * </a>
+ * @see Instructions to run this example */ +// scalastyle:on class SSDClassifierExample { @Option(name = "--model-path-prefix", usage = "the input model directory and prefix of the model") private val modelPathPrefix: String = "/model/ssd_resnet50_512" From 332a66498e02e51df0fad2ae316972d8ddba522c Mon Sep 17 00:00:00 2001 From: Carin Meier Date: Tue, 21 Aug 2018 12:05:11 -0400 Subject: [PATCH 020/160] add gigasquid (Carin Meier) to the Clojure language binding (#12198) --- CODEOWNERS | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/CODEOWNERS b/CODEOWNERS index 3e2e352592e1..5a88e89dfb02 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -12,10 +12,11 @@ * @apache/mxnet-committers # Language bindings -/R-package/ @thirdwing -/scala-package/ @yzhliu @nswamy -/perl-package/ @sergeykolychev -/python/ @szha +/R-package/ @thirdwing +/scala-package/ @yzhliu @nswamy +/perl-package/ @sergeykolychev +/python/ @szha +/contrib/clojure-package/ @gigasquid # C++ base /src/kvstore/ @rahul003 @anirudh2290 From 38f80af6a4ac1ec1760772d5a407c39c876c16e6 Mon Sep 17 00:00:00 2001 From: Lanking Date: Tue, 21 Aug 2018 11:09:23 -0700 Subject: [PATCH 021/160] [MXNET-836] RNN Example for Scala (#11753) * initial fix for RNN * add CI test * add encoding format * scala style fix * update readme * test char RNN works * ignore the test due to memory leaks --- .../apache/mxnetexamples/rnn/BucketIo.scala | 19 +- .../org/apache/mxnetexamples/rnn/Lstm.scala | 97 ++++--- .../mxnetexamples/rnn/LstmBucketing.scala | 110 ++++---- .../org/apache/mxnetexamples/rnn/README.md | 48 ++++ .../mxnetexamples/rnn/TestCharRnn.scala | 96 +++---- .../mxnetexamples/rnn/TrainCharRnn.scala | 237 +++++++++--------- .../org/apache/mxnetexamples/rnn/Utils.scala | 3 - .../mxnetexamples/rnn/ExampleRNNSuite.scala | 75 ++++++ 8 files changed, 399 insertions(+), 286 deletions(-) create mode 100644 scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/README.md create mode 100644 scala-package/examples/src/test/scala/org/apache/mxnetexamples/rnn/ExampleRNNSuite.scala diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/BucketIo.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/BucketIo.scala index d4b17074d48c..6d414bb0328a 100644 --- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/BucketIo.scala +++ b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/BucketIo.scala @@ -34,7 +34,7 @@ object BucketIo { type ReadContent = String => String def defaultReadContent(path: String): String = { - Source.fromFile(path).mkString.replaceAll("\\. |\n", " ") + Source.fromFile(path, "UTF-8").mkString.replaceAll("\\. 
|\n", " ") } def defaultBuildVocab(path: String): Map[String, Int] = { @@ -56,7 +56,7 @@ object BucketIo { val tmp = sentence.split(" ").filter(_.length() > 0) for (w <- tmp) yield theVocab(w) } - words.toArray + words } def defaultGenBuckets(sentences: Array[String], batchSize: Int, @@ -162,8 +162,6 @@ object BucketIo { labelBuffer.append(NDArray.zeros(_batchSize, buckets(iBucket))) } - private val initStateArrays = initStates.map(x => NDArray.zeros(x._2._1, x._2._2)) - private val _provideData = { val tmp = ListMap("data" -> Shape(_batchSize, _defaultBucketKey)) tmp ++ initStates.map(x => x._1 -> Shape(x._2._1, x._2._2)) } @@ -208,12 +206,13 @@ object BucketIo { tmp ++ initStates.map(x => x._1 -> Shape(x._2._1, x._2._2)) } val batchProvideLabel = ListMap("softmax_label" -> labelBuf.shape) - new DataBatch(IndexedSeq(dataBuf) ++ initStateArrays, - IndexedSeq(labelBuf), - getIndex(), - getPad(), - this.buckets(bucketIdx).asInstanceOf[AnyRef], - batchProvideData, batchProvideLabel) + val initStateArrays = initStates.map(x => NDArray.zeros(x._2._1, x._2._2)) + new DataBatch(IndexedSeq(dataBuf.copy()) ++ initStateArrays, + IndexedSeq(labelBuf.copy()), + getIndex(), + getPad(), + this.buckets(bucketIdx).asInstanceOf[AnyRef], + batchProvideData, batchProvideLabel) } /** diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/Lstm.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/Lstm.scala index bf29a47fcf81..872ef7871fb0 100644 --- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/Lstm.scala +++ b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/Lstm.scala @@ -18,13 +18,10 @@ package org.apache.mxnetexamples.rnn -import org.apache.mxnet.Symbol +import org.apache.mxnet.{Shape, Symbol} import scala.collection.mutable.ArrayBuffer -/** - * @author Depeng Liang - */ object Lstm { final case class LSTMState(c: Symbol, h: Symbol) @@ -35,27 +32,22 @@ object Lstm { def lstm(numHidden: Int, inData: Symbol, prevState: LSTMState, param: LSTMParam, seqIdx: Int, layerIdx: Int, dropout: Float = 0f): LSTMState = { val inDataa = { - if (dropout > 0f) Symbol.Dropout()()(Map("data" -> inData, "p" -> dropout)) + if (dropout > 0f) Symbol.api.Dropout(data = Some(inData), p = Some(dropout)) else inData } - val i2h = Symbol.FullyConnected(s"t${seqIdx}_l${layerIdx}_i2h")()(Map("data" -> inDataa, - "weight" -> param.i2hWeight, - "bias" -> param.i2hBias, - "num_hidden" -> numHidden * 4)) - val h2h = Symbol.FullyConnected(s"t${seqIdx}_l${layerIdx}_h2h")()(Map("data" -> prevState.h, - "weight" -> param.h2hWeight, - "bias" -> param.h2hBias, - "num_hidden" -> numHidden * 4)) + val i2h = Symbol.api.FullyConnected(data = Some(inDataa), weight = Some(param.i2hWeight), + bias = Some(param.i2hBias), num_hidden = numHidden * 4, name = s"t${seqIdx}_l${layerIdx}_i2h") + val h2h = Symbol.api.FullyConnected(data = Some(prevState.h), weight = Some(param.h2hWeight), + bias = Some(param.h2hBias), num_hidden = numHidden * 4, name = s"t${seqIdx}_l${layerIdx}_h2h") val gates = i2h + h2h - val sliceGates = Symbol.SliceChannel(s"t${seqIdx}_l${layerIdx}_slice")( - gates)(Map("num_outputs" -> 4)) - val ingate = Symbol.Activation()()(Map("data" -> sliceGates.get(0), "act_type" -> "sigmoid")) - val inTransform = Symbol.Activation()()(Map("data" -> sliceGates.get(1), "act_type" -> "tanh")) - val forgetGate = Symbol.Activation()()( - Map("data" -> sliceGates.get(2), "act_type" -> "sigmoid")) - val outGate = Symbol.Activation()()(Map("data" -> sliceGates.get(3), 
"act_type" -> "sigmoid")) + val sliceGates = Symbol.api.SliceChannel(data = Some(gates), num_outputs = 4, + name = s"t${seqIdx}_l${layerIdx}_slice") + val ingate = Symbol.api.Activation(data = Some(sliceGates.get(0)), act_type = "sigmoid") + val inTransform = Symbol.api.Activation(data = Some(sliceGates.get(1)), act_type = "tanh") + val forgetGate = Symbol.api.Activation(data = Some(sliceGates.get(2)), act_type = "sigmoid") + val outGate = Symbol.api.Activation(data = Some(sliceGates.get(3)), act_type = "sigmoid") val nextC = (forgetGate * prevState.c) + (ingate * inTransform) - val nextH = outGate * Symbol.Activation()()(Map("data" -> nextC, "act_type" -> "tanh")) + val nextH = outGate * Symbol.api.Activation(data = Some(nextC), "tanh") LSTMState(c = nextC, h = nextH) } @@ -74,11 +66,11 @@ object Lstm { val lastStatesBuf = ArrayBuffer[LSTMState]() for (i <- 0 until numLstmLayer) { paramCellsBuf.append(LSTMParam(i2hWeight = Symbol.Variable(s"l${i}_i2h_weight"), - i2hBias = Symbol.Variable(s"l${i}_i2h_bias"), - h2hWeight = Symbol.Variable(s"l${i}_h2h_weight"), - h2hBias = Symbol.Variable(s"l${i}_h2h_bias"))) + i2hBias = Symbol.Variable(s"l${i}_i2h_bias"), + h2hWeight = Symbol.Variable(s"l${i}_h2h_weight"), + h2hBias = Symbol.Variable(s"l${i}_h2h_bias"))) lastStatesBuf.append(LSTMState(c = Symbol.Variable(s"l${i}_init_c_beta"), - h = Symbol.Variable(s"l${i}_init_h_beta"))) + h = Symbol.Variable(s"l${i}_init_h_beta"))) } val paramCells = paramCellsBuf.toArray val lastStates = lastStatesBuf.toArray @@ -87,10 +79,10 @@ object Lstm { // embeding layer val data = Symbol.Variable("data") var label = Symbol.Variable("softmax_label") - val embed = Symbol.Embedding("embed")()(Map("data" -> data, "input_dim" -> inputSize, - "weight" -> embedWeight, "output_dim" -> numEmbed)) - val wordvec = Symbol.SliceChannel()()( - Map("data" -> embed, "num_outputs" -> seqLen, "squeeze_axis" -> 1)) + val embed = Symbol.api.Embedding(data = Some(data), input_dim = inputSize, + weight = Some(embedWeight), output_dim = numEmbed, name = "embed") + val wordvec = Symbol.api.SliceChannel(data = Some(embed), + num_outputs = seqLen, squeeze_axis = Some(true)) val hiddenAll = ArrayBuffer[Symbol]() var dpRatio = 0f @@ -101,22 +93,23 @@ object Lstm { for (i <- 0 until numLstmLayer) { if (i == 0) dpRatio = 0f else dpRatio = dropout val nextState = lstm(numHidden, inData = hidden, - prevState = lastStates(i), - param = paramCells(i), - seqIdx = seqIdx, layerIdx = i, dropout = dpRatio) + prevState = lastStates(i), + param = paramCells(i), + seqIdx = seqIdx, layerIdx = i, dropout = dpRatio) hidden = nextState.h lastStates(i) = nextState } // decoder - if (dropout > 0f) hidden = Symbol.Dropout()()(Map("data" -> hidden, "p" -> dropout)) + if (dropout > 0f) hidden = Symbol.api.Dropout(data = Some(hidden), p = Some(dropout)) hiddenAll.append(hidden) } - val hiddenConcat = Symbol.Concat()(hiddenAll: _*)(Map("dim" -> 0)) - val pred = Symbol.FullyConnected("pred")()(Map("data" -> hiddenConcat, "num_hidden" -> numLabel, - "weight" -> clsWeight, "bias" -> clsBias)) - label = Symbol.transpose()(label)() - label = Symbol.Reshape()()(Map("data" -> label, "target_shape" -> "(0,)")) - val sm = Symbol.SoftmaxOutput("softmax")()(Map("data" -> pred, "label" -> label)) + val hiddenConcat = Symbol.api.Concat(data = hiddenAll.toArray, num_args = hiddenAll.length, + dim = Some(0)) + val pred = Symbol.api.FullyConnected(data = Some(hiddenConcat), num_hidden = numLabel, + weight = Some(clsWeight), bias = Some(clsBias)) + label = Symbol.api.transpose(data 
= Some(label)) + label = Symbol.api.Reshape(data = Some(label), target_shape = Some(Shape(0))) + val sm = Symbol.api.SoftmaxOutput(data = Some(pred), label = Some(label), name = "softmax") sm } @@ -131,35 +124,35 @@ object Lstm { var lastStates = Array[LSTMState]() for (i <- 0 until numLstmLayer) { paramCells = paramCells :+ LSTMParam(i2hWeight = Symbol.Variable(s"l${i}_i2h_weight"), - i2hBias = Symbol.Variable(s"l${i}_i2h_bias"), - h2hWeight = Symbol.Variable(s"l${i}_h2h_weight"), - h2hBias = Symbol.Variable(s"l${i}_h2h_bias")) + i2hBias = Symbol.Variable(s"l${i}_i2h_bias"), + h2hWeight = Symbol.Variable(s"l${i}_h2h_weight"), + h2hBias = Symbol.Variable(s"l${i}_h2h_bias")) lastStates = lastStates :+ LSTMState(c = Symbol.Variable(s"l${i}_init_c_beta"), - h = Symbol.Variable(s"l${i}_init_h_beta")) + h = Symbol.Variable(s"l${i}_init_h_beta")) } assert(lastStates.length == numLstmLayer) val data = Symbol.Variable("data") - var hidden = Symbol.Embedding("embed")()(Map("data" -> data, "input_dim" -> inputSize, - "weight" -> embedWeight, "output_dim" -> numEmbed)) + var hidden = Symbol.api.Embedding(data = Some(data), input_dim = inputSize, + weight = Some(embedWeight), output_dim = numEmbed, name = "embed") var dpRatio = 0f // stack LSTM for (i <- 0 until numLstmLayer) { if (i == 0) dpRatio = 0f else dpRatio = dropout val nextState = lstm(numHidden, inData = hidden, - prevState = lastStates(i), - param = paramCells(i), - seqIdx = seqIdx, layerIdx = i, dropout = dpRatio) + prevState = lastStates(i), + param = paramCells(i), + seqIdx = seqIdx, layerIdx = i, dropout = dpRatio) hidden = nextState.h lastStates(i) = nextState } // decoder - if (dropout > 0f) hidden = Symbol.Dropout()()(Map("data" -> hidden, "p" -> dropout)) - val fc = Symbol.FullyConnected("pred")()(Map("data" -> hidden, "num_hidden" -> numLabel, - "weight" -> clsWeight, "bias" -> clsBias)) - val sm = Symbol.SoftmaxOutput("softmax")()(Map("data" -> fc)) + if (dropout > 0f) hidden = Symbol.api.Dropout(data = Some(hidden), p = Some(dropout)) + val fc = Symbol.api.FullyConnected(data = Some(hidden), + num_hidden = numLabel, weight = Some(clsWeight), bias = Some(clsBias)) + val sm = Symbol.api.SoftmaxOutput(data = Some(fc), name = "softmax") var output = Array(sm) for (state <- lastStates) { output = output :+ state.c diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/LstmBucketing.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/LstmBucketing.scala index 44ee6e778d27..f7a01bad133a 100644 --- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/LstmBucketing.scala +++ b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/LstmBucketing.scala @@ -30,9 +30,8 @@ import org.apache.mxnet.module.BucketingModule import org.apache.mxnet.module.FitParams /** - * Bucketing LSTM examples - * @author Yizhi Liu - */ + * Bucketing LSTM examples + */ class LstmBucketing { @Option(name = "--data-train", usage = "training set") private val dataTrain: String = "example/rnn/sherlockholmes.train.txt" @@ -61,6 +60,60 @@ object LstmBucketing { Math.exp(loss / labelArr.length).toFloat } + def runTraining(trainData : String, validationData : String, + ctx : Array[Context], numEpoch : Int): Unit = { + val batchSize = 32 + val buckets = Array(10, 20, 30, 40, 50, 60) + val numHidden = 200 + val numEmbed = 200 + val numLstmLayer = 2 + + logger.info("Building vocab ...") + val vocab = BucketIo.defaultBuildVocab(trainData) + + def BucketSymGen(key: AnyRef): + (Symbol, 
IndexedSeq[String], IndexedSeq[String]) = { + val seqLen = key.asInstanceOf[Int] + val sym = Lstm.lstmUnroll(numLstmLayer, seqLen, vocab.size, + numHidden = numHidden, numEmbed = numEmbed, numLabel = vocab.size) + (sym, IndexedSeq("data"), IndexedSeq("softmax_label")) + } + + val initC = (0 until numLstmLayer).map(l => + (s"l${l}_init_c_beta", (batchSize, numHidden)) + ) + val initH = (0 until numLstmLayer).map(l => + (s"l${l}_init_h_beta", (batchSize, numHidden)) + ) + val initStates = initC ++ initH + + val dataTrain = new BucketSentenceIter(trainData, vocab, + buckets, batchSize, initStates) + val dataVal = new BucketSentenceIter(validationData, vocab, + buckets, batchSize, initStates) + + val model = new BucketingModule( + symGen = BucketSymGen, + defaultBucketKey = dataTrain.defaultBucketKey, + contexts = ctx) + + val fitParams = new FitParams() + fitParams.setEvalMetric( + new CustomMetric(perplexity, name = "perplexity")) + fitParams.setKVStore("device") + fitParams.setOptimizer( + new SGD(learningRate = 0.01f, momentum = 0f, wd = 0.00001f)) + fitParams.setInitializer(new Xavier(factorType = "in", magnitude = 2.34f)) + fitParams.setBatchEndCallback(new Speedometer(batchSize, 50)) + + logger.info("Start training ...") + model.fit( + trainData = dataTrain, + evalData = Some(dataVal), + numEpoch = numEpoch, fitParams) + logger.info("Finished training...") + } + def main(args: Array[String]): Unit = { val inst = new LstmBucketing val parser: CmdLineParser = new CmdLineParser(inst) @@ -71,56 +124,7 @@ object LstmBucketing { else if (inst.cpus != null) inst.cpus.split(',').map(id => Context.cpu(id.trim.toInt)) else Array(Context.cpu(0)) - val batchSize = 32 - val buckets = Array(10, 20, 30, 40, 50, 60) - val numHidden = 200 - val numEmbed = 200 - val numLstmLayer = 2 - - logger.info("Building vocab ...") - val vocab = BucketIo.defaultBuildVocab(inst.dataTrain) - - def BucketSymGen(key: AnyRef): - (Symbol, IndexedSeq[String], IndexedSeq[String]) = { - val seqLen = key.asInstanceOf[Int] - val sym = Lstm.lstmUnroll(numLstmLayer, seqLen, vocab.size, - numHidden = numHidden, numEmbed = numEmbed, numLabel = vocab.size) - (sym, IndexedSeq("data"), IndexedSeq("softmax_label")) - } - - val initC = (0 until numLstmLayer).map(l => - (s"l${l}_init_c_beta", (batchSize, numHidden)) - ) - val initH = (0 until numLstmLayer).map(l => - (s"l${l}_init_h_beta", (batchSize, numHidden)) - ) - val initStates = initC ++ initH - - val dataTrain = new BucketSentenceIter(inst.dataTrain, vocab, - buckets, batchSize, initStates) - val dataVal = new BucketSentenceIter(inst.dataVal, vocab, - buckets, batchSize, initStates) - - val model = new BucketingModule( - symGen = BucketSymGen, - defaultBucketKey = dataTrain.defaultBucketKey, - contexts = contexts) - - val fitParams = new FitParams() - fitParams.setEvalMetric( - new CustomMetric(perplexity, name = "perplexity")) - fitParams.setKVStore("device") - fitParams.setOptimizer( - new SGD(learningRate = 0.01f, momentum = 0f, wd = 0.00001f)) - fitParams.setInitializer(new Xavier(factorType = "in", magnitude = 2.34f)) - fitParams.setBatchEndCallback(new Speedometer(batchSize, 50)) - - logger.info("Start training ...") - model.fit( - trainData = dataTrain, - evalData = Some(dataVal), - numEpoch = inst.numEpoch, fitParams) - logger.info("Finished training...") + runTraining(inst.dataTrain, inst.dataVal, contexts, 5) } catch { case ex: Exception => logger.error(ex.getMessage, ex) diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/README.md 
b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/README.md new file mode 100644 index 000000000000..5289fc7b1b4e --- /dev/null +++ b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/README.md @@ -0,0 +1,48 @@ +# RNN Example for MXNet Scala +This folder contains the following examples writing in new Scala type-safe API: +- [x] LSTM Bucketing +- [x] CharRNN Inference : Generate similar text based on the model +- [x] CharRNN Training: Training the language model using RNN + +These example is only for Illustration and not modeled to achieve the best accuracy. + +## Setup +### Download the Network Definition, Weights and Training Data +`obama.zip` contains the training inputs (Obama's speech) for CharCNN examples and `sherlockholmes` contains the data for LSTM Bucketing +```bash +https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/RNN/obama.zip +https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/RNN/sherlockholmes.train.txt +https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/RNN/sherlockholmes.valid.txt +``` +### Unzip the file +```bash +unzip obama.zip +``` +### Arguement Configuration +Then you need to define the arguments that you would like to pass in the model: + +#### LSTM Bucketing +```bash +--data-train +/sherlockholmes.train.txt +--data-val +/sherlockholmes.valid.txt +--cpus + +--gpus + +``` +#### TrainCharRnn +```bash +--data-path +/obama.txt +--save-model-path +/ +``` +#### TestCharRnn +```bash +--data-path +/obama.txt +--model-prefix +/obama +``` \ No newline at end of file diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/TestCharRnn.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/TestCharRnn.scala index 243b70c0670d..4786d5d59535 100644 --- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/TestCharRnn.scala +++ b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/TestCharRnn.scala @@ -25,66 +25,68 @@ import scala.collection.JavaConverters._ /** * Follows the demo, to test the char rnn: * https://github.com/dmlc/mxnet/blob/master/example/rnn/char-rnn.ipynb - * @author Depeng Liang */ object TestCharRnn { private val logger = LoggerFactory.getLogger(classOf[TrainCharRnn]) - def main(args: Array[String]): Unit = { - val stcr = new TestCharRnn - val parser: CmdLineParser = new CmdLineParser(stcr) - try { - parser.parseArgument(args.toList.asJava) - assert(stcr.dataPath != null && stcr.modelPrefix != null && stcr.starterSentence != null) + def runTestCharRNN(dataPath: String, modelPrefix: String, starterSentence : String): Unit = { + // The batch size for training + val batchSize = 32 + // We can support various length input + // For this problem, we cut each input sentence to length of 129 + // So we only need fix length bucket + val buckets = List(129) + // hidden unit in LSTM cell + val numHidden = 512 + // embedding dimension, which is, map a char to a 256 dim vector + val numEmbed = 256 + // number of lstm layer + val numLstmLayer = 3 - // The batch size for training - val batchSize = 32 - // We can support various length input - // For this problem, we cut each input sentence to length of 129 - // So we only need fix length bucket - val buckets = List(129) - // hidden unit in LSTM cell - val numHidden = 512 - // embedding dimension, which is, map a char to a 256 dim vector - val numEmbed = 256 - // number of lstm layer - val numLstmLayer = 3 + // build char vocabluary from input + val vocab = 
Utils.buildVocab(dataPath) - // build char vocabluary from input - val vocab = Utils.buildVocab(stcr.dataPath) + // load from check-point + val (_, argParams, _) = Model.loadCheckpoint(modelPrefix, 75) - // load from check-point - val (_, argParams, _) = Model.loadCheckpoint(stcr.modelPrefix, 75) + // build an inference model + val model = new RnnModel.LSTMInferenceModel(numLstmLayer, vocab.size + 1, + numHidden = numHidden, numEmbed = numEmbed, + numLabel = vocab.size + 1, argParams = argParams, dropout = 0.2f) - // build an inference model - val model = new RnnModel.LSTMInferenceModel(numLstmLayer, vocab.size + 1, - numHidden = numHidden, numEmbed = numEmbed, - numLabel = vocab.size + 1, argParams = argParams, dropout = 0.2f) + // generate a sequence of 1200 chars + val seqLength = 1200 + val inputNdarray = NDArray.zeros(1) + val revertVocab = Utils.makeRevertVocab(vocab) - // generate a sequence of 1200 chars - val seqLength = 1200 - val inputNdarray = NDArray.zeros(1) - val revertVocab = Utils.makeRevertVocab(vocab) + // Feel free to change the starter sentence + var output = starterSentence + val randomSample = true + var newSentence = true + val ignoreLength = output.length() - // Feel free to change the starter sentence - var output = stcr.starterSentence - val randomSample = true - var newSentence = true - val ignoreLength = output.length() + for (i <- 0 until seqLength) { + if (i <= ignoreLength - 1) Utils.makeInput(output(i), vocab, inputNdarray) + else Utils.makeInput(output.takeRight(1)(0), vocab, inputNdarray) + val prob = model.forward(inputNdarray, newSentence) + newSentence = false + val nextChar = Utils.makeOutput(prob, revertVocab, randomSample) + if (nextChar == "") newSentence = true + if (i >= ignoreLength) output = output ++ nextChar + } - for (i <- 0 until seqLength) { - if (i <= ignoreLength - 1) Utils.makeInput(output(i), vocab, inputNdarray) - else Utils.makeInput(output.takeRight(1)(0), vocab, inputNdarray) - val prob = model.forward(inputNdarray, newSentence) - newSentence = false - val nextChar = Utils.makeOutput(prob, revertVocab, randomSample) - if (nextChar == "") newSentence = true - if (i >= ignoreLength) output = output ++ nextChar - } + // Let's see what we can learned from char in Obama's speech. + logger.info(output) + } - // Let's see what we can learned from char in Obama's speech. 
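An editorial note on the sampling step above: `Utils.makeOutput(prob, revertVocab, randomSample)` picks the next character either by weighted random sampling over the softmax output or greedily. A minimal Python sketch of the same idea, using made-up names (`next_char`, `revert_vocab`) that are not part of this codebase:

```python
# Illustrative only: what the randomSample flag amounts to when decoding.
import numpy as np

def next_char(prob, revert_vocab, random_sample=True):
    """Pick the next character from a softmax probability vector."""
    if random_sample:
        idx = np.random.choice(len(prob), p=prob)   # sample in proportion to prob
    else:
        idx = int(np.argmax(prob))                  # greedy decoding
    return revert_vocab.get(idx, "")                # "" marks the start of a new sentence
```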
- logger.info(output) + def main(args: Array[String]): Unit = { + val stcr = new TestCharRnn + val parser: CmdLineParser = new CmdLineParser(stcr) + try { + parser.parseArgument(args.toList.asJava) + assert(stcr.dataPath != null && stcr.modelPrefix != null && stcr.starterSentence != null) + runTestCharRNN(stcr.dataPath, stcr.modelPrefix, stcr.starterSentence) } catch { case ex: Exception => { logger.error(ex.getMessage, ex) diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/TrainCharRnn.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/TrainCharRnn.scala index 3afb93686b00..fb59705c9ef0 100644 --- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/TrainCharRnn.scala +++ b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/TrainCharRnn.scala @@ -24,143 +24,144 @@ import scala.collection.JavaConverters._ import org.apache.mxnet.optimizer.Adam /** - * Follows the demo, to train the char rnn: - * https://github.com/dmlc/mxnet/blob/master/example/rnn/char-rnn.ipynb - * @author Depeng Liang - */ + * Follows the demo, to train the char rnn: + * https://github.com/dmlc/mxnet/blob/master/example/rnn/char-rnn.ipynb + */ object TrainCharRnn { private val logger = LoggerFactory.getLogger(classOf[TrainCharRnn]) - def main(args: Array[String]): Unit = { - val incr = new TrainCharRnn - val parser: CmdLineParser = new CmdLineParser(incr) - try { - parser.parseArgument(args.toList.asJava) - assert(incr.dataPath != null && incr.saveModelPath != null) - - // The batch size for training - val batchSize = 32 - // We can support various length input - // For this problem, we cut each input sentence to length of 129 - // So we only need fix length bucket - val buckets = Array(129) - // hidden unit in LSTM cell - val numHidden = 512 - // embedding dimension, which is, map a char to a 256 dim vector - val numEmbed = 256 - // number of lstm layer - val numLstmLayer = 3 - // we will show a quick demo in 2 epoch - // and we will see result by training 75 epoch - val numEpoch = 75 - // learning rate - val learningRate = 0.001f - // we will use pure sgd without momentum - val momentum = 0.0f - - val ctx = if (incr.gpu == -1) Context.cpu() else Context.gpu(incr.gpu) - val vocab = Utils.buildVocab(incr.dataPath) - - // generate symbol for a length - def symGen(seqLen: Int): Symbol = { - Lstm.lstmUnroll(numLstmLayer, seqLen, vocab.size + 1, - numHidden = numHidden, numEmbed = numEmbed, - numLabel = vocab.size + 1, dropout = 0.2f) - } + def runTrainCharRnn(dataPath: String, saveModelPath: String, + ctx : Context, numEpoch : Int): Unit = { + // The batch size for training + val batchSize = 32 + // We can support various length input + // For this problem, we cut each input sentence to length of 129 + // So we only need fix length bucket + val buckets = Array(129) + // hidden unit in LSTM cell + val numHidden = 512 + // embedding dimension, which is, map a char to a 256 dim vector + val numEmbed = 256 + // number of lstm layer + val numLstmLayer = 3 + // we will show a quick demo in 2 epoch + // learning rate + val learningRate = 0.001f + // we will use pure sgd without momentum + val momentum = 0.0f + + val vocab = Utils.buildVocab(dataPath) + + // generate symbol for a length + def symGen(seqLen: Int): Symbol = { + Lstm.lstmUnroll(numLstmLayer, seqLen, vocab.size + 1, + numHidden = numHidden, numEmbed = numEmbed, + numLabel = vocab.size + 1, dropout = 0.2f) + } - // initalize states for LSTM - val initC = for (l <- 0 until numLstmLayer) 
- yield (s"l${l}_init_c_beta", (batchSize, numHidden)) - val initH = for (l <- 0 until numLstmLayer) - yield (s"l${l}_init_h_beta", (batchSize, numHidden)) - val initStates = initC ++ initH + // initalize states for LSTM + val initC = for (l <- 0 until numLstmLayer) + yield (s"l${l}_init_c_beta", (batchSize, numHidden)) + val initH = for (l <- 0 until numLstmLayer) + yield (s"l${l}_init_h_beta", (batchSize, numHidden)) + val initStates = initC ++ initH - val dataTrain = new BucketIo.BucketSentenceIter(incr.dataPath, vocab, buckets, - batchSize, initStates, seperateChar = "\n", - text2Id = Utils.text2Id, readContent = Utils.readContent) + val dataTrain = new BucketIo.BucketSentenceIter(dataPath, vocab, buckets, + batchSize, initStates, seperateChar = "\n", + text2Id = Utils.text2Id, readContent = Utils.readContent) - // the network symbol - val symbol = symGen(buckets(0)) + // the network symbol + val symbol = symGen(buckets(0)) - val datasAndLabels = dataTrain.provideData ++ dataTrain.provideLabel - val (argShapes, outputShapes, auxShapes) = symbol.inferShape(datasAndLabels) + val datasAndLabels = dataTrain.provideData ++ dataTrain.provideLabel + val (argShapes, outputShapes, auxShapes) = symbol.inferShape(datasAndLabels) - val initializer = new Xavier(factorType = "in", magnitude = 2.34f) + val initializer = new Xavier(factorType = "in", magnitude = 2.34f) - val argNames = symbol.listArguments() - val argDict = argNames.zip(argShapes.map(NDArray.zeros(_, ctx))).toMap - val auxNames = symbol.listAuxiliaryStates() - val auxDict = auxNames.zip(auxShapes.map(NDArray.zeros(_, ctx))).toMap + val argNames = symbol.listArguments() + val argDict = argNames.zip(argShapes.map(NDArray.zeros(_, ctx))).toMap + val auxNames = symbol.listAuxiliaryStates() + val auxDict = auxNames.zip(auxShapes.map(NDArray.zeros(_, ctx))).toMap - val gradDict = argNames.zip(argShapes).filter { case (name, shape) => - !datasAndLabels.contains(name) - }.map(x => x._1 -> NDArray.empty(x._2, ctx) ).toMap + val gradDict = argNames.zip(argShapes).filter { case (name, shape) => + !datasAndLabels.contains(name) + }.map(x => x._1 -> NDArray.empty(x._2, ctx) ).toMap - argDict.foreach { case (name, ndArray) => - if (!datasAndLabels.contains(name)) { - initializer.initWeight(name, ndArray) - } + argDict.foreach { case (name, ndArray) => + if (!datasAndLabels.contains(name)) { + initializer.initWeight(name, ndArray) } + } - val data = argDict("data") - val label = argDict("softmax_label") + val data = argDict("data") + val label = argDict("softmax_label") - val executor = symbol.bind(ctx, argDict, gradDict) + val executor = symbol.bind(ctx, argDict, gradDict) - val opt = new Adam(learningRate = learningRate, wd = 0.0001f) + val opt = new Adam(learningRate = learningRate, wd = 0.0001f) - val paramsGrads = gradDict.toList.zipWithIndex.map { case ((name, grad), idx) => - (idx, name, grad, opt.createState(idx, argDict(name))) - } + val paramsGrads = gradDict.toList.zipWithIndex.map { case ((name, grad), idx) => + (idx, name, grad, opt.createState(idx, argDict(name))) + } - val evalMetric = new CustomMetric(Utils.perplexity, "perplexity") - val batchEndCallback = new Callback.Speedometer(batchSize, 50) - val epochEndCallback = Utils.doCheckpoint(s"${incr.saveModelPath}/obama") - - for (epoch <- 0 until numEpoch) { - // Training phase - val tic = System.currentTimeMillis - evalMetric.reset() - var nBatch = 0 - var epochDone = false - // Iterate over training data. 
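For readers who know the Python front end better than the Scala one, the loop being moved into `runTrainCharRnn` is the classic manual executor pattern: bind the symbol, then forward, backward and per-parameter update for each batch. A rough, hedged sketch of the same pattern in Python; `sym`, `data_iter` and the shapes are placeholders, and a plain SGD step stands in for the Adam optimizer used above:

```python
import mxnet as mx

def train_one_epoch(sym, data_iter, lr=0.001, ctx=mx.cpu()):
    # bind with fixed-shape inputs (placeholder shapes)
    exe = sym.simple_bind(ctx=ctx, data=(32, 129), softmax_label=(32, 129))
    for batch in data_iter:
        exe.arg_dict['data'][:] = batch.data[0]
        exe.arg_dict['softmax_label'][:] = batch.label[0]
        exe.forward(is_train=True)
        exe.backward()
        # update every learnable argument from its gradient
        for name, grad in zip(sym.list_arguments(), exe.grad_arrays):
            if grad is None or name in ('data', 'softmax_label'):
                continue
            exe.arg_dict[name][:] = exe.arg_dict[name] - lr * grad
```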
- dataTrain.reset() - while (!epochDone) { - var doReset = true - while (doReset && dataTrain.hasNext) { - val dataBatch = dataTrain.next() - - data.set(dataBatch.data(0)) - label.set(dataBatch.label(0)) - executor.forward(isTrain = true) - executor.backward() - paramsGrads.foreach { case (idx, name, grad, optimState) => - opt.update(idx, argDict(name), grad, optimState) - } - - // evaluate at end, so out_cpu_array can lazy copy - evalMetric.update(dataBatch.label, executor.outputs) - - nBatch += 1 - batchEndCallback.invoke(epoch, nBatch, evalMetric) + val evalMetric = new CustomMetric(Utils.perplexity, "perplexity") + val batchEndCallback = new Callback.Speedometer(batchSize, 50) + val epochEndCallback = Utils.doCheckpoint(s"${saveModelPath}/obama") + + for (epoch <- 0 until numEpoch) { + // Training phase + val tic = System.currentTimeMillis + evalMetric.reset() + var nBatch = 0 + var epochDone = false + // Iterate over training data. + dataTrain.reset() + while (!epochDone) { + var doReset = true + while (doReset && dataTrain.hasNext) { + val dataBatch = dataTrain.next() + + data.set(dataBatch.data(0)) + label.set(dataBatch.label(0)) + executor.forward(isTrain = true) + executor.backward() + paramsGrads.foreach { case (idx, name, grad, optimState) => + opt.update(idx, argDict(name), grad, optimState) } - if (doReset) { - dataTrain.reset() - } - // this epoch is done - epochDone = true + + // evaluate at end, so out_cpu_array can lazy copy + evalMetric.update(dataBatch.label, executor.outputs) + + nBatch += 1 + batchEndCallback.invoke(epoch, nBatch, evalMetric) } - val (name, value) = evalMetric.get - name.zip(value).foreach { case (n, v) => - logger.info(s"Epoch[$epoch] Train-$n=$v") + if (doReset) { + dataTrain.reset() } - val toc = System.currentTimeMillis - logger.info(s"Epoch[$epoch] Time cost=${toc - tic}") - - epochEndCallback.invoke(epoch, symbol, argDict, auxDict) + // this epoch is done + epochDone = true } - executor.dispose() + val (name, value) = evalMetric.get + name.zip(value).foreach { case (n, v) => + logger.info(s"Epoch[$epoch] Train-$n=$v") + } + val toc = System.currentTimeMillis + logger.info(s"Epoch[$epoch] Time cost=${toc - tic}") + + epochEndCallback.invoke(epoch, symbol, argDict, auxDict) + } + executor.dispose() + } + + def main(args: Array[String]): Unit = { + val incr = new TrainCharRnn + val parser: CmdLineParser = new CmdLineParser(incr) + try { + parser.parseArgument(args.toList.asJava) + val ctx = if (incr.gpu == -1) Context.cpu() else Context.gpu(incr.gpu) + assert(incr.dataPath != null && incr.saveModelPath != null) + runTrainCharRnn(incr.dataPath, incr.saveModelPath, ctx, 75) } catch { case ex: Exception => { logger.error(ex.getMessage, ex) @@ -172,12 +173,6 @@ object TrainCharRnn { } class TrainCharRnn { - /* - * Get Training Data: E.g. 
- * mkdir data; cd data - * wget "http://data.mxnet.io/mxnet/data/char_lstm.zip" - * unzip -o char_lstm.zip - */ @Option(name = "--data-path", usage = "the input train data file") private val dataPath: String = "./data/obama.txt" @Option(name = "--save-model-path", usage = "the model saving path") diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/Utils.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/Utils.scala index c2902309679d..3f9a9842e0a9 100644 --- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/Utils.scala +++ b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/Utils.scala @@ -25,9 +25,6 @@ import org.apache.mxnet.Model import org.apache.mxnet.Symbol import scala.util.Random -/** - * @author Depeng Liang - */ object Utils { def readContent(path: String): String = Source.fromFile(path).mkString diff --git a/scala-package/examples/src/test/scala/org/apache/mxnetexamples/rnn/ExampleRNNSuite.scala b/scala-package/examples/src/test/scala/org/apache/mxnetexamples/rnn/ExampleRNNSuite.scala new file mode 100644 index 000000000000..b393a433305a --- /dev/null +++ b/scala-package/examples/src/test/scala/org/apache/mxnetexamples/rnn/ExampleRNNSuite.scala @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.mxnetexamples.rnn + + +import org.apache.mxnet.{Context, NDArrayCollector} +import org.apache.mxnetexamples.Util +import org.scalatest.{BeforeAndAfterAll, FunSuite, Ignore} +import org.slf4j.LoggerFactory + +import scala.sys.process.Process + +@Ignore +class ExampleRNNSuite extends FunSuite with BeforeAndAfterAll { + private val logger = LoggerFactory.getLogger(classOf[ExampleRNNSuite]) + + override def beforeAll(): Unit = { + logger.info("Downloading LSTM model") + val tempDirPath = System.getProperty("java.io.tmpdir") + logger.info("tempDirPath: %s".format(tempDirPath)) + val baseUrl = "https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/RNN/" + Util.downloadUrl(baseUrl + "obama.zip", tempDirPath + "/RNN/obama.zip") + Util.downloadUrl(baseUrl + "sherlockholmes.train.txt", + tempDirPath + "/RNN/sherlockholmes.train.txt") + Util.downloadUrl(baseUrl + "sherlockholmes.valid.txt", + tempDirPath + "/RNN/sherlockholmes.valid.txt") + // TODO: Need to confirm with Windows + Process(s"unzip $tempDirPath/RNN/obama.zip -d $tempDirPath/RNN/") ! 
+ } + + test("Example CI: Test LSTM Bucketing") { + val tempDirPath = System.getProperty("java.io.tmpdir") + var ctx = Context.cpu() + if (System.getenv().containsKey("SCALA_TEST_ON_GPU") && + System.getenv("SCALA_TEST_ON_GPU").toInt == 1) { + ctx = Context.gpu() + } + LstmBucketing.runTraining(tempDirPath + "/RNN/sherlockholmes.train.txt", + tempDirPath + "/RNN/sherlockholmes.valid.txt", Array(ctx), 1) + } + + test("Example CI: Test TrainCharRNN") { + val tempDirPath = System.getProperty("java.io.tmpdir") + if (System.getenv().containsKey("SCALA_TEST_ON_GPU") && + System.getenv("SCALA_TEST_ON_GPU").toInt == 1) { + val ctx = Context.gpu() + TrainCharRnn.runTrainCharRnn(tempDirPath + "/RNN/obama.txt", + tempDirPath, ctx, 1) + } else { + logger.info("CPU not supported for this test, skipped...") + } + } + + test("Example CI: Test TestCharRNN") { + val tempDirPath = System.getProperty("java.io.tmpdir") + val ctx = Context.gpu() + TestCharRnn.runTestCharRNN(tempDirPath + "/RNN/obama.txt", + tempDirPath + "/RNN/obama", "The joke") + } +} From bf1edafcd65c65e57a1a40c9f978af4a452dcf00 Mon Sep 17 00:00:00 2001 From: Andrew Ayres Date: Tue, 21 Aug 2018 14:48:09 -0700 Subject: [PATCH 022/160] Fix JNI custom op code from deregistering the operator (#11885) --- .../native/org_apache_mxnet_native_c_api.cc | 50 ++++--------------- 1 file changed, 11 insertions(+), 39 deletions(-) diff --git a/scala-package/native/src/main/native/org_apache_mxnet_native_c_api.cc b/scala-package/native/src/main/native/org_apache_mxnet_native_c_api.cc index d944a8d049ca..95325f3a6a2e 100644 --- a/scala-package/native/src/main/native/org_apache_mxnet_native_c_api.cc +++ b/scala-package/native/src/main/native/org_apache_mxnet_native_c_api.cc @@ -1898,9 +1898,6 @@ JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxRtcFree // store the user defined CustomOpProp object reference with its name std::unordered_map globalOpPropMap; -// store how many time of the delete function was called -// for a specific CustomOpProp object -std::unordered_map globalOpPropCountMap; // store the user defined CustomOp object reference with its name std::unordered_map globalOpMap; // used for thread safty when insert elements into @@ -1908,6 +1905,7 @@ std::unordered_map globalOpMap; std::mutex mutex_opprop; std::mutex mutex_op; +// Registers a custom operator when called JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxCustomOpRegister (JNIEnv *env, jobject obj, jstring jregName, jobject jopProp) { const char *regName = env->GetStringUTFChars(jregName, 0); @@ -1915,14 +1913,13 @@ JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxCustomOpRegister std::unique_lock lock(mutex_opprop); globalOpPropMap.insert({ key, env->NewGlobalRef(jopProp) }); - globalOpPropCountMap.insert({ key, 0 }); lock.unlock(); + // lambda function to initialize the operator and create all callbacks auto creatorLambda = [](const char *opType, const int numKwargs, const char **keys, const char **values, MXCallbackList *ret) { int success = true; - // set CustomOpProp.kwargs std::string opPropKey(opType); if (globalOpPropMap.find(opPropKey) == globalOpPropMap.end()) { LOG(WARNING) << "CustomOpProp: " << opPropKey << " not found"; @@ -1937,7 +1934,7 @@ JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxCustomOpRegister LOG(WARNING) << "could not find CustomOpProp method init."; success = false; } else { - // call init + // call init and set CustomOpProp.kwargs jclass strCls = env->FindClass("Ljava/lang/String;"); jobjectArray keysArr = 
env->NewObjectArray(numKwargs, strCls, NULL); jobjectArray valuesArr = env->NewObjectArray(numKwargs, strCls, NULL); @@ -2419,39 +2416,14 @@ JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxCustomOpRegister // del callback auto opPropDel = [](void *state) { - std::string key(reinterpret_cast(state)); - std::unique_lock lock(mutex_opprop); - int count_prop = globalOpPropCountMap.at(key); - if (count_prop < 2) { - globalOpPropCountMap[key] = ++count_prop; - return 1; - } - int success = true; - if (globalOpPropMap.find(key) == globalOpPropMap.end()) { - LOG(WARNING) << "opProp: " << key << " not found"; - success = false; - } else { - JNIEnv *env; - _jvm->AttachCurrentThread(reinterpret_cast(&env), NULL); - env->DeleteGlobalRef(globalOpPropMap.at(key)); - _jvm->DetachCurrentThread(); - for (auto it = globalOpPropMap.begin(); it != globalOpPropMap.end(); ) { - if (it->first == key) { - it = globalOpPropMap.erase(it); - } else { - ++it; - } - } - for (auto it = globalOpPropCountMap.begin(); it != globalOpPropCountMap.end(); ) { - if (it->first == key) { - it = globalOpPropCountMap.erase(it); - } else { - ++it; - } - } - } - lock.unlock(); - return success; + /* + * This method seems to be called by the engine to clean up after multiple calls were made + * to the creator lambda. The current creator function isn't allocating a new object but is + * instead reinitializing the object which was created when register was called. This means + * that there doesn't seem to be anything to clean up here (previous efforts were actually + * deregistering the operator). + */ + return 1; }; // TODO(eric): Memory leak. Missing infertype. From 78c88df521b88107b1b1655737f6b668774c94c4 Mon Sep 17 00:00:00 2001 From: Anirudh Date: Tue, 21 Aug 2018 15:56:04 -0700 Subject: [PATCH 023/160] [MXNET-628] Fix example on text classification using LSTMs on IMDB dataset (#12263) * Fix tutorial on text classification using LSTMs on IMDB dataset * add training and test accuracy results --- .../bucket_R/aclImdb_lstm_classification.R | 37 +++++++++++++------ 1 file changed, 26 insertions(+), 11 deletions(-) diff --git a/example/rnn/bucket_R/aclImdb_lstm_classification.R b/example/rnn/bucket_R/aclImdb_lstm_classification.R index f8a55a4db50f..f5e6659aadab 100644 --- a/example/rnn/bucket_R/aclImdb_lstm_classification.R +++ b/example/rnn/bucket_R/aclImdb_lstm_classification.R @@ -40,20 +40,34 @@ optimizer <- mx.opt.create("adadelta", rho = 0.92, epsilon = 1e-06, wd = 2e-04, bucket_list <- unique(c(train.data$bucket.names, eval.data$bucket.names)) symbol_buckets <- sapply(bucket_list, function(seq) { - rnn.graph(config = "seq-to-one", cell_type = "lstm", - num_rnn_layer = 1, num_embed = 2, num_hidden = 6, - num_decode = 2, input_size = vocab, dropout = 0.5, - ignore_label = -1, loss_output = "softmax", - output_last_state = F, masking = T) + rnn.graph(config = "seq-to-one", + cell_type = "lstm", + num_rnn_layer = 1, + num_embed = 2, + num_hidden = 6, + num_decode = 2, + input_size = vocab, + dropout = 0.2, + ignore_label = -1, + loss_output = "softmax", + output_last_state = F, + masking = T) }) +# Accuracy on Training Data = 0.84066 model_sentiment_lstm <- mx.model.buckets(symbol = symbol_buckets, - train.data = train.data, eval.data = eval.data, - num.round = num.round, ctx = devices, verbose = FALSE, - metric = mx.metric.accuracy, optimizer = optimizer, - initializer = initializer, - batch.end.callback = NULL, - epoch.end.callback = epoch.end.callback) + train.data = train.data, + eval.data = eval.data, + num.round = 
num.round, + ctx = devices, + verbose = FALSE, + metric = mx.metric.accuracy, + optimizer = optimizer, + initializer = mx.init.Xavier(rnd_type = "gaussian", + factor_type = "in", + magnitude = 2), + batch.end.callback = mx.callback.log.train.metric(period = 50), + epoch.end.callback = NULL) mx.model.save(model_sentiment_lstm, prefix = "model_sentiment_lstm", iteration = num.round) model <- mx.model.load("model_sentiment_lstm", iteration = num.round) @@ -72,6 +86,7 @@ while (eval.data$iter.next()) { ylabel <- as.array(packer$get()) +# Accuracy on Test Data = 0.81194 acc <- sum(ylabel == ypred)/length(ylabel) message(paste("Acc:", acc)) From 250c4fff1c47a2cc71055485d12815718e55c8a7 Mon Sep 17 00:00:00 2001 From: Kellen Sunderland Date: Wed, 22 Aug 2018 01:04:03 +0200 Subject: [PATCH 024/160] Add fpic correctly in cmake (#12281) --- CMakeLists.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 8c3e635682a7..adff53336654 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -131,7 +131,8 @@ else(MSVC) else() set(SUPPORT_F16C FALSE) endif() - set(CMAKE_C_FLAGS "-Wall -Wno-unknown-pragmas -fPIC -Wno-sign-compare") + set(CMAKE_POSITION_INDEPENDENT_CODE ON) + set(CMAKE_C_FLAGS "-Wall -Wno-unknown-pragmas -Wno-sign-compare") if ("${CMAKE_CXX_COMPILER_ID}" MATCHES ".*Clang$") set(CMAKE_C_FLAGS "-Wno-braced-scalar-init") endif() From 96ac3ae540adc3a382dda27fc09006468ab40fb0 Mon Sep 17 00:00:00 2001 From: Vishaal Kapoor <40836875+vishaalkapoor@users.noreply.github.com> Date: Tue, 21 Aug 2018 17:58:17 -0700 Subject: [PATCH 025/160] [MXAPPS-581] Enable remaining tests in CI (#12165) * Add scikit-image dependency. * Enable remaining notebooks. --- ci/docker/install/ubuntu_nightly_tests.sh | 5 +- .../straight_dope/straight_dope_test_utils.py | 16 ++--- .../straight_dope/test_notebooks_multi_gpu.py | 15 ++--- .../test_notebooks_single_gpu.py | 61 ++++++------------- 4 files changed, 35 insertions(+), 62 deletions(-) diff --git a/ci/docker/install/ubuntu_nightly_tests.sh b/ci/docker/install/ubuntu_nightly_tests.sh index 0e6b437a1d89..68358908bdc9 100755 --- a/ci/docker/install/ubuntu_nightly_tests.sh +++ b/ci/docker/install/ubuntu_nightly_tests.sh @@ -32,6 +32,5 @@ apt-get -y install time apt-get install -y subversion maven -y #>/dev/null # Packages needed for the Straight Dope Nightly tests. -pip2 install pandas -pip3 install pandas - +pip2 install pandas scikit-image +pip3 install pandas scikit-image diff --git a/tests/nightly/straight_dope/straight_dope_test_utils.py b/tests/nightly/straight_dope/straight_dope_test_utils.py index ee499a56fbd3..976c234fa997 100644 --- a/tests/nightly/straight_dope/straight_dope_test_utils.py +++ b/tests/nightly/straight_dope/straight_dope_test_utils.py @@ -41,7 +41,7 @@ GIT_REPO = 'https://github.com/zackchase/mxnet-the-straight-dope' KERNEL = os.getenv('MXNET_TEST_KERNEL', None) NOTEBOOKS_DIR = os.path.join(os.path.dirname(__file__), 'tmp_notebook') -RELATIVE_DATA_PATH_REGEX = r'\.\.\/data\/' # Regular expression to match the relative data path. +RELATIVE_PATH_REGEX = r'\.\.(?=\/(data|img)\/)' # Regular expression to match the relative data path. def _test_notebook(notebook, override_epochs=True): """Run Jupyter notebook to catch any execution error. @@ -55,8 +55,9 @@ def _test_notebook(notebook, override_epochs=True): Returns: True if the notebook runs without warning or error. """ - # Some notebooks will fail to run without error if we do not override the data path. 
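A quick illustration of the `RELATIVE_PATH_REGEX` introduced above: the lookahead `(?=\/(data|img)\/)` means only the `..` is consumed by the match, so both `../data/...` and `../img/...` references get rewritten onto the test's notebook directory. A standalone check; the cell text and directory below are invented for the example:

```python
import re

RELATIVE_PATH_REGEX = r'\.\.(?=\/(data|img)\/)'   # as defined in the diff above
NOTEBOOKS_DIR = '/tmp/tmp_notebook'               # placeholder value

cell = "imread('../img/cat.png'); open('../data/train.csv')"
print(re.sub(RELATIVE_PATH_REGEX, NOTEBOOKS_DIR, cell))
# imread('/tmp/tmp_notebook/img/cat.png'); open('/tmp/tmp_notebook/data/train.csv')
```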
- _override_data_path(notebook) + # Some notebooks will fail to run without error if we do not override + # relative paths to the data and image directories. + _override_relative_paths(notebook) if override_epochs: _override_epochs(notebook) @@ -85,9 +86,10 @@ def _override_epochs(notebook): f.write(modified_notebook) -def _override_data_path(notebook): - """Overrides the relative path for the data directory to point to the right place. This is - required as we run the notebooks in a different directory hierarchy more suitable for testing. +def _override_relative_paths(notebook): + """Overrides the relative path for the data and image directories to point + to the right places. This is required as we run the notebooks in a different + directory hierarchy more suitable for testing. Args: notebook : string @@ -100,7 +102,7 @@ def _override_data_path(notebook): notebook = f.read() # Update the location for the data directory. - modified_notebook = re.sub(RELATIVE_DATA_PATH_REGEX, NOTEBOOKS_DIR + '/data/', notebook) + modified_notebook = re.sub(RELATIVE_PATH_REGEX, NOTEBOOKS_DIR, notebook) # Replace the original notebook with the modified one. with io.open(notebook_path, 'w', encoding='utf-8') as f: diff --git a/tests/nightly/straight_dope/test_notebooks_multi_gpu.py b/tests/nightly/straight_dope/test_notebooks_multi_gpu.py index ef07550bdf78..c5dffa455ce1 100644 --- a/tests/nightly/straight_dope/test_notebooks_multi_gpu.py +++ b/tests/nightly/straight_dope/test_notebooks_multi_gpu.py @@ -33,19 +33,16 @@ def setUpClass(self): # Chapter 7 - # TODO(vishaalk): module 'mxnet.gluon' has no attribute 'autograd' - #def test_multiple_gpus_scratch(self): - # assert _test_notebook('chapter07_distributed-learning/multiple-gpus-scratch') + def test_multiple_gpus_scratch(self): + assert _test_notebook('chapter07_distributed-learning/multiple-gpus-scratch') def test_multiple_gpus_gluon(self): assert _test_notebook('chapter07_distributed-learning/multiple-gpus-gluon') - # TODO(vishaalk): Do a dry run, and then enable. - #def test_training_with_multiple_machines(self): - # assert _test_notebook('chapter07_distributed-learning/training-with-multiple-machines') + def test_training_with_multiple_machines(self): + assert _test_notebook('chapter07_distributed-learning/training-with-multiple-machines') # Chapter 8 - # TODO(vishaalk): Module skimage needs to be added to docker image. - # def test_fine_tuning(self): - # assert _test_notebook('chapter08_computer-vision/fine-tuning') + def test_fine_tuning(self): + assert _test_notebook('chapter08_computer-vision/fine-tuning') diff --git a/tests/nightly/straight_dope/test_notebooks_single_gpu.py b/tests/nightly/straight_dope/test_notebooks_single_gpu.py index fca49f43adee..a60498c87868 100644 --- a/tests/nightly/straight_dope/test_notebooks_single_gpu.py +++ b/tests/nightly/straight_dope/test_notebooks_single_gpu.py @@ -34,9 +34,13 @@ 'chapter01_crashcourse/chapter-one-problem-set', 'chapter02_supervised-learning/environment', 'chapter03_deep-neural-networks/kaggle-gluon-kfold', + 'chapter04_convolutional-neural-networks/deep-cnns-alexnet', # > 10 mins. + 'chapter06_optimization/gd-sgd-scratch', # Overflow warning is intended. + 'chapter06_optimization/gd-sgd-gluon', # Overflow warning is intended. 'chapter07_distributed-learning/multiple-gpus-scratch', 'chapter07_distributed-learning/multiple-gpus-gluon', 'chapter07_distributed-learning/training-with-multiple-machines', + 'chapter11_recommender-systems/intro-recommender-systems', # Early draft, non-working. 
'chapter12_time-series/intro-forecasting-gluon', 'chapter12_time-series/intro-forecasting-2-gluon', 'chapter13_unsupervised-learning/vae-gluon', @@ -45,6 +49,8 @@ 'chapter17_deep-reinforcement-learning/DDQN', 'chapter19_graph-neural-networks/Graph-Neural-Networks', 'chapter16_tensor_methods/tensor_basics', + 'chapter18_variational-methods-and-uncertainty/bayes-by-backprop', # > 10 mins. + 'chapter18_variational-methods-and-uncertainty/bayes-by-backprop-gluon', # > 10 mins. 'cheatsheets/kaggle-gluon-kfold' ] @@ -139,9 +145,8 @@ def test_plumbing(self): def test_custom_layer(self): assert _test_notebook('chapter03_deep-neural-networks/custom-layer') - # TODO(vishaalk): Load params and Save params are deprecated warning. - #def test_serialization(self): - # assert _test_notebook('chapter03_deep-neural-networks/serialization') + def test_serialization(self): + assert _test_notebook('chapter03_deep-neural-networks/serialization') # Chapter 4 @@ -151,10 +156,6 @@ def test_cnn_scratch(self): def test_cnn_gluon(self): assert _test_notebook('chapter04_convolutional-neural-networks/cnn-gluon') - # TODO(vishaalk): Load params and Save params are deprecated warning. - #def test_deep_cnns_alexnet(self): - # assert _test_notebook('chapter04_convolutional-neural-networks/deep-cnns-alexnet') - def test_very_deep_nets_vgg(self): assert _test_notebook('chapter04_convolutional-neural-networks/very-deep-nets-vgg') @@ -175,22 +176,14 @@ def test_lstm_scratch(self): def test_gru_scratch(self): assert _test_notebook('chapter05_recurrent-neural-networks/gru-scratch') - #def test_rnns_gluon(self): - # assert _test_notebook('chapter05_recurrent-neural-networks/rnns-gluon') + def test_rnns_gluon(self): + assert _test_notebook('chapter05_recurrent-neural-networks/rnns-gluon') # Chapter 6 def test_optimization_intro(self): assert _test_notebook('chapter06_optimization/optimization-intro') - # TODO(vishaalk): RuntimeWarning: Overflow encountered in reduce. - #def test_gd_sgd_scratch(self): - # assert _test_notebook('chapter06_optimization/gd-sgd-scratch') - - # TODO(vishaalk): RuntimeWarning: Overflow encountered in reduce. - #def test_gd_sgd_gluon(self): - # assert _test_notebook('chapter06_optimization/gd-sgd-gluon') - def test_momentum_scratch(self): assert _test_notebook('chapter06_optimization/momentum-scratch') @@ -229,29 +222,20 @@ def test_hybridize(self): # Chapter 8 - # TODO(vishaalk): Load params and Save params are deprecated warning. - #def test_object_detection(self): - # assert _test_notebook('chapter08_computer-vision/object-detection') + def test_object_detection(self): + assert _test_notebook('chapter08_computer-vision/object-detection') - # TODO(vishaalk): Module skimage needs to be added to docker image. - #def test_fine_tuning(self): - # assert _test_notebook('chapter08_computer-vision/fine-tuning') + def test_fine_tuning(self): + assert _test_notebook('chapter08_computer-vision/fine-tuning') - # TODO(vishaalk): - #def test_visual_question_answer(self): - # assert _test_notebook('chapter08_computer-vision/visual-question-answer') + def test_visual_question_answer(self): + assert _test_notebook('chapter08_computer-vision/visual-question-answer') # Chapter 9 def test_tree_lstm(self): assert _test_notebook('chapter09_natural-language-processing/tree-lstm') - # Chapter 11 - - # TODO(vishaalk): Deferred initialization failed because shape cannot be inferred. 
- #def test_intro_recommender_systems(self): - # assert _test_notebook('chapter11_recommender-systems/intro-recommender-systems') - # Chapter 12 def test_lds_scratch(self): @@ -271,14 +255,5 @@ def test_dcgan(self): def test_generative_adversarial_networks(self): assert _test_notebook('chapter14_generative-adversarial-networks/conditional') - # TODO(vishaalk): Investigate. - #def test_pixel2pixel(self): - # assert _test_notebook('chapter14_generative-adversarial-networks/pixel2pixel') - - # Chapter 18 - - #def test_bayes_by_backprop(self): - # assert _test_notebook('chapter18_variational-methods-and-uncertainty/bayes-by-backprop') - - #def test_bayes_by_backprop_gluon(self): - # assert _test_notebook('chapter18_variational-methods-and-uncertainty/bayes-by-backprop-gluon') + def test_pixel2pixel(self): + assert _test_notebook('chapter14_generative-adversarial-networks/pixel2pixel') From c692ffde3d7fede821043f9ec50ccdd062fbdc6c Mon Sep 17 00:00:00 2001 From: Junru Shao Date: Wed, 22 Aug 2018 09:48:33 +0800 Subject: [PATCH 026/160] [MXNET-795] Fix a bug that CutSubgraph works only when each subgraph has its distinct name (#12106) * Copy only when necessary * Fix typo * Add unittest --- python/mxnet/attribute.py | 2 + python/mxnet/symbol/contrib.py | 11 +++++ .../unittest/test_contrib_control_flow.py | 41 +++++++++++++++++++ 3 files changed, 54 insertions(+) diff --git a/python/mxnet/attribute.py b/python/mxnet/attribute.py index 17044ddaef06..1a7bd44c01d0 100644 --- a/python/mxnet/attribute.py +++ b/python/mxnet/attribute.py @@ -20,6 +20,7 @@ from __future__ import absolute_import import threading import warnings +from collections import defaultdict from .base import string_types, classproperty, with_metaclass, _MXClassPropertyMetaClass @@ -34,6 +35,7 @@ class AttrScope(with_metaclass(_MXClassPropertyMetaClass, object)): The attributes to set for all symbol creations in the scope. """ _current = threading.local() + _subgraph_names = defaultdict(int) def __init__(self, **kwargs): self._old_scope = None diff --git a/python/mxnet/symbol/contrib.py b/python/mxnet/symbol/contrib.py index 38195bd62ffa..f40a372fdbcd 100644 --- a/python/mxnet/symbol/contrib.py +++ b/python/mxnet/symbol/contrib.py @@ -124,6 +124,14 @@ def _cut_subgraph(subg): syms.append(s) return syms +def _get_unique_subgraph_name(subgraph_name): + attrs = AttrScope._current.value._attr + if attrs.get("__subgraph_name__", "") != "": + subgraph_name = "".join([attrs["__subgraph_name__"], "$", subgraph_name]) + AttrScope._subgraph_names[subgraph_name] += 1 + subgraph_name = subgraph_name + str(AttrScope._subgraph_names[subgraph_name] - 1) + return subgraph_name + # This construct a subgraph for given output nodes. # If an output node is one of the input nodes, we call identity to make sure # that outputs nodes are different from input nodes. @@ -232,6 +240,7 @@ def check_data(inputs, in_type, msg): # the python function, we need to prune the computation graph constructed from # the function. One way of doing it is to mark the nodes in the computation graph # with AttrScope and prune the nodes without the special attribute. 
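As a brief aside on the fix above: the core of `_get_unique_subgraph_name` is a per-name counter kept in `AttrScope._subgraph_names`, so two control-flow operators that share a base name still cut distinct subgraphs. A standalone sketch of the numbering scheme (the real helper additionally prefixes the enclosing `__subgraph_name__` with a `$` separator for nested control flow, which is omitted here):

```python
from collections import defaultdict

_subgraph_names = defaultdict(int)           # mirrors AttrScope._subgraph_names

def unique_subgraph_name(base):
    _subgraph_names[base] += 1
    return base + str(_subgraph_names[base] - 1)

print(unique_subgraph_name("my_cond_then"))  # my_cond_then0
print(unique_subgraph_name("my_cond_then"))  # my_cond_then1, no clash for a second op
```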
+ name = _get_unique_subgraph_name(name) with AttrScope(__subgraph_name__=name): if isinstance(data, list): in_eles = [symbol.var(sym.name) for sym in data] @@ -456,6 +465,7 @@ def _func_wrapper(loop_vars): return list(step_output), list(new_loop_vars) def _create_subgraph(graph_vars, graph_func, subgraph_name): + subgraph_name = _get_unique_subgraph_name(subgraph_name) with AttrScope(__subgraph_name__=subgraph_name): # create new variables with the same name, # them feed them to the given func @@ -619,6 +629,7 @@ def _to_symbol_tuple(inputs, name): return inputs def _create_subgraph(graph_vars, graph_func, subgraph_name): + subgraph_name = _get_unique_subgraph_name(subgraph_name) with AttrScope(__subgraph_name__=subgraph_name): # create new variables with the same name, # them feed them to the given func diff --git a/tests/python/unittest/test_contrib_control_flow.py b/tests/python/unittest/test_contrib_control_flow.py index 76d0218775b4..54f22a8fd6a7 100644 --- a/tests/python/unittest/test_contrib_control_flow.py +++ b/tests/python/unittest/test_contrib_control_flow.py @@ -20,8 +20,10 @@ import mxnet as mx from mxnet import gluon from numpy.testing import assert_allclose, assert_array_equal +from collections import defaultdict from mxnet.test_utils import * from mxnet.base import _as_list +from mxnet.attribute import AttrScope from common import with_seed @@ -1765,6 +1767,45 @@ def hybrid_forward(self, F, data): assert_almost_equal(res1.asnumpy(), res2.asnumpy(), rtol=1e-3, atol=1e-3) +def test_scope(): + class TestBlock1(gluon.HybridBlock): + def __init__(self, prefix=None, params=None): + super(TestBlock1, self).__init__(prefix=prefix, params=params) + def hybrid_forward(self, F, data): + (new_data, ) = F.contrib.cond( + data > 0.5, + then_func=lambda: data * 2, + else_func=lambda: data * 3, + name="my_cond", + ) + return new_data + class TestBlock2(gluon.HybridBlock): + def __init__(self, prefix=None, params=None): + super(TestBlock2, self).__init__(prefix=prefix, params=params) + def hybrid_forward(self, F, data): + (new_data, ) = F.contrib.cond( + data > 0.5, + then_func=lambda: data * 2, + else_func=lambda: data * 3, + name="my_cond", + ) + return new_data + AttrScope._subgraph_names = defaultdict(int) + data = mx.nd.normal(loc=0, scale=1, shape=(1, )) + block1 = TestBlock1() + block1.initialize(ctx=default_context()) + block1.hybridize() + _ = block1(data) + block2 = TestBlock2() + block2.initialize(ctx=default_context()) + block2.hybridize() + _ = block2(data) + assert len(AttrScope._subgraph_names) == 3 + assert AttrScope._subgraph_names['my_cond_else'] == 2 + assert AttrScope._subgraph_names['my_cond_pred'] == 2 + assert AttrScope._subgraph_names['my_cond_then'] == 2 + + if __name__ == '__main__': import nose nose.runmodule() From e04565aa9534bdc35bdca57e67ef2307cb473792 Mon Sep 17 00:00:00 2001 From: Kellen Sunderland Date: Wed, 22 Aug 2018 14:02:20 +0200 Subject: [PATCH 027/160] [MXNET-703] Update onnx-tensorrt to most recent version (#12274) Update breaking change to createParser interface. 
--- 3rdparty/onnx-tensorrt | 2 +- src/executor/onnx_to_tensorrt.cc | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/3rdparty/onnx-tensorrt b/3rdparty/onnx-tensorrt index e7be19cff377..3d8ee049970e 160000 --- a/3rdparty/onnx-tensorrt +++ b/3rdparty/onnx-tensorrt @@ -1 +1 @@ -Subproject commit e7be19cff377a95817503e8525e20de34cdc574a +Subproject commit 3d8ee049970e81ff4935cc7f36b653c0b27bcbbc diff --git a/src/executor/onnx_to_tensorrt.cc b/src/executor/onnx_to_tensorrt.cc index 0b4d91be7009..e3a4ae868ce2 100644 --- a/src/executor/onnx_to_tensorrt.cc +++ b/src/executor/onnx_to_tensorrt.cc @@ -91,8 +91,7 @@ nvinfer1::ICudaEngine* onnxToTrtCtx( TRT_Logger trt_logger(verbosity); auto trt_builder = InferObject(nvinfer1::createInferBuilder(trt_logger)); auto trt_network = InferObject(trt_builder->createNetwork()); - auto trt_parser = InferObject(nvonnxparser::createParser( - *trt_network, trt_logger)); + auto trt_parser = InferObject(nvonnxparser::createParser(trt_network.get(), trt_logger)); ::ONNX_NAMESPACE::ModelProto parsed_model; // We check for a valid parse, but the main effect is the side effect // of populating parsed_model From c9274bef7bbd3dce3966f06771b395b542e2cf1b Mon Sep 17 00:00:00 2001 From: Anton Chernov Date: Wed, 22 Aug 2018 15:24:19 +0200 Subject: [PATCH 028/160] Enable OpenMP for armv8 builds (#12273) --- ci/docker/Dockerfile.build.armv8 | 2 +- ci/docker/runtime_functions.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ci/docker/Dockerfile.build.armv8 b/ci/docker/Dockerfile.build.armv8 index 8818ba4e4a16..46cc229d5904 100755 --- a/ci/docker/Dockerfile.build.armv8 +++ b/ci/docker/Dockerfile.build.armv8 @@ -18,7 +18,7 @@ # # Dockerfile to build MXNet for ARM64/ARMv8 -FROM mxnetci/dockcross-linux-arm64:05082018 +FROM mxnetci/dockcross-linux-arm64:08212018 ENV ARCH aarch64 ENV HOSTCC gcc diff --git a/ci/docker/runtime_functions.sh b/ci/docker/runtime_functions.sh index 2341674fa216..35311396e34a 100755 --- a/ci/docker/runtime_functions.sh +++ b/ci/docker/runtime_functions.sh @@ -184,7 +184,7 @@ build_armv8() { -DUSE_CUDA=OFF\ -DSUPPORT_F16C=OFF\ -DUSE_OPENCV=OFF\ - -DUSE_OPENMP=OFF\ + -DUSE_OPENMP=ON \ -DUSE_LAPACK=OFF\ -DUSE_SIGNAL_HANDLER=ON\ -DCMAKE_BUILD_TYPE=Release\ From c1a89488ef551f441dbdf1c5107694680ce1d340 Mon Sep 17 00:00:00 2001 From: access2rohit Date: Wed, 22 Aug 2018 09:22:41 -0700 Subject: [PATCH 029/160] Revert "Disable kvstore test (#11798)" (#12279) This reverts commit 039e68a8c4e99734e86282a85c186b30947020bf. --- Jenkinsfile | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 6757490c803d..50b86ec71900 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -890,10 +890,6 @@ core_logic: { } } }, - /* Disabled due to master build failure: - * http://jenkins.mxnet-ci.amazon-ml.com/blue/organizations/jenkins/incubator-mxnet/detail/master/1221/pipeline/ - * https://github.com/apache/incubator-mxnet/issues/11801 - 'dist-kvstore tests CPU': { node(NODE_LINUX_CPU) { ws('workspace/it-dist-kvstore') { @@ -905,7 +901,7 @@ core_logic: { } } } - }, */ + }, 'Scala: GPU': { node(NODE_LINUX_GPU) { ws('workspace/ut-scala-gpu') { From a3add6af2de740541b97cdaa173ef7f37153ca21 Mon Sep 17 00:00:00 2001 From: Kellen Sunderland Date: Wed, 22 Aug 2018 21:50:43 +0200 Subject: [PATCH 030/160] [MXNET-850] Fix incorrect abs function call (#12262) Addresses associated clang build warning. 
--- src/operator/correlation.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/operator/correlation.cc b/src/operator/correlation.cc index be54c05c8e9e..fe89e8462d12 100644 --- a/src/operator/correlation.cc +++ b/src/operator/correlation.cc @@ -73,8 +73,8 @@ inline void CorrelationForward(const Tensor &out, out[nbatch][top_channel][i][j] += \ tmp1[nbatch][y1+h][x1+w][channel]*tmp2[nbatch][y2+h][x2+w][channel]; else - out[nbatch][top_channel][i][j] += \ - fabsf(tmp1[nbatch][y1+h][x1+w][channel]-tmp2[nbatch][y2+h][x2+w][channel]); + out[nbatch][top_channel][i][j] += std::abs(\ + tmp1[nbatch][y1+h][x1+w][channel]-tmp2[nbatch][y2+h][x2+w][channel]); } out[nbatch][top_channel][i][j] /= sumelems; } From 902c5799826cd90f26dc8e6e81bfb8d609e285aa Mon Sep 17 00:00:00 2001 From: Da Zheng Date: Wed, 22 Aug 2018 13:10:00 -0700 Subject: [PATCH 031/160] Make the output format of control flow operators consistent with their UDFs (#12209) * always return a list in foreach. * update tests. * Revert "update tests." This reverts commit d5d3656a79011b402f760981705d83b187a7cea0. * make the loop output consistent with the UDF output. * make cond consistent. * fix lint. * fix messages. * check foreach states. * allow foreach states to be a symbol or a list of symbols. * support a symbol or a list for while loop states. * fix lint. * support nested list in foreach. * support nested list in while. * fix lint * update doc. * fix. * retrigger * fix. * update doc. --- python/mxnet/ndarray/contrib.py | 124 +++++---- python/mxnet/symbol/contrib.py | 223 ++++++++------- .../unittest/test_contrib_control_flow.py | 263 +++++++++++++++++- 3 files changed, 443 insertions(+), 167 deletions(-) diff --git a/python/mxnet/ndarray/contrib.py b/python/mxnet/ndarray/contrib.py index aae898a3b7a2..67ee7e4d68b7 100644 --- a/python/mxnet/ndarray/contrib.py +++ b/python/mxnet/ndarray/contrib.py @@ -98,6 +98,39 @@ def rand_zipfian(true_classes, num_sampled, range_max, ctx=None): return sampled_classes, expected_count_true, expected_count_sampled # pylint: enable=line-too-long + +def _flatten(args, inout_str): + if isinstance(args, ndarray.NDArray): + return [args], int(0) + + assert isinstance(args, (list, tuple)), \ + "%s must be (nested) list of NDArray, " \ + "but got %s of type %s"%(inout_str, str(args), str(type(args))) + flat = [] + fmts = [] + for i in args: + arg, fmt = _flatten(i, inout_str) + flat.extend(arg) + fmts.append(fmt) + return flat, fmts + + +def _regroup(args, fmt): + if isinstance(fmt, int): + if fmt == 0: + return args[0], args[1:] + return args[:fmt], args[fmt:] + + assert isinstance(args, (list, tuple)), \ + "output must be (nested) list of NDArray, " \ + "but got %s of type %s"%(str(args), str(type(args))) + ret = [] + for i in fmt: + res, args = _regroup(args, i) + ret.append(res) + return ret, args + + def foreach(body, data, init_states): """Run a for loop with user-defined computation over NDArrays on dimension 0. @@ -135,16 +168,16 @@ def foreach(body, data, init_states): Define computation in an iteration. data: an NDArray or a list of NDArrays. The input data. - init_states: an NDArray or a list of NDArrays. + init_states: an NDArray or nested lists of NDArrays. The initial values of the loop states. name: string. The name of the operator. Returns ------- - outputs: an NDArray or a list of NDArrays. + outputs: an NDArray or nested lists of NDArrays. The output data concatenated from the output of all iterations. - states: a list of NDArrays. 
+ states: an NDArray or nested lists of NDArrays. The loop states in the last iteration. Examples @@ -166,9 +199,12 @@ def check_input(inputs, in_type, msg): is_NDArray_or_list = isinstance(inputs, in_type) assert is_NDArray_or_list, msg - check_input(data, ndarray.NDArray, "data should be an NDArray or a list of NDArrays") - check_input(init_states, ndarray.NDArray, - "init_states should be an NDArray or a list of NDArrays") + flatten, _ = _flatten(data, "foreach input") + check_input(flatten, ndarray.NDArray, + "data should be an NDArray or a nested list of NDArrays") + flatten, _ = _flatten(init_states, "foreach states") + check_input(flatten, ndarray.NDArray, + "init_states should be an NDArray or a nested list of NDArrays") not_data_list = isinstance(data, ndarray.NDArray) num_iters = data.shape[0] if not_data_list else data[0].shape[0] @@ -180,16 +216,15 @@ def check_input(inputs, in_type, msg): else: eles = [d[i] for d in data] outs, states = body(eles, states) - outs = _as_list(outs) + outs, out_fmt = _flatten(outs, "foreach output") outputs.append(outs) outputs = zip(*outputs) tmp_outputs = [] for out in outputs: tmp_outputs.append(ndarray.op.stack(*out)) outputs = tmp_outputs + outputs, _ = _regroup(outputs, out_fmt) - if not_data_list and len(outputs) == 1: - outputs = outputs[0] return (outputs, states) def while_loop(cond, func, loop_vars, max_iterations=None): @@ -214,7 +249,8 @@ def while_loop(cond, func, loop_vars, max_iterations=None): Also, `new_loop_vars` should contain the same number of elements as `loop_vars`, and the corresponding element should have the same shape and dtype. The `func` is variadic, and its signature should be - `func(*loop_vars) => (List[NDArray] step_output, List[NDArray] new_loop_vars)`. + `func(*loop_vars) => + (NDArray or nested List[NDArray] step_output, NDArray or nested List[NDArray] new_loop_vars)`. `max_iterations` is a scalar that defines the maximum number of iterations allowed. @@ -241,16 +277,16 @@ def while_loop(cond, func, loop_vars, max_iterations=None): The loop condition. func: a Python function. The loop body. - loop_vars: list of NDArrays. + loop_vars: an NDArray or nested lists of NDArrays. The initial values of the loop variables. max_iterations: a python int. Maximum number of iterations. 
Returns ------ - outputs: list of NDArrays + outputs: an NDArray or nested lists of NDArrays stacked output from each step - states: list of NDArrays + states: an NDArray or nested lists of NDArrays final state Examples @@ -291,21 +327,6 @@ def _to_python_scalar(inputs, type_, name): raise ValueError("Cannot convert %s to python %s" % (name, type_.__name__)) return inputs - def _to_ndarray_tuple(inputs, name): - """Converts "inputs", possibly a single mxnet NDArray, a list of mxnet NDArray, - a tuple of mxnet NDArray, into a tuple of NDArray - """ - if isinstance(inputs, list): - inputs = tuple(inputs) - if isinstance(inputs, ndarray.NDArray): - inputs = (inputs, ) - if not isinstance(inputs, tuple): - raise ValueError("%s must be an NDArray, or a tuple or list of NDArrays" % (name, )) - for item in inputs: - if not isinstance(item, ndarray.NDArray): - raise ValueError("%s must be an NDArray, or a tuple or list of NDArrays" % (name, )) - return inputs - def _func_wrapper(loop_vars): """This wrapper unifies "func: loop_vars -> new_loop_vars" @@ -317,8 +338,11 @@ def _func_wrapper(loop_vars): step_output = [] if new_loop_vars is None: new_loop_vars = [] - step_output = _to_ndarray_tuple(step_output, "step_output") - new_loop_vars = _to_ndarray_tuple(new_loop_vars, "new_loop_vars") + if isinstance(step_output, tuple): + step_output = list(step_output) + if isinstance(new_loop_vars, tuple): + new_loop_vars = list(new_loop_vars) + new_loop_vars = _as_list(new_loop_vars) if len(loop_vars) != len(new_loop_vars): raise ValueError("The length of loop_vars should be consistent during the loop") return step_output, new_loop_vars @@ -326,7 +350,6 @@ def _func_wrapper(loop_vars): if max_iterations is None: raise ValueError("max_iterations should be specified") max_iterations = _to_python_scalar(max_iterations, int, "max_iteration") - loop_vars = _to_ndarray_tuple(loop_vars, "loop_vars") # It should be work as fine if loop_vars are empty I guess, # but it is semantically unnecessary to include this case. if len(loop_vars) == 0: @@ -334,9 +357,14 @@ def _func_wrapper(loop_vars): steps = 0 outputs = [] + # there might not be an iteration. + out_fmt = None + not_loop_var_list = isinstance(loop_vars, ndarray.NDArray) + loop_vars = _as_list(loop_vars) while steps < max_iterations and \ _to_python_scalar(cond(*loop_vars), bool, "Return value of cond"): # loop condition step_output, loop_vars = _func_wrapper(loop_vars) + step_output, out_fmt = _flatten(step_output, "while output") outputs.append(step_output) steps += 1 if len(outputs) != steps or len(step_output) != len(outputs[0]): @@ -361,7 +389,11 @@ def _func_wrapper(loop_vars): ["Shapes of %d-th elements in step_outputs are inconsistent, which are:" % i_th] + [" Step %d, shape is %s" % (i, str(x.shape)) for i, x in enumerate(items)] )) - return stacked_outputs, list(loop_vars) + if out_fmt is not None: + stacked_outputs, _ = _regroup(stacked_outputs, out_fmt) + if not_loop_var_list: + loop_vars = loop_vars[0] + return stacked_outputs, loop_vars def cond(pred, then_func, else_func): """Run an if-then-else using user-defined condition and computation @@ -375,12 +407,12 @@ def cond(pred, then_func, else_func): `then_func` is a user-defined function, used as computation of the then branch. It produces `outputs`, which is a list of NDArrays. The signature of `then_func` should be - `then_func() => List[NDArray]`. + `then_func() => NDArray or nested List[NDArray]`. `else_func` is a user-defined function, used as computation of the else branch. 
It produces `outputs`, which is a list of NDArrays. The signature of `else_func` should be - `else_func() => List[NDArray]`. + `else_func() => NDArray or nested List[NDArray]`. The `outputs` produces by `then_func` and `else_func` should have the same number of elements, all of which should be in the same shape, of the same dtype and stype. @@ -398,7 +430,7 @@ def cond(pred, then_func, else_func): Returns ------- - outputs: a list of NDArrays, representing the result of computation. + outputs: an NDArray or nested lists of NDArrays, representing the result of computation. Examples -------- @@ -423,26 +455,8 @@ def _to_python_scalar(inputs, type_, name): raise ValueError("Cannot convert %s to python %s" % (name, type_.__name__)) return inputs - def _to_ndarray_tuple(inputs, name): - """Converts "inputs", possibly a single mxnet NDArray, a list of mxnet NDArray, - a tuple of mxnet NDArray, into a tuple of NDArray - """ - if isinstance(inputs, list): - inputs = tuple(inputs) - if isinstance(inputs, ndarray.NDArray): - inputs = (inputs, ) - if not isinstance(inputs, tuple): - raise ValueError("%s must be an NDArray, or a tuple or list of NDArrays" % (name, )) - for item in inputs: - if not isinstance(item, ndarray.NDArray): - raise ValueError("%s must be an NDArray, or a tuple or list of NDArrays" % (name, )) - return inputs - branch = _to_python_scalar(pred, bool, "pred") if branch: - outputs = then_func() - outputs = _to_ndarray_tuple(outputs, "outputs of then_func") + return then_func() else: - outputs = else_func() - outputs = _to_ndarray_tuple(outputs, "outputs of else_func") - return list(outputs) + return else_func() diff --git a/python/mxnet/symbol/contrib.py b/python/mxnet/symbol/contrib.py index f40a372fdbcd..3c81dcf9ee0a 100644 --- a/python/mxnet/symbol/contrib.py +++ b/python/mxnet/symbol/contrib.py @@ -100,6 +100,41 @@ def rand_zipfian(true_classes, num_sampled, range_max): expected_count_sampled = expected_prob_sampled * num_sampled return sampled_classes, expected_count_true, expected_count_sampled + +def _flatten(args, inout_str): + if isinstance(args, symbol.Symbol): + length = len(args.list_outputs()) + length = length if length > 1 else 0 + return [args], int(length) + + assert isinstance(args, (list, tuple)), \ + "%s must be (nested) list of Symbol, " \ + "but got %s of type %s"%(inout_str, str(args), str(type(args))) + flat = [] + fmts = [] + for i in args: + arg, fmt = _flatten(i, inout_str) + flat.extend(arg) + fmts.append(fmt) + return flat, fmts + + +def _regroup(args, fmt): + if isinstance(fmt, int): + if fmt == 0: + return args[0], args[1:] + return args[:fmt], args[fmt:] + + assert isinstance(args, (list, tuple)), \ + "output must be (nested) list of Symbol, " \ + "but got %s of type %s"%(str(args), str(type(args))) + ret = [] + for i in fmt: + res, args = _regroup(args, i) + ret.append(res) + return ret, args + + def _get_graph_inputs(subg): num_handles = ctypes.c_int(0) handles = ctypes.POINTER(SymbolHandle)() @@ -160,6 +195,17 @@ def _construct_subgraph(sym_out, sym_states, name): flat_out.append(s) return symbol.Group(flat_out) +def _check_data(inputs, in_type, msg): + is_NDArray_or_list = True + if isinstance(inputs, list): + for i in inputs: + if not isinstance(i, in_type): + is_NDArray_or_list = False + break + else: + is_NDArray_or_list = isinstance(inputs, in_type) + assert is_NDArray_or_list, msg + def foreach(body, data, init_states, name="foreach"): """Run a for loop with user-defined computation over Symbols on dimension 0. 
@@ -201,16 +247,16 @@ def foreach(body, data, init_states, name="foreach"): Define computation in an iteration. data: a symbol or a list of symbols. The input data. - init_states: a symbol or a list of symbols. + init_states: a Symbol or nested lists of symbols. The initial values of the loop states. name: string. The name of the operator. Returns ------- - outputs: a Symbol or a list of Symbols. + outputs: a Symbol or nested lists of Symbols. The output data concatenated from the output of all iterations. - states: a list of Symbols. + states: a Symbol or nested lists of Symbols. The loop states in the last iteration. Examples @@ -221,20 +267,12 @@ def foreach(body, data, init_states, name="foreach"): >>> outs, states = mx.sym.contrib.foreach(step, data, states) """ - def check_data(inputs, in_type, msg): - is_NDArray_or_list = True - if isinstance(inputs, list): - for i in inputs: - if not isinstance(i, in_type): - is_NDArray_or_list = False - break - else: - is_NDArray_or_list = isinstance(inputs, in_type) - assert is_NDArray_or_list, msg - - check_data(data, symbol.Symbol, "data should be a symbol or a list of symbols") - check_data(init_states, symbol.Symbol, "init_states should be a symbol or a list of symbols") - not_state_list = isinstance(init_states, symbol.Symbol) + flatten_data, data_fmt = _flatten(data, "foreach input") + _check_data(flatten_data, symbol.Symbol, + "data should be a symbol or a nested list of symbols") + init_flatten_states, init_state_fmt = _flatten(init_states, "foreach states") + _check_data(init_flatten_states, symbol.Symbol, + "init_states should be a symbol or a nested list of symbols") # If the input python function references to the symbols outside # the python function, we need to prune the computation graph constructed from @@ -242,24 +280,19 @@ def check_data(inputs, in_type, msg): # with AttrScope and prune the nodes without the special attribute. 
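Most of the new bookkeeping in this patch funnels through the `_flatten`/`_regroup` pair added to both `ndarray.contrib` and `symbol.contrib`: `_flatten` turns an arbitrarily nested list of arrays into a flat list plus a format descriptor, and `_regroup` rebuilds the original nesting from that descriptor. The sketch below shows the round trip with the NDArray variants and the resulting user-visible behaviour of `foreach` with nested states; these are private helpers and the toy step function is invented, so treat it purely as an illustration of the contract, assuming a build that contains this patch:

```python
import mxnet as mx
from mxnet.ndarray.contrib import _flatten, _regroup   # private helpers added above

a, b, c = mx.nd.zeros((2,)), mx.nd.ones((2,)), mx.nd.ones((2,)) * 2
flat, fmt = _flatten([a, [b, c]], "demo input")   # flat == [a, b, c], fmt == [0, [0, 0]]
rebuilt, leftover = _regroup(flat, fmt)           # rebuilt == [a, [b, c]], leftover == []

# User-visible effect: loop states can now be nested, and foreach hands back
# exactly the structure the step function produced.
def step(data, states):
    [h, [c1, c2]] = states
    return data + h, [h + 1, [c1 * 2, c2 * 2]]

outs, final_states = mx.nd.contrib.foreach(
    step, mx.nd.arange(6).reshape((3, 2)),
    [mx.nd.zeros((2,)), [mx.nd.ones((2,)), mx.nd.ones((2,))]])
# outs has shape (3, 2); final_states keeps the [h, [c1, c2]] nesting
```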
name = _get_unique_subgraph_name(name) with AttrScope(__subgraph_name__=name): - if isinstance(data, list): - in_eles = [symbol.var(sym.name) for sym in data] - else: - in_eles = symbol.var(data.name) - if isinstance(init_states, list): - states = [symbol.var(s.name) for s in init_states] - else: - states = symbol.var(init_states.name) + in_eles = [symbol.var(sym.name) for sym in flatten_data] + in_eles, _ = _regroup(in_eles, data_fmt) + states = [symbol.var(s.name) for s in init_flatten_states] + states, _ = _regroup(states, copy.deepcopy(init_state_fmt)) sym_out, sym_states = body(in_eles, states) - check_data(sym_out, symbol.Symbol, - "the output should be an NDArray or a list of NDArrays") - check_data(sym_states, symbol.Symbol, - "the output states should be an NDArray or a list of NDArrays") - if isinstance(sym_states, list): - assert isinstance(init_states, list) and len(sym_states) == len(init_states), \ - "the number of output states (%d) should be the same as input states (%d)" \ - % (len(sym_states), len(init_states)) + sym_out, out_fmt = _flatten(sym_out, "foreach output") + sym_states, state_fmt = _flatten(sym_states, "foreach loop_vars") + assert init_state_fmt == state_fmt, "The input and output loop_vars have different format" + _check_data(sym_out, symbol.Symbol, + "the output should be an NDArray or a nested list of NDArrays") + _check_data(sym_states, symbol.Symbol, + "the output states should be an NDArray or a nested list of NDArrays") num_out_data = len(sym_out) num_states = len(sym_states) num_outputs = num_out_data + num_states @@ -277,17 +310,15 @@ def check_data(inputs, in_type, msg): gin_names = input_syms.keys() # This array contains the symbols for the inputs of foreach. # They are ordered according to the inputs of the subgraph. - init_states = _as_list(init_states) - state_names = [sym.name for sym in init_states] - data_syms = _as_list(data) - data_names = [sym.name for sym in data_syms] + state_names = [sym.name for sym in init_flatten_states] + data_names = [sym.name for sym in flatten_data] cut_var_map = {sym.list_outputs()[0]:sym for sym in cut_syms} cut_var_names = cut_var_map.keys() subg_input_names = g.list_inputs() # ordered_ins contains input symbols in the following order: # data_syms, state_syms, followed by cut_vars and vars in the closure. - ordered_ins = data_syms + ordered_ins = [x for x in flatten_data] # this defines the location of data_syms in the list of subgraph inputs in_data_locs = [] for dname in data_names: @@ -297,7 +328,7 @@ def check_data(inputs, in_type, msg): else: raise AssertionError("the data arrays have to be used in the loop body") - ordered_ins.extend(init_states) + ordered_ins.extend(init_flatten_states) # this defines the location of state_syms in the list of subgraph inputs. 
in_state_locs = [] for sname in state_names: @@ -325,22 +356,14 @@ def check_data(inputs, in_type, msg): ret = symbol._internal._foreach(g, *ordered_ins, num_outputs=num_outputs, num_out_data=num_out_data, in_state_locs=in_state_locs, in_data_locs=in_data_locs, remain_locs=remain_locs) - if num_outputs - num_states > 1: - outs = [] - for i in range(num_outputs - num_states): - outs.append(ret[i]) - elif num_outputs - num_states == 1: - outs = ret[0] - else: - outs = [] + outs = [] + for i in range(num_outputs - num_states): + outs.append(ret[i]) + outs, _ = _regroup(outs, out_fmt) states = [] for i in range(num_states): states.append(ret[num_outputs - num_states + i]) - - if not_state_list: - # If there is only one input state, there should be only one output state. - assert len(states) == 1 - states = states[0] + states, _ = _regroup(states, state_fmt) return (outs, states) @@ -350,7 +373,7 @@ def while_loop(cond, func, loop_vars, max_iterations=None, name="while_loop"): This operator simulates a while loop which iterately does customized computation as long as the condition is satisfied. - `loop_vars` is a list of Symbols on which the computation uses. + `loop_vars` is a Symbol or nested lists of Symbols on which the computation uses. `cond` is a user-defined function, used as the loop condition. It consumes `loop_vars`, and produces a scalar MXNet symbol, @@ -366,7 +389,8 @@ def while_loop(cond, func, loop_vars, max_iterations=None, name="while_loop"): Also, `new_loop_vars` should contain the same number of elements as `loop_vars`, and the corresponding element should have the same shape and dtype. The `func` is variadic, and its signature should be - `func(*loop_vars) => (List[Symbol] step_output, List[Symbol] new_loop_vars)`. + `func(*loop_vars) => + (Symbol or nested List[Symbol] step_output, Symbol or nested List[Symbol] new_loop_vars)`. `max_iterations` is a scalar that defines the maximum number of iterations allowed. @@ -395,16 +419,16 @@ def while_loop(cond, func, loop_vars, max_iterations=None, name="while_loop"): The loop condition. func: a Python function. The loop body. - loop_vars: list of Symbol. + loop_vars: a Symbol or nested lists of Symbol. The initial values of the loop variables. max_iterations: a python int. Maximum number of iterations. 
Returns ------ - outputs: list of Symbols + outputs: a Symbol or nested lists of Symbols stacked output from each step - states: list of Symbols + states: a Symbol or nested lists of Symbols final state Examples @@ -426,26 +450,11 @@ def _to_python_scalar(inputs, type_, name): raise ValueError("Cannot convert %s to python %s" % (name, type_.__name__)) return inputs - def _to_symbol_tuple(inputs, name): - """Converts "inputs", possibly a single mxnet Symbol, a list of mxnet Symbol, - a tuple of mxnet Symbol, into a tuple of Symbol - """ - if isinstance(inputs, list): - inputs = tuple(inputs) - if isinstance(inputs, Symbol): - inputs = (inputs, ) - if not isinstance(inputs, tuple): - raise ValueError("%s must be a Symbol, or a tuple or list of Symbol" % (name, )) - for item in inputs: - if not isinstance(item, Symbol): - raise ValueError("%s must be a Symbol, or a tuple or list of Symbol" % (name, )) - return inputs - def _cond_wrapper(loop_vars): result = cond(*loop_vars) if not isinstance(result, Symbol): raise ValueError("Return of cond must be a Symbol") - return [], [result] + return [], [result], [], [] def _func_wrapper(loop_vars): """This wrapper unifies @@ -458,19 +467,25 @@ def _func_wrapper(loop_vars): step_output = [] if new_loop_vars is None: new_loop_vars = [] - step_output = _to_symbol_tuple(step_output, "step_output") - new_loop_vars = _to_symbol_tuple(new_loop_vars, "new_loop_vars") + if isinstance(step_output, tuple): + step_output = list(step_output) + if isinstance(new_loop_vars, tuple): + new_loop_vars = list(new_loop_vars) + step_output, out_fmt = _flatten(step_output, "while output") + new_loop_vars, var_fmt = _flatten(new_loop_vars, "while loop_vars") if len(loop_vars) != len(new_loop_vars): raise ValueError("The number of loop_vars should be consistent during the loop") - return list(step_output), list(new_loop_vars) + return step_output, new_loop_vars, out_fmt, var_fmt def _create_subgraph(graph_vars, graph_func, subgraph_name): subgraph_name = _get_unique_subgraph_name(subgraph_name) with AttrScope(__subgraph_name__=subgraph_name): # create new variables with the same name, # them feed them to the given func + graph_vars, var_fmt = _flatten(graph_vars, "while loop_vars") new_graph_vars = [symbol.var(sym.name) for sym in graph_vars] - outputs, final_state = graph_func(new_graph_vars) + new_graph_vars, _ = _regroup(new_graph_vars, var_fmt) + outputs, final_state, out_fmt, var_fmt = graph_func(new_graph_vars) # first `num_out_data` elements belong to `outputs` # other elements belong to `final_state` num_out_data = len(outputs) @@ -484,7 +499,11 @@ def _create_subgraph(graph_vars, graph_func, subgraph_name): make_identity = lambda x: symbol.op.identity(x) if in_input(x) or not in_graph(x) \ else x graph = symbol.Group(list(map(make_identity, outputs + final_state))) - return graph, num_out_data, num_outputs + return graph, num_out_data, num_outputs, out_fmt, var_fmt + + flatten_loop_vars, init_loop_var_fmt = _flatten(loop_vars, "while loop_vars") + _check_data(flatten_loop_vars, symbol.Symbol, + "loop_vars should be a symbol or a nested list of symbols") def _union_inputs(*graphs): # Given a list of graphs, each whose inputs are either from loop_vars or other variables. 
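`while_loop` follows the same convention: `loop_vars` may be a nested list, `cond` and `func` receive the variables positionally with that nesting preserved, and both the step outputs and the final loop vars are regrouped before being returned. A small sketch in the spirit of the new tests (names are illustrative; `i` is assumed to hold a single element so the condition evaluates to a scalar):

```python
import mxnet as mx

i = mx.sym.var('i')
s = mx.sym.var('s')

def cond(i, nested):
    # nested arrives as [s] because loop_vars below is [i, [s]]
    return i < 5

def step(i, nested):
    (s,) = nested
    return [i * 2], [i + 1, [s + i]]     # (step_output, new_loop_vars)

outs, final_vars = mx.sym.contrib.while_loop(cond, step, [i, [s]], max_iterations=5)
# outs is a one-element list of stacked outputs;
# final_vars keeps the [Symbol, [Symbol]] nesting of loop_vars
```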
@@ -498,16 +517,16 @@ def _union_inputs(*graphs): # to a `loc`, where inputs[loc] = sym for graph in graphs: # some loop_vars are inputs to `graph`, some are not - name_to_loop_vars = {sym.name: sym for sym in loop_vars} + name_to_loop_vars = {sym.name: sym for sym in flatten_loop_vars} # other inputs to `graph` created by cut_graph name_to_cut_g_syms = {sym.list_outputs()[0]: sym for sym in _cut_subgraph(graph)} # input_syms: all inputs to the `graph` name_to_input_syms = {sym.name: sym for sym in _get_graph_inputs(graph)} # also we collect the mapping from var's name to var's loc in loop_vars - name_to_var_locs = {sym.name: i for i, sym in enumerate(loop_vars)} + name_to_var_locs = {sym.name: i for i, sym in enumerate(flatten_loop_vars)} # collect arguments for each subgraph input_locs = [] # results from the second step - var_locs = [-1] * len(loop_vars) # results from the third step + var_locs = [-1] * len(flatten_loop_vars) # results from the third step for name in graph.list_inputs(): assert name in name_to_input_syms # it should obviously hold # name -> sym @@ -533,18 +552,17 @@ def _union_inputs(*graphs): if max_iterations is None: raise ValueError("max_iterations should be specified") max_iterations = _to_python_scalar(max_iterations, int, "max_iteration") - loop_vars = _to_symbol_tuple(loop_vars, "loop_vars") # It should be work as fine if loop_vars are empty I guess, # but it is semantically unnecessary to include this case. if len(loop_vars) == 0: raise ValueError("loop_vars should contain at least one element") # create graph for `cond' - cond_g, num_out_data, num_outputs = \ + cond_g, num_out_data, num_outputs, _, _ = \ _create_subgraph(loop_vars, _cond_wrapper, name + "_cond") assert num_out_data == 0 assert num_outputs == 1 # create graph for `func` - func_g, num_out_data, num_outputs = \ + func_g, num_out_data, num_outputs, out_fmt, _ = \ _create_subgraph(loop_vars, _func_wrapper, name + "_func") # find symbols used in either cond_g or func_g input_syms, ((cond_input_locs, _), (func_input_locs, func_var_locs)) = \ @@ -553,7 +571,6 @@ def _union_inputs(*graphs): if loc == -1: raise ValueError("The %d-th loop_var doesn't involve into the computation" % i_th) result = symbol._internal._while_loop( - # [cond, func_g, *input_syms] cond_g, func_g, *input_syms, @@ -565,7 +582,9 @@ def _union_inputs(*graphs): num_outputs=num_outputs ) outputs = [result[i] for i in range(num_out_data)] + outputs, _ = _regroup(outputs, out_fmt) final_loop_vars = [result[i] for i in range(num_out_data, num_outputs)] + final_loop_vars, _ = _regroup(final_loop_vars, init_loop_var_fmt) return outputs, final_loop_vars def cond(pred, then_func, else_func, name="cond"): @@ -580,12 +599,12 @@ def cond(pred, then_func, else_func, name="cond"): `then_func` is a user-defined function, used as computation of the then branch. It produces `outputs`, which is a list of Symbols. The signature of `then_func` should be - `then_func() => List[Symbol]`. + `then_func() => nested List[Symbol]`. `else_func` is a user-defined function, used as computation of the else branch. It produces `outputs`, which is a list of Symbols. The signature of `else_func` should be - `else_func() => List[Symbol]`. + `else_func() => nested List[Symbol]`. The `outputs` produces by `then_func` and `else_func` should have the same number of elements, all of which should be in the same shape, of the same dtype and stype. 
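`cond` now mirrors the structure of what `then_func` returns: a branch that returns a single Symbol yields a single Symbol, and a branch that returns a list yields a list. A brief sketch built on the docstring's own variables:

```python
import mxnet as mx

a = mx.sym.var('a')
b = mx.sym.var('b')
pred = a * b < 5

# single-Symbol branches -> a single Symbol comes back
out = mx.sym.contrib.cond(pred, lambda: a * 2, lambda: a * 3)

# list branches -> a list of Symbols comes back, following then_func's format
outs = mx.sym.contrib.cond(pred, lambda: [a + b, a - b], lambda: [a * b, a - b])
```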
@@ -603,7 +622,7 @@ def cond(pred, then_func, else_func, name="cond"): Returns ------- - outputs: a list of Symbols, representing the result of computation. + outputs: a Symbol or nested lists of Symbols, representing the result of computation. Examples -------- @@ -613,20 +632,6 @@ def cond(pred, then_func, else_func, name="cond"): >>> else_func = lambda: (a - 5) * (b - 5) >>> outputs = mx.sym.contrib.cond(pred, then_func, else_func) """ - def _to_symbol_tuple(inputs, name): - """Converts "inputs", possibly a single mxnet Symbol, a list of mxnet Symbol, - a tuple of mxnet Symbol, into a tuple of Symbol - """ - if isinstance(inputs, list): - inputs = tuple(inputs) - if isinstance(inputs, Symbol): - inputs = (inputs, ) - if not isinstance(inputs, tuple): - raise ValueError("%s must be a Symbol, or a tuple or list of Symbol" % (name, )) - for item in inputs: - if not isinstance(item, Symbol): - raise ValueError("%s must be a Symbol, or a tuple or list of Symbol" % (name, )) - return inputs def _create_subgraph(graph_vars, graph_func, subgraph_name): subgraph_name = _get_unique_subgraph_name(subgraph_name) @@ -635,7 +640,7 @@ def _create_subgraph(graph_vars, graph_func, subgraph_name): # them feed them to the given func new_graph_vars = [symbol.var(sym.name) for sym in graph_vars] outputs = graph_func(*new_graph_vars) - outputs = _to_symbol_tuple(outputs, "outputs") + outputs, out_fmt = _flatten(outputs, "cond outputs") num_outputs = len(outputs) # nnvm cut-graph does not allow inputs and outputs overlap # so we calculate the name of inputs, and copy outputs once it overlaps with inputs @@ -646,7 +651,7 @@ def _create_subgraph(graph_vars, graph_func, subgraph_name): make_identity = lambda x: symbol.op.identity(x) if in_input(x) or not in_graph(x) \ else x graph = symbol.Group(list(map(make_identity, outputs))) - return graph, num_outputs + return graph, num_outputs, out_fmt def _union_inputs(*graphs): # Given a list of graphs, each whose inputs are either from input_vars or other variables. 
@@ -688,13 +693,13 @@ def _union_inputs(*graphs): return inputs, locs inputs = [] # create graph for `cond_func' - cond_g, cond_num_outputs = _create_subgraph(inputs, lambda: pred, name + "_pred") + cond_g, cond_num_outputs, _ = _create_subgraph(inputs, lambda: pred, name + "_pred") if cond_num_outputs != 1: raise ValueError("pred should always be a single output") # create graph for `then` - then_g, then_num_outputs = _create_subgraph(inputs, then_func, name + "_then") + then_g, then_num_outputs, then_fmt = _create_subgraph(inputs, then_func, name + "_then") # create graph for `else` - else_g, else_num_outputs = _create_subgraph(inputs, else_func, name + "_else") + else_g, else_num_outputs, _ = _create_subgraph(inputs, else_func, name + "_else") if then_num_outputs != else_num_outputs: raise ValueError("Number of outputs differs between then-branch and else-branch") # find symbols used in either cond_g or func_g @@ -711,4 +716,6 @@ def _union_inputs(*graphs): else_input_locs=else_input_locs, num_outputs=then_num_outputs ) - return [result[i] for i in range(then_num_outputs)] + outputs = [result[i] for i in range(then_num_outputs)] + outputs, _ = _regroup(outputs, then_fmt) + return outputs diff --git a/tests/python/unittest/test_contrib_control_flow.py b/tests/python/unittest/test_contrib_control_flow.py index 54f22a8fd6a7..eadd63c94240 100644 --- a/tests/python/unittest/test_contrib_control_flow.py +++ b/tests/python/unittest/test_contrib_control_flow.py @@ -101,7 +101,7 @@ def hybrid_forward(self, F, *loop_vars): ) if hybridize: model.hybridize() - (outputs, ), (result_i, result_s) = model( + outputs, (result_i, result_s) = model( mx.nd.array([1], dtype="int64"), # i mx.nd.array([0], dtype="int64"), # s ) @@ -116,7 +116,7 @@ def hybrid_forward(self, F, *loop_vars): ) if hybridize: model.hybridize() - (outputs, ), (result_i, result_s, _) = model( + outputs, (result_i, result_s, _) = model( mx.nd.array([1], dtype="int64"), # i mx.nd.array([0], dtype="int64"), # s mx.nd.array([1], dtype="int64"), # true @@ -175,6 +175,8 @@ def _get_imperative_result(n_steps): loop_vars=loop_vars, max_iterations=max_iterations, ) + outputs = _as_list(outputs) + final_loop_vars = _as_list(final_loop_vars) outputs = [x[: n_steps] for x in outputs] out_grads = _create_arrays(x.shape for x in outputs) \ + _create_arrays(x.shape for x in final_loop_vars) @@ -203,6 +205,8 @@ def _zeros_like_dict(name_list): loop_vars=loop_syms, max_iterations=max_iterations, ) + outputs = _as_list(outputs) + final_loop_syms = _as_list(final_loop_syms) if n_steps == 0: outputs = [] else: @@ -1016,6 +1020,7 @@ def _get_imperative_result(): then_func=lambda: then_func(input_vars, free_vars), else_func=lambda: else_func(input_vars, free_vars), ) + outputs = _as_list(outputs) outputs = [x * 2 for x in outputs] grads = [] if is_train: @@ -1032,6 +1037,7 @@ def _get_symbolic_result(out_grads): then_func=lambda: then_func(_input_syms, _free_syms), else_func=lambda: else_func(_input_syms, _free_syms), ) + outputs_sym = _as_list(outputs_sym) outputs_sym = [x * 2 for x in outputs_sym] outputs_sym = mx.sym.Group(outputs_sym) executor = outputs_sym.bind( @@ -1741,12 +1747,12 @@ class TestLayer(gluon.HybridBlock): def __init__(self, prefix=None, params=None): super(TestLayer, self).__init__(prefix=prefix, params=params) def hybrid_forward(self, F, data): - (data1, ) = F.contrib.cond( + data1 = F.contrib.cond( data > 0.5, then_func=lambda: data * 2, else_func=lambda: data * 3, ) - (data2, ) = F.contrib.cond( + data2 = F.contrib.cond( data1 > 
0.5, then_func=lambda: data1 * 2, else_func=lambda: data1 * 3, @@ -1806,6 +1812,255 @@ def hybrid_forward(self, F, data): assert AttrScope._subgraph_names['my_cond_then'] == 2 +def test_output_format_foreach(): + class TestLayer1(gluon.HybridBlock): + def __init__(self, step, prefix=None, params=None): + super(TestLayer1, self).__init__(prefix=prefix, params=params) + self.step = step + def hybrid_forward(self, F, ins, states): + out, states = F.contrib.foreach(self.step, ins, states) + return out, states + + def step1(data, state): + return data, state + def step2(data, state): + return [data], state + def step3(data, state): + if isinstance(state, list): + return [], [state[0] + data] + else: + return [], state + data + def step4(data, state): + if isinstance(state, list): + return [data, state[0]], state + else: + return [data, state], state + + steps = [step1, step2, step3, step4] + data = mx.nd.normal(loc=0, scale=1, shape=(10, 2)) + state = mx.nd.normal(loc=0, scale=1, shape=(2)) + for step in steps: + layer1 = TestLayer1(step) + layer1.initialize(ctx=default_context()) + layer2 = TestLayer1(step) + layer2.initialize(ctx=default_context()) + layer2.hybridize() + out1, state1 = layer1(data, [state]) + out2, state2 = layer2(data, [state]) + step_out, step_state = step(data, [state]) + assert type(out1) == type(step_out) + assert type(out2) == type(step_out) + assert type(state1) == type(step_state) + assert type(state2) == type(step_state) + out1 = _as_list(out1) + out2 = _as_list(out2) + state1 = _as_list(state1) + state2 = _as_list(state2) + for i in range(len(out1)): + assert_almost_equal(out1[i].asnumpy(), out2[i].asnumpy(), rtol=0.001, atol=0.0001) + for i in range(len(state1)): + assert_almost_equal(state1[i].asnumpy(), state2[i].asnumpy(), rtol=0.001, atol=0.0001) + + layer1 = TestLayer1(step) + layer1.initialize(ctx=default_context()) + layer2 = TestLayer1(step) + layer2.initialize(ctx=default_context()) + layer2.hybridize() + out1, state1 = layer1(data, state) + out2, state2 = layer2(data, state) + step_out, step_state = step(data, state) + assert type(out1) == type(step_out) + assert type(out2) == type(step_out) + assert type(state1) == type(step_state) + assert type(state2) == type(step_state) + out1 = _as_list(out1) + out2 = _as_list(out2) + state1 = _as_list(state1) + state2 = _as_list(state2) + for i in range(len(out1)): + assert_almost_equal(out1[i].asnumpy(), out2[i].asnumpy(), rtol=0.001, atol=0.0001) + for i in range(len(state1)): + assert_almost_equal(state1[i].asnumpy(), state2[i].asnumpy(), rtol=0.001, atol=0.0001) + + if step == step3: + continue + layer1 = TestLayer1(step) + layer1.initialize(ctx=default_context()) + layer2 = TestLayer1(step) + layer2.initialize(ctx=default_context()) + layer2.hybridize() + out1, state1 = layer1(data, [state, [state + 1]]) + out2, state2 = layer2(data, [state, [state + 1]]) + step_out, step_state = step(data, [state, [state + 1]]) + assert type(out1) == type(step_out) + assert type(out2) == type(step_out) + assert type(state1) == type(step_state) + assert type(state2) == type(step_state) + out1 = _as_list(out1) + out2 = _as_list(out2) + state1 = _as_list(state1) + state2 = _as_list(state2) + for i in range(len(out1)): + assert_almost_equal(out1[i].asnumpy(), out2[i].asnumpy(), rtol=0.001, atol=0.0001) + for i in range(len(state1)): + if isinstance(state1[i], list): + assert_almost_equal(state1[i][0].asnumpy(), state2[i][0].asnumpy(), + rtol=0.001, atol=0.0001) + else: + assert_almost_equal(state1[i].asnumpy(), 
state2[i].asnumpy(), + rtol=0.001, atol=0.0001) + + +def test_output_format_while(): + class TestLayer1(gluon.HybridBlock): + def __init__(self, step, use_list, nested_list=False, prefix=None, params=None): + super(TestLayer1, self).__init__(prefix=prefix, params=params) + self.step = step + self.use_list = use_list + self.nested_list = nested_list + def hybrid_forward(self, F, states): + def cond(state1): + scalar = state1.slice_axis(axis=0, begin=0, end=1) + return scalar == scalar + cond_func = cond + if self.use_list: + states = [states] + elif self.nested_list: + def cond2(state1, state2): + scalar = state1.slice_axis(axis=0, begin=0, end=1) + return scalar == scalar + cond_func = cond2 + states = [states, [states + 1]] + out, states = F.contrib.while_loop(cond_func, self.step, states, max_iterations=5) + return out, states + + def step1(state): + return state, state + def step2(state): + if isinstance(state, list): + return state, state + else: + return [state], state + def step3(state): + return [], state + + steps = [step1, step2, step3] + state = mx.nd.normal(loc=0, scale=1, shape=(2)) + for step in steps: + layer1 = TestLayer1(step, False) + layer1.initialize(ctx=default_context()) + layer2 = TestLayer1(step, False) + layer2.initialize(ctx=default_context()) + layer2.hybridize() + out1, state1 = layer1(state) + out2, state2 = layer2(state) + assert type(out1) == type(out2) + assert type(state1) == type(state1) + out1 = _as_list(out1) + out2 = _as_list(out2) + state1 = _as_list(state1) + state2 = _as_list(state2) + for i in range(len(out1)): + assert_almost_equal(out1[i].asnumpy(), out2[i].asnumpy(), rtol=0.001, atol=0.0001) + for i in range(len(state1)): + assert_almost_equal(state1[i].asnumpy(), state2[i].asnumpy(), rtol=0.001, atol=0.0001) + + layer1 = TestLayer1(step, True) + layer1.initialize(ctx=default_context()) + layer2 = TestLayer1(step, True) + layer2.initialize(ctx=default_context()) + layer2.hybridize() + out1, state1 = layer1(state) + out2, state2 = layer2(state) + assert type(out1) == type(out2) + assert type(state1) == type(state2) + out1 = _as_list(out1) + out2 = _as_list(out2) + state1 = _as_list(state1) + state2 = _as_list(state2) + for i in range(len(out1)): + assert_almost_equal(out1[i].asnumpy(), out2[i].asnumpy(), rtol=0.001, atol=0.0001) + for i in range(len(state1)): + assert_almost_equal(state1[i].asnumpy(), state2[i].asnumpy(), rtol=0.001, atol=0.0001) + + def step4(state, state2): + states = _as_list(state) + states.append(state2) + return state, states + def step5(state, state2): + states = _as_list(state) + states.append(state2) + if isinstance(state, list): + return state, states + else: + return [state], states + def step6(state, state2): + states = _as_list(state) + states.append(state2) + return [], states + + steps = [step4, step5, step6] + for step in steps: + layer1 = TestLayer1(step, False, True) + layer1.initialize(ctx=default_context()) + layer2 = TestLayer1(step, False, True) + layer2.initialize(ctx=default_context()) + layer2.hybridize() + out1, state1 = layer1(state) + out2, state2 = layer2(state) + assert type(out1) == type(out2) + assert type(state1) == type(state2) + out1 = _as_list(out1) + out2 = _as_list(out2) + state1 = _as_list(state1) + state2 = _as_list(state2) + for i in range(len(out1)): + assert_almost_equal(out1[i].asnumpy(), out2[i].asnumpy(), rtol=0.001, atol=0.0001) + for i in range(len(state1)): + if not isinstance(state1[i], list): + assert_almost_equal(state1[i].asnumpy(), state2[i].asnumpy(), + rtol=0.001, atol=0.0001) 
+ + +def test_output_format_cond(): + class TestLayer1(gluon.HybridBlock): + def __init__(self, func, prefix=None, params=None): + super(TestLayer1, self).__init__(prefix=prefix, params=params) + self.func = func + def hybrid_forward(self, F, data): + def then_func(): + return self.func(data) + def else_func(): + return self.func(data) + return F.contrib.cond(data.slice_axis(axis=0, begin=0, end=1), + then_func, else_func) + + def func1(data): + return data + def func2(data): + return [data] + def func3(data): + return [data, data] + + funcs = [func1, func2, func3] + data = mx.nd.normal(loc=0, scale=1, shape=(2)) + for func in funcs: + layer1 = TestLayer1(func) + layer1.initialize(ctx=default_context()) + layer2 = TestLayer1(func) + layer2.initialize(ctx=default_context()) + layer2.hybridize() + out1 = layer1(data) + out2 = layer2(data) + func_out = func(data) + assert type(out1) == type(func_out) + assert type(out2) == type(func_out) + out1 = _as_list(out1) + out2 = _as_list(out2) + for i in range(len(out1)): + assert_almost_equal(out1[i].asnumpy(), out2[i].asnumpy(), rtol=0.001, atol=0.0001) + + if __name__ == '__main__': import nose nose.runmodule() From b060a01e86b59c03b5d2c85f6c414152c60191c8 Mon Sep 17 00:00:00 2001 From: Aaron Markham Date: Wed, 22 Aug 2018 14:41:38 -0700 Subject: [PATCH 032/160] removed mentions and links to a deleted example (#12288) --- docs/faq/model_parallel_lstm.md | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/docs/faq/model_parallel_lstm.md b/docs/faq/model_parallel_lstm.md index 1e367eb5f291..b78b2c574dcc 100644 --- a/docs/faq/model_parallel_lstm.md +++ b/docs/faq/model_parallel_lstm.md @@ -24,8 +24,7 @@ LSTMS are powerful sequence models, that have proven especially useful for [natural language translation](https://arxiv.org/pdf/1409.0473.pdf), [speech recognition](https://arxiv.org/abs/1512.02595), and working with [time series data](https://arxiv.org/abs/1511.03677). For a general high-level introduction to LSTMs, -see the excellent [tutorial](http://colah.github.io/posts/2015-08-Understanding-LSTMs/) by Christopher Olah. For a working example of LSTM training with model parallelism, -see [example/model-parallelism-lstm/](https://github.com/dmlc/mxnet/blob/master/example/model-parallel/lstm/lstm.py). +see the excellent [tutorial](http://colah.github.io/posts/2015-08-Understanding-LSTMs/) by Christopher Olah. ## Model Parallelism: Using Multiple GPUs As a Pipeline @@ -44,7 +43,6 @@ This differs significantly from data parallelism. Here, there is no contention to update the shared model at the end of each iteration, and most of the communication happens when passing intermediate results between GPUs. -In the current implementation, the layers are defined in [lstm_unroll()](https://github.com/dmlc/mxnet/blob/master/example/model-parallel/lstm/lstm.py). ## Workload Partitioning @@ -65,14 +63,12 @@ Although the LSTM layers consume less memory than the decoder/encoder layers, th Thus, the partition on the left will be faster than the one on the right because the workload is more evenly distributed. -Currently, the layer partition is implemented in [lstm.py](https://github.com/apache/incubator-mxnet/blob/master/example/model-parallel/lstm/lstm.py#L171) and configured in [lstm_ptb.py](https://github.com/apache/incubator-mxnet/blob/master/example/model-parallel/lstm/lstm_ptb.py#L97-L102) using the `group2ctx` option. 
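For context, the partitioning described here is expressed through the `group2ctx` option: layers are tagged with a context group while the symbol is built, and each group is mapped to a device at bind time. A minimal sketch assuming two GPUs, with plain fully-connected layers standing in for the encoder and LSTM stacks:

```python
import mxnet as mx

data = mx.sym.var('data')
# tag layers with a context group while building the symbol
with mx.AttrScope(ctx_group='embed'):
    net = mx.sym.FullyConnected(data, num_hidden=256, name='encoder')
with mx.AttrScope(ctx_group='rnn'):
    net = mx.sym.FullyConnected(net, num_hidden=256, name='lstm_stack')

# map each group to a device when binding
exe = net.simple_bind(ctx=mx.gpu(0),
                      group2ctx={'embed': mx.gpu(0), 'rnn': mx.gpu(1)},
                      data=(32, 128))
```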
## Apply Bucketing to Model Parallelism To achieve model parallelism while using bucketing, you need to unroll an LSTM model for each bucket to obtain an executor for each. -For details about how the model is bound, see [lstm.py](https://github.com/apache/incubator-mxnet/blob/master/example/model-parallel/lstm/lstm.py#L225-L235). On the other hand, because model parallelism partitions the model/layers, the input data has to be transformed/transposed to the agreed shape. From adc57d188239da19eb5da23807c256f22ba4f931 Mon Sep 17 00:00:00 2001 From: Aaron Markham Date: Wed, 22 Aug 2018 14:41:50 -0700 Subject: [PATCH 033/160] fixed broken links (#12293) --- .../gluon/logistic_regression_explained.md | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/docs/tutorials/gluon/logistic_regression_explained.md b/docs/tutorials/gluon/logistic_regression_explained.md index 8e5e4a547a6d..577a91413b33 100644 --- a/docs/tutorials/gluon/logistic_regression_explained.md +++ b/docs/tutorials/gluon/logistic_regression_explained.md @@ -55,9 +55,9 @@ val_dataloader = DataLoader(val_dataset, batch_size=batch_size, shuffle=True) ## Defining and training the model -The only requirement for the logistic regression is that the last layer of the network must be a single neuron. Apache MXNet allows us to do so by using [Dense](https://mxnet.incubator.apache.org/api/python/gluon/nn.html#mxnet.gluon.nn.Dense) layer and specifying the number of units to 1. The rest of the network can be arbitrarily complex. +The only requirement for the logistic regression is that the last layer of the network must be a single neuron. Apache MXNet allows us to do so by using [Dense](https://mxnet.incubator.apache.org/api/python/gluon/nn.html#mxnet.gluon.nn.Dense) layer and specifying the number of units to 1. The rest of the network can be arbitrarily complex. -Below, we define a model which has an input layer of 10 neurons, a couple of inner layers of 10 neurons each, and output layer of 1 neuron. We stack the layers using [HybridSequential](https://mxnet.incubator.apache.org/api/python/gluon/gluon.html#mxnet.gluon.nn.HybridSequential) block and initialize parameters of the network using [Xavier](https://mxnet.incubator.apache.org/api/python/optimization/optimization.html#mxnet.initializer.Xavier) initialization. +Below, we define a model which has an input layer of 10 neurons, a couple of inner layers of 10 neurons each, and output layer of 1 neuron. We stack the layers using [HybridSequential](https://mxnet.incubator.apache.org/api/python/gluon/gluon.html#mxnet.gluon.nn.HybridSequential) block and initialize parameters of the network using [Xavier](https://mxnet.incubator.apache.org/api/python/optimization/optimization.html#mxnet.initializer.Xavier) initialization. ```python @@ -78,14 +78,14 @@ Loss function is used to calculate how the output of the network differs from th Trainer object allows to specify the method of training to be used. For our tutorial we use [Stochastic Gradient Descent (SGD)](https://mxnet.incubator.apache.org/api/python/optimization/optimization.html#mxnet.optimizer.SGD). For more information on SGD refer to [the following tutorial](https://gluon.mxnet.io/chapter06_optimization/gd-sgd-scratch.html). We also need to parametrize it with learning rate value, which defines the weight updates, and weight decay, which is used for regularization. -Metric helps us to estimate how good our model is in terms of a problem we are trying to solve. 
Where loss function has more importance for the training process, a metric is usually the thing we are trying to improve and reach maximum value. We also can use more than one metric, to measure various aspects of our model. In our example, we are using [Accuracy](https://mxnet.incubator.apache.org/api/python/model.html#mxnet.metric.Accuracy) and [F1 score](https://mxnet.incubator.apache.org/api/python/model.html#mxnet.metric.F1) as measurements of success of our model. +Metric helps us to estimate how good our model is in terms of a problem we are trying to solve. Where loss function has more importance for the training process, a metric is usually the thing we are trying to improve and reach maximum value. We also can use more than one metric, to measure various aspects of our model. In our example, we are using [Accuracy](https://mxnet.incubator.apache.org/api/python/metric/metric.html?highlight=metric.acc#mxnet.metric.Accuracy) and [F1 score](http://mxnet.incubator.apache.org/api/python/metric/metric.html?highlight=metric.f1#mxnet.metric.F1) as measurements of success of our model. Below we define these objects. ```python loss = gluon.loss.SigmoidBinaryCrossEntropyLoss() -trainer = Trainer(params=net.collect_params(), optimizer='sgd', +trainer = Trainer(params=net.collect_params(), optimizer='sgd', optimizer_params={'learning_rate': 0.1}) accuracy = mx.metric.Accuracy() f1 = mx.metric.F1() @@ -97,16 +97,16 @@ The next step is to define the training function in which we iterate over all ba ```python def train_model(): cumulative_train_loss = 0 - + for i, (data, label) in enumerate(train_dataloader): with autograd.record(): # Do forward pass on a batch of training data output = net(data) - + # Calculate loss for the training data batch loss_result = loss(output, label) - # Calculate gradients + # Calculate gradients loss_result.backward() # Update parameters of the network @@ -114,15 +114,15 @@ def train_model(): # sum losses of every batch cumulative_train_loss += nd.sum(loss_result).asscalar() - + return cumulative_train_loss ``` ## Validating the model -Our validation function is very similar to the training one. The main difference is that we want to calculate accuracy of the model. We use [Accuracy metric](https://mxnet.incubator.apache.org/api/python/model.html#mxnet.metric.Accuracy) to do so. +Our validation function is very similar to the training one. The main difference is that we want to calculate accuracy of the model. We use [Accuracy metric](https://mxnet.incubator.apache.org/api/python/metric/metric.html?highlight=metric.acc#mxnet.metric.Accuracy) to do so. -`Accuracy` metric requires 2 arguments: 1) a vector of ground-truth classes and 2) A vector or matrix of predictions. When predictions are of the same shape as the vector of ground-truth classes, `Accuracy` class assumes that prediction vector contains predicted classes. So, it converts the vector to `Int32` and compare each item of ground-truth classes to prediction vector. +`Accuracy` metric requires 2 arguments: 1) a vector of ground-truth classes and 2) A vector or matrix of predictions. When predictions are of the same shape as the vector of ground-truth classes, `Accuracy` class assumes that prediction vector contains predicted classes. So, it converts the vector to `Int32` and compare each item of ground-truth classes to prediction vector. 
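The casting behaviour described above is easy to see with made-up values, using the same `Accuracy` metric this tutorial creates:

```python
import mxnet as mx

labels = mx.nd.array([0, 1, 1])
probs = mx.nd.array([0.1, 0.6, 0.99])        # raw sigmoid outputs

acc = mx.metric.Accuracy()
acc.update(labels, probs)                    # same shape as labels, so cast to int -> [0, 0, 0]
print(acc.get())                             # ('accuracy', 0.333...)

acc.reset()
acc.update(labels, mx.nd.ceil(probs - 0.5))  # threshold first -> [0, 1, 1]
print(acc.get())                             # ('accuracy', 1.0)
```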
Because of the behaviour above, you will get an unexpected result if you just apply [Sigmoid](https://mxnet.incubator.apache.org/api/python/ndarray/ndarray.html#mxnet.ndarray.sigmoid) function to the network result and pass it to `Accuracy` metric. As mentioned before, we need to apply `Sigmoid` function to the output of the neuron to get a probability of belonging to the class 1. But `Sigmoid` function produces output in range [0; 1], and all numbers in that range are going to be casted to 0, even if it is as high as 0.99. To avoid this we write a custom bit of code on line 12, that: @@ -146,27 +146,27 @@ Then we pass this stacked matrix to `F1` score. ```python def validate_model(threshold): cumulative_val_loss = 0 - + for i, (val_data, val_ground_truth_class) in enumerate(val_dataloader): # Do forward pass on a batch of validation data output = net(val_data) - + # Similar to cumulative training loss, calculate cumulative validation loss cumulative_val_loss += nd.sum(loss(output, val_ground_truth_class)).asscalar() - + # getting prediction as a sigmoid prediction = net(val_data).sigmoid() - + # Converting neuron outputs to classes predicted_classes = mx.nd.ceil(prediction - threshold) - + # Update validation accuracy - accuracy.update(val_ground_truth_class, predicted_classes.reshape(-1)) - + accuracy.update(val_ground_truth_class, predicted_classes.reshape(-1)) + # calculate probabilities of belonging to different classes. F1 metric works only with this notation prediction = prediction.reshape(-1) probabilities = mx.nd.stack(1 - prediction, prediction, axis=1) - + f1.update(val_ground_truth_class, probabilities) return cumulative_val_loss @@ -184,8 +184,8 @@ threshold = 0.5 for e in range(epochs): avg_train_loss = train_model() / train_data_size avg_val_loss = validate_model(threshold) / val_data_size - - print("Epoch: %s, Training loss: %.2f, Validation loss: %.2f, Validation accuracy: %.2f, F1 score: %.2f" % + + print("Epoch: %s, Training loss: %.2f, Validation loss: %.2f, Validation accuracy: %.2f, F1 score: %.2f" % (e, avg_train_loss, avg_val_loss, accuracy.get()[1], f1.get()[1])) # we reset accuracy, so the new epoch's accuracy would be calculated from the blank state @@ -203,13 +203,13 @@ for e in range(epochs): Epoch: 4, Training loss: 0.06, Validation loss: 0.09, Validation accuracy: 0.97, F1 score: 0.58 Epoch: 5, Training loss: 0.04, Validation loss: 0.12, Validation accuracy: 0.97, F1 score: 0.59 - + Epoch: 6, Training loss: 0.05, Validation loss: 0.09, Validation accuracy: 0.99, F1 score: 0.62 - + Epoch: 7, Training loss: 0.05, Validation loss: 0.10, Validation accuracy: 0.97, F1 score: 0.62 - + Epoch: 8, Training loss: 0.05, Validation loss: 0.12, Validation accuracy: 0.95, F1 score: 0.63 - + Epoch: 9, Training loss: 0.04, Validation loss: 0.09, Validation accuracy: 0.98, F1 score: 0.65 @@ -217,7 +217,7 @@ In our case we hit the accuracy of 0.98 and F1 score of 0.65. ## Tip 1: Use only one neuron in the output layer -Despite that there are 2 classes, there should be only one output neuron, because `SigmoidBinaryCrossEntropyLoss` accepts only one feature as an input. +Despite that there are 2 classes, there should be only one output neuron, because `SigmoidBinaryCrossEntropyLoss` accepts only one feature as an input. 
## Tip 2: Encode classes as 0 and 1 @@ -225,7 +225,7 @@ For `SigmoidBinaryCrossEntropyLoss` to work it is required that classes were enc ## Tip 3: Use SigmoidBinaryCrossEntropyLoss instead of LogisticRegressionOutput -NDArray API has two options to calculate logistic regression loss: [SigmoidBinaryCrossEntropyLoss](https://mxnet.incubator.apache.org/api/python/gluon/loss.html#mxnet.gluon.loss.SigmoidBinaryCrossEntropyLoss) and [LogisticRegressionOutput](https://mxnet.incubator.apache.org/api/python/ndarray/ndarray.html#mxnet.ndarray.LogisticRegressionOutput). `LogisticRegressionOutput` is designed to be an output layer when using the Module API, and is not supposed to be used when using Gluon API. +NDArray API has two options to calculate logistic regression loss: [SigmoidBinaryCrossEntropyLoss](https://mxnet.incubator.apache.org/api/python/gluon/loss.html#mxnet.gluon.loss.SigmoidBinaryCrossEntropyLoss) and [LogisticRegressionOutput](https://mxnet.incubator.apache.org/api/python/ndarray/ndarray.html#mxnet.ndarray.LogisticRegressionOutput). `LogisticRegressionOutput` is designed to be an output layer when using the Module API, and is not supposed to be used when using Gluon API. ## Conclusion @@ -235,4 +235,4 @@ In this tutorial I explained some potential pitfalls to be aware of. When doing 1. Use `SigmoidBinaryCrossEntropyLoss` 1. Convert probabilities to classes before calculating Accuracy - \ No newline at end of file + From 271fd02f1c06fb09c6e96cb562bf0b190cb5f517 Mon Sep 17 00:00:00 2001 From: Aaron Markham Date: Wed, 22 Aug 2018 14:41:59 -0700 Subject: [PATCH 034/160] fixed broken link (#12289) --- docs/tutorials/python/linear-regression.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/tutorials/python/linear-regression.md b/docs/tutorials/python/linear-regression.md index 05afd032ed11..f9656844052d 100644 --- a/docs/tutorials/python/linear-regression.md +++ b/docs/tutorials/python/linear-regression.md @@ -6,9 +6,9 @@ The function we are trying to learn is: *y = x1 + 2x2*, ## Prerequisites -To complete this tutorial, we need: +To complete this tutorial, we need: -- MXNet. See the instructions for your operating system in [Setup and Installation](http://mxnet.io/install/index.html). +- MXNet. See the instructions for your operating system in [Setup and Installation](http://mxnet.io/install/index.html). - [Jupyter Notebook](http://jupyter.org/index.html). @@ -32,7 +32,7 @@ logging.getLogger().setLevel(logging.DEBUG) ## Preparing the Data In MXNet, data is input via **Data Iterators**. Here we will illustrate -how to encode a dataset into an iterator that MXNet can use. The data used in the example is made up of 2D data points with corresponding integer labels. +how to encode a dataset into an iterator that MXNet can use. The data used in the example is made up of 2D data points with corresponding integer labels. ```python #Training data @@ -65,7 +65,7 @@ Documentation for iterators can be found [here](http://mxnet.io/api/python/io/io 1. **IO:** The IO class as we already saw works on the data and carries out operations such as feeding data in batches and shuffling. - + 2. **Symbol:** The actual MXNet neural network is composed using symbols. MXNet has different types of symbols, including variable placeholders for input data, neural network layers, and operators that manipulate NDArrays. 
@@ -77,7 +77,7 @@ Documentation for iterators can be found [here](http://mxnet.io/api/python/io/io ## Defining the Model -MXNet uses **Symbols** for defining a model. Symbols are the building blocks +MXNet uses **Symbols** for defining a model. Symbols are the building blocks and make up various components of the model. Symbols are used to define: 1. **Variables:** A variable is a placeholder for future data. This symbol is used @@ -161,7 +161,7 @@ model.fit(train_iter, eval_iter, optimizer_params={'learning_rate':0.01, 'momentum': 0.9}, num_epoch=20, eval_metric='mse', - batch_end_callback = mx.callback.Speedometer(batch_size, 2)) + batch_end_callback = mx.callback.Speedometer(batch_size, 2)) ``` ## Using a trained model: (Testing and Inference) @@ -193,6 +193,6 @@ model.score(eval_iter, metric) ``` We can also create a custom metric and use it to evaluate a model. More -information on metrics can be found in the [API documentation](http://mxnet.io/api/python/model.html#evaluation-metric-api-reference). +information on metrics can be found in the [API documentation](http://mxnet.incubator.apache.org/api/python/metric/metric.html). From 76bdc8772c7f4edd3087bd1285025122fc837c06 Mon Sep 17 00:00:00 2001 From: Vandana Kannan Date: Wed, 22 Aug 2018 14:42:29 -0700 Subject: [PATCH 035/160] [MXNET-696] Fix undefined name and enable Pylint undefined variable (#12277) * Fix infinite loop in retain * Enable undefined variable in pylint --- ci/other/pylintrc | 1 - python/mxnet/ndarray/sparse.py | 8 +++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/ci/other/pylintrc b/ci/other/pylintrc index ca31417f321c..db3da4cae57d 100644 --- a/ci/other/pylintrc +++ b/ci/other/pylintrc @@ -101,7 +101,6 @@ disable= import-error, unsubscriptable-object, unbalanced-tuple-unpacking, - undefined-variable, protected-access, superfluous-parens, invalid-name, diff --git a/python/mxnet/ndarray/sparse.py b/python/mxnet/ndarray/sparse.py index 88f5eae0722a..7b4cc90648c2 100644 --- a/python/mxnet/ndarray/sparse.py +++ b/python/mxnet/ndarray/sparse.py @@ -46,9 +46,9 @@ from . import _internal from . import op try: - from .gen_sparse import * # pylint: disable=redefined-builtin + from .gen_sparse import retain as gs_retain # pylint: disable=redefined-builtin except ImportError: - pass + gs_retain = None from ._internal import _set_ndarray_class from .ndarray import NDArray, _storage_type, _DTYPE_NP_TO_MX, _DTYPE_MX_TO_NP from .ndarray import _STORAGE_TYPE_STR_TO_ID, _STORAGE_TYPE_ROW_SPARSE, _STORAGE_TYPE_CSR @@ -787,7 +787,9 @@ def retain(self, *args, **kwargs): The arguments are the same as for :py:func:`retain`, with this array as data. """ - return retain(self, *args, **kwargs) + if not gs_retain: + raise ImportError("gen_sparse could not be imported") + return gs_retain(*args, **kwargs) def _prepare_src_array(source_array, dtype): """Prepare `source_array` so that it can be used to construct NDArray. From 5a9949a444771f6b62146c6cf24866ee784cef47 Mon Sep 17 00:00:00 2001 From: Da Zheng Date: Wed, 22 Aug 2018 16:28:22 -0700 Subject: [PATCH 036/160] Make sure input symbol names are unique in control flow operators. (#12187) * add test for foreach. * add tests for while_loop. * use unique name. * make the names of vars in while_loop unique. * verify unique names. * avoid flaky. * fix lint. * fix. * fix tests. * fix. 
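The collision being fixed appears when the output of one control-flow operator feeds another of the same kind, so both subgraphs cut variables carrying identical names. A condensed reproduction in the spirit of the new foreach test (class and variable names are illustrative):

```python
import mxnet as mx
from mxnet import gluon

class TwoForeach(gluon.HybridBlock):
    def hybrid_forward(self, F, inputs, states):
        step = lambda data, states: (data + 1, states)
        out1, states1 = F.contrib.foreach(step, inputs, states)
        # chaining a second foreach used to create duplicate subgraph input names
        out, _ = F.contrib.foreach(step, out1, states1)
        return out

layer = TwoForeach()
layer.initialize()
layer.hybridize()
out = layer(mx.nd.ones((2, 5)), [mx.nd.zeros((5,))])
```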
--- python/mxnet/symbol/contrib.py | 27 ++++-- .../unittest/test_contrib_control_flow.py | 86 +++++++++++++++++++ 2 files changed, 105 insertions(+), 8 deletions(-) diff --git a/python/mxnet/symbol/contrib.py b/python/mxnet/symbol/contrib.py index 3c81dcf9ee0a..b551422320ca 100644 --- a/python/mxnet/symbol/contrib.py +++ b/python/mxnet/symbol/contrib.py @@ -135,6 +135,12 @@ def _regroup(args, fmt): return ret, args +# We want to generate a unique name for input symbols to a control flow +# operator. The names are generated on purpose differently from the symbols +# cut from the graph. +def _get_sym_uniq_name(sym): + return '{}-{}'.format(sym.name, sym.attr('_value_index')) + def _get_graph_inputs(subg): num_handles = ctypes.c_int(0) handles = ctypes.POINTER(SymbolHandle)() @@ -280,9 +286,9 @@ def foreach(body, data, init_states, name="foreach"): # with AttrScope and prune the nodes without the special attribute. name = _get_unique_subgraph_name(name) with AttrScope(__subgraph_name__=name): - in_eles = [symbol.var(sym.name) for sym in flatten_data] + in_eles = [symbol.var(_get_sym_uniq_name(sym)) for sym in flatten_data] in_eles, _ = _regroup(in_eles, data_fmt) - states = [symbol.var(s.name) for s in init_flatten_states] + states = [symbol.var(_get_sym_uniq_name(s)) for s in init_flatten_states] states, _ = _regroup(states, copy.deepcopy(init_state_fmt)) sym_out, sym_states = body(in_eles, states) @@ -310,12 +316,14 @@ def foreach(body, data, init_states, name="foreach"): gin_names = input_syms.keys() # This array contains the symbols for the inputs of foreach. # They are ordered according to the inputs of the subgraph. - state_names = [sym.name for sym in init_flatten_states] - data_names = [sym.name for sym in flatten_data] + state_names = [_get_sym_uniq_name(sym) for sym in init_flatten_states] + data_names = [_get_sym_uniq_name(sym) for sym in flatten_data] cut_var_map = {sym.list_outputs()[0]:sym for sym in cut_syms} cut_var_names = cut_var_map.keys() subg_input_names = g.list_inputs() + assert len(set(subg_input_names)) == len(subg_input_names), \ + "The inputs of the subgraph don't have unique names: " + str(subg_input_names) # ordered_ins contains input symbols in the following order: # data_syms, state_syms, followed by cut_vars and vars in the closure. 
ordered_ins = [x for x in flatten_data] @@ -483,7 +491,7 @@ def _create_subgraph(graph_vars, graph_func, subgraph_name): # create new variables with the same name, # them feed them to the given func graph_vars, var_fmt = _flatten(graph_vars, "while loop_vars") - new_graph_vars = [symbol.var(sym.name) for sym in graph_vars] + new_graph_vars = [symbol.var(_get_sym_uniq_name(sym)) for sym in graph_vars] new_graph_vars, _ = _regroup(new_graph_vars, var_fmt) outputs, final_state, out_fmt, var_fmt = graph_func(new_graph_vars) # first `num_out_data` elements belong to `outputs` @@ -517,17 +525,20 @@ def _union_inputs(*graphs): # to a `loc`, where inputs[loc] = sym for graph in graphs: # some loop_vars are inputs to `graph`, some are not - name_to_loop_vars = {sym.name: sym for sym in flatten_loop_vars} + name_to_loop_vars = {_get_sym_uniq_name(sym): sym for sym in flatten_loop_vars} # other inputs to `graph` created by cut_graph name_to_cut_g_syms = {sym.list_outputs()[0]: sym for sym in _cut_subgraph(graph)} # input_syms: all inputs to the `graph` name_to_input_syms = {sym.name: sym for sym in _get_graph_inputs(graph)} # also we collect the mapping from var's name to var's loc in loop_vars - name_to_var_locs = {sym.name: i for i, sym in enumerate(flatten_loop_vars)} + name_to_var_locs = {_get_sym_uniq_name(sym): i for i, sym in enumerate(flatten_loop_vars)} # collect arguments for each subgraph input_locs = [] # results from the second step var_locs = [-1] * len(flatten_loop_vars) # results from the third step - for name in graph.list_inputs(): + subg_input_names = graph.list_inputs() + assert len(set(subg_input_names)) == len(subg_input_names), \ + "The inputs of the subgraph don't have unique names: " + str(subg_input_names) + for name in subg_input_names: assert name in name_to_input_syms # it should obviously hold # name -> sym if name in name_to_loop_vars: diff --git a/tests/python/unittest/test_contrib_control_flow.py b/tests/python/unittest/test_contrib_control_flow.py index eadd63c94240..1c23c9161977 100644 --- a/tests/python/unittest/test_contrib_control_flow.py +++ b/tests/python/unittest/test_contrib_control_flow.py @@ -1707,6 +1707,92 @@ def step2(data, states): assert_almost_equal(res1.asnumpy(), res2.asnumpy(), rtol=1e-3, atol=1e-3) +@with_seed() +def test_uniq_name(): + class ForeachLayer1(gluon.HybridBlock): + def __init__(self, prefix=None, params=None): + super(ForeachLayer1, self).__init__(prefix=prefix, params=params) + + def hybrid_forward(self, F, inputs, states): + def step1(data, states): + return data + 1, states + out1, states1 = F.contrib.foreach(step1, inputs, states) + # The input variables have the same symbol name. + out, states = F.contrib.foreach(step1, out1, states1) + return out + + class ForeachLayer2(gluon.HybridBlock): + def __init__(self, prefix=None, params=None): + super(ForeachLayer2, self).__init__(prefix=prefix, params=params) + + def hybrid_forward(self, F, inputs, states): + def step1(data, states): + return data + 1, states + out1, states1 = F.contrib.foreach(step1, inputs, states) + def step2(data, states): + return data, [states[0] + states1[0] + F.squeeze(out1.slice_axis(axis=0, begin=0, end=1))] + # The input variables have the same symbol names. + # The free variables have the same symbol names as the input variables. 
+ out, states = F.contrib.foreach(step2, out1, states1) + return out + + class WhileLayer1(gluon.HybridBlock): + def __init__(self, prefix=None, params=None): + super(WhileLayer1, self).__init__(prefix=prefix, params=params) + + def hybrid_forward(self, F, inputs, states): + def cond(state1, state2): + s = F.squeeze(state1.slice_axis(axis=0, begin=0, end=1)) + return s == s + def step(state1, state2): + return state1 + 1, [state1, state2] + states = [states[0], states[0] + 1] + out1, states1 = F.contrib.while_loop(cond, step, states, max_iterations=5) + # The input variables have the same symbol name. + out, states = F.contrib.while_loop(cond, step, states1, max_iterations=5) + return out + + class WhileLayer2(gluon.HybridBlock): + def __init__(self, prefix=None, params=None): + super(WhileLayer2, self).__init__(prefix=prefix, params=params) + + def hybrid_forward(self, F, inputs, states): + def cond(state1, state2): + s = F.squeeze(state1.slice_axis(axis=0, begin=0, end=1)) + return s == s + def step1(state1, state2): + return state1 + 1, [state1, state2] + states = [states[0], states[0] + 1] + out1, states1 = F.contrib.while_loop(cond, step1, states, max_iterations=5) + def step2(state1, state2): + return state1 + 1, [state1 + states1[0], state2 + states1[1]] + # The input variables have the same symbol name. + out, states = F.contrib.while_loop(cond, step2, states1, max_iterations=5) + return out + + TestLayers = [ForeachLayer1, ForeachLayer2, + WhileLayer1, WhileLayer2] + + data = mx.nd.normal(loc=0, scale=1, shape=(2, 5)) + states = mx.nd.normal(loc=0, scale=1, shape=(5)) + for TestLayer in TestLayers: + layer = TestLayer() + layer.initialize(ctx=default_context()) + res1 = layer(data, [states]) + + with mx.autograd.record(): + res1 = layer(data, [states]) + + layer = TestLayer() + layer.initialize(ctx=default_context()) + layer.hybridize() + res2 = layer(data, [states]) + + with mx.autograd.record(): + res2 = layer(data, [states]) + assert_almost_equal(res1.asnumpy(), res2.asnumpy(), rtol=0.001, atol=0.0001) + + @with_seed() def test_cut_subgraph_while_loop(): class TestLayer(gluon.HybridBlock): From 9ef919eb59907e084cab95af3da407d56f891d10 Mon Sep 17 00:00:00 2001 From: Lin Yuan Date: Wed, 22 Aug 2018 16:29:50 -0700 Subject: [PATCH 037/160] Fix broken anchor in doc (#12193) --- docs/tutorials/basic/symbol.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/tutorials/basic/symbol.md b/docs/tutorials/basic/symbol.md index 961a9132e100..6a4bb872d04c 100644 --- a/docs/tutorials/basic/symbol.md +++ b/docs/tutorials/basic/symbol.md @@ -94,7 +94,7 @@ mx.viz.plot_network(symbol=g) The computations declared in the above examples can be bound to the input data for evaluation by using `bind` method. We discuss this further in the -[symbol manipulation](#Symbol Manipulation) section. +[Symbol Manipulation](#symbol-manipulation) section. 
### Basic Neural Networks From 08f1c2dcba897ccf9568079e198ceb537ec388d4 Mon Sep 17 00:00:00 2001 From: Xingjian Shi Date: Thu, 23 Aug 2018 07:35:26 +0800 Subject: [PATCH 038/160] [MXNET-507] Set arbitrary dtype for ret_indices in ordering ops (#12250) * support arbitrary dtype of ordering * fix bug --- src/operator/mxnet_op.h | 46 ++++ src/operator/tensor/ordering_op-inl.h | 370 +++++++++++++++++--------- src/operator/tensor/ordering_op.cc | 5 +- tests/python/unittest/test_ndarray.py | 260 +++++++++++------- 4 files changed, 453 insertions(+), 228 deletions(-) diff --git a/src/operator/mxnet_op.h b/src/operator/mxnet_op.h index c3f6dc6558e3..f11a497c564c 100644 --- a/src/operator/mxnet_op.h +++ b/src/operator/mxnet_op.h @@ -176,6 +176,52 @@ inline int get_num_threads(const int N) { LOG(FATAL) << "Unknown type enum " << type; \ } +#define MXNET_NO_FLOAT16_TYPE_SWITCH(type, DType, ...) \ + switch (type) { \ + case mshadow::kFloat32: \ + { \ + typedef float DType; \ + {__VA_ARGS__} \ + } \ + break; \ + case mshadow::kFloat64: \ + { \ + typedef double DType; \ + {__VA_ARGS__} \ + } \ + break; \ + case mshadow::kFloat16: \ + LOG(FATAL) << "This operation does not " \ + "support float16"; \ + break; \ + case mshadow::kUint8: \ + { \ + typedef uint8_t DType; \ + {__VA_ARGS__} \ + } \ + break; \ + case mshadow::kInt8: \ + { \ + typedef int8_t DType; \ + {__VA_ARGS__} \ + } \ + break; \ + case mshadow::kInt32: \ + { \ + typedef int32_t DType; \ + {__VA_ARGS__} \ + } \ + break; \ + case mshadow::kInt64: \ + { \ + typedef int64_t DType; \ + {__VA_ARGS__} \ + } \ + break; \ + default: \ + LOG(FATAL) << "Unknown type enum " << type; \ + } + /*! * \brief assign the val to out according diff --git a/src/operator/tensor/ordering_op-inl.h b/src/operator/tensor/ordering_op-inl.h index a6f638e29321..c1a5b89db094 100644 --- a/src/operator/tensor/ordering_op-inl.h +++ b/src/operator/tensor/ordering_op-inl.h @@ -58,6 +58,7 @@ struct TopKParam : public dmlc::Parameter { int k; int ret_typ; bool is_ascend; + int dtype; DMLC_DECLARE_PARAMETER(TopKParam) { DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional(-1)) .describe("Axis along which to choose the top k indices." @@ -79,6 +80,16 @@ struct TopKParam : public dmlc::Parameter { DMLC_DECLARE_FIELD(is_ascend).set_default(false) .describe("Whether to choose k largest or k smallest elements." " Top K largest elements will be chosen if set to false."); + DMLC_DECLARE_FIELD(dtype) + .add_enum("uint8", mshadow::kUint8) + .add_enum("int32", mshadow::kInt32) + .add_enum("float16", mshadow::kFloat16) + .add_enum("float32", mshadow::kFloat32) + .add_enum("float64", mshadow::kFloat64) + .set_default(mshadow::kFloat32) + .describe("DType of the output indices when ret_typ is \"indices\" or \"both\". " + "An error will be raised if the selected data type cannot precisely represent the " + "indices."); } }; @@ -97,12 +108,23 @@ struct SortParam : public dmlc::Parameter { struct ArgSortParam : public dmlc::Parameter { dmlc::optional axis; bool is_ascend; + int dtype; DMLC_DECLARE_PARAMETER(ArgSortParam) { DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional(-1)) .describe("Axis along which to sort the input tensor." " If not given, the flattened array is used. 
Default is -1."); DMLC_DECLARE_FIELD(is_ascend).set_default(true) .describe("Whether to sort in ascending or descending order."); + DMLC_DECLARE_FIELD(dtype) + .add_enum("uint8", mshadow::kUint8) + .add_enum("int32", mshadow::kInt32) + .add_enum("float16", mshadow::kFloat16) + .add_enum("float32", mshadow::kFloat32) + .add_enum("float64", mshadow::kFloat64) + .set_default(mshadow::kFloat32) + .describe("DType of the output indices. It is only valid when ret_typ is \"indices\" or" + " \"both\". An error will be raised if the selected data type cannot precisely " + "represent the indices."); } }; @@ -154,29 +176,22 @@ inline void ParseTopKParam(const TShape& src_shape, const TopKParam& param, TSha using namespace mshadow; -template -void TopKSort(const Tensor& dat, - const Tensor& ind, - const Tensor& work, - int K, int N, bool is_ascend, - Stream *s); - -template<> -MSHADOW_FORCE_INLINE void TopKSort(const Tensor& dat, - const Tensor& ind, - const Tensor& work, - int K, int N, bool is_ascend, - Stream *s) { +template +MSHADOW_FORCE_INLINE void TopKSort(const Tensor& dat, + const Tensor& ind, + const Tensor& work, + int K, int N, bool is_ascend, + Stream *s) { // Use full sort when K is relatively large. const bool full_sort(K*8 > N); // Batch size. - const int M(work.size(0)/(sizeof(real_t)*N)); + const int M(work.size(0)/(sizeof(DType)*N)); const int omp_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()); #pragma omp parallel for num_threads(omp_threads) for (int i = 0; i < M; ++i) { // Tensor `work` stores the flattened source data, while `dat` stores the sorted result. - real_t *vals = reinterpret_cast(work.dptr_); - real_t *sorted_vals = dat.dptr_+i*N; + DType *vals = reinterpret_cast(work.dptr_); + DType *sorted_vals = dat.dptr_+i*N; int *indices = ind.dptr_+i*N; if (is_ascend) { if (full_sort) { @@ -285,12 +300,12 @@ __global__ void PartialSortSmallK(int K, int N, DType *val, int *ind, bool is_as } } -template<> -MSHADOW_FORCE_INLINE void TopKSort(const Tensor& dat, - const Tensor& ind, - const Tensor& work, - int K, int N, bool is_ascend, - Stream *s) { +template +MSHADOW_FORCE_INLINE void TopKSort(const Tensor& dat, + const Tensor& ind, + const Tensor& work, + int K, int N, bool is_ascend, + Stream *s) { // Use full sort for all but very small K for which we // can do a partial sort entirely within shared memory. const bool full_sort(K > 5); @@ -311,7 +326,7 @@ MSHADOW_FORCE_INLINE void TopKSort(const Tensor& dat, } } else { const int nthreads(mshadow::cuda::kBaseThreadNum); - PartialSortSmallK<<::GetStream(s)>>> (K, N, dat.dptr_, ind.dptr_, is_ascend); } @@ -331,25 +346,25 @@ MSHADOW_FORCE_INLINE void TopKSort(const Tensor& dat, * \param k the K elements to keep * \param param the topk parameters * \tparam xpu the device type. + * \tparam DType type of the output value/mask. + * \tparam IDType type of the output indices. */ -template -void TopKImpl(RunContext ctx, - Resource resource, +template +void TopKImpl(const RunContext &ctx, + const Resource &resource, + const std::vector& req, const TBlob& src, const std::vector& ret, const TopKParam& param) { using namespace mshadow; using namespace mshadow::expr; - for (auto ret_ele : ret) { - CHECK_EQ(ret_ele.type_flag_, src.type_flag_); - } // 1. 
Parse and initialize information Stream *s = ctx.get_stream(); Tensor workspace; Tensor temp_workspace; - Tensor sorted_dat; + Tensor sorted_dat; Tensor indices, sel_indices; - Tensor mask_val; + Tensor mask_val; int batch_size, element_num; // number of batches + the size of each batch int axis = 0; bool do_transpose = false; @@ -358,25 +373,29 @@ void TopKImpl(RunContext ctx, TShape target_shape; ParseTopKParam(src.shape_, param, &target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend); - Tensor dat = src.FlatTo3D(axis, axis, s); + CHECK_LE(element_num, mxnet::common::MaxIntegerValue()) + << "'IDType' does not have a sufficient precision to represent the indices of the input array. " + << "The total element_num is " << element_num << ", but the selected IDType can only represent " + << mxnet::common::MaxIntegerValue() << " elements"; + Tensor dat = src.FlatTo3D(axis, axis, s); size_t temp_size = 0; // Temp space needed by the gpu-based full sorts. temp_size = std::max(temp_size, mxnet::op::SortByKeyWorkspaceSize(src.Size())); - temp_size = std::max(temp_size, mxnet::op::SortByKeyWorkspaceSize(src.Size())); - temp_size = std::max(temp_size, mxnet::op::SortByKeyWorkspaceSize(src.Size())); + temp_size = std::max(temp_size, mxnet::op::SortByKeyWorkspaceSize(src.Size())); + temp_size = std::max(temp_size, mxnet::op::SortByKeyWorkspaceSize(src.Size())); // Additional temp space for gpu full sorts for batch ids. temp_size += sizeof(int) * src.Size(); // Temp space for cpu sorts. - temp_size = std::max(temp_size, sizeof(real_t) * src.Size()); - size_t workspace_size = temp_size + sizeof(real_t) * src.Size() + sizeof(int) * src.Size(); + temp_size = std::max(temp_size, sizeof(DType) * src.Size()); + size_t workspace_size = temp_size + sizeof(DType) * src.Size() + sizeof(int) * src.Size(); if (param.ret_typ == topk_enum::kReturnMask) { - workspace_size += sizeof(int) * batch_size * k + sizeof(real_t) * batch_size * k; + workspace_size += sizeof(int) * batch_size * k + sizeof(DType) * batch_size * k; } workspace = resource.get_space_typed(Shape1(workspace_size), s); char* workspace_curr_ptr = workspace.dptr_; - sorted_dat = Tensor(reinterpret_cast(workspace_curr_ptr), + sorted_dat = Tensor(reinterpret_cast(workspace_curr_ptr), Shape1(src.Size()), s); // contain sorted dat - workspace_curr_ptr += sizeof(real_t) * src.Size(); + workspace_curr_ptr += sizeof(DType) * src.Size(); indices = Tensor(reinterpret_cast(workspace_curr_ptr), Shape1(src.Size()), s); // indices in the original matrix workspace_curr_ptr += sizeof(int) * src.Size(); @@ -385,28 +404,28 @@ void TopKImpl(RunContext ctx, sel_indices = Tensor(reinterpret_cast(workspace_curr_ptr), Shape1(batch_size * k), s); workspace_curr_ptr += sizeof(int) * batch_size * k; - mask_val = Tensor(reinterpret_cast(workspace_curr_ptr), + mask_val = Tensor(reinterpret_cast(workspace_curr_ptr), Shape2(batch_size * k, 1), s); - workspace_curr_ptr += sizeof(real_t) * batch_size * k; - mask_val = scalar(1); + workspace_curr_ptr += sizeof(DType) * batch_size * k; + mask_val = scalar(1); CHECK_EQ(sel_indices.CheckContiguous(), true); CHECK_EQ(mask_val.CheckContiguous(), true); } if (std::is_same::value) { - Tensor flattened_data; + Tensor flattened_data; if (do_transpose) { - flattened_data = Tensor(reinterpret_cast(workspace_curr_ptr), + flattened_data = Tensor(reinterpret_cast(workspace_curr_ptr), Shape1(src.Size()), s); - workspace_curr_ptr += sizeof(real_t) * src.Size(); + workspace_curr_ptr += sizeof(DType) * src.Size(); flattened_data 
= reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size())); CHECK_EQ(flattened_data.CheckContiguous(), true); } else { - flattened_data = src.FlatTo1D(s); + flattened_data = src.FlatTo1D(s); } // `temp_workspace` stores the flattened data temp_workspace = Tensor(reinterpret_cast(flattened_data.dptr_), - Shape1(sizeof(real_t)*src.Size()), s); + Shape1(sizeof(DType)*src.Size()), s); CHECK_EQ(temp_workspace.CheckContiguous(), true); } else { if (do_transpose) { @@ -436,9 +455,9 @@ void TopKImpl(RunContext ctx, // Cast `ret_indices` from int to real_t could introduce conversion error when the element_num // is large enough. if (param.ret_typ == topk_enum::kReturnMask) { - Tensor ret_mask = - ret[0].get_with_shape(Shape2(ret[0].Size(), 1), s); - ret_mask = scalar(0); + Tensor ret_mask = + ret[0].get_with_shape(Shape2(ret[0].Size(), 1), s); + ret_mask = scalar(0); sel_indices = reshape(slice<1>( inplace_reshape(indices, Shape2(batch_size, @@ -450,53 +469,53 @@ void TopKImpl(RunContext ctx, sel_indices = transpose_indices(sel_indices, Shape3(src_shape[0], src_shape[2], src_shape[1]), Shape3(0, 2, 1)); } - IndexFill(ret_mask, sel_indices, mask_val); + if (req[0] == kNullOp) { + return; + } else if (req[0] == kWriteTo) { + IndexFill(ret_mask, sel_indices, mask_val); + } else { + LOG(FATAL) << "req=" << req[0] << " is not supported yet."; + } } else if (param.ret_typ == topk_enum::kReturnIndices) { if (do_transpose) { - Tensor ret_indices = ret[0].FlatTo3D(axis, axis, s); - ret_indices = tcast(F( - transpose(slice<2>(inplace_reshape(indices, - Shape3(ret_indices.shape_[0], - ret_indices.shape_[2], - element_num)), - 0, k), - Shape3(0, 2, 1)), - element_num)); - } else { - Tensor ret_indices = - ret[0].get_with_shape(Shape2(batch_size, k), s); - ret_indices = tcast(F( - slice<1>(inplace_reshape(indices, Shape2(batch_size, element_num)), + Tensor ret_indices = ret[0].FlatTo3D(axis, axis, s); + ASSIGN_DISPATCH(ret_indices, req[0], tcast(F(transpose( + slice<2>(inplace_reshape(indices, + Shape3(ret_indices.shape_[0], + ret_indices.shape_[2], + element_num)), 0, k), - element_num)); + Shape3(0, 2, 1)), element_num))); + } else { + Tensor ret_indices = + ret[0].get_with_shape(Shape2(batch_size, k), s); + ASSIGN_DISPATCH(ret_indices, req[0], tcast(F(slice<1>( + inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k), + element_num))); } } else { if (do_transpose) { - Tensor ret_value = ret[0].FlatTo3D(axis, axis, s); - Tensor ret_indices = ret[1].FlatTo3D(axis, axis, s); - ret_value = transpose( + Tensor ret_value = ret[0].FlatTo3D(axis, axis, s); + Tensor ret_indices = ret[1].FlatTo3D(axis, axis, s); + ASSIGN_DISPATCH(ret_value, req[0], transpose( slice<2>(inplace_reshape(sorted_dat, Shape3(ret_value.shape_[0], ret_value.shape_[2], element_num)), - 0, k), - Shape3(0, 2, 1)); - ret_indices = tcast(F( - transpose(slice<2>(inplace_reshape(indices, - Shape3(ret_indices.shape_[0], - ret_indices.shape_[2], - element_num)), - 0, k), - Shape3(0, 2, 1)), - element_num)); + 0, k), Shape3(0, 2, 1))); + ASSIGN_DISPATCH(ret_indices, req[1], tcast(F(transpose( + slice<2>(inplace_reshape(indices, + Shape3(ret_indices.shape_[0], + ret_indices.shape_[2], + element_num)), + 0, k), Shape3(0, 2, 1)), element_num))); } else { - Tensor ret_value = - ret[0].get_with_shape(Shape2(batch_size, k), s); - Tensor ret_indices = - ret[1].get_with_shape(Shape2(batch_size, k), s); - ret_value = slice<1>(inplace_reshape(sorted_dat, Shape2(batch_size, element_num)), 0, k); - ret_indices = tcast(F( - 
slice<1>(inplace_reshape(indices, Shape2(batch_size, element_num)), - 0, k), - element_num)); + Tensor ret_value = + ret[0].get_with_shape(Shape2(batch_size, k), s); + Tensor ret_indices = + ret[1].get_with_shape(Shape2(batch_size, k), s); + ASSIGN_DISPATCH(ret_value, req[0], + slice<1>(inplace_reshape(sorted_dat, Shape2(batch_size, element_num)), 0, k)); + ASSIGN_DISPATCH(ret_indices, req[1], tcast(F(slice<1>( + inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k), element_num))); } } } @@ -508,9 +527,17 @@ void TopK(const nnvm::NodeAttrs& attrs, const std::vector& req, const std::vector& outputs) { const TopKParam& param = nnvm::get(attrs.parsed); - // TODO(sxjscience) We can support inplace in the future - CHECK_EQ(req[0], kWriteTo) << "TopK does not support inplace"; - TopKImpl(ctx.run_ctx, ctx.requested[0], inputs[0], outputs, param); + if (param.ret_typ == topk_enum::kReturnIndices || param.ret_typ == topk_enum::kReturnBoth) { + MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, { + MSHADOW_TYPE_SWITCH(param.dtype, IDType, { + TopKImpl(ctx.run_ctx, ctx.requested[0], req, inputs[0], outputs, param); + }) + }); + } else { + MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, { + TopKImpl(ctx.run_ctx, ctx.requested[0], req, inputs[0], outputs, param); + }); + } } template @@ -520,13 +547,14 @@ void Sort(const nnvm::NodeAttrs& attrs, const std::vector& req, const std::vector& outputs) { const SortParam& param = nnvm::get(attrs.parsed); - CHECK_EQ(req[0], kWriteTo) << "Sort does not support inplace"; TopKParam topk_param; topk_param.axis = param.axis; topk_param.is_ascend = param.is_ascend; topk_param.k = 0; topk_param.ret_typ = topk_enum::kReturnValue; - TopKImpl(ctx.run_ctx, ctx.requested[0], inputs[0], outputs, topk_param); + MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, { + TopKImpl(ctx.run_ctx, ctx.requested[0], req, inputs[0], outputs, topk_param); + }); } template @@ -536,26 +564,30 @@ void ArgSort(const nnvm::NodeAttrs& attrs, const std::vector& req, const std::vector& outputs) { const ArgSortParam& param = nnvm::get(attrs.parsed); - CHECK_EQ(req[0], kWriteTo) << "ArgSort does not support inplace"; TopKParam topk_param; topk_param.axis = param.axis; topk_param.is_ascend = param.is_ascend; topk_param.k = 0; + topk_param.dtype = param.dtype; topk_param.ret_typ = topk_enum::kReturnIndices; - TopKImpl(ctx.run_ctx, ctx.requested[0], inputs[0], outputs, topk_param); + MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, { + MSHADOW_TYPE_SWITCH(param.dtype, IDType, { + TopKImpl(ctx.run_ctx, + ctx.requested[0], req, inputs[0], outputs, topk_param); + }); + }); } -template -void TopKBackward_(const nnvm::NodeAttrs& attrs, - const OpContext& ctx, - const std::vector& inputs, - const std::vector& req, - const std::vector& outputs) { +template +void TopKBackwardImpl(const OpContext &ctx, + const std::vector& inputs, + const std::vector& req, + const std::vector& outputs, + const TopKParam& param) { CHECK_NE(req[0], kWriteInplace); using namespace mshadow; using namespace mshadow::expr; Stream *s = ctx.run_ctx.get_stream(); - const TopKParam& param = nnvm::get(attrs.parsed); CHECK(param.ret_typ == topk_enum::kReturnValue || param.ret_typ == topk_enum::kReturnBoth); int batch_size, element_num; // number of batches + the size of each batch int axis = 0; @@ -565,23 +597,28 @@ void TopKBackward_(const nnvm::NodeAttrs& attrs, TShape target_shape; ParseTopKParam(outputs[0].shape_, param, &target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, 
&is_ascend); - Tensor workspace = - ctx.requested[0].get_space_typed(Shape1(batch_size * k * 2 + batch_size), s); - Tensor sel_indices = - Tensor(workspace.dptr_, Shape1(batch_size * k), s); - Tensor batch_shift = - Tensor(workspace.dptr_ + batch_size * k, Shape1(batch_size), s); - Tensor dummy_index = - Tensor(workspace.dptr_ + batch_size * k + batch_size, + CHECK_LE(element_num, mxnet::common::MaxIntegerValue()) + << "'IDType' does not have a sufficient precision to represent the indices of the input array. " + << "The total element_num is " << element_num << ", but the selected IDType can only represent " + << mxnet::common::MaxIntegerValue() << " elements"; + Tensor workspace = + ctx.requested[0].get_space_typed(Shape1(batch_size * k * 2 + batch_size), s); + Tensor sel_indices = + Tensor(workspace.dptr_, Shape1(batch_size * k), s); + Tensor batch_shift = + Tensor(workspace.dptr_ + batch_size * k, Shape1(batch_size), s); + Tensor dummy_index = + Tensor(workspace.dptr_ + batch_size * k + batch_size, Shape1(batch_size * k), s); - Tensor out_grad = - inputs[0].get_with_shape(Shape2(inputs[0].shape_.Size(), 1), s); - Tensor in_grad = - outputs[0].get_with_shape(Shape2(outputs[0].shape_.Size(), 1), s); - mxnet_op::Kernel::Launch(s, batch_size, 1, 0.0f, - static_cast(element_num), kWriteTo, batch_shift.dptr_); + + Tensor out_grad = + inputs[0].get_with_shape(Shape2(inputs[0].shape_.Size(), 1), s); + Tensor in_grad = + outputs[0].get_with_shape(Shape2(outputs[0].shape_.Size(), 1), s); + mxnet_op::Kernel::Launch(s, batch_size, 1, 0, element_num, kWriteTo, + batch_shift.dptr_); if (do_transpose) { - Tensor indices = inputs[2].FlatTo1D(s); + Tensor indices = inputs[2].FlatTo1D(s); TShape src_shape = outputs[0].shape_.FlatTo3D(axis); sel_indices = reshape(transpose( broadcast_to(inplace_reshape(batch_shift, @@ -589,26 +626,26 @@ void TopKBackward_(const nnvm::NodeAttrs& attrs, TShape(Shape3(src_shape[0], src_shape[2], k))), Shape3(0, 2, 1)), Shape1(batch_size * k)); - sel_indices += indices; + sel_indices += tcast(indices); sel_indices = transpose_indices(sel_indices, Shape3(src_shape[0], src_shape[2], src_shape[1]), Shape3(0, 2, 1)); } else { - Tensor indices = - inputs[2].get_with_shape(Shape2(batch_size, k), s); - sel_indices = reshape(indices + + Tensor indices = + inputs[2].get_with_shape(Shape2(batch_size, k), s); + sel_indices = reshape(tcast(indices) + broadcast_to(inplace_reshape(batch_shift, Shape2(batch_size, 1)), TShape(Shape2(batch_size, k))), Shape1(batch_size * k)); } CHECK_EQ(sel_indices.CheckContiguous(), true); if (kWriteTo == req[0]) { - in_grad = scalar(0); + in_grad = scalar(0); IndexFill(in_grad, sel_indices, out_grad); } else if (kAddTo == req[0]) { // TODO(sxjscience) We can use AddTakeGrad in the future. // However, the current implementation of AddTakeGrad is not so efficient. 
- mxnet_op::Kernel::Launch(s, sel_indices.shape_.Size(), 1, 0.0f, - 1.0f, kWriteTo, dummy_index.dptr_); + mxnet_op::Kernel::Launch(s, sel_indices.shape_.Size(), 1, 0, 1, kWriteTo, + dummy_index.dptr_); mxnet::op::AddTakeGradLargeBatch(in_grad, sel_indices, dummy_index, out_grad); } else if (kNullOp == req[0]) { return; @@ -617,6 +654,28 @@ void TopKBackward_(const nnvm::NodeAttrs& attrs, } } +template +void TopKBackward_(const nnvm::NodeAttrs& attrs, + const OpContext& ctx, + const std::vector& inputs, + const std::vector& req, + const std::vector& outputs) { + const TopKParam& param = nnvm::get(attrs.parsed); + if (param.ret_typ == topk_enum::kReturnBoth) { + MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, { + MSHADOW_TYPE_SWITCH(param.dtype, IDType, { + TopKBackwardImpl(ctx, inputs, req, outputs, param); + }); + }); + } else if (param.ret_typ == topk_enum::kReturnValue) { + MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, { + TopKBackwardImpl(ctx, inputs, req, outputs, param); + }); + } else { + LOG(FATAL) << "Not Implemented"; + } +} + inline uint32_t TopKNumOutputs(const NodeAttrs& attrs) { const TopKParam& param = nnvm::get(attrs.parsed); if (param.ret_typ == topk_enum::kReturnIndices || @@ -639,8 +698,36 @@ inline uint32_t TopKNumVisibleOutputs(const NodeAttrs& attrs) { inline bool TopKType(const nnvm::NodeAttrs& attrs, std::vector *in_attrs, std::vector *out_attrs) { - return ElemwiseAttr( - attrs, in_attrs, out_attrs, -1); + const TopKParam& param = nnvm::get(attrs.parsed); + int data_type = -1; + size_t in_size = in_attrs->size(); + size_t out_size = out_attrs->size(); + CHECK_EQ(in_size, 1); + CHECK(out_size == 1 || out_size == 2); + if (out_size > 1) { + if (param.ret_typ == topk_enum::kReturnValue) { + CHECK(type_assign(&(*out_attrs)[1], mshadow::kInt32)) + << "Failed to set the type of ret_indices."; + } else { + CHECK(type_assign(&(*out_attrs)[1], param.dtype)) + << "Failed to set the type of ret_indices."; + } + } + if (param.ret_typ == topk_enum::kReturnIndices) { + CHECK(type_assign(&(*out_attrs)[0], param.dtype)) + << "Failed to set the type of ret_indices."; + } else { + CHECK(type_assign(&data_type, (*in_attrs)[0])) << "Incompatible dtype of input, in_attrs[0]=" + << (*in_attrs)[0]; + CHECK(type_assign(&data_type, (*out_attrs)[0])) << "Incompatible dtype of output, out_attrs[0]=" + << (*out_attrs)[0]; + CHECK(type_assign(&(*in_attrs)[0], data_type)) << "Incompatible dtype of input, in_attrs[0]=" + << (*in_attrs)[0]; + CHECK(type_assign(&(*out_attrs)[0], data_type)) << "Incompatible dtype of output, out_attrs[0]=" + << (*out_attrs)[0]; + if (data_type == -1) return false; + } + return true; } inline bool TopKShapeImpl(const TopKParam& param, @@ -679,6 +766,28 @@ inline bool TopKShape(const nnvm::NodeAttrs& attrs, return TopKShapeImpl(param, in_attrs, out_attrs); } +inline bool SortType(const nnvm::NodeAttrs& attrs, + std::vector *in_attrs, + std::vector *out_attrs) { + int data_type = -1; + size_t in_size = in_attrs->size(); + size_t out_size = out_attrs->size(); + CHECK_EQ(in_size, 1); + CHECK_EQ(out_size, 2); + CHECK(type_assign(&(*out_attrs)[1], mshadow::kInt32)) + << "Failed to set the type of ret_indices to int32."; + CHECK(type_assign(&data_type, (*in_attrs)[0])) << "Incompatible dtype of input, in_attrs[0]=" + << (*in_attrs)[0]; + CHECK(type_assign(&data_type, (*out_attrs)[0])) << "Incompatible dtype of output, out_attrs[0]=" + << (*out_attrs)[0]; + CHECK(type_assign(&(*in_attrs)[0], data_type)) << "Incompatible dtype of input, in_attrs[0]=" + 
<< (*in_attrs)[0]; + CHECK(type_assign(&(*out_attrs)[0], data_type)) << "Incompatible dtype of output, out_attrs[0]=" + << (*out_attrs)[0]; + if (data_type == -1) return false; + return true; +} + inline bool SortShape(const nnvm::NodeAttrs& attrs, std::vector *in_attrs, std::vector *out_attrs) { @@ -691,6 +800,15 @@ inline bool SortShape(const nnvm::NodeAttrs& attrs, return TopKShapeImpl(topk_param, in_attrs, out_attrs); } +inline bool ArgSortType(const nnvm::NodeAttrs& attrs, + std::vector *in_attrs, + std::vector *out_attrs) { + const ArgSortParam& param = nnvm::get(attrs.parsed); + CHECK(type_assign(&(*out_attrs)[0], param.dtype)) + << "Failed to set the type of ret_indices to int32."; + return true; +} + inline bool ArgSortShape(const nnvm::NodeAttrs& attrs, std::vector *in_attrs, std::vector *out_attrs) { diff --git a/src/operator/tensor/ordering_op.cc b/src/operator/tensor/ordering_op.cc index ebd7c62ec886..1e2832d3763e 100644 --- a/src/operator/tensor/ordering_op.cc +++ b/src/operator/tensor/ordering_op.cc @@ -35,6 +35,7 @@ DMLC_REGISTER_PARAMETER(ArgSortParam); NNVM_REGISTER_OP(topk) .describe(R"code(Returns the top *k* elements in an input array along the given axis. + The returned elements will be sorted. Examples:: @@ -128,7 +129,7 @@ Examples:: .set_num_outputs(2) .set_attr_parser(ParamParser) .set_attr("FInferShape", SortShape) -.set_attr("FInferType", ElemwiseType<1, 2>) +.set_attr("FInferType", SortType) .set_attr("FNumVisibleOutputs", [](const NodeAttrs& attrs) { return 1; }) .set_attr("FCompute", Sort) .set_attr("FGradient", @@ -178,7 +179,7 @@ Examples:: .set_num_outputs(1) .set_attr_parser(ParamParser) .set_attr("FInferShape", ArgSortShape) -.set_attr("FInferType", ElemwiseType<1, 1>) +.set_attr("FInferType", ArgSortType) .set_attr("FCompute", ArgSort) .set_attr("FGradient", MakeZeroGradNodes) .set_attr("FResourceRequest", diff --git a/tests/python/unittest/test_ndarray.py b/tests/python/unittest/test_ndarray.py index 2db39d5dd538..c9bc0cd1e1e4 100644 --- a/tests/python/unittest/test_ndarray.py +++ b/tests/python/unittest/test_ndarray.py @@ -661,17 +661,19 @@ def gt_topk(dat, axis, ret_typ, k, is_ascend): # values, making it hard to generate a numpy 'golden copy' to compare against # the mxnet operator. The 'mask' function is particularly hard to test given that # equal values might span the 'k' boundary. Issue exposed with seed 1405838964. 
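# ---------------------------------------------------------------------------
# Editor's illustration (not part of the patch): a minimal sketch of the new
# `dtype` argument this change adds to topk/argsort for the returned indices.
# Shapes and values below are made up; the accepted dtypes are the ones the
# patch registers (uint8, int32, float16, float32, float64). Motivation:
# float32 stores integers exactly only up to 2**24 = 16,777,216, so for a
# sorted axis (or a flattened array when axis=None) longer than that, the
# default float32 indices would be rounded; the patch therefore also adds a
# check that raises an error when the chosen index dtype cannot represent
# every position exactly.
import mxnet as mx
import numpy as np

data = mx.nd.array(np.random.normal(size=(4, 1000)))
# Ask for integer indices instead of the float32 default.
top5_idx = mx.nd.topk(data, axis=1, k=5, ret_typ="indices", dtype=np.int32)
vals, idx = mx.nd.topk(data, axis=1, k=5, ret_typ="both", dtype=np.int32)
order = mx.nd.argsort(data, axis=1, is_ascend=True, dtype=np.float64)
# ---------------------------------------------------------------------------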
- def get_values(ensure_unique): - while True: - data = np.float32(np.random.normal(size=(dat_size, dat_size, dat_size, dat_size))) - if not ensure_unique: - return data - num_unique_values = len(set(data.flatten())) - if data.size == num_unique_values: - return data - - a_npy = get_values(ensure_unique=True) - a_nd = mx.nd.array(a_npy, ctx=ctx) + def get_values(ensure_unique, dtype): + if dtype == np.int16 or dtype == np.int32 or dtype == np.int64: + return np.arange(dat_size ** 4, dtype=dtype).reshape((dat_size, dat_size, dat_size, dat_size)) + elif dtype == np.float32 or dtype == np.float64: + while True: + data = np.random.normal(size=(dat_size, dat_size, dat_size, dat_size)).astype(dtype) + if not ensure_unique: + return data + num_unique_values = len(set(data.flatten())) + if data.size == num_unique_values: + return data + else: + raise NotImplementedError # Produce a large matrix (256, 300096) as the input data, to cover the case which # has a large size of matrix (exceed the express range by float precisly), but @@ -685,103 +687,161 @@ def get_large_matrix(): large_matrix_npy = get_large_matrix() large_matrix_nd = mx.nd.array(large_matrix_npy, ctx=ctx) - # test for ret_typ=indices - nd_ret_topk = mx.nd.topk(a_nd, axis=1, ret_typ="indices", k=3, is_ascend=True).asnumpy() - gt = gt_topk(a_npy, axis=1, ret_typ="indices", k=3, is_ascend=True) - assert_almost_equal(nd_ret_topk, gt) - nd_ret_topk = mx.nd.topk(a_nd, axis=3, ret_typ="indices", k=2, is_ascend=False).asnumpy() - gt = gt_topk(a_npy, axis=3, ret_typ="indices", k=2, is_ascend=False) - assert_almost_equal(nd_ret_topk, gt) - nd_ret_topk = mx.nd.topk(a_nd, axis=None, ret_typ="indices", k=21, is_ascend=False).asnumpy() - gt = gt_topk(a_npy, axis=None, ret_typ="indices", k=21, is_ascend=False) - assert_almost_equal(nd_ret_topk, gt) nd_ret_topk = mx.nd.topk(large_matrix_nd, axis=1, ret_typ="indices", k=5, is_ascend=False).asnumpy() gt = gt_topk(large_matrix_npy, axis=1, ret_typ="indices", k=5, is_ascend=False) assert_almost_equal(nd_ret_topk, gt) - # test for ret_typ=value - nd_ret_topk = mx.nd.topk(a_nd, axis=1, ret_typ="value", k=3, is_ascend=True).asnumpy() - gt = gt_topk(a_npy, axis=1, ret_typ="value", k=3, is_ascend=True) - assert_almost_equal(nd_ret_topk, gt) - nd_ret_topk = mx.nd.topk(a_nd, axis=3, ret_typ="value", k=2, is_ascend=False).asnumpy() - gt = gt_topk(a_npy, axis=3, ret_typ="value", k=2, is_ascend=False) - assert_almost_equal(nd_ret_topk, gt) - nd_ret_topk = mx.nd.topk(a_nd, axis=None, ret_typ="value", k=21, is_ascend=False).asnumpy() - gt = gt_topk(a_npy, axis=None, ret_typ="value", k=21, is_ascend=False) - assert_almost_equal(nd_ret_topk, gt) - nd_ret_topk = mx.nd.topk(large_matrix_nd, axis=0, ret_typ="value", k=3, is_ascend=False).asnumpy() - gt = gt_topk(large_matrix_npy, axis=0, ret_typ="value", k=3, is_ascend=False) - assert_almost_equal(nd_ret_topk, gt) - nd_ret_topk = mx.nd.topk(large_matrix_nd, axis=1, ret_typ="value", k=5, is_ascend=False).asnumpy() - gt = gt_topk(large_matrix_npy, axis=1, ret_typ="value", k=5, is_ascend=False) - assert_almost_equal(nd_ret_topk, gt) - - # test for ret_typ=mask - nd_ret_topk = mx.nd.topk(a_nd, axis=1, ret_typ="mask", k=3, is_ascend=True).asnumpy() - gt = gt_topk(a_npy, axis=1, ret_typ="mask", k=3, is_ascend=True) - assert_almost_equal(nd_ret_topk, gt) - nd_ret_topk = mx.nd.topk(a_nd, axis=1, ret_typ="mask", k=2, is_ascend=False).asnumpy() - gt = gt_topk(a_npy, axis=1, ret_typ="mask", k=2, is_ascend=False) - assert_almost_equal(nd_ret_topk, gt) - nd_ret_topk = 
mx.nd.topk(a_nd, axis=None, ret_typ="mask", k=21, is_ascend=False).asnumpy() - gt = gt_topk(a_npy, axis=None, ret_typ="mask", k=21, is_ascend=False) - assert_almost_equal(nd_ret_topk, gt) - - # test for ret_typ=both - nd_ret_topk_val, nd_ret_topk_ind = mx.nd.topk(a_nd, axis=1, ret_typ="both", k=3, is_ascend=True) - nd_ret_topk_val = nd_ret_topk_val.asnumpy() - nd_ret_topk_ind = nd_ret_topk_ind.asnumpy() - gt_val = gt_topk(a_npy, axis=1, ret_typ="value", k=3, is_ascend=True) - gt_ind = gt_topk(a_npy, axis=1, ret_typ="indices", k=3, is_ascend=True) - assert_almost_equal(nd_ret_topk_val, gt_val) - assert_almost_equal(nd_ret_topk_ind, gt_ind) - - # test for sort - nd_ret_sort = mx.nd.sort(a_nd, axis=1, is_ascend=True).asnumpy() - gt = gt_topk(a_npy, axis=1, ret_typ="value", k=dat_size, is_ascend=True) - assert_almost_equal(nd_ret_sort, gt) - nd_ret_sort = mx.nd.sort(a_nd, axis=None, is_ascend=False).asnumpy() - gt = gt_topk(a_npy, axis=None, ret_typ="value", - k=dat_size*dat_size*dat_size*dat_size, is_ascend=False) - assert_almost_equal(nd_ret_sort, gt) - - # test for argsort - nd_ret_argsort = mx.nd.argsort(a_nd, axis=3, is_ascend=True).asnumpy() - gt = gt_topk(a_npy, axis=3, ret_typ="indices", k=dat_size, is_ascend=True) - assert_almost_equal(nd_ret_argsort, gt) - nd_ret_argsort = mx.nd.argsort(a_nd, axis=None, is_ascend=False).asnumpy() - gt = gt_topk(a_npy, axis=None, ret_typ="indices", - k=dat_size*dat_size*dat_size*dat_size, is_ascend=False) - assert_almost_equal(nd_ret_argsort, gt) - - a = mx.nd.arange(0, 1024, step=1, repeat=1) - assert_almost_equal(a.topk(k=1024).asnumpy(), a.asnumpy()[::-1]) + for dtype in [np.int16, np.int32, np.int64, np.float32, np.float64]: + a_npy = get_values(ensure_unique=True, dtype=dtype) + a_nd = mx.nd.array(a_npy, ctx=ctx) + + # test for ret_typ=indices + nd_ret_topk = mx.nd.topk(a_nd, axis=1, ret_typ="indices", k=3, is_ascend=True).asnumpy() + gt = gt_topk(a_npy, axis=1, ret_typ="indices", k=3, is_ascend=True) + assert_almost_equal(nd_ret_topk, gt) + nd_ret_topk = mx.nd.topk(a_nd, axis=3, ret_typ="indices", k=2, is_ascend=False).asnumpy() + gt = gt_topk(a_npy, axis=3, ret_typ="indices", k=2, is_ascend=False) + assert_almost_equal(nd_ret_topk, gt) + nd_ret_topk = mx.nd.topk(a_nd, axis=None, ret_typ="indices", k=21, is_ascend=False).asnumpy() + gt = gt_topk(a_npy, axis=None, ret_typ="indices", k=21, is_ascend=False) + assert_almost_equal(nd_ret_topk, gt) + + # test for ret_typ=value + nd_ret_topk = mx.nd.topk(a_nd, axis=1, ret_typ="value", k=3, is_ascend=True).asnumpy() + gt = gt_topk(a_npy, axis=1, ret_typ="value", k=3, is_ascend=True) + assert_almost_equal(nd_ret_topk, gt) + nd_ret_topk = mx.nd.topk(a_nd, axis=3, ret_typ="value", k=2, is_ascend=False).asnumpy() + gt = gt_topk(a_npy, axis=3, ret_typ="value", k=2, is_ascend=False) + assert_almost_equal(nd_ret_topk, gt) + nd_ret_topk = mx.nd.topk(a_nd, axis=None, ret_typ="value", k=21, is_ascend=False).asnumpy() + gt = gt_topk(a_npy, axis=None, ret_typ="value", k=21, is_ascend=False) + assert_almost_equal(nd_ret_topk, gt) + + # test for ret_typ=mask + nd_ret_topk = mx.nd.topk(a_nd, axis=1, ret_typ="mask", k=3, is_ascend=True).asnumpy() + gt = gt_topk(a_npy, axis=1, ret_typ="mask", k=3, is_ascend=True) + assert_almost_equal(nd_ret_topk, gt) + nd_ret_topk = mx.nd.topk(a_nd, axis=1, ret_typ="mask", k=2, is_ascend=False).asnumpy() + gt = gt_topk(a_npy, axis=1, ret_typ="mask", k=2, is_ascend=False) + assert_almost_equal(nd_ret_topk, gt) + nd_ret_topk = mx.nd.topk(a_nd, axis=None, ret_typ="mask", k=21, 
is_ascend=False).asnumpy() + gt = gt_topk(a_npy, axis=None, ret_typ="mask", k=21, is_ascend=False) + assert_almost_equal(nd_ret_topk, gt) + + # test for ret_typ=both + nd_ret_topk_val, nd_ret_topk_ind = mx.nd.topk(a_nd, axis=1, ret_typ="both", k=3, is_ascend=True) + nd_ret_topk_val = nd_ret_topk_val.asnumpy() + nd_ret_topk_ind = nd_ret_topk_ind.asnumpy() + gt_val = gt_topk(a_npy, axis=1, ret_typ="value", k=3, is_ascend=True) + gt_ind = gt_topk(a_npy, axis=1, ret_typ="indices", k=3, is_ascend=True) + assert_almost_equal(nd_ret_topk_val, gt_val) + assert_almost_equal(nd_ret_topk_ind, gt_ind) + # test for kNullOp + _, nd_ret_topk_ind = mx.nd.topk(a_nd, axis=1, ret_typ="both", k=3, is_ascend=True) + nd_ret_topk_ind = nd_ret_topk_ind.asnumpy() + assert_almost_equal(nd_ret_topk_ind, gt_ind) + # test for kNullOp + nd_ret_topk_val, _ = mx.nd.topk(a_nd, axis=1, ret_typ="both", k=3, is_ascend=True) + nd_ret_topk_val = nd_ret_topk_val.asnumpy() + assert_almost_equal(nd_ret_topk_val, gt_val) + + # test for sort + nd_ret_sort = mx.nd.sort(a_nd, axis=1, is_ascend=True).asnumpy() + gt = gt_topk(a_npy, axis=1, ret_typ="value", k=dat_size, is_ascend=True) + assert_almost_equal(nd_ret_sort, gt) + nd_ret_sort = mx.nd.sort(a_nd, axis=None, is_ascend=False).asnumpy() + gt = gt_topk(a_npy, axis=None, ret_typ="value", + k=dat_size*dat_size*dat_size*dat_size, is_ascend=False) + assert_almost_equal(nd_ret_sort, gt) + + # test for argsort + for idtype in [np.int32, np.float16, np.float32, np.float64]: + nd_ret_argsort = mx.nd.argsort(a_nd, axis=3, is_ascend=True, dtype=idtype).asnumpy() + gt = gt_topk(a_npy, axis=3, ret_typ="indices", k=dat_size, is_ascend=True) + assert_almost_equal(nd_ret_argsort, gt) + nd_ret_argsort = mx.nd.argsort(a_nd, axis=None, is_ascend=False, dtype=idtype).asnumpy() + gt = gt_topk(a_npy, axis=None, ret_typ="indices", + k=dat_size*dat_size*dat_size*dat_size, is_ascend=False) + assert_almost_equal(nd_ret_argsort, gt) + + # Repeat those tests that don't involve indices. These should pass even with + # duplicated input data values (over many repeated runs with different random seeds, + # this will be tested). 
+ a_npy = get_values(ensure_unique=False, dtype=dtype) + a_nd = mx.nd.array(a_npy, ctx=ctx) + + # test for ret_typ=value + nd_ret_topk = mx.nd.topk(a_nd, axis=1, ret_typ="value", k=3, is_ascend=True).asnumpy() + gt = gt_topk(a_npy, axis=1, ret_typ="value", k=3, is_ascend=True) + assert_almost_equal(nd_ret_topk, gt) + nd_ret_topk = mx.nd.topk(a_nd, axis=3, ret_typ="value", k=2, is_ascend=False).asnumpy() + gt = gt_topk(a_npy, axis=3, ret_typ="value", k=2, is_ascend=False) + assert_almost_equal(nd_ret_topk, gt) + nd_ret_topk = mx.nd.topk(a_nd, axis=None, ret_typ="value", k=21, is_ascend=False).asnumpy() + gt = gt_topk(a_npy, axis=None, ret_typ="value", k=21, is_ascend=False) + assert_almost_equal(nd_ret_topk, gt) + + # test for sort + nd_ret_sort = mx.nd.sort(a_nd, axis=1, is_ascend=True).asnumpy() + gt = gt_topk(a_npy, axis=1, ret_typ="value", k=dat_size, is_ascend=True) + assert_almost_equal(nd_ret_sort, gt) + nd_ret_sort = mx.nd.sort(a_nd, axis=None, is_ascend=False).asnumpy() + gt = gt_topk(a_npy, axis=None, ret_typ="value", + k=dat_size*dat_size*dat_size*dat_size, is_ascend=False) + assert_almost_equal(nd_ret_sort, gt) + + a = mx.nd.arange(0, 1024, step=1, repeat=1, dtype=np.int32) + assert_almost_equal(a.topk(k=1024, dtype=np.int32).asnumpy(), a.asnumpy()[::-1]) + a.attach_grad() + + k = 10 + with mx.autograd.record(): + b = mx.nd.topk(a, k=k, ret_typ='value') + b.backward(mx.nd.ones((k,), dtype=np.int32)) + a_grad = a.grad.asnumpy() + for i in range(-1, - k - 1, -1): + assert a_grad[i] == 1 + + # test topk gradient with a small shape + for dtype in [np.int32, np.int64, np.float32, np.float64]: + a = mx.nd.arange(0, 1000, step=1, repeat=1, dtype=dtype) + a.attach_grad() + k = 10 + ograd = mx.nd.arange(0, k, dtype=dtype) + with mx.autograd.record(): + b = mx.nd.topk(a, k=k, ret_typ='value') + b.backward(ograd) + a_grad = a.grad.asnumpy() + ograd_npy = ograd.asnumpy() + for i in range(-1, - k - 1, -1): + assert a_grad[i] == ograd_npy[-i - 1] # Repeat those tests that don't involve indices. These should pass even with # duplicated input data values (over many repeated runs with different random seeds, # this will be tested). 
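# ---------------------------------------------------------------------------
# Editor's illustration (not part of the patch): the gradient test above
# checks that the now dtype-generic TopKBackwardImpl scatters the output
# gradient back onto the positions of the selected elements and writes zero
# everywhere else (the req == kWriteTo case). A NumPy sketch of that expected
# result, assuming a 1-D input with unique values and the default descending
# order:
import numpy as np

def expected_topk_grad(a, ograd, k):
    """Scatter ograd onto the positions of the k largest entries of a."""
    grad = np.zeros_like(a)
    top_idx = np.argsort(a)[::-1][:k]  # positions of the k largest values
    grad[top_idx] = ograd              # each selected slot receives its ograd
    return grad

a = np.arange(1000, dtype=np.float32)
ograd = np.arange(10, dtype=np.float32)
grad = expected_topk_grad(a, ograd, k=10)
# grad[999] == ograd[0], grad[998] == ograd[1], ..., grad[990] == ograd[9],
# which is what the `a_grad[i] == ograd_npy[-i - 1]` assertions verify.
# ---------------------------------------------------------------------------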
- a_npy = get_values(ensure_unique=False) - a_nd = mx.nd.array(a_npy, ctx=ctx) - - # test for ret_typ=value - nd_ret_topk = mx.nd.topk(a_nd, axis=1, ret_typ="value", k=3, is_ascend=True).asnumpy() - gt = gt_topk(a_npy, axis=1, ret_typ="value", k=3, is_ascend=True) - assert_almost_equal(nd_ret_topk, gt) - nd_ret_topk = mx.nd.topk(a_nd, axis=3, ret_typ="value", k=2, is_ascend=False).asnumpy() - gt = gt_topk(a_npy, axis=3, ret_typ="value", k=2, is_ascend=False) - assert_almost_equal(nd_ret_topk, gt) - nd_ret_topk = mx.nd.topk(a_nd, axis=None, ret_typ="value", k=21, is_ascend=False).asnumpy() - gt = gt_topk(a_npy, axis=None, ret_typ="value", k=21, is_ascend=False) - assert_almost_equal(nd_ret_topk, gt) - - # test for sort - nd_ret_sort = mx.nd.sort(a_nd, axis=1, is_ascend=True).asnumpy() - gt = gt_topk(a_npy, axis=1, ret_typ="value", k=dat_size, is_ascend=True) - assert_almost_equal(nd_ret_sort, gt) - nd_ret_sort = mx.nd.sort(a_nd, axis=None, is_ascend=False).asnumpy() - gt = gt_topk(a_npy, axis=None, ret_typ="value", - k=dat_size*dat_size*dat_size*dat_size, is_ascend=False) - assert_almost_equal(nd_ret_sort, gt) + for dtype in [np.int16, np.int32, np.int64, np.float32, np.float64]: + a_npy = get_values(ensure_unique=False, dtype=dtype) + a_nd = mx.nd.array(a_npy, ctx=ctx) + + # test for ret_typ=value + nd_ret_topk = mx.nd.topk(a_nd, axis=1, ret_typ="value", k=3, is_ascend=True).asnumpy() + gt = gt_topk(a_npy, axis=1, ret_typ="value", k=3, is_ascend=True) + assert_almost_equal(nd_ret_topk, gt) + nd_ret_topk = mx.nd.topk(a_nd, axis=3, ret_typ="value", k=2, is_ascend=False).asnumpy() + gt = gt_topk(a_npy, axis=3, ret_typ="value", k=2, is_ascend=False) + assert_almost_equal(nd_ret_topk, gt) + nd_ret_topk = mx.nd.topk(a_nd, axis=None, ret_typ="value", k=21, is_ascend=False).asnumpy() + gt = gt_topk(a_npy, axis=None, ret_typ="value", k=21, is_ascend=False) + assert_almost_equal(nd_ret_topk, gt) + + # test for sort + nd_ret_sort = mx.nd.sort(a_nd, axis=1, is_ascend=True).asnumpy() + gt = gt_topk(a_npy, axis=1, ret_typ="value", k=dat_size, is_ascend=True) + assert_almost_equal(nd_ret_sort, gt) + nd_ret_sort = mx.nd.sort(a_nd, axis=None, is_ascend=False).asnumpy() + gt = gt_topk(a_npy, axis=None, ret_typ="value", + k=dat_size*dat_size*dat_size*dat_size, is_ascend=False) + assert_almost_equal(nd_ret_sort, gt) @with_seed() def test_ndarray_equal(): From 2f177d8a318fc9c0ad1b80a77ca82eeb4ab9f28e Mon Sep 17 00:00:00 2001 From: Lanking Date: Wed, 22 Aug 2018 18:04:25 -0700 Subject: [PATCH 039/160] [MXNET-729] Use NDArrayCollector to fix memory leaks in Scala Examples (#12232) * initial fix for RNN * add CI test * ignore the test due to memory leaks * release the GAN beast * enable rnn * add collector and dispose * revert the hacky thing after rebase * rename with inference * add collector in some examples * add experimental tag and comments * change the scope of the NDArrayCollector * apply final changes... 
* fix scalastyle --- .../org/apache/mxnet/NDArrayCollector.scala | 3 + .../mxnet/annotation/Experimental.scala | 4 + .../CNNTextClassification.scala | 143 ++++++------ .../customop/ExampleCustomOp.scala | 83 +++---- .../apache/mxnetexamples/gan/GanMnist.scala | 137 +++++------ .../imclassification/TrainMnist.scala | 22 +- .../ImageClassifierExample.scala | 60 ++--- .../objectdetector/SSDClassifierExample.scala | 56 ++--- .../multitask/ExampleMultiTask.scala | 201 ++++++++-------- .../neuralstyle/NeuralStyle.scala | 175 +++++++------- .../neuralstyle/end2end/BoostInference.scala | 48 ++-- .../neuralstyle/end2end/BoostTrain.scala | 214 +++++++++--------- .../mxnetexamples/rnn/LstmBucketing.scala | 100 ++++---- .../mxnetexamples/rnn/TestCharRnn.scala | 86 +++---- .../mxnetexamples/rnn/TrainCharRnn.scala | 202 +++++++++-------- .../CNNClassifierExampleSuite.scala | 2 +- .../mxnetexamples/gan/GanExampleSuite.scala | 7 +- .../ImageClassifierExampleSuite.scala | 6 +- .../ObjectDetectorExampleSuite.scala | 8 +- .../multitask/MultiTaskSuite.scala | 19 +- .../neuralstyle/NeuralStyleSuite.scala | 2 +- .../mxnetexamples/rnn/ExampleRNNSuite.scala | 11 +- 22 files changed, 808 insertions(+), 781 deletions(-) diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/NDArrayCollector.scala b/scala-package/core/src/main/scala/org/apache/mxnet/NDArrayCollector.scala index ea21cff9ebc7..3952b73cfb06 100644 --- a/scala-package/core/src/main/scala/org/apache/mxnet/NDArrayCollector.scala +++ b/scala-package/core/src/main/scala/org/apache/mxnet/NDArrayCollector.scala @@ -18,6 +18,7 @@ package org.apache.mxnet import org.apache.mxnet.Base.CPtrAddress +import org.apache.mxnet.annotation.Experimental import org.slf4j.LoggerFactory import scala.annotation.varargs @@ -80,6 +81,7 @@ object NDArrayCollector { * Create a collector allows users to later dispose the collected NDArray manually. * @return a manually-disposable collector. */ + @Experimental def manual(): NDArrayCollector = new NDArrayCollector(false) /** @@ -135,6 +137,7 @@ class NDArrayCollector private(private val autoDispose: Boolean = true, * @tparam T return type of the function codeBlock. * @return The result of function codeBlock. 
*/ + @Experimental def withScope[T](codeBlock: => T): T = { val old = NDArrayCollector.currCollector.get() NDArrayCollector.currCollector.set(this) diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/annotation/Experimental.scala b/scala-package/core/src/main/scala/org/apache/mxnet/annotation/Experimental.scala index 33d1d3309794..147d651fb04f 100644 --- a/scala-package/core/src/main/scala/org/apache/mxnet/annotation/Experimental.scala +++ b/scala-package/core/src/main/scala/org/apache/mxnet/annotation/Experimental.scala @@ -19,6 +19,10 @@ package org.apache.mxnet.annotation import java.lang.annotation.{ElementType, Retention, Target, _} +/** + * Experimental: there is a comparably high chance that + * the API will undergo some kind of changes + */ @Retention(RetentionPolicy.RUNTIME) @Target(Array(ElementType.TYPE, ElementType.FIELD, ElementType.METHOD, ElementType.PARAMETER, ElementType.CONSTRUCTOR, ElementType.LOCAL_VARIABLE, ElementType.PACKAGE)) diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/cnntextclassification/CNNTextClassification.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/cnntextclassification/CNNTextClassification.scala index 674c81459f02..7745043b23d8 100644 --- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/cnntextclassification/CNNTextClassification.scala +++ b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/cnntextclassification/CNNTextClassification.scala @@ -18,7 +18,7 @@ package org.apache.mxnetexamples.cnntextclassification import org.apache.mxnet.optimizer.RMSProp -import org.apache.mxnet.{Context, Executor, Model, NDArray, Optimizer, Shape, Symbol, Uniform} +import org.apache.mxnet.{Context, Executor, Model, NDArray, NDArrayCollector, Optimizer, Shape, Symbol, Uniform} import org.kohsuke.args4j.{CmdLineParser, Option} import org.slf4j.LoggerFactory @@ -131,56 +131,58 @@ object CNNTextClassification { numTotal = 0f updateRate = 0 - for (begin <- 0 until trainBatches.length by batchSize) { - val (batchD, batchL) = { - if (begin + batchSize <= trainBatches.length) { - val datas = trainBatches.drop(begin).take(batchSize) - val labels = trainLabels.drop(begin).take(batchSize) - (datas, labels) - } else { - val right = (begin + batchSize) - trainBatches.length - val left = trainBatches.length - begin - val datas = trainBatches.drop(begin).take(left) ++ trainBatches.take(right) - val labels = trainLabels.drop(begin).take(left) ++ trainLabels.take(right) - (datas, labels) + NDArrayCollector.auto().withScope { + for (begin <- 0 until trainBatches.length by batchSize) { + val (batchD, batchL) = { + if (begin + batchSize <= trainBatches.length) { + val datas = trainBatches.drop(begin).take(batchSize) + val labels = trainLabels.drop(begin).take(batchSize) + (datas, labels) + } else { + val right = (begin + batchSize) - trainBatches.length + val left = trainBatches.length - begin + val datas = trainBatches.drop(begin).take(left) ++ trainBatches.take(right) + val labels = trainLabels.drop(begin).take(left) ++ trainLabels.take(right) + (datas, labels) + } + } + numTotal += batchSize + model.data.set(batchD.flatten.flatten) + model.label.set(batchL) + + model.cnnExec.forward(isTrain = true) + model.cnnExec.backward() + + val tmpCorrect = { + val predLabel = NDArray.api.argmax_channel(model.cnnExec.outputs(0)) + val result = predLabel.toArray.zip(batchL).map { predLabel => + if (predLabel._1 == predLabel._2) 1 + else 0 + }.sum.toFloat + predLabel.dispose() + result } - } - numTotal += 
batchSize - model.data.set(batchD.flatten.flatten) - model.label.set(batchL) - - model.cnnExec.forward(isTrain = true) - model.cnnExec.backward() - - val tmpCorrect = { - val predLabel = NDArray.api.argmax_channel(model.cnnExec.outputs(0)) - val result = predLabel.toArray.zip(batchL).map { predLabel => - if (predLabel._1 == predLabel._2) 1 - else 0 - }.sum.toFloat - predLabel.dispose() - result - } - numCorrect = numCorrect + tmpCorrect - val norm = Math.sqrt(paramBlocks.map { case (idx, weight, grad, state, name) => - val temp = NDArray.api.norm(grad / batchSize).disposeDepsExcept(grad) - val l2Norm = temp.toScalar - temp.dispose() - l2Norm * l2Norm - }.sum).toFloat - - if (updateRate % 2 == 0) { - paramBlocks.foreach { case (idx, weight, grad, state, name) => - if (norm > maxGradNorm) { - grad.set(grad.toArray.map(_ * (maxGradNorm / norm))) - opt.update(idx, weight, grad, state) + numCorrect = numCorrect + tmpCorrect + val norm = Math.sqrt(paramBlocks.map { case (idx, weight, grad, state, name) => + val temp = NDArray.api.norm(grad / batchSize).disposeDepsExcept(grad) + val l2Norm = temp.toScalar + temp.dispose() + l2Norm * l2Norm + }.sum).toFloat + + if (updateRate % 2 == 0) { + paramBlocks.foreach { case (idx, weight, grad, state, name) => + if (norm > maxGradNorm) { + grad.set(grad.toArray.map(_ * (maxGradNorm / norm))) + opt.update(idx, weight, grad, state) + } + else opt.update(idx, weight, grad, state) + grad.set(0f) } - else opt.update(idx, weight, grad, state) - grad.set(0f) } + updateRate = updateRate + 1 } - updateRate = updateRate + 1 } // decay learning rate @@ -237,30 +239,33 @@ object CNNTextClassification { def test(w2vFilePath : String, mrDatasetPath: String, ctx : Context, saveModelPath: String) : Float = { - val (numEmbed, word2vec) = DataHelper.loadGoogleModel(w2vFilePath) - val (datas, labels) = DataHelper.loadMSDataWithWord2vec( - mrDatasetPath, numEmbed, word2vec) - // randomly shuffle data - val randIdx = Random.shuffle((0 until datas.length).toList) - // split train/dev set - val (trainDats, devDatas) = { - val train = randIdx.dropRight(1000).map(datas(_)).toArray - val dev = randIdx.takeRight(1000).map(datas(_)).toArray - (train, dev) - } - val (trainLabels, devLabels) = { - val train = randIdx.dropRight(1000).map(labels(_)).toArray - val dev = randIdx.takeRight(1000).map(labels(_)).toArray - (train, dev) + val output = NDArrayCollector.auto().withScope { + val (numEmbed, word2vec) = DataHelper.loadGoogleModel(w2vFilePath) + val (datas, labels) = DataHelper.loadMSDataWithWord2vec( + mrDatasetPath, numEmbed, word2vec) + // randomly shuffle data + val randIdx = Random.shuffle((0 until datas.length).toList) + // split train/dev set + val (trainDats, devDatas) = { + val train = randIdx.dropRight(1000).map(datas(_)).toArray + val dev = randIdx.takeRight(1000).map(datas(_)).toArray + (train, dev) + } + val (trainLabels, devLabels) = { + val train = randIdx.dropRight(1000).map(labels(_)).toArray + val dev = randIdx.takeRight(1000).map(labels(_)).toArray + (train, dev) + } + // reshpae for convolution input + val sentenceSize = datas(0).length + val batchSize = 100 + val lr = 0.001f + val cnnModel = setupCnnModel(ctx, batchSize, sentenceSize, numEmbed) + val result = trainCNN(cnnModel, trainDats, trainLabels, devDatas, devLabels, batchSize, + saveModelPath, learningRate = lr) + result } - // reshpae for convolution input - val sentenceSize = datas(0).length - val batchSize = 100 - val lr = 0.001f - val cnnModel = setupCnnModel(ctx, batchSize, sentenceSize, numEmbed) - 
val result = trainCNN(cnnModel, trainDats, trainLabels, devDatas, devLabels, batchSize, - saveModelPath, learningRate = lr) - result + output } def main(args: Array[String]): Unit = { diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/customop/ExampleCustomOp.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/customop/ExampleCustomOp.scala index a4b347959bfe..df79f5b63769 100644 --- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/customop/ExampleCustomOp.scala +++ b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/customop/ExampleCustomOp.scala @@ -19,7 +19,7 @@ package org.apache.mxnetexamples.customop import org.apache.mxnet.Callback.Speedometer import org.apache.mxnet.DType.DType -import org.apache.mxnet.{Accuracy, Context, CustomOp, CustomOpProp, NDArray, Operator, Shape, Symbol, Xavier} +import org.apache.mxnet.{Accuracy, Context, CustomOp, CustomOpProp, NDArray, NDArrayCollector, Operator, Shape, Symbol, Xavier} import org.apache.mxnet.optimizer.RMSProp import org.kohsuke.args4j.{CmdLineParser, Option} import org.slf4j.LoggerFactory @@ -141,49 +141,50 @@ object ExampleCustomOp { evalMetric.reset() var nBatch = 0 var epochDone = false - - trainIter.reset() - while (!epochDone) { - var doReset = true - while (doReset && trainIter.hasNext) { - val dataBatch = trainIter.next() - argDict("data").set(dataBatch.data(0)) - argDict("label").set(dataBatch.label(0)) - executor.forward(isTrain = true) - executor.backward() - paramsGrads.foreach { case (idx, name, grad, optimState) => - opt.update(idx, argDict(name), grad, optimState) + NDArrayCollector.auto().withScope { + trainIter.reset() + while (!epochDone) { + var doReset = true + while (doReset && trainIter.hasNext) { + val dataBatch = trainIter.next() + argDict("data").set(dataBatch.data(0)) + argDict("label").set(dataBatch.label(0)) + executor.forward(isTrain = true) + executor.backward() + paramsGrads.foreach { case (idx, name, grad, optimState) => + opt.update(idx, argDict(name), grad, optimState) + } + evalMetric.update(dataBatch.label, executor.outputs) + nBatch += 1 + batchEndCallback.invoke(epoch, nBatch, evalMetric) + } + if (doReset) { + trainIter.reset() } - evalMetric.update(dataBatch.label, executor.outputs) - nBatch += 1 - batchEndCallback.invoke(epoch, nBatch, evalMetric) + epochDone = true } - if (doReset) { - trainIter.reset() + val (name, value) = evalMetric.get + name.zip(value).foreach { case (n, v) => + logger.info(s"Epoch[$epoch] Train-accuracy=$v") + } + val toc = System.currentTimeMillis + logger.info(s"Epoch[$epoch] Time cost=${toc - tic}") + + evalMetric.reset() + testIter.reset() + while (testIter.hasNext) { + val evalBatch = testIter.next() + argDict("data").set(evalBatch.data(0)) + argDict("label").set(evalBatch.label(0)) + executor.forward(isTrain = true) + evalMetric.update(evalBatch.label, executor.outputs) + evalBatch.dispose() + } + val (names, values) = evalMetric.get + names.zip(values).foreach { case (n, v) => + logger.info(s"Epoch[$epoch] Validation-accuracy=$v") + validationAcc = Math.max(validationAcc, v) } - epochDone = true - } - val (name, value) = evalMetric.get - name.zip(value).foreach { case (n, v) => - logger.info(s"Epoch[$epoch] Train-accuracy=$v") - } - val toc = System.currentTimeMillis - logger.info(s"Epoch[$epoch] Time cost=${toc - tic}") - - evalMetric.reset() - testIter.reset() - while (testIter.hasNext) { - val evalBatch = testIter.next() - argDict("data").set(evalBatch.data(0)) - 
argDict("label").set(evalBatch.label(0)) - executor.forward(isTrain = true) - evalMetric.update(evalBatch.label, executor.outputs) - evalBatch.dispose() - } - val (names, values) = evalMetric.get - names.zip(values).foreach { case (n, v) => - logger.info(s"Epoch[$epoch] Validation-accuracy=$v") - validationAcc = Math.max(validationAcc, v) } } executor.dispose() diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/gan/GanMnist.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/gan/GanMnist.scala index 70846eebfb8e..475d91faa0dc 100644 --- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/gan/GanMnist.scala +++ b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/gan/GanMnist.scala @@ -17,7 +17,7 @@ package org.apache.mxnetexamples.gan -import org.apache.mxnet.{Context, CustomMetric, DataBatch, IO, NDArray, Shape, Symbol, Xavier} +import org.apache.mxnet.{Context, CustomMetric, DataBatch, IO, NDArray, NDArrayCollector, Shape, Symbol, Xavier} import org.apache.mxnet.optimizer.Adam import org.kohsuke.args4j.{CmdLineParser, Option} import org.slf4j.LoggerFactory @@ -104,75 +104,80 @@ object GanMnist { def runTraining(dataPath : String, context : Context, outputPath : String, numEpoch : Int): Float = { - val lr = 0.0005f - val beta1 = 0.5f - val batchSize = 100 - val randShape = Shape(batchSize, 100) - val dataShape = Shape(batchSize, 1, 28, 28) - - val (symGen, symDec) = - makeDcganSym(oShape = dataShape, ngf = 32, finalAct = "sigmoid") - - val gMod = new GANModule( - symGen, - symDec, - context = context, - dataShape = dataShape, - codeShape = randShape) - - gMod.initGParams(new Xavier(factorType = "in", magnitude = 2.34f)) - gMod.initDParams(new Xavier(factorType = "in", magnitude = 2.34f)) - - gMod.initOptimizer(new Adam(learningRate = lr, wd = 0f, beta1 = beta1)) - - val params = Map( - "image" -> s"$dataPath/train-images-idx3-ubyte", - "label" -> s"$dataPath/train-labels-idx1-ubyte", - "input_shape" -> s"(1, 28, 28)", - "batch_size" -> s"$batchSize", - "shuffle" -> "True" - ) - - val mnistIter = IO.MNISTIter(params) - - val metricAcc = new CustomMetric(ferr, "ferr") - - var t = 0 - var dataBatch: DataBatch = null - var acc = 0.0f - for (epoch <- 0 until numEpoch) { - mnistIter.reset() - metricAcc.reset() - t = 0 - while (mnistIter.hasNext) { - dataBatch = mnistIter.next() - gMod.update(dataBatch) - gMod.dLabel.set(0f) - metricAcc.update(Array(gMod.dLabel), gMod.outputsFake) - gMod.dLabel.set(1f) - metricAcc.update(Array(gMod.dLabel), gMod.outputsReal) - - if (t % 50 == 0) { - val (name, value) = metricAcc.get - acc = value(0) - logger.info(s"epoch: $epoch, iter $t, metric=${value.mkString(" ")}") - Viz.imSave("gout", outputPath, gMod.tempOutG(0), flip = true) - val diff = gMod.tempDiffD - val arr = diff.toArray - val mean = arr.sum / arr.length - val std = { - val tmpA = arr.map(a => (a - mean) * (a - mean)) - Math.sqrt(tmpA.sum / tmpA.length).toFloat + val output = NDArrayCollector.auto().withScope { + val lr = 0.0005f + val beta1 = 0.5f + val batchSize = 100 + val randShape = Shape(batchSize, 100) + val dataShape = Shape(batchSize, 1, 28, 28) + + val (symGen, symDec) = + makeDcganSym(oShape = dataShape, ngf = 32, finalAct = "sigmoid") + + val gMod = new GANModule( + symGen, + symDec, + context = context, + dataShape = dataShape, + codeShape = randShape) + + gMod.initGParams(new Xavier(factorType = "in", magnitude = 2.34f)) + gMod.initDParams(new Xavier(factorType = "in", magnitude = 2.34f)) + + gMod.initOptimizer(new 
Adam(learningRate = lr, wd = 0f, beta1 = beta1)) + + val params = Map( + "image" -> s"$dataPath/train-images-idx3-ubyte", + "label" -> s"$dataPath/train-labels-idx1-ubyte", + "input_shape" -> s"(1, 28, 28)", + "batch_size" -> s"$batchSize", + "shuffle" -> "True" + ) + + val mnistIter = IO.MNISTIter(params) + + val metricAcc = new CustomMetric(ferr, "ferr") + + var t = 0 + var dataBatch: DataBatch = null + var acc = 0.0f + for (epoch <- 0 until numEpoch) { + mnistIter.reset() + metricAcc.reset() + t = 0 + while (mnistIter.hasNext) { + dataBatch = mnistIter.next() + NDArrayCollector.auto().withScope { + gMod.update(dataBatch) + gMod.dLabel.set(0f) + metricAcc.update(Array(gMod.dLabel), gMod.outputsFake) + gMod.dLabel.set(1f) + metricAcc.update(Array(gMod.dLabel), gMod.outputsReal) + + if (t % 50 == 0) { + val (name, value) = metricAcc.get + acc = value(0) + logger.info(s"epoch: $epoch, iter $t, metric=${value.mkString(" ")}") + Viz.imSave("gout", outputPath, gMod.tempOutG(0), flip = true) + val diff = gMod.tempDiffD + val arr = diff.toArray + val mean = arr.sum / arr.length + val std = { + val tmpA = arr.map(a => (a - mean) * (a - mean)) + Math.sqrt(tmpA.sum / tmpA.length).toFloat + } + diff.set((diff - mean) / std + 0.5f) + Viz.imSave("diff", outputPath, diff, flip = true) + Viz.imSave("data", outputPath, dataBatch.data(0), flip = true) + } } - diff.set((diff - mean) / std + 0.5f) - Viz.imSave("diff", outputPath, diff, flip = true) - Viz.imSave("data", outputPath, dataBatch.data(0), flip = true) + dataBatch.dispose() + t += 1 } - - t += 1 } + acc } - acc + output } def main(args: Array[String]): Unit = { diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/imclassification/TrainMnist.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/imclassification/TrainMnist.scala index bd0ce45ffe5f..2f024fd039bc 100644 --- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/imclassification/TrainMnist.scala +++ b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/imclassification/TrainMnist.scala @@ -93,16 +93,18 @@ object TrainMnist { } def test(dataPath : String) : Float = { - val (dataShape, net) = (Shape(784), getMlp) - val devs = Array(Context.cpu(0)) - val envs: mutable.Map[String, String] = mutable.HashMap.empty[String, String] - val Acc = ModelTrain.fit(dataDir = dataPath, - batchSize = 128, numExamples = 60000, devs = devs, - network = net, dataLoader = getIterator(dataShape), - kvStore = "local", numEpochs = 10) - logger.info("Finish test fit ...") - val (_, num) = Acc.get - num(0) + NDArrayCollector.auto().withScope { + val (dataShape, net) = (Shape(784), getMlp) + val devs = Array(Context.cpu(0)) + val envs: mutable.Map[String, String] = mutable.HashMap.empty[String, String] + val Acc = ModelTrain.fit(dataDir = dataPath, + batchSize = 128, numExamples = 60000, devs = devs, + network = net, dataLoader = getIterator(dataShape), + kvStore = "local", numEpochs = 10) + logger.info("Finish test fit ...") + val (_, num) = Acc.get + num(0) + } } diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/infer/imageclassifier/ImageClassifierExample.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/infer/imageclassifier/ImageClassifierExample.scala index 2a0d967a4b4a..f6e4fe0941da 100644 --- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/infer/imageclassifier/ImageClassifierExample.scala +++ 
b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/infer/imageclassifier/ImageClassifierExample.scala @@ -17,10 +17,9 @@ package org.apache.mxnetexamples.infer.imageclassifier -import org.apache.mxnet.Shape +import org.apache.mxnet._ import org.kohsuke.args4j.{CmdLineParser, Option} import org.slf4j.LoggerFactory -import org.apache.mxnet.{DType, DataDesc, Context} import org.apache.mxnet.infer.ImageClassifier import scala.collection.JavaConverters._ @@ -43,47 +42,50 @@ object ImageClassifierExample { def runInferenceOnSingleImage(modelPathPrefix: String, inputImagePath: String, context: Array[Context]): IndexedSeq[IndexedSeq[(String, Float)]] = { - val dType = DType.Float32 - val inputShape = Shape(1, 3, 224, 224) + NDArrayCollector.auto().withScope { + val dType = DType.Float32 + val inputShape = Shape(1, 3, 224, 224) - val inputDescriptor = IndexedSeq(DataDesc("data", inputShape, dType, "NCHW")) + val inputDescriptor = IndexedSeq(DataDesc("data", inputShape, dType, "NCHW")) - // Create object of ImageClassifier class - val imgClassifier: ImageClassifier = new - ImageClassifier(modelPathPrefix, inputDescriptor, context) + // Create object of ImageClassifier class + val imgClassifier: ImageClassifier = new + ImageClassifier(modelPathPrefix, inputDescriptor, context) - // Loading single image from file and getting BufferedImage - val img = ImageClassifier.loadImageFromFile(inputImagePath) + // Loading single image from file and getting BufferedImage + val img = ImageClassifier.loadImageFromFile(inputImagePath) - // Running inference on single image - val output = imgClassifier.classifyImage(img, Some(5)) - - output + // Running inference on single image + val output = imgClassifier.classifyImage(img, Some(5)) + output + } } def runInferenceOnBatchOfImage(modelPathPrefix: String, inputImageDir: String, context: Array[Context]): IndexedSeq[IndexedSeq[(String, Float)]] = { - val dType = DType.Float32 - val inputShape = Shape(1, 3, 224, 224) + NDArrayCollector.auto().withScope { + val dType = DType.Float32 + val inputShape = Shape(1, 3, 224, 224) - val inputDescriptor = IndexedSeq(DataDesc("data", inputShape, dType, "NCHW")) + val inputDescriptor = IndexedSeq(DataDesc("data", inputShape, dType, "NCHW")) - // Create object of ImageClassifier class - val imgClassifier: ImageClassifier = new - ImageClassifier(modelPathPrefix, inputDescriptor, context) + // Create object of ImageClassifier class + val imgClassifier: ImageClassifier = new + ImageClassifier(modelPathPrefix, inputDescriptor, context) - // Loading batch of images from the directory path - val batchFiles = generateBatches(inputImageDir, 20) - var outputList = IndexedSeq[IndexedSeq[(String, Float)]]() + // Loading batch of images from the directory path + val batchFiles = generateBatches(inputImageDir, 20) + var outputList = IndexedSeq[IndexedSeq[(String, Float)]]() - for (batchFile <- batchFiles) { - val imgList = ImageClassifier.loadInputBatch(batchFile) - // Running inference on batch of images loaded in previous step - outputList ++= imgClassifier.classifyImageBatch(imgList, Some(5)) - } + for (batchFile <- batchFiles) { + val imgList = ImageClassifier.loadInputBatch(batchFile) + // Running inference on batch of images loaded in previous step + outputList ++= imgClassifier.classifyImageBatch(imgList, Some(5)) + } - outputList + outputList + } } def generateBatches(inputImageDirPath: String, batchSize: Int = 100): List[List[String]] = { diff --git 
a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/infer/objectdetector/SSDClassifierExample.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/infer/objectdetector/SSDClassifierExample.scala index 7c6c7ef12152..0edde9e6516b 100644 --- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/infer/objectdetector/SSDClassifierExample.scala +++ b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/infer/objectdetector/SSDClassifierExample.scala @@ -19,7 +19,7 @@ package org.apache.mxnetexamples.infer.objectdetector import java.io.File -import org.apache.mxnet.{Context, DType, DataDesc, Shape} +import org.apache.mxnet._ import org.apache.mxnet.infer._ import org.kohsuke.args4j.{CmdLineParser, Option} import org.slf4j.LoggerFactory @@ -54,37 +54,41 @@ object SSDClassifierExample { def runObjectDetectionSingle(modelPathPrefix: String, inputImagePath: String, context: Array[Context]): IndexedSeq[IndexedSeq[(String, Array[Float])]] = { - val dType = DType.Float32 - val inputShape = Shape(1, 3, 512, 512) - // ssd detections, numpy.array([[id, score, x1, y1, x2, y2]...]) - val outputShape = Shape(1, 6132, 6) - val inputDescriptors = IndexedSeq(DataDesc("data", inputShape, dType, "NCHW")) - val img = ImageClassifier.loadImageFromFile(inputImagePath) - val objDetector = new ObjectDetector(modelPathPrefix, inputDescriptors, context) - val output = objDetector.imageObjectDetect(img, Some(3)) - - output + NDArrayCollector.auto().withScope { + val dType = DType.Float32 + val inputShape = Shape(1, 3, 512, 512) + // ssd detections, numpy.array([[id, score, x1, y1, x2, y2]...]) + val outputShape = Shape(1, 6132, 6) + val inputDescriptors = IndexedSeq(DataDesc("data", inputShape, dType, "NCHW")) + val img = ImageClassifier.loadImageFromFile(inputImagePath) + val objDetector = new ObjectDetector(modelPathPrefix, inputDescriptors, context) + val output = objDetector.imageObjectDetect(img, Some(3)) + + output + } } def runObjectDetectionBatch(modelPathPrefix: String, inputImageDir: String, context: Array[Context]): IndexedSeq[IndexedSeq[(String, Array[Float])]] = { - val dType = DType.Float32 - val inputShape = Shape(1, 3, 512, 512) - // ssd detections, numpy.array([[id, score, x1, y1, x2, y2]...]) - val outputShape = Shape(1, 6132, 6) - val inputDescriptors = IndexedSeq(DataDesc("data", inputShape, dType, "NCHW")) - val objDetector = new ObjectDetector(modelPathPrefix, inputDescriptors, context) - // Loading batch of images from the directory path - val batchFiles = generateBatches(inputImageDir, 20) - var outputList = IndexedSeq[IndexedSeq[(String, Array[Float])]]() - - for (batchFile <- batchFiles) { - val imgList = ImageClassifier.loadInputBatch(batchFile) - // Running inference on batch of images loaded in previous step - outputList ++= objDetector.imageBatchObjectDetect(imgList, Some(5)) + NDArrayCollector.auto().withScope { + val dType = DType.Float32 + val inputShape = Shape(1, 3, 512, 512) + // ssd detections, numpy.array([[id, score, x1, y1, x2, y2]...]) + val outputShape = Shape(1, 6132, 6) + val inputDescriptors = IndexedSeq(DataDesc("data", inputShape, dType, "NCHW")) + val objDetector = new ObjectDetector(modelPathPrefix, inputDescriptors, context) + // Loading batch of images from the directory path + val batchFiles = generateBatches(inputImageDir, 20) + var outputList = IndexedSeq[IndexedSeq[(String, Array[Float])]]() + + for (batchFile <- batchFiles) { + val imgList = ImageClassifier.loadInputBatch(batchFile) + // Running inference on batch 
of images loaded in previous step + outputList ++= objDetector.imageBatchObjectDetect(imgList, Some(5)) + } + outputList } - outputList } def generateBatches(inputImageDirPath: String, batchSize: Int = 100): List[List[String]] = { diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/multitask/ExampleMultiTask.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/multitask/ExampleMultiTask.scala index 825e46596755..bfde55831e26 100644 --- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/multitask/ExampleMultiTask.scala +++ b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/multitask/ExampleMultiTask.scala @@ -25,11 +25,9 @@ import org.slf4j.LoggerFactory import scala.collection.JavaConverters._ import org.apache.commons.io.FileUtils - -import org.apache.mxnet.{Context, DataBatch, DataDesc, DataIter, EvalMetric, NDArray, Shape, Symbol, Xavier} +import org.apache.mxnet.{Context, DataBatch, DataDesc, DataIter, EvalMetric, Executor, NDArray, NDArrayCollector, Shape, Symbol, Xavier} import org.apache.mxnet.DType.DType import org.apache.mxnet.optimizer.RMSProp -import org.apache.mxnet.Executor import org.apache.mxnetexamples.Util import scala.collection.immutable.ListMap @@ -223,120 +221,123 @@ object ExampleMultiTask { def train(batchSize: Int, numEpoch: Int, ctx: Context, modelDirPath: String): (Executor, MultiAccuracy) = { - val lr = 0.001f - val network = ExampleMultiTask.buildNetwork() - val (trainIter, valIter) = - Data.mnistIterator(modelDirPath, batchSize = batchSize, inputShape = Shape(784)) - val trainMultiIt = new MultiMnistIterator(trainIter) - val valMultiIter = new MultiMnistIterator(valIter) - - val datasAndLabels = trainMultiIt.provideData ++ trainMultiIt.provideLabel - - val (argShapes, outputShapes, auxShapes) = network.inferShape(trainMultiIt.provideData("data")) - val initializer = new Xavier(factorType = "in", magnitude = 2.34f) - - val argNames = network.listArguments - val argDict = argNames.zip(argShapes.map(NDArray.empty(_, ctx))).toMap - - val gradDict = argNames.zip(argShapes).filter { case (name, shape) => - !datasAndLabels.contains(name) - }.map(x => x._1 -> NDArray.empty(x._2, ctx)).toMap - - argDict.foreach { case (name, ndArray) => - if (!datasAndLabels.contains(name)) { - initializer.initWeight(name, ndArray) + NDArrayCollector.auto().withScope { + val lr = 0.001f + val network = ExampleMultiTask.buildNetwork() + val (trainIter, valIter) = + Data.mnistIterator(modelDirPath, batchSize = batchSize, inputShape = Shape(784)) + val trainMultiIt = new MultiMnistIterator(trainIter) + val valMultiIter = new MultiMnistIterator(valIter) + + val datasAndLabels = trainMultiIt.provideData ++ trainMultiIt.provideLabel + + val (argShapes, outputShapes, auxShapes) + = network.inferShape(trainMultiIt.provideData("data")) + val initializer = new Xavier(factorType = "in", magnitude = 2.34f) + + val argNames = network.listArguments + val argDict = argNames.zip(argShapes.map(NDArray.empty(_, ctx))).toMap + + val gradDict = argNames.zip(argShapes).filter { case (name, shape) => + !datasAndLabels.contains(name) + }.map(x => x._1 -> NDArray.empty(x._2, ctx)).toMap + + argDict.foreach { case (name, ndArray) => + if (!datasAndLabels.contains(name)) { + initializer.initWeight(name, ndArray) + } } - } - - val data = argDict("data") - val label1 = argDict("softmaxoutput0_label") - val label2 = argDict("softmaxoutput1_label") - val maxGradNorm = 0.5f - val executor = network.bind(ctx, argDict, gradDict) - - val opt = new 
RMSProp(learningRate = lr, wd = 0.00001f) - val paramsGrads = gradDict.toList.zipWithIndex.map { case ((name, grad), idx) => - (idx, name, grad, opt.createState(idx, argDict(name))) - } + val data = argDict("data") + val label1 = argDict("softmaxoutput0_label") + val label2 = argDict("softmaxoutput1_label") + val maxGradNorm = 0.5f + val executor = network.bind(ctx, argDict, gradDict) - val evalMetric = new ExampleMultiTask.MultiAccuracy(num = 2, name = "multi_accuracy") - val batchEndCallback = new ExampleMultiTask.Speedometer(batchSize, 50) + val opt = new RMSProp(learningRate = lr, wd = 0.00001f) - for (epoch <- 0 until numEpoch) { - // Training phase - val tic = System.currentTimeMillis - evalMetric.reset() - var nBatch = 0 - var epochDone = false - // Iterate over training data. - trainMultiIt.reset() + val paramsGrads = gradDict.toList.zipWithIndex.map { case ((name, grad), idx) => + (idx, name, grad, opt.createState(idx, argDict(name))) + } - while (!epochDone) { - var doReset = true - while (doReset && trainMultiIt.hasNext) { - val dataBatch = trainMultiIt.next() + val evalMetric = new ExampleMultiTask.MultiAccuracy(num = 2, name = "multi_accuracy") + val batchEndCallback = new ExampleMultiTask.Speedometer(batchSize, 50) + + for (epoch <- 0 until numEpoch) { + // Training phase + val tic = System.currentTimeMillis + evalMetric.reset() + var nBatch = 0 + var epochDone = false + // Iterate over training data. + trainMultiIt.reset() + + while (!epochDone) { + var doReset = true + while (doReset && trainMultiIt.hasNext) { + val dataBatch = trainMultiIt.next() + + data.set(dataBatch.data(0)) + label1.set(dataBatch.label(0)) + label2.set(dataBatch.label(1)) + + executor.forward(isTrain = true) + executor.backward() + + val norm = Math.sqrt(paramsGrads.map { case (idx, name, grad, optimState) => + val l2Norm = NDArray.api.norm(data = (grad / batchSize)).toScalar + l2Norm * l2Norm + }.sum).toFloat + + paramsGrads.foreach { case (idx, name, grad, optimState) => + if (norm > maxGradNorm) { + grad.set(grad.toArray.map(_ * (maxGradNorm / norm))) + opt.update(idx, argDict(name), grad, optimState) + } else opt.update(idx, argDict(name), grad, optimState) + } - data.set(dataBatch.data(0)) - label1.set(dataBatch.label(0)) - label2.set(dataBatch.label(1)) + // evaluate at end, so out_cpu_array can lazy copy + evalMetric.update(dataBatch.label, executor.outputs) - executor.forward(isTrain = true) - executor.backward() - - val norm = Math.sqrt(paramsGrads.map { case (idx, name, grad, optimState) => - val l2Norm = NDArray.api.norm(data = (grad / batchSize)).toScalar - l2Norm * l2Norm - }.sum).toFloat - - paramsGrads.foreach { case (idx, name, grad, optimState) => - if (norm > maxGradNorm) { - grad.set(grad.toArray.map(_ * (maxGradNorm / norm))) - opt.update(idx, argDict(name), grad, optimState) - } else opt.update(idx, argDict(name), grad, optimState) + nBatch += 1 + batchEndCallback.invoke(epoch, nBatch, evalMetric) } - - // evaluate at end, so out_cpu_array can lazy copy - evalMetric.update(dataBatch.label, executor.outputs) - - nBatch += 1 - batchEndCallback.invoke(epoch, nBatch, evalMetric) + if (doReset) { + trainMultiIt.reset() + } + // this epoch is done + epochDone = true } - if (doReset) { - trainMultiIt.reset() + var nameVals = evalMetric.get + nameVals.foreach { case (name, value) => + logger.info(s"Epoch[$epoch] Train-$name=$value") } - // this epoch is done - epochDone = true - } - var nameVals = evalMetric.get - nameVals.foreach { case (name, value) => - logger.info(s"Epoch[$epoch] 
Train-$name=$value") - } - val toc = System.currentTimeMillis - logger.info(s"Epoch[$epoch] Time cost=${toc - tic}") + val toc = System.currentTimeMillis + logger.info(s"Epoch[$epoch] Time cost=${toc - tic}") - evalMetric.reset() - valMultiIter.reset() - while (valMultiIter.hasNext) { - val evalBatch = valMultiIter.next() + evalMetric.reset() + valMultiIter.reset() + while (valMultiIter.hasNext) { + val evalBatch = valMultiIter.next() - data.set(evalBatch.data(0)) - label1.set(evalBatch.label(0)) - label2.set(evalBatch.label(1)) + data.set(evalBatch.data(0)) + label1.set(evalBatch.label(0)) + label2.set(evalBatch.label(1)) - executor.forward(isTrain = true) + executor.forward(isTrain = true) - evalMetric.update(evalBatch.label, executor.outputs) - evalBatch.dispose() - } + evalMetric.update(evalBatch.label, executor.outputs) + evalBatch.dispose() + } - nameVals = evalMetric.get - nameVals.foreach { case (name, value) => - logger.info(s"Epoch[$epoch] Validation-$name=$value") + nameVals = evalMetric.get + nameVals.foreach { case (name, value) => + logger.info(s"Epoch[$epoch] Validation-$name=$value") + } } - } - (executor, evalMetric) + (executor, evalMetric) + } } def main(args: Array[String]): Unit = { diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/NeuralStyle.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/NeuralStyle.scala index f98d725c2304..1767cabcbae4 100644 --- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/NeuralStyle.scala +++ b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/NeuralStyle.scala @@ -170,102 +170,103 @@ object NeuralStyle { contentWeight : Float, tvWeight : Float, gaussianRadius : Int, lr: Float, maxNumEpochs: Int, maxLongEdge: Int, saveEpochs : Int, stopEps: Float) : Unit = { + NDArrayCollector.auto().withScope { + val contentNp = preprocessContentImage(contentImage, maxLongEdge, dev) + val styleNp = preprocessStyleImage(styleImage, contentNp.shape, dev) + val size = (contentNp.shape(2), contentNp.shape(3)) - val contentNp = preprocessContentImage(contentImage, maxLongEdge, dev) - val styleNp = preprocessStyleImage(styleImage, contentNp.shape, dev) - val size = (contentNp.shape(2), contentNp.shape(3)) - - val (style, content) = ModelVgg19.getSymbol - val (gram, gScale) = styleGramSymbol(size, style) - var modelExecutor = ModelVgg19.getExecutor(gram, content, modelPath, size, dev) - - modelExecutor.data.set(styleNp) - modelExecutor.executor.forward() - - val styleArray = modelExecutor.style.map(_.copyTo(Context.cpu())) - modelExecutor.data.set(contentNp) - modelExecutor.executor.forward() - val contentArray = modelExecutor.content.copyTo(Context.cpu()) - - // delete the executor - modelExecutor.argDict.foreach(ele => ele._2.dispose()) - modelExecutor.content.dispose() - modelExecutor.data.dispose() - modelExecutor.dataGrad.dispose() - modelExecutor.style.foreach(_.dispose()) - modelExecutor.executor.dispose() - modelExecutor = null - - val (styleLoss, contentLoss) = getLoss(gram, content) - modelExecutor = ModelVgg19.getExecutor( - styleLoss, contentLoss, modelPath, size, dev) - - val gradArray = { - var tmpGA = Array[NDArray]() - for (i <- 0 until styleArray.length) { - modelExecutor.argDict(s"target_gram_$i").set(styleArray(i)) - tmpGA = tmpGA :+ NDArray.ones(Shape(1), dev) * (styleWeight / gScale(i)) - } - tmpGA :+ NDArray.ones(Shape(1), dev) * contentWeight - } - - modelExecutor.argDict("target_content").set(contentArray) - - // 
train - val img = Random.uniform(-0.1f, 0.1f, contentNp.shape, dev) - val lrFS = new FactorScheduler(step = 10, factor = 0.9f) + val (style, content) = ModelVgg19.getSymbol + val (gram, gScale) = styleGramSymbol(size, style) + var modelExecutor = ModelVgg19.getExecutor(gram, content, modelPath, size, dev) - saveImage(contentNp, s"${outputDir}/input.jpg", gaussianRadius) - saveImage(styleNp, s"${outputDir}/style.jpg", gaussianRadius) - - val optimizer = new Adam( - learningRate = lr, - wd = 0.005f, - lrScheduler = lrFS) - val optimState = optimizer.createState(0, img) - - logger.info(s"start training arguments") - - var oldImg = img.copyTo(dev) - val clipNorm = img.shape.toVector.reduce(_ * _) - val tvGradExecutor = getTvGradExecutor(img, dev, tvWeight) - var eps = 0f - var trainingDone = false - var e = 0 - while (e < maxNumEpochs && !trainingDone) { - modelExecutor.data.set(img) + modelExecutor.data.set(styleNp) modelExecutor.executor.forward() - modelExecutor.executor.backward(gradArray) - val gNorm = NDArray.norm(modelExecutor.dataGrad).toScalar - if (gNorm > clipNorm) { - modelExecutor.dataGrad.set(modelExecutor.dataGrad * (clipNorm / gNorm)) - } - tvGradExecutor match { - case Some(executor) => { - executor.forward() - optimizer.update(0, img, - modelExecutor.dataGrad + executor.outputs(0), - optimState) + val styleArray = modelExecutor.style.map(_.copyTo(Context.cpu())) + modelExecutor.data.set(contentNp) + modelExecutor.executor.forward() + val contentArray = modelExecutor.content.copyTo(Context.cpu()) + + // delete the executor + modelExecutor.argDict.foreach(ele => ele._2.dispose()) + modelExecutor.content.dispose() + modelExecutor.data.dispose() + modelExecutor.dataGrad.dispose() + modelExecutor.style.foreach(_.dispose()) + modelExecutor.executor.dispose() + modelExecutor = null + + val (styleLoss, contentLoss) = getLoss(gram, content) + modelExecutor = ModelVgg19.getExecutor( + styleLoss, contentLoss, modelPath, size, dev) + + val gradArray = { + var tmpGA = Array[NDArray]() + for (i <- 0 until styleArray.length) { + modelExecutor.argDict(s"target_gram_$i").set(styleArray(i)) + tmpGA = tmpGA :+ NDArray.ones(Shape(1), dev) * (styleWeight / gScale(i)) } - case None => - optimizer.update(0, img, modelExecutor.dataGrad, optimState) + tmpGA :+ NDArray.ones(Shape(1), dev) * contentWeight } - eps = (NDArray.norm(oldImg - img) / NDArray.norm(img)).toScalar - oldImg.set(img) - logger.info(s"epoch $e, relative change $eps") - if (eps < stopEps) { - logger.info("eps < args.stop_eps, training finished") - trainingDone = true - } - if ((e + 1) % saveEpochs == 0) { - saveImage(img, s"${outputDir}/tmp_${e + 1}.jpg", gaussianRadius) + modelExecutor.argDict("target_content").set(contentArray) + + // train + val img = Random.uniform(-0.1f, 0.1f, contentNp.shape, dev) + val lrFS = new FactorScheduler(step = 10, factor = 0.9f) + + saveImage(contentNp, s"${outputDir}/input.jpg", gaussianRadius) + saveImage(styleNp, s"${outputDir}/style.jpg", gaussianRadius) + + val optimizer = new Adam( + learningRate = lr, + wd = 0.005f, + lrScheduler = lrFS) + val optimState = optimizer.createState(0, img) + + logger.info(s"start training arguments") + + var oldImg = img.copyTo(dev) + val clipNorm = img.shape.toVector.reduce(_ * _) + val tvGradExecutor = getTvGradExecutor(img, dev, tvWeight) + var eps = 0f + var trainingDone = false + var e = 0 + while (e < maxNumEpochs && !trainingDone) { + modelExecutor.data.set(img) + modelExecutor.executor.forward() + modelExecutor.executor.backward(gradArray) + + val gNorm = 
NDArray.norm(modelExecutor.dataGrad).toScalar + if (gNorm > clipNorm) { + modelExecutor.dataGrad.set(modelExecutor.dataGrad * (clipNorm / gNorm)) + } + tvGradExecutor match { + case Some(executor) => { + executor.forward() + optimizer.update(0, img, + modelExecutor.dataGrad + executor.outputs(0), + optimState) + } + case None => + optimizer.update(0, img, modelExecutor.dataGrad, optimState) + } + eps = (NDArray.norm(oldImg - img) / NDArray.norm(img)).toScalar + oldImg.set(img) + logger.info(s"epoch $e, relative change $eps") + + if (eps < stopEps) { + logger.info("eps < args.stop_eps, training finished") + trainingDone = true + } + if ((e + 1) % saveEpochs == 0) { + saveImage(img, s"${outputDir}/tmp_${e + 1}.jpg", gaussianRadius) + } + e = e + 1 } - e = e + 1 + saveImage(img, s"${outputDir}/out.jpg", gaussianRadius) + logger.info("Finish fit ...") } - saveImage(img, s"${outputDir}/out.jpg", gaussianRadius) - logger.info("Finish fit ...") } def main(args: Array[String]): Unit = { diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/end2end/BoostInference.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/end2end/BoostInference.scala index 5410fb9edc7c..b1e6634db80e 100644 --- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/end2end/BoostInference.scala +++ b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/end2end/BoostInference.scala @@ -17,7 +17,7 @@ package org.apache.mxnetexamples.neuralstyle.end2end -import org.apache.mxnet.{Context, Shape} +import org.apache.mxnet.{Context, NDArrayCollector, Shape} import org.kohsuke.args4j.{CmdLineParser, Option} import org.slf4j.LoggerFactory @@ -29,28 +29,32 @@ object BoostInference { def runInference(modelPath: String, outputPath: String, guassianRadius : Int, inputImage : String, ctx : Context): Unit = { - val dShape = Shape(1, 3, 480, 640) - val clipNorm = 1.0f * dShape.product - // generator - val gens = Array( - GenV4.getModule("g0", dShape, ctx, isTrain = false), - GenV3.getModule("g1", dShape, ctx, isTrain = false), - GenV3.getModule("g2", dShape, ctx, isTrain = false), - GenV4.getModule("g3", dShape, ctx, isTrain = false) - ) - gens.zipWithIndex.foreach { case (gen, i) => - gen.loadParams(s"$modelPath/$i/v3_0002-0026000.params") - } + NDArrayCollector.auto().withScope { + val dShape = Shape(1, 3, 480, 640) + val clipNorm = 1.0f * dShape.product + // generator + val gens = Array( + GenV4.getModule("g0", dShape, ctx, isTrain = false), + GenV3.getModule("g1", dShape, ctx, isTrain = false), + GenV3.getModule("g2", dShape, ctx, isTrain = false), + GenV4.getModule("g3", dShape, ctx, isTrain = false) + ) + gens.zipWithIndex.foreach { case (gen, i) => + gen.loadParams(s"$modelPath/$i/v3_0002-0026000.params") + } - val contentNp = - DataProcessing.preprocessContentImage(s"$inputImage", dShape, ctx) - var data = Array(contentNp) - for (i <- 0 until gens.length) { - gens(i).forward(data.takeRight(1)) - val newImg = gens(i).getOutputs()(0) - data :+= newImg - DataProcessing.saveImage(newImg, s"$outputPath/out_$i.jpg", guassianRadius) - logger.info(s"Converted image: $outputPath/out_$i.jpg") + val contentNp = + DataProcessing.preprocessContentImage(s"$inputImage", dShape, ctx) + var data = Array(contentNp) + for (i <- 0 until gens.length) { + NDArrayCollector.auto().withScope { + gens(i).forward(data.takeRight(1)) + val newImg = gens(i).getOutputs()(0) + data :+= newImg + DataProcessing.saveImage(newImg, s"$outputPath/out_$i.jpg", 
guassianRadius) + logger.info(s"Converted image: $outputPath/out_$i.jpg") + } + } } } diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/end2end/BoostTrain.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/end2end/BoostTrain.scala index 08b4c85d2c55..8246f44bae2f 100644 --- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/end2end/BoostTrain.scala +++ b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/end2end/BoostTrain.scala @@ -19,7 +19,7 @@ package org.apache.mxnetexamples.neuralstyle.end2end import java.io.File -import org.apache.mxnet.{Context, Executor, NDArray, Shape, Symbol} +import org.apache.mxnet.{Context, Executor, NDArray, NDArrayCollector, Shape, Symbol} import org.apache.mxnet.optimizer.SGD import org.kohsuke.args4j.{CmdLineParser, Option} import org.slf4j.LoggerFactory @@ -56,117 +56,121 @@ object BoostTrain { def runTraining(dataPath : String, vggModelPath: String, ctx : Context, styleImage : String, saveModelPath : String) : Unit = { - // params - val vggParams = NDArray.load2Map(vggModelPath) - val styleWeight = 1.2f - val contentWeight = 10f - val dShape = Shape(1, 3, 384, 384) - val clipNorm = 0.05f * dShape.product - val modelPrefix = "v3" - // init style - val styleNp = DataProcessing.preprocessStyleImage(styleImage, dShape, ctx) - var styleMod = Basic.getStyleModule("style", dShape, ctx, vggParams) - styleMod.forward(Array(styleNp)) - val styleArray = styleMod.getOutputs().map(_.copyTo(Context.cpu())) - styleMod.dispose() - styleMod = null - - // content - val contentMod = Basic.getContentModule("content", dShape, ctx, vggParams) - - // loss - val (loss, gScale) = Basic.getLossModule("loss", dShape, ctx, vggParams) - val extraArgs = (0 until styleArray.length) - .map( i => s"target_gram_$i" -> styleArray(i)).toMap - loss.setParams(extraArgs) - var gradArray = Array[NDArray]() - for (i <- 0 until styleArray.length) { - gradArray = gradArray :+ (NDArray.ones(Shape(1), ctx) * (styleWeight / gScale(i))) - } - gradArray = gradArray :+ (NDArray.ones(Shape(1), ctx) * contentWeight) - - // generator - val gens = Array( - GenV4.getModule("g0", dShape, ctx), - GenV3.getModule("g1", dShape, ctx), - GenV3.getModule("g2", dShape, ctx), - GenV4.getModule("g3", dShape, ctx) - ) - gens.foreach { gen => - val opt = new SGD(learningRate = 1e-4f, - momentum = 0.9f, - wd = 5e-3f, - clipGradient = 5f) - gen.initOptimizer(opt) - } + NDArrayCollector.auto().withScope { + // params + val vggParams = NDArray.load2Map(vggModelPath) + val styleWeight = 1.2f + val contentWeight = 10f + val dShape = Shape(1, 3, 384, 384) + val clipNorm = 0.05f * dShape.product + val modelPrefix = "v3" + // init style + val styleNp = DataProcessing.preprocessStyleImage(styleImage, dShape, ctx) + var styleMod = Basic.getStyleModule("style", dShape, ctx, vggParams) + styleMod.forward(Array(styleNp)) + val styleArray = styleMod.getOutputs().map(_.copyTo(Context.cpu())) + styleMod.dispose() + styleMod = null + + // content + val contentMod = Basic.getContentModule("content", dShape, ctx, vggParams) + + // loss + val (loss, gScale) = Basic.getLossModule("loss", dShape, ctx, vggParams) + val extraArgs = (0 until styleArray.length) + .map(i => s"target_gram_$i" -> styleArray(i)).toMap + loss.setParams(extraArgs) + var gradArray = Array[NDArray]() + for (i <- 0 until styleArray.length) { + gradArray = gradArray :+ (NDArray.ones(Shape(1), ctx) * (styleWeight / gScale(i))) + } + gradArray = gradArray 
:+ (NDArray.ones(Shape(1), ctx) * contentWeight) + + // generator + val gens = Array( + GenV4.getModule("g0", dShape, ctx), + GenV3.getModule("g1", dShape, ctx), + GenV3.getModule("g2", dShape, ctx), + GenV4.getModule("g3", dShape, ctx) + ) + gens.foreach { gen => + val opt = new SGD(learningRate = 1e-4f, + momentum = 0.9f, + wd = 5e-3f, + clipGradient = 5f) + gen.initOptimizer(opt) + } - var filelist = new File(dataPath).list().toList - val numImage = filelist.length - logger.info(s"Dataset size: $numImage") + var filelist = new File(dataPath).list().toList + val numImage = filelist.length + logger.info(s"Dataset size: $numImage") - val tvWeight = 1e-2f + val tvWeight = 1e-2f - val startEpoch = 0 - val endEpoch = 3 + val startEpoch = 0 + val endEpoch = 3 - for (k <- 0 until gens.length) { - val path = new File(s"${saveModelPath}/$k") - if (!path.exists()) path.mkdir() - } + for (k <- 0 until gens.length) { + val path = new File(s"${saveModelPath}/$k") + if (!path.exists()) path.mkdir() + } - // train - for (i <- startEpoch until endEpoch) { - filelist = Random.shuffle(filelist) - for (idx <- filelist.indices) { - var dataArray = Array[NDArray]() - var lossGradArray = Array[NDArray]() - val data = - DataProcessing.preprocessContentImage(s"${dataPath}/${filelist(idx)}", dShape, ctx) - dataArray = dataArray :+ data - // get content - contentMod.forward(Array(data)) - // set target content - loss.setParams(Map("target_content" -> contentMod.getOutputs()(0))) - // gen_forward - for (k <- 0 until gens.length) { - gens(k).forward(dataArray.takeRight(1)) - dataArray = dataArray :+ gens(k).getOutputs()(0) - // loss forward - loss.forward(dataArray.takeRight(1)) - loss.backward(gradArray) - lossGradArray = lossGradArray :+ loss.getInputGrads()(0) - } - val grad = NDArray.zeros(data.shape, ctx) - for (k <- gens.length - 1 to 0 by -1) { - val tvGradExecutor = getTvGradExecutor(gens(k).getOutputs()(0), ctx, tvWeight) - tvGradExecutor.forward() - grad += lossGradArray(k) + tvGradExecutor.outputs(0) - val gNorm = NDArray.norm(grad) - if (gNorm.toScalar > clipNorm) { - grad *= clipNorm / gNorm.toScalar - } - gens(k).backward(Array(grad)) - gens(k).update() - gNorm.dispose() - tvGradExecutor.dispose() - } - grad.dispose() - if (idx % 20 == 0) { - logger.info(s"Epoch $i: Image $idx") - for (k <- 0 until gens.length) { - val n = NDArray.norm(gens(k).getInputGrads()(0)) - logger.info(s"Data Norm : ${n.toScalar / dShape.product}") - n.dispose() - } - } - if (idx % 1000 == 0) { - for (k <- 0 until gens.length) { - gens(k).saveParams( - s"${saveModelPath}/$k/${modelPrefix}_" + - s"${"%04d".format(i)}-${"%07d".format(idx)}.params") + // train + for (i <- startEpoch until endEpoch) { + NDArrayCollector.auto().withScope { + filelist = Random.shuffle(filelist) + for (idx <- filelist.indices) { + var dataArray = Array[NDArray]() + var lossGradArray = Array[NDArray]() + val data = + DataProcessing.preprocessContentImage(s"${dataPath}/${filelist(idx)}", dShape, ctx) + dataArray = dataArray :+ data + // get content + contentMod.forward(Array(data)) + // set target content + loss.setParams(Map("target_content" -> contentMod.getOutputs()(0))) + // gen_forward + for (k <- 0 until gens.length) { + gens(k).forward(dataArray.takeRight(1)) + dataArray = dataArray :+ gens(k).getOutputs()(0) + // loss forward + loss.forward(dataArray.takeRight(1)) + loss.backward(gradArray) + lossGradArray = lossGradArray :+ loss.getInputGrads()(0) + } + val grad = NDArray.zeros(data.shape, ctx) + for (k <- gens.length - 1 to 0 by -1) { + val 
tvGradExecutor = getTvGradExecutor(gens(k).getOutputs()(0), ctx, tvWeight) + tvGradExecutor.forward() + grad += lossGradArray(k) + tvGradExecutor.outputs(0) + val gNorm = NDArray.norm(grad) + if (gNorm.toScalar > clipNorm) { + grad *= clipNorm / gNorm.toScalar + } + gens(k).backward(Array(grad)) + gens(k).update() + gNorm.dispose() + tvGradExecutor.dispose() + } + grad.dispose() + if (idx % 20 == 0) { + logger.info(s"Epoch $i: Image $idx") + for (k <- 0 until gens.length) { + val n = NDArray.norm(gens(k).getInputGrads()(0)) + logger.info(s"Data Norm : ${n.toScalar / dShape.product}") + n.dispose() + } + } + if (idx % 1000 == 0) { + for (k <- 0 until gens.length) { + gens(k).saveParams( + s"${saveModelPath}/$k/${modelPrefix}_" + + s"${"%04d".format(i)}-${"%07d".format(idx)}.params") + } + } + data.dispose() } } - data.dispose() } } } diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/LstmBucketing.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/LstmBucketing.scala index f7a01bad133a..8b2059d2e119 100644 --- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/LstmBucketing.scala +++ b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/LstmBucketing.scala @@ -62,56 +62,58 @@ object LstmBucketing { def runTraining(trainData : String, validationData : String, ctx : Array[Context], numEpoch : Int): Unit = { - val batchSize = 32 - val buckets = Array(10, 20, 30, 40, 50, 60) - val numHidden = 200 - val numEmbed = 200 - val numLstmLayer = 2 - - logger.info("Building vocab ...") - val vocab = BucketIo.defaultBuildVocab(trainData) - - def BucketSymGen(key: AnyRef): - (Symbol, IndexedSeq[String], IndexedSeq[String]) = { - val seqLen = key.asInstanceOf[Int] - val sym = Lstm.lstmUnroll(numLstmLayer, seqLen, vocab.size, - numHidden = numHidden, numEmbed = numEmbed, numLabel = vocab.size) - (sym, IndexedSeq("data"), IndexedSeq("softmax_label")) + NDArrayCollector.auto().withScope { + val batchSize = 32 + val buckets = Array(10, 20, 30, 40, 50, 60) + val numHidden = 200 + val numEmbed = 200 + val numLstmLayer = 2 + + logger.info("Building vocab ...") + val vocab = BucketIo.defaultBuildVocab(trainData) + + def BucketSymGen(key: AnyRef): + (Symbol, IndexedSeq[String], IndexedSeq[String]) = { + val seqLen = key.asInstanceOf[Int] + val sym = Lstm.lstmUnroll(numLstmLayer, seqLen, vocab.size, + numHidden = numHidden, numEmbed = numEmbed, numLabel = vocab.size) + (sym, IndexedSeq("data"), IndexedSeq("softmax_label")) + } + + val initC = (0 until numLstmLayer).map(l => + (s"l${l}_init_c_beta", (batchSize, numHidden)) + ) + val initH = (0 until numLstmLayer).map(l => + (s"l${l}_init_h_beta", (batchSize, numHidden)) + ) + val initStates = initC ++ initH + + val dataTrain = new BucketSentenceIter(trainData, vocab, + buckets, batchSize, initStates) + val dataVal = new BucketSentenceIter(validationData, vocab, + buckets, batchSize, initStates) + + val model = new BucketingModule( + symGen = BucketSymGen, + defaultBucketKey = dataTrain.defaultBucketKey, + contexts = ctx) + + val fitParams = new FitParams() + fitParams.setEvalMetric( + new CustomMetric(perplexity, name = "perplexity")) + fitParams.setKVStore("device") + fitParams.setOptimizer( + new SGD(learningRate = 0.01f, momentum = 0f, wd = 0.00001f)) + fitParams.setInitializer(new Xavier(factorType = "in", magnitude = 2.34f)) + fitParams.setBatchEndCallback(new Speedometer(batchSize, 50)) + + logger.info("Start training ...") + model.fit( + trainData = dataTrain, + evalData = 
Some(dataVal), + numEpoch = numEpoch, fitParams) + logger.info("Finished training...") } - - val initC = (0 until numLstmLayer).map(l => - (s"l${l}_init_c_beta", (batchSize, numHidden)) - ) - val initH = (0 until numLstmLayer).map(l => - (s"l${l}_init_h_beta", (batchSize, numHidden)) - ) - val initStates = initC ++ initH - - val dataTrain = new BucketSentenceIter(trainData, vocab, - buckets, batchSize, initStates) - val dataVal = new BucketSentenceIter(validationData, vocab, - buckets, batchSize, initStates) - - val model = new BucketingModule( - symGen = BucketSymGen, - defaultBucketKey = dataTrain.defaultBucketKey, - contexts = ctx) - - val fitParams = new FitParams() - fitParams.setEvalMetric( - new CustomMetric(perplexity, name = "perplexity")) - fitParams.setKVStore("device") - fitParams.setOptimizer( - new SGD(learningRate = 0.01f, momentum = 0f, wd = 0.00001f)) - fitParams.setInitializer(new Xavier(factorType = "in", magnitude = 2.34f)) - fitParams.setBatchEndCallback(new Speedometer(batchSize, 50)) - - logger.info("Start training ...") - model.fit( - trainData = dataTrain, - evalData = Some(dataVal), - numEpoch = numEpoch, fitParams) - logger.info("Finished training...") } def main(args: Array[String]): Unit = { diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/TestCharRnn.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/TestCharRnn.scala index 4786d5d59535..bd064dbd3518 100644 --- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/TestCharRnn.scala +++ b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/TestCharRnn.scala @@ -30,54 +30,56 @@ object TestCharRnn { private val logger = LoggerFactory.getLogger(classOf[TrainCharRnn]) - def runTestCharRNN(dataPath: String, modelPrefix: String, starterSentence : String): Unit = { - // The batch size for training - val batchSize = 32 - // We can support various length input - // For this problem, we cut each input sentence to length of 129 - // So we only need fix length bucket - val buckets = List(129) - // hidden unit in LSTM cell - val numHidden = 512 - // embedding dimension, which is, map a char to a 256 dim vector - val numEmbed = 256 - // number of lstm layer - val numLstmLayer = 3 + def runInferenceCharRNN(dataPath: String, modelPrefix: String, starterSentence : String): Unit = { + NDArrayCollector.auto().withScope { + // The batch size for training + val batchSize = 32 + // We can support various length input + // For this problem, we cut each input sentence to length of 129 + // So we only need fix length bucket + val buckets = List(129) + // hidden unit in LSTM cell + val numHidden = 512 + // embedding dimension, which is, map a char to a 256 dim vector + val numEmbed = 256 + // number of lstm layer + val numLstmLayer = 3 - // build char vocabluary from input - val vocab = Utils.buildVocab(dataPath) + // build char vocabluary from input + val vocab = Utils.buildVocab(dataPath) - // load from check-point - val (_, argParams, _) = Model.loadCheckpoint(modelPrefix, 75) + // load from check-point + val (_, argParams, _) = Model.loadCheckpoint(modelPrefix, 75) - // build an inference model - val model = new RnnModel.LSTMInferenceModel(numLstmLayer, vocab.size + 1, - numHidden = numHidden, numEmbed = numEmbed, - numLabel = vocab.size + 1, argParams = argParams, dropout = 0.2f) + // build an inference model + val model = new RnnModel.LSTMInferenceModel(numLstmLayer, vocab.size + 1, + numHidden = numHidden, numEmbed = numEmbed, + numLabel = 
vocab.size + 1, argParams = argParams, dropout = 0.2f) - // generate a sequence of 1200 chars - val seqLength = 1200 - val inputNdarray = NDArray.zeros(1) - val revertVocab = Utils.makeRevertVocab(vocab) + // generate a sequence of 1200 chars + val seqLength = 1200 + val inputNdarray = NDArray.zeros(1) + val revertVocab = Utils.makeRevertVocab(vocab) - // Feel free to change the starter sentence - var output = starterSentence - val randomSample = true - var newSentence = true - val ignoreLength = output.length() + // Feel free to change the starter sentence + var output = starterSentence + val randomSample = true + var newSentence = true + val ignoreLength = output.length() - for (i <- 0 until seqLength) { - if (i <= ignoreLength - 1) Utils.makeInput(output(i), vocab, inputNdarray) - else Utils.makeInput(output.takeRight(1)(0), vocab, inputNdarray) - val prob = model.forward(inputNdarray, newSentence) - newSentence = false - val nextChar = Utils.makeOutput(prob, revertVocab, randomSample) - if (nextChar == "") newSentence = true - if (i >= ignoreLength) output = output ++ nextChar - } + for (i <- 0 until seqLength) { + if (i <= ignoreLength - 1) Utils.makeInput(output(i), vocab, inputNdarray) + else Utils.makeInput(output.takeRight(1)(0), vocab, inputNdarray) + val prob = model.forward(inputNdarray, newSentence) + newSentence = false + val nextChar = Utils.makeOutput(prob, revertVocab, randomSample) + if (nextChar == "") newSentence = true + if (i >= ignoreLength) output = output ++ nextChar + } - // Let's see what we can learned from char in Obama's speech. - logger.info(output) + // Let's see what we can learned from char in Obama's speech. + logger.info(output) + } } def main(args: Array[String]): Unit = { @@ -86,7 +88,7 @@ object TestCharRnn { try { parser.parseArgument(args.toList.asJava) assert(stcr.dataPath != null && stcr.modelPrefix != null && stcr.starterSentence != null) - runTestCharRNN(stcr.dataPath, stcr.modelPrefix, stcr.starterSentence) + runInferenceCharRNN(stcr.dataPath, stcr.modelPrefix, stcr.starterSentence) } catch { case ex: Exception => { logger.error(ex.getMessage, ex) diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/TrainCharRnn.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/TrainCharRnn.scala index fb59705c9ef0..c90b7637b9b1 100644 --- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/TrainCharRnn.scala +++ b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/TrainCharRnn.scala @@ -33,125 +33,127 @@ object TrainCharRnn { def runTrainCharRnn(dataPath: String, saveModelPath: String, ctx : Context, numEpoch : Int): Unit = { - // The batch size for training - val batchSize = 32 - // We can support various length input - // For this problem, we cut each input sentence to length of 129 - // So we only need fix length bucket - val buckets = Array(129) - // hidden unit in LSTM cell - val numHidden = 512 - // embedding dimension, which is, map a char to a 256 dim vector - val numEmbed = 256 - // number of lstm layer - val numLstmLayer = 3 - // we will show a quick demo in 2 epoch - // learning rate - val learningRate = 0.001f - // we will use pure sgd without momentum - val momentum = 0.0f - - val vocab = Utils.buildVocab(dataPath) - - // generate symbol for a length - def symGen(seqLen: Int): Symbol = { - Lstm.lstmUnroll(numLstmLayer, seqLen, vocab.size + 1, - numHidden = numHidden, numEmbed = numEmbed, - numLabel = vocab.size + 1, dropout = 0.2f) - } + 
NDArrayCollector.auto().withScope { + // The batch size for training + val batchSize = 32 + // We can support various length input + // For this problem, we cut each input sentence to length of 129 + // So we only need fix length bucket + val buckets = Array(129) + // hidden unit in LSTM cell + val numHidden = 512 + // embedding dimension, which is, map a char to a 256 dim vector + val numEmbed = 256 + // number of lstm layer + val numLstmLayer = 3 + // we will show a quick demo in 2 epoch + // learning rate + val learningRate = 0.001f + // we will use pure sgd without momentum + val momentum = 0.0f + + val vocab = Utils.buildVocab(dataPath) + + // generate symbol for a length + def symGen(seqLen: Int): Symbol = { + Lstm.lstmUnroll(numLstmLayer, seqLen, vocab.size + 1, + numHidden = numHidden, numEmbed = numEmbed, + numLabel = vocab.size + 1, dropout = 0.2f) + } - // initalize states for LSTM - val initC = for (l <- 0 until numLstmLayer) - yield (s"l${l}_init_c_beta", (batchSize, numHidden)) - val initH = for (l <- 0 until numLstmLayer) - yield (s"l${l}_init_h_beta", (batchSize, numHidden)) - val initStates = initC ++ initH + // initalize states for LSTM + val initC = for (l <- 0 until numLstmLayer) + yield (s"l${l}_init_c_beta", (batchSize, numHidden)) + val initH = for (l <- 0 until numLstmLayer) + yield (s"l${l}_init_h_beta", (batchSize, numHidden)) + val initStates = initC ++ initH - val dataTrain = new BucketIo.BucketSentenceIter(dataPath, vocab, buckets, - batchSize, initStates, seperateChar = "\n", - text2Id = Utils.text2Id, readContent = Utils.readContent) + val dataTrain = new BucketIo.BucketSentenceIter(dataPath, vocab, buckets, + batchSize, initStates, seperateChar = "\n", + text2Id = Utils.text2Id, readContent = Utils.readContent) - // the network symbol - val symbol = symGen(buckets(0)) + // the network symbol + val symbol = symGen(buckets(0)) - val datasAndLabels = dataTrain.provideData ++ dataTrain.provideLabel - val (argShapes, outputShapes, auxShapes) = symbol.inferShape(datasAndLabels) + val datasAndLabels = dataTrain.provideData ++ dataTrain.provideLabel + val (argShapes, outputShapes, auxShapes) = symbol.inferShape(datasAndLabels) - val initializer = new Xavier(factorType = "in", magnitude = 2.34f) + val initializer = new Xavier(factorType = "in", magnitude = 2.34f) - val argNames = symbol.listArguments() - val argDict = argNames.zip(argShapes.map(NDArray.zeros(_, ctx))).toMap - val auxNames = symbol.listAuxiliaryStates() - val auxDict = auxNames.zip(auxShapes.map(NDArray.zeros(_, ctx))).toMap + val argNames = symbol.listArguments() + val argDict = argNames.zip(argShapes.map(NDArray.zeros(_, ctx))).toMap + val auxNames = symbol.listAuxiliaryStates() + val auxDict = auxNames.zip(auxShapes.map(NDArray.zeros(_, ctx))).toMap - val gradDict = argNames.zip(argShapes).filter { case (name, shape) => - !datasAndLabels.contains(name) - }.map(x => x._1 -> NDArray.empty(x._2, ctx) ).toMap + val gradDict = argNames.zip(argShapes).filter { case (name, shape) => + !datasAndLabels.contains(name) + }.map(x => x._1 -> NDArray.empty(x._2, ctx)).toMap - argDict.foreach { case (name, ndArray) => - if (!datasAndLabels.contains(name)) { - initializer.initWeight(name, ndArray) + argDict.foreach { case (name, ndArray) => + if (!datasAndLabels.contains(name)) { + initializer.initWeight(name, ndArray) + } } - } - val data = argDict("data") - val label = argDict("softmax_label") + val data = argDict("data") + val label = argDict("softmax_label") - val executor = symbol.bind(ctx, argDict, gradDict) 
+ val executor = symbol.bind(ctx, argDict, gradDict) - val opt = new Adam(learningRate = learningRate, wd = 0.0001f) + val opt = new Adam(learningRate = learningRate, wd = 0.0001f) - val paramsGrads = gradDict.toList.zipWithIndex.map { case ((name, grad), idx) => - (idx, name, grad, opt.createState(idx, argDict(name))) - } + val paramsGrads = gradDict.toList.zipWithIndex.map { case ((name, grad), idx) => + (idx, name, grad, opt.createState(idx, argDict(name))) + } - val evalMetric = new CustomMetric(Utils.perplexity, "perplexity") - val batchEndCallback = new Callback.Speedometer(batchSize, 50) - val epochEndCallback = Utils.doCheckpoint(s"${saveModelPath}/obama") - - for (epoch <- 0 until numEpoch) { - // Training phase - val tic = System.currentTimeMillis - evalMetric.reset() - var nBatch = 0 - var epochDone = false - // Iterate over training data. - dataTrain.reset() - while (!epochDone) { - var doReset = true - while (doReset && dataTrain.hasNext) { - val dataBatch = dataTrain.next() - - data.set(dataBatch.data(0)) - label.set(dataBatch.label(0)) - executor.forward(isTrain = true) - executor.backward() - paramsGrads.foreach { case (idx, name, grad, optimState) => - opt.update(idx, argDict(name), grad, optimState) + val evalMetric = new CustomMetric(Utils.perplexity, "perplexity") + val batchEndCallback = new Callback.Speedometer(batchSize, 50) + val epochEndCallback = Utils.doCheckpoint(s"${saveModelPath}/obama") + + for (epoch <- 0 until numEpoch) { + // Training phase + val tic = System.currentTimeMillis + evalMetric.reset() + var nBatch = 0 + var epochDone = false + // Iterate over training data. + dataTrain.reset() + while (!epochDone) { + var doReset = true + while (doReset && dataTrain.hasNext) { + val dataBatch = dataTrain.next() + + data.set(dataBatch.data(0)) + label.set(dataBatch.label(0)) + executor.forward(isTrain = true) + executor.backward() + paramsGrads.foreach { case (idx, name, grad, optimState) => + opt.update(idx, argDict(name), grad, optimState) + } + + // evaluate at end, so out_cpu_array can lazy copy + evalMetric.update(dataBatch.label, executor.outputs) + + nBatch += 1 + batchEndCallback.invoke(epoch, nBatch, evalMetric) } - - // evaluate at end, so out_cpu_array can lazy copy - evalMetric.update(dataBatch.label, executor.outputs) - - nBatch += 1 - batchEndCallback.invoke(epoch, nBatch, evalMetric) + if (doReset) { + dataTrain.reset() + } + // this epoch is done + epochDone = true } - if (doReset) { - dataTrain.reset() + val (name, value) = evalMetric.get + name.zip(value).foreach { case (n, v) => + logger.info(s"Epoch[$epoch] Train-$n=$v") } - // this epoch is done - epochDone = true - } - val (name, value) = evalMetric.get - name.zip(value).foreach { case (n, v) => - logger.info(s"Epoch[$epoch] Train-$n=$v") - } - val toc = System.currentTimeMillis - logger.info(s"Epoch[$epoch] Time cost=${toc - tic}") + val toc = System.currentTimeMillis + logger.info(s"Epoch[$epoch] Time cost=${toc - tic}") - epochEndCallback.invoke(epoch, symbol, argDict, auxDict) + epochEndCallback.invoke(epoch, symbol, argDict, auxDict) + } + executor.dispose() } - executor.dispose() } def main(args: Array[String]): Unit = { diff --git a/scala-package/examples/src/test/scala/org/apache/mxnetexamples/cnntextclassification/CNNClassifierExampleSuite.scala b/scala-package/examples/src/test/scala/org/apache/mxnetexamples/cnntextclassification/CNNClassifierExampleSuite.scala index 95c9823e3b28..44025c0459ad 100644 --- 
a/scala-package/examples/src/test/scala/org/apache/mxnetexamples/cnntextclassification/CNNClassifierExampleSuite.scala +++ b/scala-package/examples/src/test/scala/org/apache/mxnetexamples/cnntextclassification/CNNClassifierExampleSuite.scala @@ -21,7 +21,7 @@ import java.io.File import java.net.URL import org.apache.commons.io.FileUtils -import org.apache.mxnet.Context +import org.apache.mxnet.{Context, NDArrayCollector} import org.apache.mxnetexamples.Util import org.scalatest.{BeforeAndAfterAll, FunSuite} import org.slf4j.LoggerFactory diff --git a/scala-package/examples/src/test/scala/org/apache/mxnetexamples/gan/GanExampleSuite.scala b/scala-package/examples/src/test/scala/org/apache/mxnetexamples/gan/GanExampleSuite.scala index 96820ce4e983..59faba9a3779 100644 --- a/scala-package/examples/src/test/scala/org/apache/mxnetexamples/gan/GanExampleSuite.scala +++ b/scala-package/examples/src/test/scala/org/apache/mxnetexamples/gan/GanExampleSuite.scala @@ -18,14 +18,14 @@ package org.apache.mxnetexamples.gan import java.io.File -import org.apache.mxnet.Context + +import org.apache.mxnet.{Context, NDArrayCollector} import org.apache.mxnetexamples.Util import org.scalatest.{BeforeAndAfterAll, FunSuite, Ignore} import org.slf4j.LoggerFactory import scala.sys.process.Process -@Ignore class GanExampleSuite extends FunSuite with BeforeAndAfterAll{ private val logger = LoggerFactory.getLogger(classOf[GanExampleSuite]) @@ -44,7 +44,8 @@ class GanExampleSuite extends FunSuite with BeforeAndAfterAll{ val context = Context.gpu() - val output = GanMnist.runTraining(modelDirPath, context, modelDirPath, 5) + val output = GanMnist.runTraining(modelDirPath, context, modelDirPath, 3) + Process("rm -rf " + modelDirPath) ! assert(output >= 0.0f) diff --git a/scala-package/examples/src/test/scala/org/apache/mxnetexamples/infer/imageclassifier/ImageClassifierExampleSuite.scala b/scala-package/examples/src/test/scala/org/apache/mxnetexamples/infer/imageclassifier/ImageClassifierExampleSuite.scala index f0bb07b4a398..34d3bc97a005 100644 --- a/scala-package/examples/src/test/scala/org/apache/mxnetexamples/infer/imageclassifier/ImageClassifierExampleSuite.scala +++ b/scala-package/examples/src/test/scala/org/apache/mxnetexamples/infer/imageclassifier/ImageClassifierExampleSuite.scala @@ -23,7 +23,7 @@ import java.io.File import java.net.URL import org.apache.commons.io.FileUtils -import org.apache.mxnet.Context +import org.apache.mxnet.{Context, NDArrayCollector} import org.apache.mxnetexamples.Util import sys.process.Process @@ -64,10 +64,10 @@ class ImageClassifierExampleSuite extends FunSuite with BeforeAndAfterAll { } val output = ImageClassifierExample.runInferenceOnSingleImage(modelDirPath + "resnet-18", - inputImagePath, context) + inputImagePath, context) val outputList = ImageClassifierExample.runInferenceOnBatchOfImage(modelDirPath + "resnet-18", - inputImageDir, context) + inputImageDir, context) Process("rm -rf " + modelDirPath + " " + inputImageDir) ! 
diff --git a/scala-package/examples/src/test/scala/org/apache/mxnetexamples/infer/objectdetector/ObjectDetectorExampleSuite.scala b/scala-package/examples/src/test/scala/org/apache/mxnetexamples/infer/objectdetector/ObjectDetectorExampleSuite.scala index 31da38569281..addc837e6fc1 100644 --- a/scala-package/examples/src/test/scala/org/apache/mxnetexamples/infer/objectdetector/ObjectDetectorExampleSuite.scala +++ b/scala-package/examples/src/test/scala/org/apache/mxnetexamples/infer/objectdetector/ObjectDetectorExampleSuite.scala @@ -21,7 +21,7 @@ import java.io.File import java.net.URL import org.apache.commons.io.FileUtils -import org.apache.mxnet.Context +import org.apache.mxnet.{Context, NDArrayCollector} import org.apache.mxnetexamples.Util import org.scalatest.{BeforeAndAfterAll, FunSuite} import org.slf4j.LoggerFactory @@ -61,11 +61,11 @@ class ObjectDetectorExampleSuite extends FunSuite with BeforeAndAfterAll { } val output = SSDClassifierExample.runObjectDetectionSingle(modelDirPath + "resnet50_ssd_model", - inputImagePath, context) + inputImagePath, context) val outputList = SSDClassifierExample.runObjectDetectionBatch( - modelDirPath + "resnet50_ssd_model", - inputImageDir, context) + modelDirPath + "resnet50_ssd_model", + inputImageDir, context) Process("rm -rf " + modelDirPath + " " + inputImageDir) ! diff --git a/scala-package/examples/src/test/scala/org/apache/mxnetexamples/multitask/MultiTaskSuite.scala b/scala-package/examples/src/test/scala/org/apache/mxnetexamples/multitask/MultiTaskSuite.scala index b86f6751e45b..983978dbaec4 100644 --- a/scala-package/examples/src/test/scala/org/apache/mxnetexamples/multitask/MultiTaskSuite.scala +++ b/scala-package/examples/src/test/scala/org/apache/mxnetexamples/multitask/MultiTaskSuite.scala @@ -17,26 +17,11 @@ package org.apache.mxnetexamples.multitask -import org.apache.commons.io.FileUtils -import org.apache.mxnet.Context -import org.scalatest.FunSuite +import org.apache.mxnet._ import org.slf4j.LoggerFactory -import org.apache.mxnet.Symbol -import org.apache.mxnet.DataIter -import org.apache.mxnet.DataBatch -import org.apache.mxnet.NDArray -import org.apache.mxnet.Shape -import org.apache.mxnet.EvalMetric import org.apache.mxnet.Context -import org.apache.mxnet.Xavier -import org.apache.mxnet.optimizer.RMSProp -import java.io.File -import java.net.URL -import scala.sys.process.Process -import scala.collection.immutable.ListMap -import scala.collection.immutable.IndexedSeq -import scala.collection.mutable.{ArrayBuffer, ListBuffer} +import org.scalatest.FunSuite /** diff --git a/scala-package/examples/src/test/scala/org/apache/mxnetexamples/neuralstyle/NeuralStyleSuite.scala b/scala-package/examples/src/test/scala/org/apache/mxnetexamples/neuralstyle/NeuralStyleSuite.scala index dc8fc5b8c14d..71c2b35ef444 100644 --- a/scala-package/examples/src/test/scala/org/apache/mxnetexamples/neuralstyle/NeuralStyleSuite.scala +++ b/scala-package/examples/src/test/scala/org/apache/mxnetexamples/neuralstyle/NeuralStyleSuite.scala @@ -17,7 +17,7 @@ package org.apache.mxnetexamples.neuralstyle -import org.apache.mxnet.Context +import org.apache.mxnet.{Context, NDArrayCollector} import org.apache.mxnetexamples.Util import org.apache.mxnetexamples.neuralstyle.end2end.{BoostInference, BoostTrain} import org.scalatest.{BeforeAndAfterAll, FunSuite} diff --git a/scala-package/examples/src/test/scala/org/apache/mxnetexamples/rnn/ExampleRNNSuite.scala b/scala-package/examples/src/test/scala/org/apache/mxnetexamples/rnn/ExampleRNNSuite.scala index 
b393a433305a..14fb7b85e9b3 100644 --- a/scala-package/examples/src/test/scala/org/apache/mxnetexamples/rnn/ExampleRNNSuite.scala +++ b/scala-package/examples/src/test/scala/org/apache/mxnetexamples/rnn/ExampleRNNSuite.scala @@ -25,7 +25,6 @@ import org.slf4j.LoggerFactory import scala.sys.process.Process -@Ignore class ExampleRNNSuite extends FunSuite with BeforeAndAfterAll { private val logger = LoggerFactory.getLogger(classOf[ExampleRNNSuite]) @@ -51,7 +50,7 @@ class ExampleRNNSuite extends FunSuite with BeforeAndAfterAll { ctx = Context.gpu() } LstmBucketing.runTraining(tempDirPath + "/RNN/sherlockholmes.train.txt", - tempDirPath + "/RNN/sherlockholmes.valid.txt", Array(ctx), 1) + tempDirPath + "/RNN/sherlockholmes.valid.txt", Array(ctx), 1) } test("Example CI: Test TrainCharRNN") { @@ -60,16 +59,16 @@ class ExampleRNNSuite extends FunSuite with BeforeAndAfterAll { System.getenv("SCALA_TEST_ON_GPU").toInt == 1) { val ctx = Context.gpu() TrainCharRnn.runTrainCharRnn(tempDirPath + "/RNN/obama.txt", - tempDirPath, ctx, 1) + tempDirPath, ctx, 1) } else { logger.info("CPU not supported for this test, skipped...") } } - test("Example CI: Test TestCharRNN") { + test("Example CI: Test Inference on CharRNN") { val tempDirPath = System.getProperty("java.io.tmpdir") val ctx = Context.gpu() - TestCharRnn.runTestCharRNN(tempDirPath + "/RNN/obama.txt", - tempDirPath + "/RNN/obama", "The joke") + TestCharRnn.runInferenceCharRNN(tempDirPath + "/RNN/obama.txt", + tempDirPath + "/RNN/obama", "The joke") } } From d19d6c22471618ec1d9448898346ddd9e08a2ab1 Mon Sep 17 00:00:00 2001 From: Thomas Delteil Date: Wed, 22 Aug 2018 18:11:32 -0700 Subject: [PATCH 040/160] Update docs of LogisticRegressionOutput (#12299) --- src/operator/regression_output.cc | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/operator/regression_output.cc b/src/operator/regression_output.cc index 07122d465c7a..5632baca0d4d 100644 --- a/src/operator/regression_output.cc +++ b/src/operator/regression_output.cc @@ -142,7 +142,11 @@ The storage type of ``label`` can be ``default`` or ``csr`` - LogisticRegressionOutput(default, default) = default - LogisticRegressionOutput(default, csr) = default -By default, gradients of this loss function are scaled by factor `1/m`, where m is the number of regression outputs of a training example. +The loss function used is the Binary Cross Entropy Loss: + +:math:`-{(y\log(p) + (1 - y)\log(1 - p))}` + +Where `y` is the ground truth probability of positive outcome for a given example, and `p` the probability predicted by the model. By default, gradients of this loss function are scaled by factor `1/m`, where m is the number of regression outputs of a training example. The parameter `grad_scale` can be used to change this scale to `grad_scale/m`. 
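As a rough illustration of the formula and the `1/m` gradient scaling described above, here is a minimal NumPy sketch (illustrative only; the values are arbitrary and the variable names are not taken from the operator's source):

import numpy as np

# One training example with m = 3 regression outputs (arbitrary numbers).
y = np.array([1.0, 0.0, 1.0])   # ground-truth probabilities
p = np.array([0.9, 0.2, 0.6])   # probabilities predicted by the model

# Binary cross entropy per output: -(y*log(p) + (1-y)*log(1-p))
loss = -(y * np.log(p) + (1.0 - y) * np.log(1.0 - p))

# For a sigmoid output the gradient of this loss w.r.t. the pre-sigmoid
# input is (p - y); scaling it as the docs describe gives grad_scale/m * (p - y).
m = y.size
grad_scale = 1.0
grad = grad_scale / m * (p - y)
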
)code" ADD_FILELINE); From 1ffb3f58a35461bac62f9ae637f36ebbfd580883 Mon Sep 17 00:00:00 2001 From: Alexander Zai Date: Wed, 22 Aug 2018 21:14:19 -0400 Subject: [PATCH 041/160] Add randn to random module (#12156) * add randn to random * reorder args * update comments * add test for randn * fix ordering of params * update test name * fix params * remove dup param * restrict loc and scale for randn method * test should only use single loc / scale * pop out kwargs * fix lint * use normal symbol from randn test * update randn test * retrigger --- python/mxnet/ndarray/random.py | 57 +++++++++++++++++++++++++++- tests/python/unittest/test_random.py | 25 ++++++++++-- 2 files changed, 76 insertions(+), 6 deletions(-) diff --git a/python/mxnet/ndarray/random.py b/python/mxnet/ndarray/random.py index d0c83c10e6b4..1e941f79aa1c 100644 --- a/python/mxnet/ndarray/random.py +++ b/python/mxnet/ndarray/random.py @@ -23,8 +23,9 @@ from .ndarray import NDArray -__all__ = ['uniform', 'normal', 'poisson', 'exponential', 'gamma', 'multinomial', - 'negative_binomial', 'generalized_negative_binomial', 'shuffle'] +__all__ = ['uniform', 'normal', 'randn', 'poisson', 'exponential', 'gamma', + 'multinomial', 'negative_binomial', 'generalized_negative_binomial', + 'shuffle'] def _random_helper(random, sampler, params, shape, dtype, ctx, out, kwargs): @@ -151,6 +152,58 @@ def normal(loc=0, scale=1, shape=_Null, dtype=_Null, ctx=None, out=None, **kwarg [loc, scale], shape, dtype, ctx, out, kwargs) +def randn(*shape, **kwargs): + """Draw random samples from a normal (Gaussian) distribution. + + Samples are distributed according to a normal distribution parametrized + by *loc* (mean) and *scale* (standard deviation). + + + Parameters + ---------- + loc : float or NDArray + Mean (centre) of the distribution. + scale : float or NDArray + Standard deviation (spread or width) of the distribution. + shape : int or tuple of ints + The number of samples to draw. If shape is, e.g., `(m, n)` and `loc` and + `scale` are scalars, output shape will be `(m, n)`. If `loc` and `scale` + are NDArrays with shape, e.g., `(x, y)`, then output will have shape + `(x, y, m, n)`, where `m*n` samples are drawn for each `[loc, scale)` pair. + dtype : {'float16','float32', 'float64'} + Data type of output samples. Default is 'float32' + ctx : Context + Device context of output. Default is current context. Overridden by + `loc.context` when `loc` is an NDArray. + out : NDArray + Store output to an existing NDArray. + + + Examples + -------- + >>> mx.nd.random.randn() + 2.21220636 + + >>> mx.nd.random.randn(2, 2) + [[-1.856082 -1.9768796 ] + [-0.20801921 0.2444218 ]] + + >>> mx.nd.random.randn(2, 3, loc=5, scale=1) + [[4.19962 4.8311777 5.936328 ] + [5.357444 5.7793283 3.9896927]] + + """ + loc = kwargs.pop('loc', 0) + scale = kwargs.pop('scale', 1) + dtype = kwargs.pop('dtype', _Null) + ctx = kwargs.pop('ctx', None) + out = kwargs.pop('out', None) + assert isinstance(loc, (int, float)) + assert isinstance(scale, (int, float)) + return _random_helper(_internal._random_normal, _internal._sample_normal, + [loc, scale], shape, dtype, ctx, out, kwargs) + + def poisson(lam=1, shape=_Null, dtype=_Null, ctx=None, out=None, **kwargs): """Draw random samples from a Poisson distribution. 
diff --git a/tests/python/unittest/test_random.py b/tests/python/unittest/test_random.py index 575fcdd3b4c1..4310658ae0bf 100644 --- a/tests/python/unittest/test_random.py +++ b/tests/python/unittest/test_random.py @@ -45,6 +45,15 @@ def check_with_device(device, dtype): ('std', lambda x, params: np.std(x.astype(np.float64)) - params['scale'], tol) ] }, + { + 'name': 'randn', + 'ndop': mx.nd.random.randn, + 'params': { 'loc': 10.0, 'scale': 0.5 }, + 'checks': [ + ('mean', lambda x, params: np.mean(x.astype(np.float64) - params['loc']), tol), + ('std', lambda x, params: np.std(x.astype(np.float64)) - params['scale'], tol) + ] + }, { 'name': 'uniform', 'symbol': mx.sym.random.uniform, @@ -123,10 +132,14 @@ def check_with_device(device, dtype): # check directly params = symbdic['params'].copy() params.update(shape=shape, dtype=dtype, ctx=device) + args = () + if name == 'randn': + params.pop('shape') # randn does not accept shape param + args = shape mx.random.seed(128) - ret1 = ndop(**params).asnumpy() + ret1 = ndop(*args, **params).asnumpy() mx.random.seed(128) - ret2 = ndop(**params).asnumpy() + ret2 = ndop(*args, **params).asnumpy() assert same(ret1, ret2), \ "ndarray test: `%s` should give the same result with the same seed" % name @@ -134,12 +147,14 @@ def check_with_device(device, dtype): assert np.abs(check_func(ret1, params)) < tol, "ndarray test: %s check for `%s` did not pass" % (check_name, name) # check multi-distribution sampling + if 'inputs' not in symbdic: continue # randn does not support multi-distribution sampling + params = {'shape': shape, 'dtype': dtype, 'ctx': device} params.update({k : mx.nd.array(v, ctx=device, dtype=dtype) for k, v in symbdic['inputs']}) mx.random.seed(128) - ret1 = ndop(**params).asnumpy() + ret1 = ndop(*args, **params).asnumpy() mx.random.seed(128) - ret2 = ndop(**params).asnumpy() + ret2 = ndop(*args, **params).asnumpy() assert same(ret1, ret2), \ "ndarray test: `%s` should give the same result with the same seed" % name for i in range(2): @@ -149,6 +164,8 @@ def check_with_device(device, dtype): err = np.abs(check_func(ret2[i,j], stats)) assert err < tol, "%f vs %f: symbolic test: %s check for `%s` did not pass" % (err, tol, check_name, name) + if 'symbol' not in symbdic: continue # randn does not have symbol + # check symbolic symbol = symbdic['symbol'] X = mx.sym.Variable("X") From 67ba3c508b1cad6ee222ffd378210a77db3195fa Mon Sep 17 00:00:00 2001 From: Deokjae Lee <36436141+asitstands@users.noreply.github.com> Date: Thu, 23 Aug 2018 10:17:31 +0900 Subject: [PATCH 042/160] A binary RBM example (#11268) * A binary RBM example * Retrigger CI * Rename the parameter `interaction` as `interaction_weight` * Improved Bernoulli sampling * Cosmetic changes * Implement likelihood estimation using AIS * Add momemtun option * Replace underbars in the command line options with hyphens * Adjust default values of the hyperparameters and add command line options to set device * Update README * Minor updates * Setting num_workers for the dataloader * Remove unnecessary `enumerate` call * Fix a bug on `--cuda` option * Show the initial real images also * Minor change in README * Trigger CI * Trigger CI --- example/README.md | 1 + .../restricted-boltzmann-machine/README.md | 13 + .../binary_rbm.py | 253 ++++++++++++++++++ .../binary_rbm_gluon.py | 142 ++++++++++ .../binary_rbm_module.py | 171 ++++++++++++ .../restricted-boltzmann-machine/samples.png | Bin 0 -> 191570 bytes 6 files changed, 580 insertions(+) create mode 100644 
example/restricted-boltzmann-machine/README.md create mode 100644 example/restricted-boltzmann-machine/binary_rbm.py create mode 100644 example/restricted-boltzmann-machine/binary_rbm_gluon.py create mode 100644 example/restricted-boltzmann-machine/binary_rbm_module.py create mode 100644 example/restricted-boltzmann-machine/samples.png diff --git a/example/README.md b/example/README.md index ff071dfb3f50..6b9a086ff5e1 100644 --- a/example/README.md +++ b/example/README.md @@ -117,6 +117,7 @@ If your tutorial depends on specific packages, simply add them to this provision * [DDPG](reinforcement-learning/ddpg) - example of training DDPG for CartPole * [DQN](reinforcement-learning/dqn) - examples of training DQN and Double DQN to play Atari Games * [Parallel Advantage-Actor Critic](reinforcement-learning/parallel_actor_critic) +* [Restricted Boltzmann Machine](restricted-boltzmann-machine) - an example of the binary restricted Boltzmann machine learning MNIST * [RNN Time Major](rnn-time-major) - RNN implementation with Time-major layout * [Recurrent Neural Net](rnn) - creating recurrent neural networks models using high level `mxnet.rnn` interface * [Sparse](sparse) - a variety of sparse examples diff --git a/example/restricted-boltzmann-machine/README.md b/example/restricted-boltzmann-machine/README.md new file mode 100644 index 000000000000..129120ba9961 --- /dev/null +++ b/example/restricted-boltzmann-machine/README.md @@ -0,0 +1,13 @@ +# Restricted Boltzmann machine (RBM) + +An example of the binary RBM [1] learning the MNIST data. The RBM is implemented as a custom operator, and a gluon block is also provided. `binary_rbm.py` contains the implementation of the RBM. `binary_rbm_module.py` and `binary_rbm_gluon.py` train the MNIST data using the module interface and the gluon interface respectively. The MNIST data is downloaded automatically. + +The progress of the learning is monitored by estimating the log-likelihood using the annealed importance sampling [2,3]. The learning with the default hyperparameters takes about 25 minutes on GTX 1080Ti and the resulting log-likelihood is around -70 for both testing and training datasets. + +Here are some samples generated by the RBM with the default hyperparameters. The samples (right) are obtained by 3000 steps of Gibbs sampling starting from randomly chosen real images (left). + +
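To make the sampling procedure described above concrete, here is a minimal NumPy sketch of a single Gibbs step for a binary RBM (illustrative only, not part of the example code; `W`, `b_v` and `b_h` are assumed names for the interaction weight and the visible/hidden biases, playing the same roles as the parameters in `binary_rbm.py` below):

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def gibbs_step(v, W, b_v, b_h, rng):
    # rng: e.g. np.random.default_rng()
    # sample the hidden units given the visible units, then resample the visible units
    h = (rng.uniform(size=b_h.shape) < sigmoid(v @ W + b_h)).astype(v.dtype)
    v_new = (rng.uniform(size=b_v.shape) < sigmoid(h @ W.T + b_v)).astype(v.dtype)
    return v_new, h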

+ +[1] G E Hinton & R R Salakhutdinov, Reducing the Dimensionality of Data with Neural Networks. Science **313**, 5786 (2006)
+[2] R M Neal, Annealed importance sampling. Stat Comput **11**, 2 (2001)
+[3] R Salakhutdinov & I Murray, On the quantitative analysis of deep belief networks. In Proc. ICML '08 **25** (2008) \ No newline at end of file diff --git a/example/restricted-boltzmann-machine/binary_rbm.py b/example/restricted-boltzmann-machine/binary_rbm.py new file mode 100644 index 000000000000..115e9d140e4b --- /dev/null +++ b/example/restricted-boltzmann-machine/binary_rbm.py @@ -0,0 +1,253 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import ast +import numpy as np +import mxnet as mx + +class BinaryRBM(mx.operator.CustomOp): + + def __init__(self, k): + self.k = k # Persistent contrastive divergence k + + def forward(self, is_train, req, in_data, out_data, aux): + visible_layer_data = in_data[0] # (num_batch, num_visible) + visible_layer_bias = in_data[1] # (num_visible,) + hidden_layer_bias = in_data[2] # (num_hidden,) + interaction_weight= in_data[3] # (num_visible, num_hidden) + + if is_train: + _, hidden_layer_prob_1 = self.sample_hidden_layer(visible_layer_data, hidden_layer_bias, interaction_weight) + hidden_layer_sample = aux[1] # The initial state of the Gibbs sampling for persistent CD + else: + hidden_layer_sample, hidden_layer_prob_1 = self.sample_hidden_layer(visible_layer_data, hidden_layer_bias, interaction_weight) + + # k-step Gibbs sampling + for _ in range(self.k): + visible_layer_sample, visible_layer_prob_1 = self.sample_visible_layer(hidden_layer_sample, visible_layer_bias, interaction_weight) + hidden_layer_sample, _ = self.sample_hidden_layer(visible_layer_sample, hidden_layer_bias, interaction_weight) + + if is_train: + # Used in backward and next forward + aux[0][:] = visible_layer_sample + aux[1][:] = hidden_layer_sample + + self.assign(out_data[0], req[0], visible_layer_prob_1) + self.assign(out_data[1], req[1], hidden_layer_prob_1) + + def backward(self, req, out_grad, in_data, out_data, in_grad, aux): + visible_layer_data = in_data[0] # (num_batch, num_visible) + visible_layer_sample = aux[0] # (num_batch, num_visible) + hidden_layer_prob_1 = out_data[1] # (num_batch, num_hidden) + hidden_layer_sample = aux[1] # (num_batch, num_hidden) + + grad_visible_layer_bias = (visible_layer_sample - visible_layer_data).mean(axis=0) + grad_hidden_layer_bias = (hidden_layer_sample - hidden_layer_prob_1).mean(axis=0) + grad_interaction_weight= (mx.nd.linalg.gemm2(visible_layer_sample.expand_dims(2), hidden_layer_sample.expand_dims(1)) - + mx.nd.linalg.gemm2(visible_layer_data.expand_dims(2), hidden_layer_prob_1.expand_dims(1)) + ).mean(axis=0) + + # We don't need the gradient on the visible layer input + self.assign(in_grad[1], req[1], grad_visible_layer_bias) + self.assign(in_grad[2], req[2], grad_hidden_layer_bias) + self.assign(in_grad[3], req[3], grad_interaction_weight) + + def sample_hidden_layer(self, 
visible_layer_batch, hidden_layer_bias, interaction_weight): + return self.sample_layer(visible_layer_batch, hidden_layer_bias, interaction_weight, False) + + def sample_visible_layer(self, hidden_layer_batch, visible_layer_bias, interaction_weight): + return self.sample_layer(hidden_layer_batch, visible_layer_bias, interaction_weight, True) + + def sample_layer(self, other_layer_sample, layer_bias, interaction_weight, interaction_transpose): + prob_1 = mx.nd.linalg.gemm( + other_layer_sample, + interaction_weight, + layer_bias.tile(reps=(other_layer_sample.shape[0], 1)), + transpose_b=interaction_transpose) # (num_batch, num_units_in_layer) + prob_1.sigmoid(out=prob_1) + return mx.nd.random.uniform(shape=prob_1.shape) < prob_1, prob_1 + +@mx.operator.register('BinaryRBM') +class BinaryRBMProp(mx.operator.CustomOpProp): + + # Auxiliary states are requested only if `for_training` is true. + def __init__(self, num_hidden, k, for_training): + super(BinaryRBMProp, self).__init__(False) + self.num_hidden = int(num_hidden) + self.k = int(k) + self.for_training = ast.literal_eval(for_training) + + def list_arguments(self): + # 0: (batch size, the number of visible units) + # 1: (the number of visible units,) + # 2: (the number of hidden units,) + # 3: (the number of visible units, the number of hidden units) + return ['data', 'visible_layer_bias', 'hidden_layer_bias', 'interaction_weight'] + + def list_outputs(self): + # 0: The probabilities that each visible unit is 1 after `k` steps of Gibbs sampling starting from the given `data`. + # (batch size, the number of visible units) + # 1: The probabilities that each hidden unit is 1 conditional on the given `data`. + # (batch size, the number of hidden units) + return ['visible_layer_prob_1', 'hidden_layer_prob_1'] + + def list_auxiliary_states(self): + # Used only if `self.for_trainig is true. + # 0: Store the visible layer samples obtained in the forward pass, used in the backward pass. + # (batch size, the number of visible units) + # 1: Store the hidden layer samples obtained in the forward pass, used in the backward and next forward pass. 
+ # (batch size, the number of hidden units) + return ['aux_visible_layer_sample', 'aux_hidden_layer_sample'] if self.for_training else [] + + def infer_shape(self, in_shapes): + visible_layer_data_shape = in_shapes[0] # The input data + visible_layer_bias_shape = (visible_layer_data_shape[1],) + hidden_layer_bias_shape = (self.num_hidden,) + interaction_shape = (visible_layer_data_shape[1], self.num_hidden) + visible_layer_sample_shape = visible_layer_data_shape + visible_layer_prob_1_shape = visible_layer_sample_shape + hidden_layer_sample_shape = (visible_layer_data_shape[0], self.num_hidden) + hidden_layer_prob_1_shape = hidden_layer_sample_shape + return [visible_layer_data_shape, visible_layer_bias_shape, hidden_layer_bias_shape, interaction_shape], \ + [visible_layer_prob_1_shape, hidden_layer_prob_1_shape], \ + [visible_layer_sample_shape, hidden_layer_sample_shape] if self.for_training else [] + + def infer_type(self, in_type): + return [in_type[0], in_type[0], in_type[0], in_type[0]], \ + [in_type[0], in_type[0]], \ + [in_type[0], in_type[0]] if self.for_training else [] + + def create_operator(self, ctx, in_shapes, in_dtypes): + return BinaryRBM(self.k) + +# For gluon API +class BinaryRBMBlock(mx.gluon.HybridBlock): + + def __init__(self, num_hidden, k, for_training, **kwargs): + super(BinaryRBMBlock, self).__init__(**kwargs) + with self.name_scope(): + self.num_hidden = num_hidden + self.k = k + self.for_training = for_training + self.visible_layer_bias = self.params.get('visible_layer_bias', shape=(0,), allow_deferred_init=True) + self.hidden_layer_bias = self.params.get('hidden_layer_bias', shape=(0,), allow_deferred_init=True) + self.interaction_weight= self.params.get('interaction_weight', shape=(0, 0), allow_deferred_init=True) + if for_training: + self.aux_visible_layer_sample = self.params.get('aux_visible_layer_sample', shape=(0, 0), allow_deferred_init=True) + self.aux_hidden_layer_sample = self.params.get('aux_hidden_layer_sample', shape=(0, 0), allow_deferred_init=True) + + def hybrid_forward(self, F, data, visible_layer_bias, hidden_layer_bias, interaction_weight, aux_visible_layer_sample=None, aux_hidden_layer_sample=None): + # As long as `for_training` is kept constant, this conditional statement does not prevent hybridization. + if self.for_training: + return F.Custom( + data, + visible_layer_bias, + hidden_layer_bias, + interaction_weight, + aux_visible_layer_sample, + aux_hidden_layer_sample, + num_hidden=self.num_hidden, + k=self.k, + for_training=self.for_training, + op_type='BinaryRBM') + else: + return F.Custom( + data, + visible_layer_bias, + hidden_layer_bias, + interaction_weight, + num_hidden=self.num_hidden, + k=self.k, + for_training=self.for_training, + op_type='BinaryRBM') + +def estimate_log_likelihood(visible_layer_bias, hidden_layer_bias, interaction_weight, ais_batch_size, ais_num_batch, ais_intermediate_steps, ais_burn_in_steps, data, ctx): + # The base-rate RBM with no hidden layer. The visible layer bias is set to the same with the given RBM. + # This is not the only possible choice but simple and works well. 
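+    # The estimator below is annealed importance sampling: starting from the
+    # base-rate RBM, whose partition function is tractable, we anneal towards the
+    # target RBM through the intermediate unnormalized distributions defined
+    # below. Averaging the resulting importance weights gives an estimate of the
+    # log partition function log Z, which converts the unnormalized
+    # log-probabilities of the data into a log-likelihood.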
+ base_rate_visible_layer_bias = visible_layer_bias + base_rate_visible_prob_1 = base_rate_visible_layer_bias.sigmoid() + log_base_rate_z = base_rate_visible_layer_bias.exp().log1p().sum() + + def log_intermediate_unnormalized_prob(visible_layer_sample, beta): + p = mx.nd.dot( + visible_layer_sample, + (1 - beta) * base_rate_visible_layer_bias + beta * visible_layer_bias) + if beta != 0: + p += mx.nd.linalg.gemm( + visible_layer_sample, + interaction_weight, + hidden_layer_bias.tile(reps=(visible_layer_sample.shape[0], 1)), + transpose_b=False, + alpha=beta, + beta=beta).exp().log1p().sum(axis=1) + return p + + def sample_base_rbm(): + rands = mx.nd.random.uniform(shape=(ais_batch_size, base_rate_visible_prob_1.shape[0]), ctx=ctx) + return rands < base_rate_visible_prob_1.tile(reps=(ais_batch_size, 1)) + + def sample_intermediate_visible_layer(visible_layer_sample, beta): + for _ in range(ais_burn_in_steps): + hidden_prob_1 = mx.nd.linalg.gemm( + visible_layer_sample, + interaction_weight, + hidden_layer_bias.tile(reps=(visible_layer_sample.shape[0], 1)), + transpose_b=False, + alpha=beta, + beta=beta) + hidden_prob_1.sigmoid(out=hidden_prob_1) + hidden_layer_sample = mx.nd.random.uniform(shape=hidden_prob_1.shape, ctx=ctx) < hidden_prob_1 + visible_prob_1 = mx.nd.linalg.gemm( + hidden_layer_sample, + interaction_weight, + visible_layer_bias.tile(reps=(hidden_layer_sample.shape[0], 1)), + transpose_b=True, + alpha=beta, + beta=beta) + (1 - beta) * base_rate_visible_layer_bias + visible_prob_1.sigmoid(out=visible_prob_1) + visible_layer_sample = mx.nd.random.uniform(shape=visible_prob_1.shape, ctx=ctx) < visible_prob_1 + return visible_layer_sample + + def array_from_batch(batch): + if isinstance(batch, mx.io.DataBatch): + return batch.data[0].as_in_context(ctx).flatten() + else: # batch is an instance of list in the case of gluon DataLoader + return batch[0].as_in_context(ctx).flatten() + + importance_weight_sum = 0 + num_ais_samples = ais_num_batch * ais_batch_size + for _ in range(ais_num_batch): + log_importance_weight = 0 + visible_layer_sample = sample_base_rbm() + for n in range(1, ais_intermediate_steps + 1): + beta = 1. * n / ais_intermediate_steps + log_importance_weight += \ + log_intermediate_unnormalized_prob(visible_layer_sample, beta) - \ + log_intermediate_unnormalized_prob(visible_layer_sample, (n - 1.) / ais_intermediate_steps) + visible_layer_sample = sample_intermediate_visible_layer(visible_layer_sample, beta) + importance_weight_sum += log_importance_weight.exp().sum() + log_z = (importance_weight_sum / num_ais_samples).log() + log_base_rate_z + + log_likelihood = 0 + num_data = 0 + for batch in data: + batch_array = array_from_batch(batch) + log_likelihood += log_intermediate_unnormalized_prob(batch_array, 1) - log_z + num_data += batch_array.shape[0] + log_likelihood = log_likelihood.sum() / num_data + + return log_likelihood.asscalar(), log_z.asscalar() diff --git a/example/restricted-boltzmann-machine/binary_rbm_gluon.py b/example/restricted-boltzmann-machine/binary_rbm_gluon.py new file mode 100644 index 000000000000..cdce2e6125d2 --- /dev/null +++ b/example/restricted-boltzmann-machine/binary_rbm_gluon.py @@ -0,0 +1,142 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import random as pyrnd +import argparse +import numpy as np +import mxnet as mx +from matplotlib import pyplot as plt +from binary_rbm import BinaryRBMBlock +from binary_rbm import estimate_log_likelihood + + +### Helper function + +def get_non_auxiliary_params(rbm): + return rbm.collect_params('^(?!.*_aux_.*).*$') + +### Command line arguments + +parser = argparse.ArgumentParser(description='Restricted Boltzmann machine learning MNIST') +parser.add_argument('--num-hidden', type=int, default=500, help='number of hidden units') +parser.add_argument('--k', type=int, default=30, help='number of Gibbs sampling steps used in the PCD algorithm') +parser.add_argument('--batch-size', type=int, default=80, help='batch size') +parser.add_argument('--num-epoch', type=int, default=130, help='number of epochs') +parser.add_argument('--learning-rate', type=float, default=0.1, help='learning rate for stochastic gradient descent') # The optimizer rescales this with `1 / batch_size` +parser.add_argument('--momentum', type=float, default=0.3, help='momentum for the stochastic gradient descent') +parser.add_argument('--ais-batch-size', type=int, default=100, help='batch size for AIS to estimate the log-likelihood') +parser.add_argument('--ais-num-batch', type=int, default=10, help='number of batches for AIS to estimate the log-likelihood') +parser.add_argument('--ais-intermediate-steps', type=int, default=10, help='number of intermediate distributions for AIS to estimate the log-likelihood') +parser.add_argument('--ais-burn-in-steps', type=int, default=10, help='number of burn in steps for each intermediate distributions of AIS to estimate the log-likelihood') +parser.add_argument('--cuda', action='store_true', dest='cuda', help='train on GPU with CUDA') +parser.add_argument('--no-cuda', action='store_false', dest='cuda', help='train on CPU') +parser.add_argument('--device-id', type=int, default=0, help='GPU device id') +parser.add_argument('--data-loader-num-worker', type=int, default=4, help='number of multithreading workers for the data loader') +parser.set_defaults(cuda=True) + +args = parser.parse_args() +print(args) + +### Global environment + +mx.random.seed(pyrnd.getrandbits(32)) +ctx = mx.gpu(args.device_id) if args.cuda else mx.cpu() + + +### Prepare data + +def data_transform(data, label): + return data.astype(np.float32) / 255, label.astype(np.float32) + +mnist_train_dataset = mx.gluon.data.vision.MNIST(train=True, transform=data_transform) +mnist_test_dataset = mx.gluon.data.vision.MNIST(train=False, transform=data_transform) +img_height = mnist_train_dataset[0][0].shape[0] +img_width = mnist_train_dataset[0][0].shape[1] +num_visible = img_width * img_height + +# This generates arrays with shape (batch_size, height = 28, width = 28, num_channel = 1) +train_data = mx.gluon.data.DataLoader(mnist_train_dataset, args.batch_size, shuffle=True, num_workers=args.data_loader_num_worker) +test_data = 
mx.gluon.data.DataLoader(mnist_test_dataset, args.batch_size, shuffle=True, num_workers=args.data_loader_num_worker) + +### Train + +rbm = BinaryRBMBlock(num_hidden=args.num_hidden, k=args.k, for_training=True, prefix='rbm_') +rbm.initialize(mx.init.Normal(sigma=.01), ctx=ctx) +rbm.hybridize() +trainer = mx.gluon.Trainer( + get_non_auxiliary_params(rbm), + 'sgd', {'learning_rate': args.learning_rate, 'momentum': args.momentum}) +for epoch in range(args.num_epoch): + # Update parameters + for batch, _ in train_data: + batch = batch.as_in_context(ctx).flatten() + with mx.autograd.record(): + out = rbm(batch) + out[0].backward() + trainer.step(batch.shape[0]) + mx.nd.waitall() # To restrict memory usage + + # Monitor the performace of the model + params = get_non_auxiliary_params(rbm) + param_visible_layer_bias = params['rbm_visible_layer_bias'].data(ctx=ctx) + param_hidden_layer_bias = params['rbm_hidden_layer_bias'].data(ctx=ctx) + param_interaction_weight = params['rbm_interaction_weight'].data(ctx=ctx) + test_log_likelihood, _ = estimate_log_likelihood( + param_visible_layer_bias, param_hidden_layer_bias, param_interaction_weight, + args.ais_batch_size, args.ais_num_batch, args.ais_intermediate_steps, args.ais_burn_in_steps, test_data, ctx) + train_log_likelihood, _ = estimate_log_likelihood( + param_visible_layer_bias, param_hidden_layer_bias, param_interaction_weight, + args.ais_batch_size, args.ais_num_batch, args.ais_intermediate_steps, args.ais_burn_in_steps, train_data, ctx) + print("Epoch %d completed with test log-likelihood %f and train log-likelihood %f" % (epoch, test_log_likelihood, train_log_likelihood)) + + +### Show some samples. + +# Each sample is obtained by 3000 steps of Gibbs sampling starting from a real sample. +# Starting from the real data is just for convenience of implmentation. +# There must be no correlation between the initial states and the resulting samples. +# You can start from random states and run the Gibbs chain for sufficiently long time. + +print("Preparing showcase") + +showcase_gibbs_sampling_steps = 3000 +showcase_num_samples_w = 15 +showcase_num_samples_h = 15 +showcase_num_samples = showcase_num_samples_w * showcase_num_samples_h +showcase_img_shape = (showcase_num_samples_h * img_height, 2 * showcase_num_samples_w * img_width) +showcase_img_column_shape = (showcase_num_samples_h * img_height, img_width) + +showcase_rbm = BinaryRBMBlock( + num_hidden=args.num_hidden, + k=showcase_gibbs_sampling_steps, + for_training=False, + params=get_non_auxiliary_params(rbm)) +showcase_iter = iter(mx.gluon.data.DataLoader(mnist_train_dataset, showcase_num_samples_h, shuffle=True)) +showcase_img = np.zeros(showcase_img_shape) +for i in range(showcase_num_samples_w): + data_batch = next(showcase_iter)[0].as_in_context(ctx).flatten() + sample_batch = showcase_rbm(data_batch) + # Each pixel is the probability that the unit is 1. 
+ showcase_img[:, i * img_width : (i + 1) * img_width] = data_batch.reshape(showcase_img_column_shape).asnumpy() + showcase_img[:, (showcase_num_samples_w + i) * img_width : (showcase_num_samples_w + i + 1) * img_width + ] = sample_batch[0].reshape(showcase_img_column_shape).asnumpy() +s = plt.imshow(showcase_img, cmap='gray') +plt.axis('off') +plt.axvline(showcase_num_samples_w * img_width, color='y') +plt.show(s) + +print("Done") \ No newline at end of file diff --git a/example/restricted-boltzmann-machine/binary_rbm_module.py b/example/restricted-boltzmann-machine/binary_rbm_module.py new file mode 100644 index 000000000000..e1a3653bcafc --- /dev/null +++ b/example/restricted-boltzmann-machine/binary_rbm_module.py @@ -0,0 +1,171 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import random as pyrnd +import argparse +import numpy as np +import mxnet as mx +from matplotlib import pyplot as plt +import binary_rbm + + +### Command line arguments + +parser = argparse.ArgumentParser(description='Restricted Boltzmann machine learning MNIST') +parser.add_argument('--num-hidden', type=int, default=500, help='number of hidden units') +parser.add_argument('--k', type=int, default=30, help='number of Gibbs sampling steps used in the PCD algorithm') +parser.add_argument('--batch-size', type=int, default=80, help='batch size') +parser.add_argument('--num-epoch', type=int, default=130, help='number of epochs') +parser.add_argument('--learning-rate', type=float, default=0.1, help='learning rate for stochastic gradient descent') # The optimizer rescales this with `1 / batch_size` +parser.add_argument('--momentum', type=float, default=0.3, help='momentum for the stochastic gradient descent') +parser.add_argument('--ais-batch-size', type=int, default=100, help='batch size for AIS to estimate the log-likelihood') +parser.add_argument('--ais-num-batch', type=int, default=10, help='number of batches for AIS to estimate the log-likelihood') +parser.add_argument('--ais-intermediate-steps', type=int, default=10, help='number of intermediate distributions for AIS to estimate the log-likelihood') +parser.add_argument('--ais-burn-in-steps', type=int, default=10, help='number of burn in steps for each intermediate distributions of AIS to estimate the log-likelihood') +parser.add_argument('--cuda', action='store_true', dest='cuda', help='train on GPU with CUDA') +parser.add_argument('--no-cuda', action='store_false', dest='cuda', help='train on CPU') +parser.add_argument('--device-id', type=int, default=0, help='GPU device id') +parser.set_defaults(cuda=True) + +args = parser.parse_args() +print(args) + +### Global environment + +mx.random.seed(pyrnd.getrandbits(32)) +ctx = mx.gpu(args.device_id) if args.cuda else mx.cpu() + +### Prepare data + +mnist = 
mx.test_utils.get_mnist() # Each pixel has a value in [0, 1]. +mnist_train_data = mnist['train_data'] +mnist_test_data = mnist['test_data'] +img_height = mnist_train_data.shape[2] +img_width = mnist_train_data.shape[3] +num_visible = img_width * img_height + +# The iterators generate arrays with shape (batch_size, num_channel = 1, height = 28, width = 28) +train_iter = mx.io.NDArrayIter( + data={'data': mnist_train_data}, + batch_size=args.batch_size, + shuffle=True) +test_iter = mx.io.NDArrayIter( + data={'data': mnist_test_data}, + batch_size=args.batch_size, + shuffle=True) + + +### Define symbols + +data = mx.sym.Variable('data') # (batch_size, num_channel = 1, height, width) +flattened_data = mx.sym.flatten(data=data) # (batch_size, num_channel * height * width) +visible_layer_bias = mx.sym.Variable('visible_layer_bias', init=mx.init.Normal(sigma=.01)) +hidden_layer_bias = mx.sym.Variable('hidden_layer_bias', init=mx.init.Normal(sigma=.01)) +interaction_weight = mx.sym.Variable('interaction_weight', init=mx.init.Normal(sigma=.01)) +aux_hidden_layer_sample = mx.sym.Variable('aux_hidden_layer_sample', init=mx.init.Normal(sigma=.01)) +aux_hidden_layer_prob_1 = mx.sym.Variable('aux_hidden_layer_prob_1', init=mx.init.Constant(0)) + + +### Train + +rbm = mx.sym.Custom( + flattened_data, + visible_layer_bias, + hidden_layer_bias, + interaction_weight, + aux_hidden_layer_sample, + aux_hidden_layer_prob_1, + num_hidden=args.num_hidden, + k=args.k, + for_training=True, + op_type='BinaryRBM', + name='rbm') +model = mx.mod.Module(symbol=rbm, context=ctx, data_names=['data'], label_names=None) +model.bind(data_shapes=train_iter.provide_data) +model.init_params() +model.init_optimizer(optimizer='sgd', optimizer_params={'learning_rate': args.learning_rate, 'momentum': args.momentum}) + +for epoch in range(args.num_epoch): + # Update parameters + train_iter.reset() + for batch in train_iter: + model.forward(batch) + model.backward() + model.update() + mx.nd.waitall() + + # Monitor the performace of the model + params = model.get_params()[0] + param_visible_layer_bias = params['visible_layer_bias'].as_in_context(ctx) + param_hidden_layer_bias = params['hidden_layer_bias'].as_in_context(ctx) + param_interaction_weight = params['interaction_weight'].as_in_context(ctx) + test_iter.reset() + test_log_likelihood, _ = binary_rbm.estimate_log_likelihood( + param_visible_layer_bias, param_hidden_layer_bias, param_interaction_weight, + args.ais_batch_size, args.ais_num_batch, args.ais_intermediate_steps, args.ais_burn_in_steps, test_iter, ctx) + train_iter.reset() + train_log_likelihood, _ = binary_rbm.estimate_log_likelihood( + param_visible_layer_bias, param_hidden_layer_bias, param_interaction_weight, + args.ais_batch_size, args.ais_num_batch, args.ais_intermediate_steps, args.ais_burn_in_steps, train_iter, ctx) + print("Epoch %d completed with test log-likelihood %f and train log-likelihood %f" % (epoch, test_log_likelihood, train_log_likelihood)) + +### Show some samples. + +# Each sample is obtained by 3000 steps of Gibbs sampling starting from a real sample. +# Starting from the real data is just for convenience of implmentation. +# There must be no correlation between the initial states and the resulting samples. +# You can start from random states and run the Gibbs chain for sufficiently long time. 
+ +print("Preparing showcase") + +showcase_gibbs_sampling_steps = 3000 +showcase_num_samples_w = 15 +showcase_num_samples_h = 15 +showcase_num_samples = showcase_num_samples_w * showcase_num_samples_h +showcase_img_shape = (showcase_num_samples_h * img_height, 2 * showcase_num_samples_w * img_width) +showcase_img_column_shape = (showcase_num_samples_h * img_height, img_width) + +params = model.get_params()[0] # We don't need aux states here +showcase_rbm = mx.sym.Custom( + flattened_data, + visible_layer_bias, + hidden_layer_bias, + interaction_weight, + num_hidden=args.num_hidden, + k=showcase_gibbs_sampling_steps, + for_training=False, + op_type='BinaryRBM', + name='showcase_rbm') +showcase_iter = mx.io.NDArrayIter( + data={'data': mnist['train_data']}, + batch_size=showcase_num_samples_h, + shuffle=True) +showcase_model = mx.mod.Module(symbol=showcase_rbm, context=ctx, data_names=['data'], label_names=None) +showcase_model.bind(data_shapes=showcase_iter.provide_data, for_training=False) +showcase_model.set_params(params, aux_params=None) +showcase_img = np.zeros(showcase_img_shape) +for sample_batch, i, data_batch in showcase_model.iter_predict(eval_data=showcase_iter, num_batch=showcase_num_samples_w): + # Each pixel is the probability that the unit is 1. + showcase_img[:, i * img_width : (i + 1) * img_width] = data_batch.data[0].reshape(showcase_img_column_shape).asnumpy() + showcase_img[:, (showcase_num_samples_w + i) * img_width : (showcase_num_samples_w + i + 1) * img_width + ] = sample_batch[0].reshape(showcase_img_column_shape).asnumpy() +s = plt.imshow(showcase_img, cmap='gray') +plt.axis('off') +plt.axvline(showcase_num_samples_w * img_width, color='y') +plt.show(s) + +print("Done") diff --git a/example/restricted-boltzmann-machine/samples.png b/example/restricted-boltzmann-machine/samples.png new file mode 100644 index 0000000000000000000000000000000000000000..b266f8eb6eab0e1c4ded2d8faec14d2f1e499f84 GIT binary patch literal 191570 zcmce;hd-Bp|30j($Sf&)BuSE_Y#}4jP?Eh0W$&3}CxnpfRFb_{$V!sE_bd`hSy|o3 z`T737kKcX&1NU{gK3yO2exL92^&H3ZIF9ETa8*Tq|Gp#pNJvQbD=NrdCn4G1Kti(3 znshh5<0*UA1b^(bl~%k#ivPHg8u{bj_gX33wk07Uqapsct$pah2EHk5cTwF=)$*>L zqrS}@5_5e!D>F+wGgAXrhdVa5rj{100(|%{FRO{2ot4O`Q~&n|_$+OVPx&Tvl97+XExKu_V;yz|RwW7(4aLwji^d6X0vQ+eg%Wu(VzWx{{OjyK+{36j}J zrLJkG3JN%vdA|0r?&Zrb1J*jZgQ<+`JoorN6wva$>l+zgRx!fje6#36>%6a)t?jzz z{Zp^s-S5d%O?g+5*COHihvX@(%K0(x|NX~;`-h!lzqt`DghGvvRb6Pi}7RCo>^c z2M32J`<8g|nB}n8vbWtVeR%_crm#z&rf^t>AY94DcNw2`5ohZwxM8Y#Z4li z*5I_Wi%WX|?J@3Cr$ST`B$O8x7C5O$XS+@JOC(&`?)lJLnr*L-^t_vN(o-XW!vS^U zdHt~xdAG`2+!2>%da_uQ<3u7P{!Y?r6j?AS$DR*$S(>@H&*#zo9iCEB zZVy)4y+&NhemjX}X6@a%-NxFQ%}4qIO@Q~#?H8i#9qlsbx39bT*BQ|4n_F?u?J?hE z^p|B1yYzbU+c&v}hK79W3Aq=7rZj)o*BxC)R+V>rusy{7NHsp|?TvTG3h{2#O-)VH zZOMK$E_*1M$-Syb0_(Q>>g(mGTw=B#vv}wq!h{P zd6~;gij^dx#c@?kcBe!FrAO!IDFS4Fe<>XO^Oq{H&Ob1)row&ws+QK1?rybdI}yRNXUA|o>UnqfKf81k zhe_Pd{=(#So~TB)@CH_zfKqKqSBgT*KY!iILJUJ98o&BKeq8;vF)B$d?b_|z0bYA3 zrIW7qzV6Y$IlsDo9Ix{J{d+1Vrl8i=t2kIvA8EhLh?XfCuJEe*`aM72|Dh-}>w82g zu}ad`*5)9|kS*px7$jQHqe$y-~W zUR(U$@s;veNWIq$9-qKKGWoYOG&EPWwf6_fd4(UlVzV>tgpn2}jec*o{(1K`!OxA2 z)_=Y!oAu@H!t0LtPg(kK=S?I}T3wQi-{{w`cg`)`O$hb(Cnc`laemNKEzPG&Ps-fKT}W;lt@a_intH>5^6gwsRjBa`rtA|Dmx{~E5D)gfZ|0V^_!MoB zQSBWaq4Dv-pFdyXj`(a*d95;X@#hFjAl=F8P!4&k$xqbkymW|F{f*THhwlOY{!#`8 zqo@OUS6A2D%uHX6jQKDL z78Vw~_w%N^uim_o=8JMHYnt1edv><#XvC$^7ccge>x|s}=yUY1zP|ny#*V{^^V2dD zU%&b!CbA;=5&Um&@TBXM9~trZCytx5FI2&3MgAYA_%0kYde z6>6#1R)$?EQHolBESgdSRBz>Bec4FqokG9TE?{DAk<`))L z>+vTR6da^vk*Ixq@I>NOZq`tR*?VL;>g1A=lB;WLh2M9GF;hNx@ZkB``*dnqIzfAB 
zxq3V2yO}$eTUtUZsqS(G{Qv_Kt)IAi*$PV>eWu7drjInN3HD%sh82yW8J=E$Jn=T zyF(RrB2RELoA`T`s>{pE7Z&PSI8K}(>&eos_hWhB|MV%9g|81v5-XM1X{%N3(8&}9g;ZI8wNM&ktc z<;zSXZhw?x47Iei2WBfh{z>rCkx?+7f0$!f?~t}hUc-9y=<5M{kxO9#q$uKJ^D!zZ zGVbo;!AzpQy}i8_1*%C`6B83Z@P#o_Q+vjVI*t#P*qYDZc5o0HcAD6)(wra}q7=n9 zhGggF;gQzT(kkj3%F>k_EOVqh60|!gD9F58n}S))BrpEqLy}k#`+aiBp`sNfC5O}0 zGG^#R!^6T7yY2}~H|`VlcU$VZHP?NZj&65>#{L40pm=dthx;CSUvFpMO0bs{cK*$| zd;bydtctbAWtPs)&Q}->?-yTGSEs*p>C&&YA6d3LGmTr~)l2P;UsqM7rl!_k@jH$v zwzs!$H4&V~os9{} ziNsM5Av{{;H8wTnmz~W|q@Cl>;fu&jzuJx$pZ4tDZP5No5k(HA=exOMCFHz#H;5HUhZ!O!p35xpIY^S&W`1Vjqe!8yj1(^B=AkFJ7?H1)6u)&`?G4 zXRFS*uPt7%u;9DQMSHg;ey(*<^2GJj10xUCPokG=XlgFB$+NKW@ll*Uefrm*KVB%| zjz4FaCX0)U_5N*bzD-N3DR*5#$*l45LtYcti!(r_SX~~lG{46XDQj<3kG_@Eb?=&% z)+|+zdXB*^A&WuMLx-Y-rZj(cPq9zkWS$KSSg;dBmk%xJgbfE-yX)ZA}lAog;#F zac*S4BJk)U2m?uwoe78Wjc zU9l+o9>cG{o!-||C(E&S7!4#C5a-y$udx~u5?_6*+1{LE?|XtTp&FrkU%22oG^~(x z)qir*Xt30tY}c+`+}zwwGkT^-#vy~etK79kBM-=Y^@~%h7Z>e_Dx3BG_v0oOHFoLS zw{J6qyeTNxF6ij#DGHj${X=`+vv==a^W8qni=n_yn~Pjb7j<_d2Q+DQ5lTBfXa`J5cv<8opg=w+s;#&m}!1Ztz45 zS-*`srR$6A*4H1S9hNH58b126Zen2+B?Q;VP#?@xgYzT~>&utR?>CnRtZZ$)q>~=1 z^A-khlI`67$w)v}R#xwO`)d<3GjF-%svw4QRJ61;q2{Rl1XenD@F4x!dpG>dtET>p z&CKkJxa5P|g4SMbBKQ(jIU*urb<~S8+x~~zfddB;(ZG>p_&R#@C@^1M4E?S>dnW2x zJZb@-sF|4dJ32aEjuF~_^Coe+Icdrnd)btpAEnvL&cR{5xn@7Tgu>!w6O>4-43DhqhR`6*XYnwvix(Th5E6C`x+~nOhG{bV=9xd&Ey$-oTwE`>)HF= z`F)4bwPIr#(dW{2D=Rby0wPpXRP0YE#tYkW(ffXHcfX@(bMxlGqM)Pd`}glBYEAm> zyi&6tsQgv@?I(LAY$g?WwTh{b?zNvnIjC7!Mlv`{fDM7{I%vHcujwz{xOr3m z&$p(bOJQdO1p1wV2WT%ijQsxEXx?3dc0nd4?$=@h^H`Kw;$aPo;9$MS674qaVc;?8&7(*0^6<41n@5wp0974u!qPYV_bj4El2KXg&YbmMlNl-W5-p4r$N-6n_^a;G^)SChD(iY!$c&!n#0g-Y+9fhi zsb8{hAC-@hZ1*HVtL3LBKkqotSkx*YD0rZvqJos1yq3smz;xtvx@OUQw0YCVZ}Rf; zH6tUH`vY$AMCj2IS%{x?DDuL5)@D+hp;=Tc@o{1S(Dd>6xPjkpc0i_U*RDDJ8r_D` z!WThg)sm5rz*4a`a{}Gd5LZnxBDpLE{4IO`{s{uUn)M0n+`YTHuCDIeRwIQs5+?_x z6?vdl?sCxD+FC(DK?;2l-6p_%pzuOU2iv)Ghfs$d#97cA@7%eAPvSHc=Lkr+J@v*u zG=<`l5;ksbQdG}?nq z7krcZ#EHNoM~=j&rl1wc${O+0zb!2-ZK_z9s8@AzIv;(R>s@;KY*=~R7>;Z5=g$%I zH9%N@e-o4h?QZ+o-qZ8{@bPbGX;Fhmj%#shpsHsg0G}h+JewL+v(a~Ad z{kNn$^wRS8WCOm_xamG#ySKmC+AwCsq(CFA@$2H}6IreDEbd0(!}>M8)FJiBzkmN0 z8gO@WdyDsym3@|wa781d+ajhVLGlWR+>_C*3ac^cnEUGGE(Jp(0siR5mnS|3B`bz= zT{AZ~k9nZ?RUw-Jpk)zAf;OQgB|l&A-o1P5NW2$lm*NjLo}&Utqm&y7M4L$*3aRJ7 zp^Yy%J&TT3L^Y0U#QuH_fg?d+i_9v5QPG}WdOdE& z!^`_B@X)DtRM6Do;^*YMy|kArmbx_;hh2Y$N++?F>s+~b@uJ<|-*-x#7T6C5$;}sm zsM*-qP~bigR#fDeB7znHoDq71=QUtEahB5nV@5(Tdx;L!223os+sjkOX>8ZNef!vR z)GwopI}9l?hXAzba~{~&(A5>8SOE*wiqnF z@NetqKNXZhqa5}AUcHbeln5@w&)UGqmK=GFl?c7B^Y}*LL*+Ul!;j?Gc_J2zDmR1Z z__P#o{gX2@mylC}4zudlxeq*xi&F+HUUU)vIyou!hY>Usm;*UaPtRwep%(yKw0QC{ z*3wY7eeXy~EiCN*_QM#=hrd3(i1ql+9^E>F)9xD!^$5c&208!TGV>D$_L+{?2275P zU6A~@aptU`AgD6j=}-0beqe7>U%&PNRc2CDKF1cSppe#e{n|BZai?Dofw)prQyVOp z$PV$n^7h_&&%)vj+C%Hoj~-KC-9ZGDN5EsIINBRGZW!6R{&EsKBId*o z?n)K44>y`Rq2*ZWjY|l8uDrg>r^**;HtRh$_Ap7fJx6zqpY{@(aCQw%_@=0G<;tEK zex{EZ!{7#h5h#M5fos`K!AR=pxI(cafAL~cdU|@3U_d$hfqm99oyVJh2;J}I^-J-f zrKQ!+QNMcQM(5pi_wV~1-(;i~+8ymq*SDpns%luohYS!D60(UO zZov@*^t#+XO;64r(}b%DR$f~{NACON%lUV8l@*PJE^rqzVsyDBPPE5jJ!2u?eObk^ zw5&`r=W5H^&-Im`j!uIeVh!M75D#aMFffFMg`qS)-r8K}0P@4El4sl^hsyNwflLFC&A$>QO@(ZIsog!te z89-`)Q5b{h`SnO}4F1K%=e>P=`rfw&1YMXPC_06n3v7?F6^LSJoBFli^@G==M^@|0 z=J>P@M|v92I~W8npbv9TJzfuX7KiHU`# zb0d0xe}4r7Loa9om_yTwGx3wr=xwDDitK4>+P{Z_@u6JuSoIag{ehoaTKs@vL1rO} z2Y9RV&z%SWjc6RzU{t;F_m6OhU`RcIzDkU}mt--k&;e`6BuKc&r;ZE^Frf9&pSepO zRr+)~ia!M`9S0v@bJE2qbhvOXWogX4Z>}T+V^IAPFV28b&j-)|FAT^~qJCSmdIWR; z62y98{36OM0XRX#)}qXM+yCe$hH!vESvk4g1_lO;yV`BoVe`;tz3zh{nh#X1m`ElB1(WJ~U4iycz 
zrP!wiA=vue4xwjaYI*^H1r-BdE-UjxW9i{d8;j1glPRH-m>i_DdWsr40ZIcOKi(HB z;ohJ0ea*Dv%@|m7bbK_XV*4L-1l+lnELRXsV?e(fJeJM$QCV5Qo4LYNQ%waMSH&w= zJZ5U@ePeL0ZTW($L^M zW0|5H^KsK*rZZLOdnKY0Tt&tDyr{)ML-?_Q*q`e^EB*S+3J129_37qTB+Yei_4nb; zKRsn*V(y?;ygR{5Mn;y@W^(oV^+z}nedq)PN2Zg7kg35(&v~%<-^_pkT{cUn-0bn# z;^Ja0kk?fJu$2)HmVkf&++Y49L73e?H8;yWcpw1~VD+Q>BqtYF|K-Q(VQxQ%#ogBD zied>3p|{wYL&tsTW|>4gxX9JnycRvdBcU-df#8)bu3dHgU4!#wW)9xia04ImGnHb$ zrKRQK{15{=xRtfFr!2cEEz8LR`+$5rp-w!~r>75+GaG*2mt=O?Acr0(b+9bfoJ0Oh zK=$dE=uTQk+V?XES;;PD6b(O$iaI2pIM*QGH1(&1>KPImqp6qD;g2-7u8V|NfU$qD zIvs}&t}MLq+!sEs%(OHa%tH7N5g178)xUqmRV(pAyD6EU04F3}Ns!RuHA5g?Q&;!L zwBbkB16>4HbQmQ8MIFQK==kW8i#X<~r?IhL#3c)RKVyc(^<#|TYmPhWg>fEP;H{A% zha7d7?bTnA){4ID>qB!H$vp`vN7QPR z7a{Y0?<*32cOJVEU)!CbN$_J6C>b~xb2_2^HM2agEn;fz>+8d}2G?`Jz@bnB%0jZx zx>P)JRFR9FQSt#JW*xBEJBVrpHDR#Aok1Y_OA`Gc68D3d5XYG_w9(Pg+$T?Bz#w^W zd(nPz`Wolaqr$Or%B)nQHI$OFAVH9eBqWyQOOZ0C1l82T%!FdBM?Y)>56i1vdO$9D zVXuwb+M?;@IFY63Q`bPHKYMndIl1x8VU0qw!-!yqpTqk>iU0a#rS;w^r_t0CDEi~c z=9$Ow#tms%6_4>TUkxng1nzrq(zUU%u`Gxa03BJ<+1)LXQ`~VaN5{-Wc^J&$TzB0^ z`a=f}uyS+1?HEbD!SgIC3IwFB;)Z!!mRcI7Fg~qf5_W=gJzu7(s(L|ox-0Ea=e+{$ zmCjN<1^IY`AG=(GjXV{VmGNHRj6NT3L&21}OP{_4cx1yS8tuuBj>6$ixjj zdGh3-`x$z2jC(RFDioN^{V*eHl{xI+wr!hG=p;DgXV0Dyoa(S8SEnooFd2(O|bwdIjI08=D_VyKZn{>6b1%w}~h{zEcU-GJ_Y%*rK5%m*QLqkl^K&IYjBe37T zdq>=@4p3N;PK3#7(7pqiGbC` z0NntN{%_uJVVpzBN$x11t7Hgcq@sGbxxSK|n@f}6vE>|d@g)8z89KGMd!lMK*2_fM ziKk_OfO0r~`As=LxuNP~3HjG=-ndMI|MI&dbXy$9>&F@Z34u6OnVbWovz{&4x-3%6fdh*E#XOUI5+NsF;{W@v<{_ zzl;|4z3VyBO}6CUPcyl^9)A>~YR4O8Tn`%2gX(19{>OmC4y8|vIl_2D6@FpVL;tkm zjApu=_xE97V4yW7IYu!jg1V`mO7Lj@+VB3N9QXv`Qv6_31f-Gm^6YI?5^B&Yb>4ev zN2jLtpsi>XTWv#M?=7`IG3A_VT-5@?An5R!yC1+7fwMx5{YLunM^>dpI7Nw8FiI4G z|4@91;qU$ZImH(WoYI#slV>7e-o4}5xo3}L!O8w99rF=Cbjr`p!hin!p{A#ww(=41 z@bFk&UCnPOZH;(m85$Z2^5Y}%W@)#wY?zI_svhD5p~bu`Ej@x!0_pq+97>FMN{`khvPu|e^?cH_p*Ah{iY5}!UP@YQERSccFu zh``;a|7&H1P_chu44nXQB&fHmIy%9SR-sGOk6ne<^W?bdn^7>Q=N)G0iBLtsBJ^5> zzm`Iv0_V8R-~N|iKG-1{iaN}GfKUWo&W%^Mf)VorUSA#>EMzl+{4ndy-WDGpFQiL| zQJB{}eSLQiVkST{mqW3GM(YiJ1vOYEtH%)YixA~<@8N?&D?%q`XRn%?9*4wa?#K`* z%TCaCEiIHtG^85>K(6w^W^11D6`~=bCps-oQv2dO;j*RHks z7Ta;5AoSt_Q&byU!f?8AjrK!bg%%_AxXjXNI+#mQWzd$H363-oxcMK7#MR-<@uH=BizO?3siXKd@$a z^`}u$emnM1=s_#OEv5u<@(AL9k&#j2yLbD5Xowj>E|@Vur`&~&pFg{!FLt>Q^EE+B z9zJ{+6VQ}2$=&8y)1skSao9&NUJ*kLD9_q*j^3ZVO7CaQX-l-4@5uHSS+W4B{hA1o zbWj>&`%wXA-WmY*`DHGx$_GQ42V%R|dc*oay%4eiKgfvMj~{JR*qB51_(*#anOIqL`%027a=f2o9S>Lm+|zM-5KlqK3wiL$~51BDyk$?bJ%8!zdZg|LQE(B*B#M z5pyR~;bzAb$z3G#(Rgcu%7?uHbZ?PUi`t{ho!x=4VPTJ`IpplrOf>0@e)*XB_fBkiXBoA0_t;6Ff_3**w+8E|8unx62j1u=?Dn6VmwUW?$ zNQPDqhGjTlKt&1~VWC#%c=*wCXU8r7ob^WiOBLIBfHtm#qecigw4!2Pm z?=bIjaNjPM#c5BFmxj0tT5qdx8cgg8XjiCCA5kmY`uq2x0%P9j?P^K4Uz!^@sj`Z7?=1Zn)qMHT~^Mt>K5V!yk-=?NkL8@Zfr=7jrpQT+E8FRSAW$9iF)ggK5+r1as@nW7J z3IzoPS-H60$c$TB#%R*Y!e4|Ik=JLYdaty35km?-6=BDSIcF&c7={VFIC~#{2+fL7 z+~oi$1lUddn>dBRjH4pbOSqi}4BuU@)AIVEsK=IAb#*mlv)SD86UnWi2Tv+#(f{lRzJH2leWOOSKS#Aq;hlrLZpPQ~2BgOjRm14Nn(#*|i zyHews;T*#~lp-^j>o{LU0aM=mkaKul&X>=gLEt z!zR�Vkx=x(ZDjG1?2?nCYa8mT(u55B?a$VBC{4t3mSP{ioS~S7Oh5g6MbXvnb4c z4Ln9?bK}-6j&tXtM5D@+WC}k-8b~n}$ipq;7`)7hdOU{yhseg@JbP8%m~5qYI|Z-v z*q^@&N=g%S6!<)uO0UCU+Bqaq=WCDhlAoWi2WW5psVZ+cHQgQ4vD3fJm20Z1H9ob- zOu{e$!sSs&2<4J&Nw6hS4pRzGd^6=gwM@I=TFVQR*JHkZcS3r{jlBh!yCW%XF}N4|H1ckLrrZQKzS9H1KP^ek=w5TcRLYg z4hsv5q5h8;=`fa5YM2@pW?<|bx^o36_A!`bLhL2H6mOJcN*P5h0<*t&io7FP5ex_n zl))J;&kqH@e=kO~%c`oXUkeN0uiF@zfkl`^_8vZbTw$;f zz@PB}=+A5S{%QbrMva}H_%;mol*~%KN=zhymq;EZxTJ3vTtPn4+YsKM7GN<;kw~5` zk8Ci4$%>pnnouzeSAUIZnT((f;Q{%x;M+NS0-*TKQ43 zI23=G>Zk=zi&$B^ndRA}@caEhJKguTrJ3laIncXS*ViQ% 
zJd1i_jzu+6)6#n3{In|E4ik|Ip@sU;p*?Wc#pDTzDd~7@+JopoVHJrLN-rORO9Kq5 zTFkZtm$8%JP_z=5?JtK)RDidLgskhfvlQ5fhs0`AgClVOztPO*9IM7XJJm*n>>r z79Q^}xc{lKQ3fU#g4ik=b|K@5_{?VT6VoNwR|`j>+kWxz7F%pHw6n8|N|2OD2qDm% zq$CUYG_&xt-*0=})BOA0vUGe{k`{ggC7Y61+%RhDh^VNdRISdTUrS4`LD?0i(l2HC z0;p)fb_S*5s=K>;(GTMje>eJ_zQ2xoi<7^hp|O^hkamKgn(z~1mS?uchAgjd zE83aMn9m`2N33rqA@CGIUi`FKx*S+1P4dY^Fs#q)4JZT zR2KqtBy`gH5EfNM#l2lUJ*qG<-Mn?H10Cu3xpO@ht{{aPj09Y881OQ-0keDuXsQMM zDr7aewT%^@2}k-YB4R1cEhS^lbz}8(#_c>`_{>sK6_wnByC9P~g(+|nA|p6iP(_gC zo>u9x`s&YCaN*ZcqF%L`l$s@+?Oov_fqF{FgitpD1fVj}knW^dF;@Er<1HcLF8*ne zl)R=`pqcp4?0s56RGRh+ii;`oG-yBf?SL*W74MXcAZw( zRqT;ZPS5`^CUY)*U##ku^>4u|xdqv(-N7p6TX})@v-fV5J9UddR0Q>7^?OngZk^HD zzC4FR)~n@v2R@pKWpONYq{CkfN98!&UiB#JR0j@po{axD)y3B`;5)_Eol1k>mzI$s z{6t8r+Pb<6JP{75%6&eWJ9d+2i$1GUd+_;3^EI#J)k-qk>HrVnSW(AdUrH7g!WIg@ z*_?CCpUarPJz@9x4x02OsQM}M=1M_{_1P$o@bH99P3DlE4Ca>}_Lz-uZ#Y90Oj;8! z;N~7bcFgZxT3SMpX(a<;w-2nF{QC8x7;^~t!_rB6yYauC?m7@LO<*R$ab1KIcPv9~ z;H&HBsXue^e_7eL>vb$9{-14%4ccnvu-X3>FKS2hPbhx-*YErPTErNum1Kw|TlsI- z;X#Vtp&LZ>Y4IH+dDV4~2#@ApfgUv3D^FXd!mo03;*9U+^*R50FafXo;J6CZXXw6~ z{4=$Zl(C$B|1C&3?v7N?$$5xVegn<-Ivf>*W(sq_#(U+BrVk>vinjt%mhNrMKZMr- z&0-%JS@JC%XCH93-w7wfw=aj+M*eN;zRS#;v1 z+62o#g;N!)?C9UE$a8obUI5rcxj!;@JZ8N$H?AAc>FVklF2$M%2S6ys)*wdVu&ud~ ztspdzO_(VbtM;(8HpQGvD2o0^1?;`nk+3BT@+}eK89O`F?#{GZr`XxszuOTO2!O4Y zjxQeE>AE`KzkkoS7!vutvXb)U%NO0H_X5#RK($_klI;4;vi$vf!j6H6la=-O_e*#o zYfmIS&Yf}X&{q&sW5K)54VTBj)>9U=ntvsze&*|f6KVpicvRHa?%z+-{?3!J zw*u%0p0We)3 zD(V-gSj4e^{X{6XEcPE3@YZpiJBzzX+(eCxNH`XzQxYD$gw-S^UWt|J>GjlC;2ADm zzWnOkuoDbC)F_7<1@{g%e6_(XgWmrZk`8B7qhf}dDy(H^g@pRuhGr{aFaSerK2#b3 zD`xrD+K(GLI>UFWn}F7a8rbV_c5KzKRNCZ!RiA+|5!YSSYl^q-||%J3qEc5u@#Q))HiQ@i3)=8 zM-a#eB!jI&f;#UT9CwlPTd*jK_)wfQdoy$^<3Y_#OaA*KIoB?V!b7pQu(DDTb8i&1 z*H18oa9w^bbv^Z2t--<>d(fm|44qWeckbS8hj4gBa%(lYrstn)b*!-MG!O=7j=EWg zU&Wc)+S*paoCNk3QC}*wj%FfH_<7HsJLlLnXt7Ocr>2HR@xe*7r9~+9Vxda;CrG3a z7CX0ZquzACntGLc8UgzTV26jBI}yMD-6^2yuO^1A@?X_t&z?S&f~rj<2N*lV4zVN! 
z)*h|+sVuwm8@~JFa*Ij>>fu2>I<7(RPs@@(#Pk1hH*aX}L)aA@sz_Ysz^5b-tVD{_ z`zFCD)?eu%+2hB=HeohDi=_>?rIOGV!x0x=RZ@^r4jegh$paa00&o0cz2sIXv314r zV3iH!L~KdRi~$fh9Ir@RU?8xSTiH@&)^>fbU&IIenxh_HiGQ~YL%6e}wN6hVv^1dK@>a!;cep zjYRoWWTm=_a_i&kYa{$@j_niT8DrsFh>|chDey(nQeT6niQW|CddPn9Nx#5o-tKkR zh)Wj;+iU8Sm^S|VM zZn<%aW@ctZ-#Ezk?%hUR1-S?ms;R2;M><6Xg@=7+58gn-9ONq}D6Z&cKpz196y@Wy z7)beaT_MQ9y*lvhL!mnFe++VRGE;jWD1ek0}lgEb8v9%z$<)wc3jm89%GOkQdvF3BFueBrOTIVaku-$ zya=E1fMun5QReit31+LP6Fx@>t7oB$lM_Lz5k5pX7owJK-nb!FAW7)uga+1q-|2;& zTBHUml@#{@Y5WroS|~__l@#PPd`PpIAD4KcS&&Db(j_DcIIj)iH(rHE0+PQuLIR?U zuNGgCVgiiSxPTx`xUd0({U)ehT$L%OF2xb-nHfO^Y*rL-PH-IL?8SEK@DT0?~bGMeDiEXgAq*dA7H)x^^w6+4}VQ4 z*SK*o-(XMV13x*Yd4B!sou7-yRI z08~)P?^f8`DG?1+R6`i9GWVK8!Jh@0wtHVVr4&wHoM+jfZF7 z_0)W29dr0GVT&U^bYX%0*-ydoKFA2`%^nj$hkHf;<3TYFHNwP!t~vHa{PXfzYt-@j>l*t!_BG5>_7y2?-VcEyJ1sh%{}99!kj#G|D5G zf6!^Ib}m9Tq8D?@ZgT0))NZ66!(S29kbE#>6~-ZIM#e7ag zRou?IYk&R|)NjK2suCySgODVgP)h>v%MkOjxAYk8`WuzF#~?2-`Q>S@ErFkir}tM6 z{7+&b!VPeRksu`zx_&)lZxO2>^!Qm%pQhgD^RjU*{;Ub00%V$eLJ^!sfo};#-4_?%fD{i;=UnBN*n|7GZ7{(jOw6KFQ9lKs4!Q+ zb%2@}$$Q)2Op-R%I#w)hrF9YFuB_}%ytE(ER)Leof=chcHK>39!s|9%0+A6jII$t1 zs!EHWnz}0b>8lN-K0DVoe?UD$uyp3601NU#u5x~A?V5AaCZaR!l^EV)sQw2?>E!b`jqGUs^S$m384@b z?Hb%Txw!&3^$$h@gsqBV$+%E97ibPB3Pcz2)?if#_XWZ0VoXDw#RrEJ|L^iZf`hx+ z7+HxH1@;fXSs}7P1R25$Ng?S0+Gs){Hz7ecYCQ%e1g3PWe_QU@nqvoT0c2Wi^K~Cq zX7&F3`AAXg3+@UwVtou1zm}JMK@t+{M{vbtQbHK|KYTM>K&0g4-U^&E`6q8b8l+aF z+vkI+ermWLXA;-HTE^NiD<8e2b~~g!JUqO_oyC2TT9!R@h=~v{2&tpAoTdxzrjnR^ z*xxP6wD{!zqm0R5%_y@Aj?#T_n2>Ip!y_Yo(E!;`p4<<%BeuSdP8$I{N%|Y;W8|bc zYs4k!6%@oL;peBy%*-6DfHfnSvl6>{4nOXeFT%n=M#pQCcZ!uYv2ilvOkbr(5H>yXAkdDtr$xOt@)%g_VjG9tm7E6g#j`o;P^DzHjJeHkR+N z!J6x>Osy*H)p}p>+D&+to$_aS{~{C{RPfez_VEAyL$8HuGiNa%#9U; zI1cuNg@wTa@dWA>QSjEY_xVU;7$zY&D+spWLI%ECZcAvXM@J$kqB3#Ixg}V84#Hsx z``$$60Gc%ZPtA+7_hEa|FVYE+ojLwG^MAbnk1$QyctxZ45*_0?wRBXKiwB3;)-(YsLZ}Vxcda(fqjHi0Swaw)>)yt+q^Z$!ZIdm ziZ~U*v_l9*rcb%>V=$G!?Yid;nB(OSwh4!^vDk4%=nwCyQ>O@K{a(Qlyp!#>FIAeB z=E>U6N1S;&*Wy&_l@*DNaYQjF5k^I!m?p5VmD)_woHUSp?bZ*Rq6YQo zyIu?{gY`fZj8f)BcX&2UG4ND7zlDwV58wA(t_C=8ek?DWoWj6K0w~k3}20$LL zY)|M2-HKz%T{oc zf<xz3;pR4tRmA?6UjlbVGe3~~|?qyJ@0ma1d^I&%7Nt<-Tz$q_bf3@l{l za@2W=4LE#8exRl&%;HBZ-!JSw$8lX#bA9J=mm`XI#f32*d%=kc-g)fD4>FAXsLQPE z?AzhL0QHOQdYexVaD2qg<>cIn?G;!JuxaSA`9t?KvRr7u{Advl21_)+5Uk@1*gwO^F$84F`=>0>ZcqxL$4Lt)FG zsLgZTKYsiO#LxgBfiIK*5^UUE(9_e4*_MWqUR1QAD(=FxB>bbYFB@B(<~`)E3bJ>I zLtBK5pWk;2QZi-?w_jCzggoTa6;Yj8Fjnk9^@jU{@M@b_Sorpt`C>T=;{5z@c_5Z< zJ|!7@AN_>6I%ZR^#T|Se$k|aSuoscXv)#yhJQaW+VlVuZgiRNiXppr&v=D6i>P%V| zQ2+>&Z0ZemF)>EiPhlCVd3iC=ta}Ko1seGtEYsj^<)Hc!i?EQD2oEIOEb93t)I`Ha zd-^{X7oah3Ek2K#St-|_*lQsrpexh@!q|bi<|8Pe9Fw+}DH0upzdJ@2tHqJi==h74 z|CTMad;_pLe~c~!q(g5x6(8nZs6Gl9S|A;7w*1?mq^2G!vGpV0EmPF%0o_OhN&q!A z@eB%#;Z1XC@7~#{|9rHI>{DHxk4n6lBJ66fVf*46_&k2f<1ZQfxcP0VYH8RQC>&_c zF>0V8-WjGVk@rtOgMh>q(_8ETV0VIBL}V?%=P1|=VyUNW@v$sBrtM_lofqdUnK#EM zJ*F3n3VEg9rKYmM%}>j9IT0?b-aCyQW&{8**T}|eNuz-ZVj=tmieCBOZ!tm%^1O86 z;KDF864t18Upy4U1++0N#z7b~;k#{z4{xq?H+C;zLzk5mbo!-t1Y0mgR2V~Q__VvJ z_W2C1k0F}|mmx|Br*|Ad^IL3JI&}>bAvHACa5idbuQ6e@ z1GH)`F{#R?bXYTlfw=)Apo#@zcYp%$-VJ*ERKKibo~DY^JwVQPyIr&KT%qNN z3cvzaMChhFp69|Vj{4nMWT{K6)(~4**ewBy2k$c(KQ=ij3;fR&QBZD-Fah^H=zBHk zsse1L0OJb46M6o@p>wK8dAvB*e$v!W_U4`;mjMf)lTtnm2r$>8eMcs~~CPdHYICz}8U4(3G0r=XqpvX^1Q z+S%y%a-8Gv>49dTD{P+k%&0d8`K~X|)qp}_S$KZCOfk%#Xl>Xd)W~VuYmC%>=o?oWL{Mty1dS^$>4tCFV4e0LHIcy zJ=)%UM%;epMp{38o6Hq0SS*WjTbYDTX_wY+E<>|)R8nZc)V|+W{}-%#A5aBEAK-JT zjnSctg$xcvA9IFe3T%UNOyCQw@e}d^B%=w!&WHVfa5{T{SFtA^AZwsjg3UAn%)raU z1kFww@@|ng4;))o3n(#7gl8EWFQA3{;xUg>Vpk2bB;oby&o?D}gfP`&pZhen8wjr= 
zu7Muv5pgT9ww!ZLg)b^OHFYmKqndf}fqg#exkh_|(}Dq|557sOMBbRPm=-6dqAX5@-1FH^km6P|2PXKzq|2DIvG*`KU;t?R_w(xQRs>a3p5t=E9U# zXo(npV44Ih7*m>+tj0;+HSk z0~@c%!c9WwBlY|x2JMO6vPw}apr%Xq%b&cN)+L>xCX?2sA=Yq>CxWoI2qooe+`Q>y zSRbt45O$QSFZ#cI=qk8Vlpi=?odS;maB?bcSOGScxK7Dyb>G6GaLt|IBlAG*KF04= znFvbcrr%3_0F0xxk$pB?K&>zL-3=ZnXoRUi&C3XQ4&~Oyfgl;+(kOg)KjKo?e0iMb zBxXJv(a!(%1!F;tAm<=Z5XNDf#<#eyHYM*nJX7y5vL>|1gRgn|>=}Wlh@3#hQuE`< z=A}mN)dQ+gW9tLV7}y-REUL^t?uA$OmacomjtekjbMx>#`k&Ds@dUGIj@|d`yti|A zf(G)2MYE1I8mcuv|I-~bv)|Zl=b$dn4wlejRKTbsYi7m^-kktkMtARy!@3PaDFdEG>D-!?tz^0m+h=~Jj63^4|4-U2{(%#^c z$^|=dBJ2GlVg-hhS?mKIg+lDxc#O9^hjJ%t3=^W@l8=PlWYJ88~Y<}_`$Bv~~qW>e`gNXA`}=c7O{4ahmP%Z_C1vRBwG~ z4~v^#!O7GcpEKCM#EBk~r938%rCRhAwE|&E$`A^sZ9q6o7OgYn#Dh5C;J`-VHb70n zm1$=uC>O#U1pb5r9!iiwqnY3R#{+NT;X0rxL9}D@2xhW$^_=aBii*S&1ZoYePq20~ z{e*L+s=1j0Zd*J^+f9kJ8M62U**_LFOUfEh0S2vreq@+J?l%F~I>qbZEkVg+_)c z8n3qlr6L<*7zNYIO0uVeYBkl>=Y!RhQX{eUi;h#aSPj!DIaVF<%$|h0I8DAN+s0h* z9@wME>s8ZST3MNT7*P^mgvyjZE&;ANOerTTJ!Gc3dAr-fb#80LKrjeD5LRu0of$ z2KY1^DBO>kpI8(IEepZanC6qft&?{qFw^hBe5l7ykLSaD^r8MAyD_jT2OiP~3aS=O zLUhxC?0>ApfPHhh1KIX1+h2h`B3!gEu;o7Id8%D{3Xj{lcySk=3z!k_gf@2hh2UP8 zwb3Zf&Cbn%qCNjuWO>N`>ov>6VQpq_=(&W~7azVfoeWjb!hZaN=MDlJVe1Od z0i-&35Knm&0HOfS9>vlhL5~vc43q>156|b>hNF0h78WNGJHC@7-t0|1%35+K4h9b( zo-yK~L$IqtZ%Lo&!P77>@jn-Ja?&*4P>O zkqPTZ+hO4tI6jASfO4G-M<4EF{`up?^ZdHAbX$C`WxGs#+z;WP3PV8$R!0iQuNF=^ zgV;pj1-zu|F_r(^ZGGp?>x*pXQP?tuZz`3Fyz~ptgG)iOPT@56-tX7A@8`OcdMIdpwy28ZxVvKZ_JwIKs{z~mjyp?k(?ytNm$$G&Oa**qt4b1| zeMmp+c*x1I6w(rQMWsjI<2Yg?yLT!9pNZ;x`D;#<8dOCrF0#t(xUshYf1^LGM_k@P z5Y8Ty5CZJWR-0w^^~aA7FZby>H3|6P9#a`lkplsNLC7rH_sJCv_^Pl6`f{0 zTsq;OcK91axJlh-wPFQAO`({yv{FM(J$D91%XVA|ALuoN;DIid8PO_FaBSe8bk5mRK!GJel}zGv{AL zQ$1w!-dm{jJ^H@OS#9KFwZ>%bbz^Qd?ujdA9tVKkTnE*Q3H-F3#JR2Ir_8mBbkc!6 zmj$0oAN1+&@lw48DeOQuH#h0SL~qm5llzA?AR$p8bgxNNSte_vO|O$Zt2qnu0xk|O z*wUU`;lbm_yN?_>lJuq#5^DYsdK_DO`<-l9BGV3ojf9xTWxQRt^-)1m{rVg;Zt*wt z#+6)mx)P_+p&5xpXInCM}w>|lW&dkw$(s7OagM~)uz_=5)y=+iyI zn})4OQ<300u_<$q%PM$Q%vPKlJh%y2uVMBmu7dULAD!BSZCE5U-6Mf!5}b-=95jgY^mqgB7Kuw>rp!&QU3oZc>Aj|F56uahLl^i#p@ItU z%Ujof?-mru*$J6)|HkHT;@gIRavkQ%Uo4I8bnP`ci;UI{(sQ3mtJ2P4f}x>V<)NtmB(R(-ehIVPu z7>Dhio;^Dq`0F0tSeCN~q1fG}@l*VdH88KNtTb7XIlFHM_TcX6fxEDIo0yzTYFJnn z#Ad{*br_}L^m-N4>{=BaP{-hD_wevKb(sgk=UAJSxWX+a|LjQ|P@umE0HSmk&6;Hq zV}mDAJ}%;QsWDWF@9}T19--e z{#KVF4>^O2Qx&k3=!){R|KRZx1$br23#nJ8_8I)o?He@b=9lLo4~J!Y>$UFYn~mVy^o@0E~Hb|PT7>+m-*b7R)hTSAFa9zE5_-tK=pM(O&fNb zIoK^xb01Hv@`;)!bki^Fr(bI6eA zAx^*Zw-;iDipqJAact<_1>QKjL7U|$q)&LwisifmYd6VW=J^@3ORAfH>C^@d8fZN+ zw>rqe>V!sS#}t33hN=ZOHa}as`6`E($cBJtzX_AxOe{GwyV6hU+k$oeC^o@ zKi<84%agx7&=iB+Yc#Wlgbxf?Py`0Tu&MY$D{%mM1@|b4+jQ@K1VfcHAr$81>xx}~ z(5AzTX-@^wH9QkC2^PpQ)Y>b)_4^oUS@z#OdI!YwkwO->`oDyKeQsY=#&6 zxQ!ju^+?vBVgKSWm5V`qkmRAD>t7q`-EUV3U^KFjyPN%pd)Y^ELbr$|{K@5x3i$4* z+}|My8BbZrr9%Zrw@H0?J$f2KR|I9}F?s_wo=uMO56tN1OmPKJY0~nC@`>3&bL8;B z4j?ZW`0VQkuXT5+3z>!ChB6nEb1` zCpfNyr#Hg0d(NK|_Tet#V@JO7q;N+8w_UOVX8V`~zyI^1v{E28Qf_;82wE64sn=bM zxoxmPpbJb)O7ef9w>tag%P_T*7WdBX)(V;8Zw%XE>rrxYV0Qk%J3+Mc6l_#rYle1I zrLY6W6fZr0pSW3pS2m&3x;8EF!*Q?L0OV}1>4KoTUdF9o;n`x8P30IBNhQ1(7`R?_ zY^aG@x2&4a#wa4zs-~o-x`b^1F=hg>DP%Bd8dYNW>Nc~Vre@U7&NRoj*}n&_-ulx; z=q?ICA^-&J*yQXy=Bw_Oc3s-Gl?xbrV(pCFS3aY)k6KSYdB<&tmR4xT8Q)RjXicfi z_aSMa4dJQ%`#-X}{bKHdl9<)0ulr#XD=B%HhxcOXQF zz)sf*H?9XdbsPC(-WUYiycW_2z zr>Jk8@i|ay@C3(f9-k<&XrAKzx5ZZam^~z#ZIMx{)kbB%`EG&LHBUd^s#Bp#|CD*> zd(h}bhmRcj95df+Hu(xYz(wXB`dHUHl`4ZOr~jYN1^*l~yBQdx*#~ZvHzz9*!cTg&ehaev%mKHyiLEP{fRo4 z^cnAiMxF?)*k+7|$p}FPI>sdxqe`>J{C2#vGkMOXz=}CO4`=`KZl>VVFY9Y}M}7Wn zvj)G*K^q&@kmjm&CwC_gUKPU6C28Ql!+mfd`Q0d+^+zVZylxX*=Q-i$#pYk+&ye3$ 
zIuqhn_jn4=OG+2-+F$el53QFkw?ojAC&P0uJFFTv%9km;{sjI7iLB>5zg1Uj33G*F znzL(h_W2z%^ijowXLVn1xG?-FXwE%0K=(fDR8OC*R0n+)2oGH0Frgltlwza>RpDZk zb*mW`<=o`Eh>Q5HH=Dqv7OLxs_yw;Sj$0H=*k*E)0toG=#*+MEoCt9_2?${8Tn-zd zBeMf0bDU8E2*cW2(xJtdpV>!GfX0m=#(^?+DlTPgZVF$z{BEdCT0lo9uL zpoFF$hW)qU7Dz)Smn(Kv&!?EafY^#^HX?7o8%icRX`JQU5b zR#|)C#m?DQUt3%uY4fOA++R@@iIe_$x@>N&3SRIG)2uA65wvRTUL8%b4wC4D2B(-> z)c|Vl+xHOD7iNC55hn8HMA>#3VqUlz1IhRG%+lItlt%fFvS>5;d!eJ4^d8v1-yyff z?Y+HSA3hz{Pw7HfSd{St?>CZv)8G`0eVmCdo6J;F0?Oj*j(OM3!qeDK<0JDo^m_9t zUr?-NB)WI007B5r9(N%12np^C8Rh{_VGNc9Mk1cZ{KZ-SJ`=m5u&Y;V!b&4&d`Zoi z&{3hupw!`p#K8gz=pN&znkJl_8`J@# zA&qrc-#TvNBm&Vnfh4H|5YY=?OG`cY?D$EG1r~DTm0kDXi~#;|K#a0t^UoP7sZK58 zWhUcwBi_y2OY6yMWo)5DsZx>nym#42+2NT8Zx&QicGn2U6sn@p@>!1uykoAO>%m){Je%cQ&pX;sWmey*#Tt#UUk zt70JDOp+}FXI$b#5Y{OiHVn6O+lm@0%~4`YKPF|* zn}cJY%U5O1NlE3cv0<4{uEUD)aJJZ4Q^+K^^Qc8eY58S)@kyfM|FSWP!#rxAX3g z5%bJU4!`1{%&k+aRt~rox%}qVJp0A}iBj&&FOGyj%6~8ch}8)NeP%o)hKei^3AF0U z*Fzb$i=LkO%L*EHNDOh>%iu^gJbP?KSEhr2b3Mr7?j=SMuo=9Q*CNuj52Erg4%xVV z{nz;pySSHSMoaQ_%e@{az;ZaS{~t}~0oG&R_VMhMkr6iunNbNz$yP#SL>g8KX$W;^ z&+Jr4MZ<_>6>Z5%DhUbg63Pe}WhL}}PWSs9$8$XI`;hu~UB7Xj-*tX^VDK?tqbIbx z1U?J@cp{t6f${k!o4dz>iQ4uLyK~fmgQT8z{qi=a1?T_Z!BuBJ5+}C@T1{68U=`uWAH(c6-)W?l6sI?}r+5yE`6jxP z6=0GRDi^nGLNYxNRIy4V1CTheRZhQ_Td_ljm)dK^R%3s|e}8%3(ZV_ZM<}3-xHaJ7 z)my0@<_<>@}GLn6b&V56I!sT(#V$!nu7nwS@7gT2$dZEQ8kv_x0g zonH}!BiDssw+04X5?o5tz&LhG;N!XMaH)|R zsB`I`_47|m-wiorw|H?f_J+_XT)UVBp>MVLgrA8^#JbT1nhdE4|IHxmv1soF0z{F+ z96p(FJ!w|Sh$WU9%00VxSCD~Od;ztg7wEjqQsAYL57L{@6A{?p{l7RZ4Cn;;g^}at?5?5lUcF)QqPj1Z z26t|B>|3y5&oFiMj3xSjpJi@NPOIqs1mApHep8Us+S<{7jfTP{LyBLps?uHU{m;&1 zI)AB;hFx*tx=A<|w`T~z>qduFz#HwLT&ap+-=o*R2dEsxzQB@NpuuqDQ(IkwNvf4d zujEl;zToJw4Tw}sIZg$_(cV}o87Va6#VuUgumlQcUSw*Y~Pex>p!yo()wbn7yKg9Y7%1Uo^ixr^>UBIQS7)~ zZxcz^6Jn@GG?fS5CC$RTz}q7M*64C&N(H|NRIJDJez8A{KW3C4rrrLBqIO}Y#xj6} zmp5vc$wGf0vt>gIF$}%!g)E8WqgP_et>cnl3k3EW&rG$|H)2{UDZ69#jNyfnlrV%W(D&L+(aB@B@k*|};x{!cYEi7chzvC7v8o56~X043lKc%R@{4p zmO+1QXRov+`W)&#(Rv`mnt$?nGaB!3Dn{$r;vyA+IVH>in?)aD9{9*=ggFrlGw^TL zXWz5ez7;x3wgUplz^SCSE41$78?UVS96Ecj^9kMukpVvQ2BU_RzqS58_$7mE7mOniLq+upT(B?Xg1 z=`3I}(5*35hw{6-TAndBjTO$2LdnfIfnCs-1W8(vQ;gE`e5o7OQz@GjSOMKoTvIST zl*?H1@K#se-w}J5)You|x;9EuG$!_TK(i?Ud<4*1hv5o6j;Of)n9+5=hO8y#6`ej7 z6UmWOZ{HkgBmrQwaq{&f^+{$XBKx4pwNY^~;ZuVqu=h{m3b0an(aUeL zb(0N!HeaMZd$_1yt525|L+OL4@FcDjl&NvLRE6KazrNKF3{7G`Qr9af1YBymg$8{B zqOCA)AWBdf3e9y&y?Lwm?(pz%>$#7zNT}p~R8nGAX1h;%I+^x5vLKL}N5;qZuHrC> zb16LvXF8F~=mbp+K>)A&4ILVn?3#?Y6(W#;2I~&0DLZd|+N3^*!)0_Jx0!Sl(hbGO z`$Gcos}SMQ=h}Rin9X1A&9HMkV5h*e&I|Wl;=ERp-sG{O#d{F+u+s38@@E>oIz@A*CtKIDemr4&c zwOcm2VoKo|M@+B_D&s?Mwdur6sZ1?>?t98CE&!5n#xf78LBnAF>glzj{zt$&>ZNEC z$bQ$XETb^5^Q>fL=*$r$(9LTaQPaF>iuMhwJ=YjlP}E_$SX)raakC=f4bARwGA+Mt zuOrOJIiDl<$b}0V9MlVE9`ZHXOt~g{cJw1?L{`2$!O`@YuHn6MaD-A!#pkSN5RwWy$MseZ;O0$w8711CufI8CNs&-JjIZQ=%$p3cq^ohXbWKl=Qa zN`_m~iZZnrDe}#5^O5&bM(0Nf=Pm>9IoK9);NHI*kYRV3wpQ#AsC=jfpK+{-oHJo? z01;!{D=CbXqFYtdDM6rw>|;C9S^DP74Tt{c*x+2+Kpb>GlCuyk5RqTd{X~3NE!(rc zm^P?y;W?$P=dp{)9Z3ksnK9d$xspbEw!J!`f^l%b(0&1#&j~1iqLuLlAOaE_Ks#VJ zqiPu{6!{KpSnec(yn6CYq=H3&`w(}dbOqe)babLuN$CG@X8rz-nVh4~sP0>~{rc|R zyOn^FqOAZ%5zPy4zVpUEzm^@a)pFqx9jhJNZ?dI zzj#3V=44bd_M4MaP3YKa=)ONr_=?L}_`?PXB{EjlivLGpJG%2jT2H zL5DO0OG<=gOqy27sGo06Hf}lxv~1*>Pg_n;IWkVHG50gpy~5UDD4q*o&7{ow(XlJb zCPW@GH#FQnFPc35zX3sra0DknPcUIl*9#}4>!bsiBGfR)wHGI;Aql*I{e9KcIJ9D? 
zp6_932ysqm)a~##&A_fDJDP4LP;aH002$+>oqBNQYSo)_GW#Lzc%{QV1mZ!4Dg}lE zto!xqwH?K`(ZK6J)Ix}7vWZMs<1bDP z3{9`TN#o8ZN7Eovo49ijo3_(8Fd&bmrwpjE*C~Jdb}}Ki67U6g`ek%o^=LPoEB!Sz zVmlex{kyo+PVjHZ!BMWrn8}=8-YKpsE05XD{O?uXd0j1^_wV>C*kw>AP!gw^xGPXxygLT9>OU2yj%Pxjo!i?7?h z%0IR0TODnAHuV-jk?9w7_1Pd&3z*DhaQ+?~V@L z>(uh#2D*CA^P*tAoK7(l)jbUh+xbi}ggtOk_G;C%=@`+$Mf-Fc{h&wqs|^Hv%7k^~ zV+jMazE9eO41+2Yxcm|qLy+BnzQwkvp*77f&iA7Dd7t4Q3L>T(VQE?$Qop#;n zt9F%Qtd(Zb_eUlJ1yv=PNTisYJPU{QF9JU#WiTeYu>nFqhh9f^i@SwJ&-J?z1q~|& zyndb0)|c#e=C^!0`>MzLrGArhp4FnMvEDHz`aM8o@2-OtJve&{M=pn$YPl%){t=r_ zM8!WJU036iv#J#Hg0!+=A^4;Oh(#Z<C)6qzVexEeNTQq+FI-OwmR zcNnT9V2x)kQ{`U}FK5JK7P}BfIs%LngvRo)|7U0sZ?UakK|{-v+4=Sr6HE@wkJmU< z^M@{1BuLQzluEpe1Hr+HUK1L`S$UDc9^3Dj^IeMC)*{UVIidDL8i%~Cp$XOS+~}U3 z?XB7cX#`}-dqxJ^SJe&;H+GP-}DEGQZh#b7N<|8L)7M< z7rNRx2A31+0m@-HbeZ)Fn(`03hJzZMTO5{l86tJfh8bRigADCF-vdB%JBPS>llijp z?GtsU;>$`Rb1qoBY#$9wtN*;=Xu|Sy)GvYIiK{;r%xt;hg(cP-(E}u3y=+LrvpP5c`6QaAz)kF2RkS;66 z0O}mxsoL=2S1k2$p-*+scYIq7!-EWIncsDoYH_)|?*7hgxFon6F54%yiJ!bMfQVdn*AhLZ_1Iuji{A^ZTWD_ZgnQp zQyerw3y_nEbYxYjz;&3n*mTDa<9kQCL^i? zTJk46x1R7v@?DtzLdbfd)7RGKX}%9c1~Fs$bOj_0e4CaPv{bX)}XHa#etgjex7cL?u=M>xY)Q zEB!CrmUH$~nk6xuilYW7X&hO&6y7bpmG&5l8U(chCrT9Wq0iz!>LGR~v+s=DT@>fI zv^r(i;%f6JjXjc=uQl9`aYVPG;rDlWKI4x$`7s4)jJBff%HmK>6sOtU{fFRHwuv4& zV#Fm1#2{}?O-<)$%ciwZg0)>wI|6_3t1Qk=$(*TsWD2hQXTd)hmqze_er#0pOb*NF z#D>Mb5_Zf9*|@i-Nd4sS;frol2dqFr&}V@0rSFzp+vJ-(I%9Sfql9ccmU9~SZ|4mz zd%U@gj5)uD^%d3huDGosT)Uj0dBvxo{VC964I+#4@;bsHZS4QSoOwWqB^dqHMc4Wx{u6u2bDO?0y>OMO#@?%V>!swztU_23@`q-;<>RyZOQ2Nb*|BI%0q+`& z9(`j;S8T#5@!$0rfkNMSW?EV@in}e%JuoyCYtT(B8v?%wRkE?AymjyzTP_#yI324(u$ZT%^j>*NN4ZgEt@h70E3HxXIyfUwZwTk!|Yr+yA*-CEb`^_v{P7 zNHP{_B_BbIJ_LGkiaUN=oH4WROY4rl8p@tB{Tj)i>X0Rh9hZJDbiAlP#=v3lhbW2# zHx%S*YC&ZhVDpmx%LUt^W5=Qkwyum3m5~CT&(#_AY><`{{66DpH@*Y#j!)(U*0me5 z6@K+lu7+$TxR+~>#2;Y~$tpw5SEc539jjo^0YD$Hd-oX}BmzHyIJGipl3nKXQiM1B ztJ_0OF71LYtG3J7177d6RGV%L|0GGbORT#nwZ48VJ;rmk zjq*l)tln&gPq$^95+^mS<1IEgnJ}qk3?U7*mZTa*o zzG=JYWvlRuWn(wJ1N%tzNRPYeg&P7RG*g%Tv;E%$9y>OT`#`!Mq27&*jmfd>xntZ| zMhqeX;F~i7xQ_zw*B*!Dp<~BfW(;ZW{wL^TM<%gz<%M#F76!W}A7p?Pc@5co|H%|9 z(ibwVi_SZP0Sw?@)T%R}oQ3CAjwQ?ljy%rQuRd+nqm?S zaV2dOV}QX}rBz~pgMaP3`@mc~Z7z@4hO!~sbKv!fsGSIxVpPts_1>qiBSw@dO@=7K5%@m#%h^Xt{Rdn3EtoM?3rU#N^@SU* zQnlhM1NxV;+QLFmAvc~w#$l%&0u4w*v%WOn*Oo@`>43BK+Un(n{HD7}~h$ zMN&aHPb9xl02MI@NGmRHHkFpU-iNs{Z!zf6!30wx%jbu+vnX)g(tKZQl;*Pz%p=L0kt;*oTPL4Vs`-gRmI*z9^wJ182r^LvsisIF&#$Psd}3Eszj) zrH$M|&BzJF!L^cXU8eZ8L}WM~fu6V>NT%jU1V9!$c7DuKpVT|6ZL4qNLkxdx*&z@O zl9H;bw~$*{uGxixCSvgwjgGix<7BPX<5NPJFGsa;tktAeGO}6@V|~Nxgz$`)Ap?3fu)*BSh1R z9Ve%@Naj$mwF+oCB69w8I1_$CZ?Gkbm{o7|G2$8XMK2xp4c{h1t_t_uy~8 zV^FiIAs?54)u>^Y_>kayT4XK|AjP2@zCFeL9*Zm@yuY5_HW}+9bAE6~kh@&}H%P(1 z7MbtUNZ2qjk88hlM(6nJ=drlKqh_n+XLa`43EI|E zjtiog0h!LXbVNLTC?+hi!4_bOvsbSM)4g?mq8uM!p?^UYoJrsvnf3;wR!a(v z1z#fNMOkit$pzU0*4R7OBMaSH?!PHvb|>rK;n&4SHXguTTA2NwAD#YXa90N27Ovi&Z@^ z|Ke-&)fz^#EOlUwzHx`8rUK0et4&_bXmgh|Ex&{Gh>320+1J*|L}q?7`T3HFFG=p+=_ zW9T6I`^d4@Zb`>_W_31-Z#?Nw&(F}X1>N1<5k2en9(3tfuC+@K1}i~SFbHL zog_Y!`}4+KeIUZ4HTki(o&49$p6!<+2<(&~-Ijp0gt44Tc^C}_FS0%;grP$_;K6k} z^He7CI{x`nIF~+}rUyOjXxDk01}xX;BU3h@NIZN0`{Y;jcFR~#qaK#M9$EeK?f9kp z8H;C3IWfIy;@npBsAvrfHfo)nMUl{SSHy%-%Nm$3Wm$-+j*5Uv&*H|WYbE{kx_rE+ zmh=9w<>PThBbHy$Kb3E85L1P@eU4fFjB1bc^~DF*GP8pfEketV$KKtkS-NQyf`k7u zp7BFW{nBX?(s35YS3fx9p3{6Jb z*K%JKW3L;je=1_!_t#x8jXsG6t{w#9kDRQ}j&rQR&2JA$SZs1nEBySdb4w#V!-8=V z{-;Sv@-^*j>sWLkq3U8*Zfw{e%^w-3PLw_xo3OLs5)*Xu!pBW_@>^<>sBPkW*FV~K zVlT4sCJ>ZMpWAh2a`>QK#f6`1Bhm^Fv^tnLh4SR;*Ia8Hrb)ABTfDb%4S#&_-P%>S zkDEbENS>T)YG7#~OIFky07EDe>iW1Z_qGlxAN|i#^5ITQ9}zc_sY5oe7aE-V-Pts^ 
z+a9x-Gbw_tyN(~?Znwk7++DHmX>j@I<}s$>y}Mr5u(59Ay8i3xSvmgZn|>WE0}EmG z2EC0=h+cdz^qbSyeM=*J!v3g#?AZ6~kw*htpZ$LiM)dyt#`{;U%rf`+`v<*E?kGh) z^;T0*?%nFsW@jS|9RL5H4BhCy%`W*_ReU$R(XmNE=btIbH+OsgPYW9t1HCq3t8 zho%jgem993iVV~W9ep~+rbX23=9G3zOHaSmHMra7{hF#8pFxJ-=GM(|`r|m$wu4#z z&vb>|HiwlRSJmZy*=2aJFVrDT946bU3$vGDF_~uuGC)T#(sAe}GdaOH!d8tw&4Lx| ze`!Rni`Xb^w~iTuioHG5H-J{SOh$9P?$jf9E5euHp2@oV&~%<=S2b_e>;$Y160-Xr zTQytUbzpVYw>zlM7<{2`>|LVOzWG_?Mlu9XXjnjS&j+dAU3}8ZCc849;{a+MidxgU z(x7==r}ATI%4PB+gosSwi}~#9)M98KW9m84^1-Hta-ly4Zjif(bE<6m`XHaEYtyq?z)3k=CU=jS8?Hy8v^_oNf*%|K;ME5okC-Jb3L-<;P9Car~oEzO)U zBYQ}0RbJ2BWws#}$lz(`zEB9y1R9-qVmeH<)cLeiT&Q+tt1tJ>9?C<_8u?viDwDzJ z4R{IYGTKt{OHn?&s$lZ_p>PLgd{j=<1B}g#v-&+NVVp_EuU&-)wHA9R1r^(F+W5Xj ziowULe9&4oUk(Nt3XQi}#v4o^>W5*2;sOBs1WIezHKkx7K$Ay@ePq()`WicuAA zM|$U3YZ~|WYYRWHx?uf5o)l6{7;7nTK`f;SfgnRZn02k8wRO)#{SZQ$?ZFtoZ~`Bo zzLTzpa&$7vi_-)|cwT88Z|F2X%A?tJR{JV1H3CgL5)mfvnes}BKSymHpWd%j` zapbA3j{K|Be7T9!pz}=v+FAdfi@zL2C_ArH?J$b{EvGV^ZeIFqnO}g5nTOmRP-w3 zpMk-xNqrf76I5f9|ND?kX!H0c;5Vr#E}3Uzj?vP}f9Dk$sx z?s8vrUof4Jwq%Pe#04VR2~@|T>6tgLC5aOJa$+zQ`D2n9EnEsPfnXdQsU&0H!i06+ z(b42pDBE}l)j|tqo_w3;u1U9%oV5gLn)U_Z*-&5 z72Kr1ABdPhpDU<8c_jIBTNGanR;jI{gb;@%5=Y+Na|T$6dJ1ZTx8;~O*PDsh2oIQq z*&31pk%Ww}L!i$qBStUb%)ahqU9jGSG(mU-Doz8lkjjJVx-|t9aEmrW>loAJ`MUAP zfZ=(dEuhMs7+fRp8#4Z6`qA1^42J^qzI|@?dE%lRqedQm;sKXH2O`i69Xz8sd?=-5 zpv{h1{hhOOomOWLE8WlznMv?e!gxrFX%J$X{%>1Sm%Z*@${)Qkw^j5|)4@d!d89Ei ztoih=ZThX5U4fEv7pd@Mg@OT9?w>h_JADhe7%1ih1!G*`2@KvMl&6A|NfRioz)m^h z3N~J+fFKuJxA02tOx@LcCHjsPJT%|^L$Q8PIl4gVQ62ltYv{ibxi*rwiyZ;$ltGg7 zr(QQiZr|UJU;Ru@`dhc*68?G^VYp-l0H!j!V)V-={5dCdO8!WuXTYJBR%CKKZW?$Uy(g1EPzHAHY5ZUp;R}sz^WfBy z%6TjIY)q>-!S&NG1JHAJq8eaeS~5Q(&@I2S>%oF`S12XH^+bWmK_n3%c_8fa4C zUI0A^3=A7BF%l`Ir8kU1sN>RK<=vf+-C-wcOwr?kS`)H*pgQMpStVu=GHPXHv>u*G zIu$7v8NzleH8sp={Uy_fp+=q(HH;!j3MBZVH4ee~jxn*>DZbScNf8czj0~5-g`xQ3 zne4efcQ&$64P|S)Cb^Sd{eGk&GsVaOqnnm3j1&AqU;|)r1Zy%u4p|5PK~Esl^ZpX@%oL{M`(Bdr6+m zdqQg=iR@SDXqaoH?ANm#X9%S!(4&C*=|HbZBEE(Um&qwSD8vitFoT0O{aPc;6v=v$m3rgG=99WV%!X(1TLhns zBQx&&@+)aSfOPrWWFlkV`!9X~qGWM>!&(n#$retJ3I$SbqtjVLrbmt}i*pVdw+;enH!D{6S(=l}p7(u298XNQk4q`( zREnD4Af0KZP5~Ft#_IeSf#2uBSlE1GD%%dN>~2svsgD|+{sUc*a`%k4kHdY&2bZ(A zC`MM8R?&2gHMEFx=#Ze1Bfx44tnaAobS8}-HN%^WxZmmx0Ou8T*T5Yj@d}@4VZfiof7#! 
zSx2k@AC*h2F6BAtae)lV;K8}eVAaunll zBtbiDW)F4sW&^K%X$vq)lR&MTczQO4J@U}=Ii`boMqu6YLOBoRAi$j`lSp|>oDrl; zH8?UQx7-zxGQ%JnvV}NTg`!|yu(wcrTyAV%qK73d1NV~{*e3Qm817UX?sl-cx>~ls8kErPXNuVr3KH-b z=vH{)W#6vlG|sN~Ho2D%MRvEc%)#Ykr`$=KD#8Ms{Qqi=Tdm^r=FK)!J0R?)u)y3* zC)0=JPTBsu$D$&$q@<+rC#FA>k#+pNV0Dxxp%hu{+f30>fnYU8*ieiX}m-4Eje5N9sc;yBN1#+xLP#Y zcBxF4BS!`+UshNvgoa+63H=|=@`6CrzvmA#2L*I9cLXjD&iky6ajaMN|=O(#W+iN%Zu7^ZGbpe;D5F>{w7RwPlL;JL=G zTjurH%h}FmVf4h;LnnWQyB=hgDdEY<+KNnrab9;O$W@#`NK5sjykE)MTe+Y1*YQ&c z#fIdRNx+Zh9I~WfjD(~I03HNBIH7Ac?uyh!M-T_GWiDglJ=r>jV+oe53GD=BXzavS zxY+qYa2h$~qj<1Tpr*cq#(PD@#JD!-yN3PG**#A5Bvn-m!Us=X;T}fU@;A z%>NHFlPLzR{NBBKwUhG~DSm2X8_&M~tWoj(WtYF>q+Nrl>D>%EPu1OAL##`TiGR;U z2N&R6mVpEP{hD)%KaUrObIV1(Lk17-Jo0*?_xe|+ewb`9-7#lZGG^J~V(Jk^eS4dz z?dV^mxjC8Lbl|{?;rm0WS67UF;eW?=#xQgxY<%dt6`Ch4Z|iU*A;~k>FP%2rk2kWD zaTC=1+&L|2D)ETyTW@}TR)3rk=U*JWcI{d=HLOg$rI{2=oTo#rwvz}OH2{8e5Te%@ zcf$V5;-JNF!GInuIXx*jw%|#c-;_=nT5%`$;}7DQnXoK(7Sguci&n;XKllBHc^$xS zB;^try<*B>ae!EMk(5O^m5Ame?G`ka-NZw>Nk*I=&31 zU93QaD%FPtZ>4WHN84igq*3p>Gj>u-HRRt*1^I4ArtAbA1hlkPxrMb&8^05}8SXyJ zSUO{_AfIAlP*WnoWF*u-L(<-^S^CXLk7HfxLcY0J={WOr4Q?DggpPC2@?t$PSpX}h zaYPS7D#7o#BA)1XHv9wz2#KbaC%no=OS52nueLq3wc7wb${_<&0k4Vi4sp|ih>5sQ zC7hZ|kK@MK{nt}we`tNs8#1IF*`LL~hHciR+k?>TLG~YRHkm5j<(ID2+hc!h&RHXr zKCpYYm}Xge}6eWgD6iATdd zs}7v0w`Y9$nHev(?@3#;@A1$+pVREGE_u=}Rneh*Z=Y<%w$s`y-D8|uZ&$mOHxF!S zcc)kN!!bn@Sa{jEJa;Yd=Ks~H)u*B$-ZZ{FUvn``tdbIJO8^4KIWf-dls2L=HZ&AsVEU#_*NxJx)bJM-IpKaNyt;M>76CJG zHrDi+o`!>NTI;vv$g+JoMXW?=@R znhFXhx9=b3iQE*6QZ{0t4B&kW|1iWxurDElH(t^(0eV8cAxIdQD=c$r#|5X>R=>&y zO?g901g|}@kM9gN0trCN`Y5jMAb2!?h z>6CmU1^JCAKx0btu+xgN8^o=T|6+9mgCK}OB&T~^!pEIB-uLL5ZG3iir#F|y($B7x zcNuwk0pbitvf9UM!g#UZ&AYnh0Hz2_T}s2R6_0lbu*pFvng=*-wAt*T{hZ0*rhh#} z@)3bkhSOC#mt66SwQ}AS-{?lv`~1SfN3^pU8v4krF)kyT`)wc-IzvIIkKkQm##+3vvPIg zLd~_iNl6tO)@knTgv5=w_;dh&tekA7mVjj`DIH)YG~=qz6~u#|=`=`X-uJBO(-+29 zLNHh{Jt$)SwVwyFx64DO2xzTZ?5ykI;Zd^*jqMaeLt-ty6uV|+yJn3v=FE|Jk4?`O zd2=LY(*j7ail|7kMpP?~tiCtI@Jkz=kA-obxvu{nNb0V+bp?wbk?lTV`!ERjTyZ+) z%R>572S8^2c;}J3w2LzKdtSUR5^>mBMC?KC-`5gIzl=+0Nx_ic6IayV!Wvkn)433I zOO_2={6c69QC;ncN>EIBi0YI9v7e3)(9qZ?N*ds3p(Q677gonrU4#FyoxX) zg6p!~7@*S|A`^1)mHH<{TlYe5nVp?=YF7Vd3Y;;X?<*^p5k{#4O^<*bkYl$^C)Iu{ zf#7Y-quM6Bd~xEb>Tqk}GHAGkZbl|!qY7U-XmK|MboaB&%~AHVpNI*_GIO=8z8ZJ6 zHE0Tj+63WjN!&r_7xAV=qZ>2w@n?BH$SBZmbVp^y2m4RiGij&(X6US6zn*gWBp?*- z8P(DNpdV&x?gp|Zk?K*$1sle4I4FNl1oX`YO^^&16%`eUI=f!`!GJZdQ{<1925t1? 
z1%IkK7Gl@|Z8(6$MSOhF;TeQ5kZQ*VSoi5!Jr2l+G&EDtz=izuC~Nf*Kx}HT-T`tl z8P{}}AV-t`n4NI|6JOXDOEyqkzwEjxhGC#z-xv6Tjf*KC@!UU(JC}SM^td~v?Qu|` zbzB$fdU}U>kZ-tImelMDC^g8r73|gmXdLzL=L+Rw9lJT>m zu>rots|shR9k_`*mDLTfJ)Wt?9Q8I!m!^leKWvZ#*&GFFz|bC^To26M*VcW#MF7qJ zOSr{g3{!7$Y2ns_7V($eT_2yGyZ-Vw0dbi>-vhQ(^IOF9eulso(Ba@ef8FGa@sa2j zu71()KvDB>EK_fbOr2W`h`R@MqZFrFS`(02g5a?#sE0l?nQnDp83~Mna^irZ#4ICB z>Xxtqq|Oe!po$q3tQWrw z^8!w0TLk+$6%M@Iem7g-IKLO`P7jDJX2kr*I?Wh<0pJl+cxL9= z!|O+b?@dZziA6 zR#{o1G4QF*jwMh!dJL&X7FDBj{#)d4I0+)2g%>EYi`1@c2eLskCHNxO4H$3w$ECWX zeqXD%YH9biNRaN2QaRl{5s#)x>s7{gwG~0NmJYhnGaT2{h{xDWT6NmVPiS!^Vxzp* zESut7ch`>XaamSxiN2;XqLmwCp)qbhd=a0PmiB@LGUE8sPK|lXvH1_M+~17W;EE0E z{u<)!b&-R*!f#$>vi6i+i3xlv0bPj=$qw6;4}v66gdR+olV?3sVUIy7lp z=+NzZGL!eH`Hj%yVfXjoxsvAI9E?yDyx>SIPf2VRe&`M9>9ZS{dV6iF?bI6WGUq0r zc(7w~Sj96WRDR*UFGk&)yU7(bkK`FZnFO`e*6-e%nY%>TNvs>L5LVxR5xQH=&l_>r zP|4P(E6_7ANMW;*?eXh#WkJV1h5~yaq%{ps;Xbbke#L+zl43u%Uk?hxHQry69%!2m z8-<;ndG-?tn}Sx1sH=kXkW@UpQ=ir*&TDj+FQ-S;$dqRd8Q9-|_Lc~PTWnGW^DtXg z4k8)7$e{R31A4TJ{GS#eCdT9B$)0G8pOuw)U==UoPm`Mn&wfZ{ARe297F66%3qEm->pTG!8@pe$hH-eaC7z$6A%!(Vc|M*P~ zlMo2$#+dsF<1GggP`q-K3vE|^QIRccl!$xDu9guWbo3y?&zNO7mJR_uTO0CqQSQk6 zBUVMoD>XD}0s}BGNSHU^q_w7wAC5L{*!pa%1zZiHj$^&xX?+`UVzkw zKXFK+Y9urdPTh?pT2H5}BSUss_`fXbfHD>Js#MkVrG|zg-V!ksdM)WDFaI`d9S)T| zPxJ&*S1=a{!Ivp3Bvi~kBBA2Ei^-21fyS%(X>OeL>Rh_zt?u^Fo`faTg?OF5YO@-$ZwX{ZN)u!cZBQzT+gy1rz*)bN~ zEjx<4iv~to1Ue7OCyGR>GAWio?!h}2Z`kw6d;58{^x2vu(iB_yUq=5M`oWgt4 z@av23o?5n`H9{C<>HB%P3!3XQxX#JhnHm~Gx7}}izI-}97KLpnz~+)=|7bS45f%&v zKb_`!>XE@a#9RuI7X}v9R-wPT`Uw~svq_Wor2b)2qaYem>%Za3k$A-Fni`2ygts!| z*pquAnA} z{q+mD22uV0=)b}LZa39lu8DVeatqvU`Gv;z`IxI>Nn+#1+R6ksvO+w)%M|X?g-g&w zdu8QZjpU7-Ysy7V)3DN3%3O^~)a42j8_^5lt_QR8aPyYeJ7btOSf$M@~qr#reaoAqx(LS1Nr;ig}ojn@I(&m(%*s{3kQefmh7mw>`Gr@5qsi*6#P_xn_2$-f zY>OU7UNb;eLLzFm<7Qp>l*GKh`I@(&FZ~6(t=)|_zz8$A`uL%~W4b@9HVO$2W(E4P z^uRqx(CV97w~9?y+(d3!)osCJ zTdr?}OWyn++O8QOr{UMTulq4#1nsZcC0>_N?Xp9E6tYf42mkZ3Y-+g6tMh}TL7TYh#g6tnUOzE+ZahQPT%xiqd zu=PhAfw2$<=d16OVqRLcdUfBOHr#-_X|}}J3<)CU6F&Yi#|0c7l z919=}w2;@`qW*JD(>Brt7^1O02`ejGn~tnaW^^oo`KZ?kBlq9?UBur*U4Ov=a{$K> zQN&`FW)bYkIvHokBb8)4NeYClXvb;t5R-9DC3mRA0Ep0)Zyb~Rgf>C`2Sxn#a>fiG z@tVrJ;PlS`CgTE{%q{5t@#$0=xg{kg$R@#~B+)tiaW?2bd-E}n=HzLkYIU;~WLq=uS>fZv?y%T!>dvLbek=F@imFQ$dHfxDQ zO=CY%U(q-0VLOyjjWa#8GTt2>023oTJ`VF&9SwWnt{+;@?MN%F5hJdaIL|$2#HtOr z?3*~Z9Pz+;Z^}D8l2($Ibzo(g2PnBrv-IcT8wONLku^LdGG(KXF$hz@SBBP>=#<>b z%~ck||AOrVc{M;UOA>%sY-uRM&($GAUj#(H=6lI%nWFoGh&)NKSm@$X`pS(q?KZ58 zSb;H@2zU>=&o~x_us#nr$;`~8J|)ep$SC$uVF%vt zIRLw8*RGJ&EeYXZyvNal?*U!JxgfGEY*nL&OE*tiVY2UHKzQ^JDT5OEbz zwc>tQB#A8QBABP?Z zdIVtrzT4+|=*KPf8c-YC`i*Aot^g4GZ`^vkWfyL7mW%Yjvt~Ux*L?B%-}l|RZdtb1 zYuUMRNJOOdlX>YBP)p)wQ+m=V@)_AYe=Vj128fgk#oec@Q}ZvTLp$HQ`(4-B72P21 zGLI#SW8~-`pY!K3zOH6UC!`U_0J9TqIm++99dy`h+Gdw6#s5t7_VID?_YFRJ^y%|m zNdwSau0c!%3t_e4J!ttu{?WO>K+;@j0nzhZ3G;$C$3*Gm7h~@gxUKwK^ObIWUu$JX zui*-W2zPgvfewHnMbw`&XZ~WM zlhmAT{XsiCn_L%4u4E8#@ugg_PH06Vla3MW*^3wTyL1U!qQC1^zkn`*etxMP+o1VR zrjM4dlq|dRIg2Fc;>Jh4BDhEif#I7mA}M&@GT8D^9HqIMb) z5P3G<$v5n^tDBqbzd>$qrr*AOTPR%WLVT3Tm|I#9G@75E9~(3zb8A;#B?^6HLvYGH zs3yAwi%LzDVo{7?Z3ETTC0Bd0*AH` zSs$HwYi?P(C~0hYBdPxlk$rUcXhejjBt-C#@Zd-TM86~~y>zb(A8jJOCemL-^n`J; z2R=(_@9=}JefrV6YH?z*EWZfz7JQgPhuR}80cFR1Y?U(*%o}`+zfhomBBp8)a#E3! zQA^Ttl4#bvW5)okfmho9GJo>=+V`hrnafCwv^@0j-pFp8O(Fomg1rSEP<#p4Yo1lV zjaasTYF6@oKwHRuwk6+(YD*@%k}~KcEvj|ABK7cX%0g-N$-lokd(h$0Ehiqu$X|B? 
zg;Zkd&}aA8wn;otOaGhAm?2sNYp3)F4_eW?G)U2w_7COcv<(lm6?rtJnw4HYKI!P~ zuB7^pJ8_c;%QfyTOdTB@w6W`Ib)2i6gC9*{Ow8{&bYd810P_(zYxLn=J;>a&3_;=07x&IF1svH%%6j;UtY`H*FmcVL)S5K*Fn69v0Ee8@@|!Fd6_qtOZkHdl5`%~% zygKO%>PXF)e!K1|C zCUY5?`O*5^ihIhi2Zu=!;%L9}Ys6i@|d-c`8pa^sYV6K_(nir7~?YogfT?a7!&rY{lKEJK8`oh)y6sBYPqs*||@JhtEh z66#7(G76L5d1v~9Lv>?ZXDvVggr%s$KYrc1b)d4>t$PL<)@621u3(b)m69y79u+d9 zH@pm^Dm?+j!@>43+L@}CVV!3!*G-(`+zQN=GYxo+w6VQ8u^WT;rY>TqTLw=LQ_w8X zynNQoz`)@0l`FR%Jn$Ykjg+FA)^VID`1?`Ov|7Kuc1hS=1Ba#Sl^#W&0cPjeI1mt! zZhXyi@7_yPs8bGWZ|(NUYQ3pe$?=o#V!i~@>QMv>lp{`e+6?O^cA6)%HOCG~|CK%C zN#QL0>FSXFq%w&Uk@tg&Z^POu#QbDuUAF!ZVW^f2n~u3AirLK^t8V(kOU%E_M(J|0 zJx(76$8?>bj{6i|=J~sKMHiwMWyZGW1Phy4aN(f!(#mqRuOi>$o@Fk$#5@4YUf*_1 z{bgw4Z&R41ieZQ%UZ0=Y`R{NDwf_CjWj-@QLihBkDgzA0ffQ3Di|Ur{vj=erFE*jO zlK|Aso9mXSw{o5rG_+C-QEqOAoT{i-ez={3s}u`6xHv{MeWBexbgvyWQP+$hvU}l^ zh?V#^!ymbZXoBIcfKH`EX`!s~CjQNLep{a81FhF;b*gXQylLvccqJ*CqTb`_AOfG3 zV5r3q&8;x7Rox48N>Z&}>iL$gDf3$XH3c#>3R&dQ&Y!+t9~e>r50vatz@fOhgJYbz!R(he8^8ZQE8KY_TzQ)35FkNqT3X zvf$}z-ng$W9>RNsdl&rNQ&a5r0%uH`7YFXwBchP&Qkuc(5CP<~x~-Qs{3w zesyi_Pv4h6u&o)^5d;K$S=>=zD1Y(dR=_c5%injclzy2n z349z`NP?Z6qoWDwG|Ww%K({P|Ay7W|0$KO_c&lYK(qE%x75X@3F>(4tK&lWv_mQ5)$`c#g?GNL(3 z7L5kEf*_I5hgnuufBQ@#HdlLAemF$#OMMwY9!fd*_CQEqzLHkMl@goQL zSQR#~^d}n^PMvbkgFu%@eki$A1V?w|_Oc zSy5r(L2$3AAKw=&vMU@F=>__*^%4lgrP9(xCd0BT*^#137$1HB)*`GlaXpVP>>N)FPt* zXDn_A&b$UXUZra%C@S-P8PvQdg&$Fl(ItbExnvH5@b&y`1SoIeFOj`T_g|soxn!GorDaeJ?y!AxGdAMS9`a`)uedPjXQOHiB8bdhJ91MUM|a5_h3b5mDFu!cyM* z#YKmaHPaFuhouD;_Svp=4lTXkv2GoHA0><#kU=#uYmb%L&A9NAv2o=va1bC1X*h*D z<5C6T6)(Z!On)10T2zi(;2&~cKfAo#`jH*L6~+7_ej7LdYXEA z3C%YW#li?h`#n=1|5@bd=*c~8b3btCVR9-VvVYuNGp}M7YlJAQ+tfy%(1xC*Lfr7Y zt>aP2kdPoTh*-IH6Kbqrbg|{o^Dy2}M9N94FV3!P-?ZU~tSjI*DqXwQY_|4ak3|KG zL*>ke{9=S2fg#u+Ly)X)GFbpug_#BE66u@-^ZcxN_Qb00cg326T~$=pJWKohoDi6@ z10&uTZpWLzi<1PoXjiuY_F9bfm{kbZK;8$8ku;`#D*BH!x@cr>v*`q}Ca#6i3^xy2 zs6BLO2l$J3=A(C~F5-gQw%Bq5L_o=+YEE5RYbWzJGJY75ig|$}|FdKFA&L#~kp62u ze*E~Fw|lr~(?R{ePpqAX0SUU4mbe}RugrU>;s?X#few*`xa!4OND-5xHOMo`udLih z$w%zw$1E_!DgdFShe-`E-h61&>nC&fQ&$tHUDG;fXg6hQ7%A;2cGKzM zBo$g>;HA|3`Zbke(FX+Vynb+94uStIXlRHUw_1CuB>;;|(r9Sp3=0sr9f=-=r8 z#^GH;ZL*gIyeM^VhfX8c=@8(p*|EtHho;6~Amo3w$8W$E=u*Q?Xcv z6p9EI8>UoB&xyh`AKNL);$DMwkJUN`UnB<}jRh8jm_=RYC0>LEk#7t;r_tZ#UdyJ1 zowaGduy#L(HkUXZAPwm_Ju^?b-d)*<%=#{peMkS!`(FXjAx?7wA>Y3B5sn&$OH5p} ziRU*IRH^p=OGiu3b{Vy$oCi;yG$bmIBHr^@wSP`m422wVNU#<7CsavHOl0!A0B%yC zC)_On|RSgSn$Ro$LMdj9|gB_Z^t$4}#W3ZjyHx@pdsRpZAu=(KYy z<}%2c2gW+=b zS7IpBvSnwDJo=cuY<@;hPj4adfZ`^ro8kiXONo3FItma@0^wC}20s4aKgxG<7|CBo zbLXn?V^=Uz^aArbpslr*7hm2=DK53OBv|2ctlpT-M4Ss&am~@0%4v=1n#oFg#TN&U z%K>a}Z+{cOm>7UU2j#RUM;T0oDsL3CfB+{>l5-+LVJ^8#X!PU(O7}>8962{}8--6X zGp4!8DwR9bm~AOX0)I)vQ{b|KC>nSypDzEVzZ~`H^bO3!Y9vT=tor7J9N8@eEDkAP zTw6p&3oVs}H!X5BK|CemI=%_cL7ue5;=WYXj4l9Mq_m5OcRfDtWVT!8q51TY2_AOr zTZp_Ziwm?$61?4Lm%-ffFRwglRH00MVgv{ow_9E zn|oucDPsq}b_KrM0O$@zCu(xJrU)cOsi87@lM1R9kA#HPyZo~Cv^Qf?6-I)W@t^cT zTG!GDC($eh; z-W|3Cx{7Bjr@~-D0_VS~<`NJDW3_e7t;mpZo!{?oquT6!g1x68k$ zlsHW8Nx6nK(A0s={aDlypS84evdJP-A`t9U9pRa2$(qM zLxx<<%9?N({Y)I_U@(o>({SW8%$KyTd^=8bWPW@oL!WjlBWbluG^$-*9u^! 
zpnA_(Y$-s64EP;!oXNz8se5O4*C$NNr<{zjn{X)??nEYb?$#|U;T>(BeZ1!2!EIUo z%Eq|{Retj?x(r=2bdF4PA@TM4HI<2xXU{(W{=6zP!ucdHd}$tOJ=&ynpLa$F+3w@j z>Q)>}$qmY>3l{-T^2%sr9KPp27(*af3dm)4e*SK-&XeR)iEEX0o_%BU=JRSEn#l)Y zeSilj^%}=nIp^Fvijj!29A=G6W}%~F=Co=wGU9na^aALI_c0;=I(vi@O_I0JIRirv zZE#4XQ$pij34Wuy;>X&26_bngJV)a(xjLyg)6yEFT2G^=$GP7UGiJG@lMG&20a_KP?o-50AgSP0AlS8 zKUSy)2bQ(QF#=59h1Oz&-NWSMdX&O?uCMkLENg?V3H9vmefw@BT9xvf{37i5tiw%j zNAuV@AJ}9T2%C=$wta97oJZ!-)oGw7`Rp+y@*_HD$?%Qt_v2>EV7+6En}y_57=yZk z$_ANa6ZW>J!nKN<6_5cVw833_x=4kGv^$w%hLC%k+>K>FRlc*#Hxp1rEhzjCSY*ir}-pA;`7Z<$TAp%{x-TBz$tOhGv?3t#^aET z47*er_>W;Hh*)GaQ^||(u#t>rp(#syVf%8`+96x+X)AVHJLLY@5B(=TMHOS=Z!~qP zS5C|@!`2;Ff@8iwpTSOnM0mcm-~Tc79&kPH{r`VB_RPp$$tGDv$tr}5V}uGVk&2L{ zC@bZJ>?DdhWfjswD5a7ll~t)EQIv{=P`~@rxxTmm|GK$d*L58FjQ49ipO5t%cgT3z zvg}V)J$G6#IH#}9-m~Y!+1s!60c4`s7#pFa;+rug^@ji+lG`js`$)v=#(Dw*@h zc|o<*&cQE+g9?8$r<_DECK&<*B$<bwaYY`QpcSE&jmu`4hHrl1o0ZztX+d zdp!{bgAm5iAHWP#(Q92JN0Krm^194w$JvJq1KBL01dT;@%)MYZpX#jI=!n zENOzfQ)V=R05E;wsb&4|g*z-DrO@R?)^3MdTyA)C(w0xgYOS)$62u@s@xz$4PY1PA zM1v)ruNh_>CI8*4mQj43sdwfgGFcWi`|$dO2RuDLZfv8XF>R%hQEs6NTECJgSKe{=lnJxi>h??;oRu(ZBJ`((`3kd ztY|wsF#q&k=s!{epsXEnAm+Y$4__ye(U=(L=vOlW>Kms#uMVeztXmpsnD=fX|AE>z z@UE?bkBRBu^as1^SJm(@2ICK*)q478@n2l@aKuDaEP<>kno$Xh=0FRa-7;((FOvw7 zEv=gPkU5#))Ydi$9)IY`-?43ptD(w3lFu6D)JvXmw14~N*zi)$|3w>Q^wfbz&Rd}y zr~N-YaeFreRlnjVQ7-`{oAVl#ocodk?za2H_axe%|9I|km>y!%=ghbp86BMrk#XqQ zu{|_Ql5-|iGDx}9yu)}%MLA)akaaC>&1F3m>1`?Z zc+6GI-Hh^0&prWb*0>!FBw_;`b?b@7d{(q`#&@?#Fgbt?y`OvH_IpoPqsKru3Dk6G z+V0>en;Nbb)Poq&@A2r2^16e3b96|MF2iK~r?u3|rTujn}9?>7Uw9Cx? zLN0@J8Q%(AdrM3R>F4O%WH1~=i~8)#fJ>wc?7-qqm)NjTqg)$}Ev|F?J7+}tr*PS=NSAQF5P&OH9Sjh<2A1xUWi3|%3DD%Y(kzOh)@;R2-EX4lxvm$n$ zuCBnspoA2l0SH`=6uie?0vs4KUB?WFzn=|V9cIk7x15(Hu7xJyhW z$U^I50s`XAy7Qo9)oBgz%$z-GQmtngVp72GN#v-)5mADQtNl~$D zt0t!?VxKu++i~wD%E6ul;mROMdYEH4tB?P#udArXPL`I#ElN_|>PkO`YN~%j z`(==Ax$J~EKRKb!3!=_qUc<5|YMoFdth@aLKgcfw8vN+6QdRtYD|;Z>%>k?`|MsB){5 zx~~`2SK36slD^Sp0(ZkR1;xcpM#Ck41$G9RtR7RdNWI!?iX`p`LQ76oFvb9+u@2=8 z&!UGBPa7}C2-&$q3mDn)p&)ir*s$4X@kC0dNxx2^>GNy}+L*-q_bYEFa&FlJ1@S8O zO!K|9?Z?K;zqPL8ODVm0-cK3i718Gy01e!kZ*QJmJfzbBa?SYL&z?S&yv+69SqH_I z1tc$uXM_dH$bFjA$U$cK-U0A|Xtrx)*>Fst=jz3c=(g7tR!W(Z&dJEAtX zpl&*hQUoBiyvx`FCFj!VX|0HLhG3AjQl`%9T#t@E&sOZ9P}e;<(!1#9yYJ~~iZCir z5&c2_QBFh<Yg4ICsdYQw5lQYjC-GAo%N8VOQquj)HEA&X zub;?FjIt}PoPG;(`9*y_H3Y@Dp^X$ag}=vHW&Ydd8%0Xl0+np^Qmm?XoxiCJ7~tU9 zdtC80;P9MPMlx#j?(Z5S$k=n)6)421u3I6ouu)7qez5>7W^GL!-C~z43Q<7qvCa94Rvn@J*w(5~z+`>lo#ylh-8vNEV8JmF3!2FJW zTfZdt(xM?7P)uI#mjbMg0CLTy-#T0VHIfNYfC9ifcmpu#@_?hu?jMpu}d7x2Sv*Bbt;U{Ou!n&N1G5G zVf7OQ4-lpukbv|+T$mi{meX2|Uwm=V1dY3fp-ET-aJLsi=<>C>1m7PMeW#x*UJ7wE za9zXX13k$d0Oo+^jhPxm_$!bw0M_a<(%5LxhVmcI++5=8mtma@$G~qRE;TAhnHM2i zK)n2a$c(|s7s9e3MrUQwju5WmJX^#RR@B`cLLJW(=$F z^~`4?Vh^mSa02&HBU@~6^1Mj4_R)Fd@Xxe*(bV-Hz0U~9ONm_h7rV<7<|{1`Bj82m z@!`D!54mi!Hqx-$NVC5rtaj0Y1yj*<(85{qQ|#Z|Z!Wr3R|qoFy62HM5_YScRI>n> z*%C$l3UzfSF^i8*G}TN?=IAOaVmH-9TH^kbA*`Oi>brOAHf-Vx zG|GCE_rD5|k?u`Y*X)4rpu)A@*}FVqH3|GDi5<{md0}BN-B5k^-+%XT9c#Txj%c67 z@nke@ZEN z!`s_OJWJ-gKJDSbQJItX9<7f-c9Y3~YjVgd=v`cN1P)g5aH8VefOnmu0FCaD?Dgly zLi$Vx!W9IsHW}r&m&t~|rx6{=u;sip#&pd6RaC@6Af{Y|Usl-VRlHN1E1Vg(VHX_N z1y&uj>{^38-kv6?#TRm|%W!3^{H5Zc<%rbP<;OysBQ-f9>VHzS2jHbyFyy$>kLl;; zKVdjFUU?5loN16>N;9tV_Of$x7b2zLE35X7Gr z^WI|F2{!7*XKS(5hUVO5ZjuvIl=gYZ46AJ&?@a4+d3oWdO=z;H3o~dr#jq`-#9Op& z%dkcD3`%30!;=)A#MXTsA!)#JJ$fcSefd&`Jc%^{AoEYjLSHS@!YVceD?WUjV{LA( zgPR%nbP8uHr4+Pu7SVY+^q?Ja=5Qt69ucBQ!Y0KS{~F`#A;cc|RHC#tJ@un3HH@<9 zitRyBK@{d;!%~-wv?r<(oj0de7cSQf4zNdc24FdI;YxTuUp<%B;Vyf{vMZ$=Hi*kWPvk(<%BxlPc1J{)>=srL2J&nS>f>~F0bVPkp; 
z9FeB&+d8kc~2TfSjo~)Ia>ehul@P6h&CrJ;Bnr)Ej>DRygy@3+5wgG zK|xg=*Ta>BMxnt|V%bGrGCRX43i^qqCv2mhq|;tguTIc$59$aHu-T+ba9XqLKyM8H z(suc~h}*Lr!){J2U-#8<)~puL8NgbpmrGk^1P&T^f7+4Ib33euZUo!eMR|4>Q)Q>3 zUE|jp+D{#F{PW-b>=6+raW%Y*b)Dq$y|5`Kk6mb>yorWLXO-XtTMd7%*g4D!zn#$Rq@-m zm95$u|8@9_@7Ro)ARNM4H6BFngJaC|YuoKI?3io9mYvu;e@sFC=?4*m6J{9gAENsg zZXMvVZI$<$8l?Y2@8Y-0BlXTZWnYom@`ej5Dhh%wtwVK(ZnXGCmT7M!w_jm21|`%;l<) zf97Ks;i3^Jgpro|19syuk%*?k!h0inNZMWLcvQlsiY;55=ceX6BcCdnO_wlYf7x1< zA6uF}c(==9E{c1uPjH9OPd9>ZvDyFtf);-`Q!0QYGN;Wc5(|ym`bXPM&LO?ut#v-O zV)*f)AsfLiJ+m~q#fXN#=Wh)qlJ^1FjA^`!`hdU9&NA&;3M9$!U@DjmK=GG0}Iy&?Rd6!te{8e-Z6mUkT?f!%PHnaT8dy` zCeK5bNo*t^6!&WGr{r0~l!you)R!IzN0OLVQ5&1mYKDwDJ$Pxwg*#xfJaSIDml%R3 z{wy0U!*KYd)7VTjp%RFN<+3lSIy@$BeBHnQKAipRA@?}X;m4ReL_{oYy$cbdxMXsq>>S&LWsHEplc%4%wdZpv>vTlHz(%&Kup`<-I~ly~*%lX*dJsJUN@QG>Vc zIIyFYSKmET^gJIeJf-H|YR-VEdJ8Q57Ct}pwDNb|>%5;sEH3RcC@YJsne`$6*3Z+L z?_W8`hNn*~=ryqiXxyrW>t+WnU=UDXWMn5;8VJNSjk*nQKEfpDezhxxR}Qd zZ-^DM+#JIvX?vE)t|F8TV9&%qEqIZ6p4F1qdlL#@2YIiUYHfXaafgXn&i@8FyCoNy zVlNh}1d6rLPYVNPJibF+hQ2S`VjwZRoJJH_qW`Bnd+;U>U!-kKZquf~ldGvEazu!%a} za^VZ@D}eTU7hKe(hTvN-Ke3@=NzZjvH>w%@DlJq_W1kKHg-IP_mtq+$=ZN(YPMgBKr_V3bpu4Wor`KJt6xsXen zW^5!kh;XKc03U&=cFnEbZmuu;YRpuE0d%Xlrda#VuU@sD`Dn|@v?&T0rWCzX`vX`w zY--Od%LLRFPNn^+T4-T)iH{iw=4UnjQSdpsT*GT74Y<`Yc8u%3?tW3#%hsBYf^{~X zFmK(Y(O3Z``bJMm5aM6Rlg7a$tN)XO2MHBMI3z&XUX}wsre9p%*g0v~Q|JqUa z^{Q(|L0yf;Wq%#am@?=SV!iC$gn${dpW(Wd0yL8nDf{}ItD`kk;v=rIFu#7v^W{0m zT)c`b>%bgq3r-JtV0iCTNJb)+bWQZWv>v6GwyAp0X6*0N%uE2g#Rkv9FOOoF`S0Qp zu99>$D=0H~z9%Nm!(7Tt?~DuHvWQQ?e5RxLo5zkBGauUdf3C(g1T_lEWEgq^#=I2d zXRLG9;0{g;C}(BP?Ce9hJEqc4OH}uQi;K#VG&i|5A90g3I=G<>8VY?zmPi4u$-EcF zV1ok5$kF+b_-^j*O#TUk&|14@&9xh$yhYnrHxBa&Ls!Q7(R0}rTuXj1qI_D^{iCOk z9BHWF(DDFLn=@l{8`V4qm8M7cEkEgWm5$IRY74VIpC>p#XQ;oI`-?W_+6oiv331qM zY;Om?yAbsI;r!U_Q{NNqcb^7Lhj&=|Fk002kt2`%Jv;a8<>IdgBdA-Jt4~rWs|RU{ ze&ENoH}#y%TbVt(C735Fz+FdnU`pB>x>k-Fi$w@&E$L(Bm<3DM%Ojd5E{rMV& zlFyx)dz$zBpr`d_ruq_}MR78sk?4ub2v@h64eh+2*aw0^HkT@n8aRAxM$aMlkN=+* zpx@PO>a!s8=#siylGnS=J$_#iv9}O?D7_?NBt(p>13K)>YjeOGiFE%q9nL->d76Yp12t& z5v-LPZZtFcTcxdSP~FHC%c;%m%f#TicN=foZQK}7zp#(jsnNKKegh0o2M|Bk@#&nb zZ>JWn9H38WOc9^NkCB*U)NuU2smh06zAaaAs@6APtTtkZNNexxUTL3_dM+H%#(g(y z5;IxA*|TwFt94g9XkDKKRUlasD{j9Gp5KzW7xZYz2m}@h{$n%%LEVLrMTk3W!(5nd z3{{+ap2sFCX@Ij~In@K&H52O6IL4thtQu{%k%n>L_ zKn7(v4_y2$qE-QSvU&Is%?EJ2IaG&%TP@$5jDm}dd{(BvJ2u6x<(awe=;?B|WsQ7z zTQ4;2)3Y;9j*e%Ud9T`h90G#-yDVlGnQ3JL3`Eb|XnKjm0g1TA>*C^73%jD$HXo=Q zY)DEuw46cc@+j^)FdbIQP9SJyUtK2zXc_x#gBF#<2{Drg)D@oR@2q5Fan38Y)p> zF`N9`T{%5|B!2u_vFNbg{Q<4;uSa+c37z3wJ@aD5nlg2loc4#9t|$Hqi45TYn^)#D zLs!XpeAl*-llz$5InzoHJCCjUS0CR&Zwk)o-wACzsAsnMh1g>Ye+_os!Gu`F&;!L@Y1czWT)}&w1nPTP|?@`$s|V%uuTN`F3`d9cHdDS+D4s z2!1w=TSH*|pNYxUdS2BJ)~}Fjr>*OE9q%!7yK79Wjj=QRRwMWaErARVs)qo)aQANh z+_8~fC%bq5ZPa<9e|plsjddCg+^qIri!A5L)>A(GMDAL_uTblXIXgPv$uq*Rd|_5i zk2da9f&|>nCoHM-tCMdzx+q>WaJApoy+}5h*D-(7rtriuw)0|6Cf%CUx@c#q7Faa8 zh(#+_%w=EvOI_=;*j3^5x>((6d)rs$qtykAZ(ekFX-Jn%b4s)?+L9(6uY{sRkX;84YoD6)f(x*9e;r;3?!Yg@TboF zUGYMNQU;`L_(PAEJ_>!v?n7o^6ZN3}Y4hBy8sG6f+VFISj2JO6Xu(YRTFR!DJh^gu zrB1@hk260V?o}$kW|-|8cyor+zw)bY-lMN7MoD2$`)OzX|9*Y$-AB96pFiUg-X0qR zEJp0ZcDr5At8KUfBLXN?R)ed(^`U!Ja?Osi?|d!GFFC`?FsevMU4GCp|bWzAE-1L+3mp7 zoSa!i4%lm+>FJVqw8KRIdD8c#u0N@SNkIN2*{fR&PZD1g3Hew_FavNw!3L8 z)eKon%-TB3EmVs)H^oHlS8%eS){mglf=59+JUu*0jzveK=5n+sS#l9*at7G0kO8k^ zwxq9ffWd6509xQ2@Rc}Wl*~g30Cw$EZu3VIAu8T&OrM_@5pwoypQq2CUvECN%*vTR zrmzDW&r6$B(u@S4pi2ZV7R3KH5<2zV`z?zFY z&BISnI4&H&SN4S?!&8?3=~v4Ux1SVlKB3Z~D|G=arKpxk*X4$L{@eh& zt~n5?<-*G&?Tv#AR$aJ)pK3qJ1Kse76Ztg`71!#{|BO7W_ZO`ZviK?7`4*n%28uty(H(f;19O6m@w+nYA3@9|k$ 
z`d{-Gcc0MwgT~%~kba+5aaIe!Ax>i`3I0G#NF(o5xBoy6bt3Z7oAQqPA?~C_P6X=O zN4ejL95%DTsUbh+wECJ4<1XAc8A>{XI!?uDhB}W<2;;nIY4cF4?$84;G2y?SN{>xS z_wqHbJbG321PnYe!${Rz0@2y%7`N4(_0{~{TXo$men1pOu`*`ZlM1kyVcHrBY~ z8d6wq`2krxy7Rb=`;(KCi^$};50VoG2#ShtvZ{CV?5WkhMKkLdz>Uth%po~>f7b-d z9^vm>ub+pKv0+Bwua7L$cV$}pui&ueu@-Fma$xz#k9cz_EzC)RUJ97WDUoaO%XvY$ z#3CZ|vH#KY-873!z_(lh@seO2ZL>ePXzs;D494!pG2#&JT=iz!$$ugppnRAN?b+Mq z`?)y_Iu025kW0?y(rVyjl&CotW2-AGxV=HA|2_9DK}zyM+HnyW*Z!| zsMOFQF-aug)~%c?`{tL0a=}8?i*_8L9nP2aqEeKianq*Jr+j_+aiX1ecyX%Hk*-sv z{08Ii!et=7^TY5x-~uQoO~Ilq@BEsxb>|!}h^<1AQRB2hUVRwjC&_}!ef!=EXywrJ zIMZ<|nYct=U}Ff0dq^d1X4y@y9uzj65{g5EZ6=c;VNsraaJqHl#%mC%-Lrzs8eE^; z=UR)1&5cb+R+*2`lspn#S>hcO+8@1RA5M!=Y2`DRKEcuue=9Ln&BaiPGB(^KzT1;{ zi)qJFEXuDyj%$*&>_JE#(;Utk9eT8XIp`s71__Utj6G4lZ00fYar+?;L#Y=wxTvpJ zasmXmyf$C`^`otOdAZZ=Ji=YTXdTOY=fK?gg# zIJEf(1+b#1)FBN_T%e++kepYLq?qB!)f2o^6KSbm?yF|BHsK15ZW=!x5E(YWr*%Tb zBfIURg*h{8*RGw($fla3O~3KL0qPZd_x3`XDPi2)>G0@NfV&Bb-NVV`pqz57%C}(( z{*g6SR;BGk8HYap?CsmHKKQ7M?Pne&_b>r4{d32{uukacrY? zI?b>T8HzYL?Ba>z$Fmn5WDDbWjwu3nV^W`rz2@@sFOU{Q&A#{RMpQ(^%gB22oP2-S zV1QB2{iq)j)}OG&jl~ZgUcc<9qA_-?DaDY%3ufaetE)#PY+|R-bw%XV@WCI=dd4SN z?^z}q5=;qv9*4BxAFMQ;paoOT&F?dEWY~ed?(R&cc=e`ZsmA(zi;B<_CzQr$Xl!vT z*gWRP=*}uyNa2wwEhRyLwE>eTLUzUioa6i9g~n(_h7GneyFCt|nj~x;jyU*}9RzIB zag^oX$qi}#uxQnnAKO?nMXX}c7G~t+JhYsewKftlV^aaJ=C)=?S{17}ASkFYAAsnA zU2~Rw;lOf7J#g;k&8~!-$%t~p#di8sUqXD0uQVvpkR#sJ6ZOP-vaHCQg zqpmJHY}ow!`o}|1m_f` zJ?>0!{_yQvQT?Oe4KHV?S6^-Ip8Q2TK;fxfOd=?P(*Al|)x3F8{|_N|G&9i(V{VCz z`9|<|+n@z0E;X_WLT~o*@i|EjefWw#T)Z-gg+i8)oeR@OXuliH5D{t{HoBpa5h?~d zKshm(5C)2rz-QTDudXqVJSP#3Kl{Y6DFn^uLc=Q&tjYpJ^Zy-dbI9G^=<8(w3WWx1 zEvm|9*@wYz?Z$N(zflF@zpOLrK6w5Ek>=J*8)Yp)5-A)#jJGiBaHEn;DVaSCv+yl= zvbejun_D3Uvt$&Rl6Ao@j);n4!Uzc(@+&Zd0EE}*{4Koi4Zm40ld2dM8xf<5NRGEg ztq9H=JI&-l^|z6~BbZ^2fI&`(L4zKAQ0LQ2QUUtf+@~bdwTGn?y9zCB$Y-FmUw9)8Kfbu@xgE8&vAcSGP8|ltpS^e?u@0-WeZBlg;E;sPM)i^t)N|pX+eng#pR75JXmYw&FR?<^XmRhW?<@v< zy>n9#M@<9mBk4UrAokzSz@}a6qbdN&Cw`DAed68)MjXu9pcFW)rf`o*Rvf81vK;|vn-5$uIpjLZJkYzB$xNJ~rmU*wD@ zdrlf-Fpe9v@zLq}N!I-j>-8a|lrS8^dmt_QV#@zdA2XS7m3GX!0Dme1=Mahl3Zr48 zM#GzQ<4qzpV_8kcQWWzX}L;3|lL>UJLjVL@R_q0LeRc>t1{XO?7?Ds8NI$YuLzK-AO?ZT zCfNU=ukkF1_Ba8Z6_X>E_hiW{y z<6nmQv85%dhwf$veSq{$f4oY3SU_FLE8xqq&3U8I;KxW|QKf6t${M&XXRj)kI=KIkMU)$5VjAMwUlq=wDoPz3phDk0fdE8)1~k>Ty{s zP7%ye^Q>q^1@dLA1tctn8EFBXo4Ib9;Wsv5hF_}ztrAVtOgNMcUi>8OQfX4hTMGuX z`GJ72Z1p==>*EblAjLjZPUvviX1LzEHET>(+5{)}Z@=$Zxfa|rea@C~&l!O9s?_3P zk4OzY%K^&!N1f9M)+C~y7bFygz>l~O*ZJoEyr*8pxT)uAYvk>;c;}XI-+xqns>LWU z8IJyQXU{4i$GE-h>*z=g+%gy^0PJqkv1>YID8~lIB%VCQI(?|fg99$ z6ho4fR1H;;x97fsC3qU0APy7On#`$#)0sMX@{VaHp2x zdY8w@h#d8!id4RxS2#An%xKV{AHG;AKj1ZaO&yA7N!&q<^}GmP@Z~!Hzl~KpGka0u zl-SY?wsAXq_H3Iajo&}=N!TkGAftUZZ`mSqc)D9A?C%VLCetIi1ySrotXbt@W&Un^ z`m4?qLy#t5=}B~ETHAZupXZB^Nm!D>?7(HTX5Bidz566vg>)N;?(hcV-a%dk3=^?e z>qEU)+8$0V#<(axT0fYT1f-S9?vBW;a^Fh1Y{Fc1XjYy$k)=-({0** zE;3I?jL(fI8Cx8aEhhSVy?$*X1u|lfjW=(nX`9bo(~K^cXmGN3@a=k{o-w5 z%!oyiCkfuL-{EU39fE)kWx$z-$M(rDyIqMJ^d*yF#5jxLn>|rGsM3D~s9t#bcABJ+AzmmuKiBB}gz7b_78gxxcq< zoYE==+}>)_w!(NfEN|CN<;B})duE8{{i_qOY530EqP5+MtgTlvHJ}|z&7QM+j&okha3s z^tJ}4jeO4-;_MK(a%Uo4hvO{5SvHvmCeAg>qxv&QV39_R=!u3h{0|Y`9xUs)Im8eZ zw7_B6sRm79I%QBC8*o3*%kxATmUG7S?W2|vEhOQdZj3XzZx;3}eHCPfSl4k$+Zv2! 
zB#$7sv<)T%Y%5Kg0ngK>$2bMz59w2iFb_%g0?Fx5GBX=e-1GZ9-gQ$Nvj!Mu5@nOU z3+jR3u}fLCRC<9F1kek}T1IIZ>*8N#3>Kw{WL@j2m}#(Hv^sM!Aq7f=Ga0_lTGiW= zFxAl3lMRL(7r`a98EBHI`91Gsp?Bwp@rT_Qf0T8Qf23+dzy&@MwszY#YI?a)P?Fck`iGCw0DL#F;QScKAh__{`I z+#As?Ft+D4%;(Zu_Jh&2N$3~>D%V5)ezbNk)Pwjl4r*q-N0OogvA_t6?xH66V;E|} zYoLB{2f(JB@BYwq?fZ*OQYaBkb|V;7+9_TiQH)56)mpL_W%7dEq&(IKbwE8eUoPg&YXkQ;D)<8(>Hu}MnF zE-GR+nRJR=j=GB$9n0R3z+L{Gj0|x;0&B|OA>LG?2_fOpT|RJX&q!q^S;f41oACZi z7{w-}%vlN|NSA5+8R?{Uj(g`8!yHYERjW){)eu1%xwDfB?PO{%Q(UF&rs0&El%th% za{Y1d{UwS!j4Q^KC4xl7N2;N6J5sVS0#DS1O5s`gafl3faCjn8GMCyTS@-51dm$&F; zX67#HW+ANrd1tV+;0|uB(l*W6m0UDsP)A7}lbOfdd@;Ag7KbobqV~@mEI8jyWwoUx z=4=yQh%G$@KX**-=M8D;=@rLjL=0^W3c6Ac8RI>2;2OlOn>>ExNJXwpao(^}9UX6^ zsn0q7<2MeFXisfUS-DNP(|I&q#;iH??Cje7+gr;XS0z6i^9fPD;iD#TJKi$+Xx zj#7vS33>FUf7atggF1@ihqBD!+;kf%qKc0gD5DzrKeFt&>Gt_nlRxe6X@UGy{Ky}_ zD!mAYG$((ipI_Wo{Y~u!_jo zs!-ftzpOyHNJ#=$Unt|~=tQ(!e2)Kv`UO5+Fj{U{?w;!(?qnWbaxAr&p?A9)7oO0! zhdE*s&+Z`pKMJLIsvH(OZ+~@yN=T^K(%HZ)Z+BuUU@==ufEhX{v#%}sbJ0!i6G!VW zMDIKAirD&ITpr*{2Dvq#KgYAh<*bsSdf>e@?H8_G5qHC%T|9mIsj(x8N|zdkKbUQ& zgp3|KDv|wM^;oOIgd}pZfJX3^ zGK-xnxLJr_h-ba#_P~^F(lc|1 z8Bvk7!f;F|f{AtwyZ+CX-8)!MsfC+NyWtdOO}f8*YXPXqheE~Jkat63gv=3yIBNG_ z90I!La5DwwAB3T)m7yeo8GQ&I@E)-ePtZi-c4wzu$SPH1&>;RhXk%t{+|24MB`TZ{ z4b)*GQg-av;jp<7B#1yBsjxW0&;m?wtW;pC?UBU%l{AX%iLlg zllZ4w@VtsDXv&lZ90CbfL=GllSCjxfub;607a=K$GxxcFB&S6ZRRB#1V&)O$YVJ4AZiWYvQz({rpMwElkJ?jfb=*t zQ)i(w+yFJEQTcOCnIAqNmS!&+!YrhM7jekc)|AgijHvWtJ(8GXM~_k(rfYxe0rY_F zeML+KQnT=)-KoWyC{e8)x-JaX?+f({Jy*T5{_9GavsatYyKb_Cx6!PNl5zNzZ$%qn zrInQpZ*J^AeC6HK@9r z5^B8&w8%M9ZAL-QJFh3CE62hUx!;hr>(&iG2vf5ZItqAfA(LLMn2uddFn$*Hz^u?v zPV9iJO_1Ya4-uUr>SE82H^CrVG;4MiI8TOzL0?6VpM+py$gp9|U7YMx9{J){&rNj9 zV?YF)KHYr328-SO_g`iPW*nM7NJUwhnItR%$>oEu?bxA%4h)4@56@?9_C1oFmv;tf zW)RaN*nLcTlJ}y66d4tTM{03<1+1;&@j-|HC=?kLHR|9Y2b{k%ha7LOfGe>L>cNPM zP2Cgv@<_$@_PffkjS3P7eyZpN-c&gfp}#MoP)59=+n|19>bhj<)8k|xdqk#l2fh0p;xG~{^p#rG7E8*Un-46reJ zbVkb?faGFOkQ=&FB#*dIS7oUg4N-V$|DHF>O)f?&8HPzNPOz*b8H6h0fwb$%g?1h# ze}C9SCB>iX$DiRgivU7|oL~EG`Rx0QrwpFwKUdTzgfLF}RoRF+B}WV{%Vx_ z)z$xUeP-4FbglOqGGxdaWR=yGg*tqfe-^0BC*;RReeara?pNlDw+AProEcn{7OtN9 zLR7+zjY$=dF};u&aN!cL?XYZt%%}qVbaaZ?TC^)Oj_(!whXLVKHb|IFoPAnUs!}R{19s(T68L*Q6yyCr9>6uPVx4 z<-7*74i|=P{-G8MNE&R5s3AYk zsxoHu&R%8iKPzY#V$Tg5!k$%yEHA}Yp6!HT=S5GKgJ!CK|m4czi<9=iuipQ-jq4qNo45(kE&8{e@TE#+&HPIl@l zTOExI9UViHCG9IUB<952RhigRq+20VLQ;;Uyyqe_Snt9*)SEYN1|V*eR$ZR#b$FQL z@@C`BF4(cDLWTO{hBGGm9veWSs2j+6i8>Iow&lVRXK^78m~-k+;5(-cC;G5S5yQL3 zp6s$==j6Dgf>D02vE(&@`M}5;MPU*N~vkiMkjB9i4Ypl4(-semhp@@5Hln#i|({(+4ra4Q#3VzEIq`XcCQd{1|Mb->8}D0q zXMDlsX0ew!GD-kynYFR5bMAh%@FCWg2Y4^HT)!vumVq#5T;8KA2qa>1*{Xs?@>2i< zFu*F~f&VwHhCoF^gM z`m@|cWrgEwDl1LyjP{yMh}8C)?A6soGUP03q9A7L=Sd6UN@Z3Q6GP3z%c;(t@SK$L zlNT*qXnxo|WT>ROp)6YfilV3}9&HS{*+{r0&(?~K6mZ;$4g zT8QpI?gj(r?RaQE9v)NfYIOU7t}dD46v`=#m<7xmq^*4hZ>o+c<{my|B8~6zCSJ+? z@BnhEAM^2<30TK*rz0^!<*#mLeYn-oBzds;?u5d2{4w|((F*b*QLUfO?zJpwuhp3@ z74-nZGcP~O1T49D>C(eDPSo;v(dDv1P!xc;X#t{R#^fwON~owP6Ns6IcQG>ZIDtY| zQMSRV6fm^rB@mM5sk_LHQsM*hOghgwH73Po#QG(x(9OwcV^)kzflYq13rK}os+*%} zH8wIXcL9DjWPY>e&0}z|nZx2c{Pg|P1Lth+|3~`AEG^V)P)AlIu!{MS+mrfL>>OgQ zoqOl@I}fZ1bS+tw{v{ZFJAXWhtRgSxt$R+5o`Hb*>zs6@O!S2wQHEi;afP@wm5lc+uk(=!SJVk67_9+)O(K-yPS{DlaxLSO6;p zoN|C%!1!J9blo!0h#TAYV;OlhpO)e%MHK(i!kZeA=-)M<7DRXujKGzyo;~I`{b&ZA z^+nhEI-9P#y5v>_1Bm2?_OR|hXwaYgTPd!+c8kS-rfYF9xYYeveM-%A*RpSq=*=%s z_mnMo3yxXBZ6j7-JRcBTqyuqqm$hFl{Hem+%g?UwIF($K0g+_|W=cZntc87qJsUO2fs5e=o!IU?+3Vo2kH{QLzn9;I~;q0%? 
zt8kg2@PL3~_Q&w+&koyC`^v)aIdR)bL|uabk+?W!aF59>Yu<(JtL4trJ7hy3;FO8j zEMqHhfNK07f3tPZ7tZl&^z8A!ffZ1>Elnzhd(C}wT714o;^=E7iWw8yZea_3?Je{N%l4IsiShQTm+D$l>3)5S=s4n zFIK+{>E(aaBNO@7B{TvqYX-K}{tf!DFxli}IL$H)eLhSJU%CXpDJu^dH0Z4BucF?6 z$|OGQ&lk`4t-vDp8)r>0&<6AQf~BtjYe)6VW0o(K9eLteF%?iZl#jpbzQvvgjXpFz zaHR9D$ITSvSy@EmiN{1W^G{Sq0PVYBm|5cnX9-O}vwAYEJYMP7?4g^Bf=TrOL5B3P zjefc^HiGV<3lBZbW66pY>%LfCJy=b>7wt1}Jgg+bLR;@lw1nb5a47O!Jmk0|Lz&C- zn{Y}c(t&AOWSB4>%||Ks4q)i);ont~njKyAc0<^hS5paU96Hpq^&Qi3%09~zYF^&( zhH{`c#iq~*Ri-9(tVgP8I8h7CoF&U`-#(!81jwtGE}V=`%C^5%C}A5aF#mf_k7BB9 zaA@imi3b7&$ZkD;R?u}L%?Vj{E$O!e4+tC5ZnZjpFl6rKd;NmUIm9-u^H7@fpa_=syhQ27Nkt%<+clhBPWD& zTVn%|5e+dL%85fOKaqS>Yt;!yJgGtjynC0lH;Cwivs8D(a(CQp-X0bWb}I~>t4w?V z_a<#(GHR#pO-v0}Z`!(FYaun?HcGhv@(2x9EgC)CKmEP=ukTgT7K8Bm7Ubt0aavvU zm^cMVyhv@Ewx_YoJ>lxa7UMCk>jtf%{>!dl+mmoFn^|=X>B0QMg-i@Z(nhTUf(!yW zz5sCJAOmAmAS0p~H5m1Z3HF(upVN#2`CN2lT{PD9V1AcOb^*M?Qlqg0X?cb_# zee+ug6MVO>+Slyr?a#B^N8aEQgbxngI zV=ew_DBh4^YR?Jti*>p4?Do0XV~nB&7nXqo*HY$U_7h|dfK1%iXlx|dKh33ZOVUxB z$B|LDf9>;_Gq9X64I$brMqM`V2U^NH0Q18p(sJ0GE!Rg^MMSBXo4x5aJZ)-C#RBSa z5}QEAiYTC@U}l{|_7ntb+cZuJE+$M*2z`LO>6Vtkb2>ET&%+|4i84X?56OSc!!04x zC1OVRlS7W4Wf#zky==Xsy`r!F{ZrOHi$1hUzG-kda{QgmOKjuxPnTz}L|ys6ERVE+ z;br=eZs`6Iv8i!ViAowd9sH4?sWJ@8ufzwdg2RRxxr6F+O++C7xx2UA0(_7AN)&aN zy7hI0alh165+lKk=h`g)I$aR0y2KA*8;SyBTXwGdnQD1rW_eYe$<%TB-Yb0Ow^UX} zlFsEI@`xVw!~Y=)pMVGaHu!Wd;%5oyV!FxA>Iw9))! zv??+G){SY=VV3z-?etUC|6W% zl6OD%d_M-uQRcm>>=$mP?KBw?zPJ+_kAk4!&#Hk9Et^(vIpRnEDW8wFYjW~huhYru z1ARj#)LFWv7Drh%wJu2>rbvIxj+Yn&EKcvn8BeUwP#+kW@%`zJ8|SBVG1LsqcraJV ze*iqVO1oy{UgWr+2yGN*|0ciD5$8Yd1DAx)MvYRmKd0^+>zj`o!OwDJXO&4~%ar~< zGlH3GX@dvzrfUYQ{XK7~;*a4o+v@$#$3P7~rqOfr?qQ)($%7QL6W?VG@pAW5ORnpX z`)1}pgFTAB{7^YQi*o76^gz>#t-7k3&7RuhWQdEd*Nr1$jaXo3r&emODVGeSLqtlS zm(0#vv31CljT{~_;E+N?vZRJplonlF{@)vRy6+OdLq_??ctSCUtk_mHE%W17$){k; z%J@q4!5-sBxF)UOhtP5_#N+6jf9v+`)5T7f3I}dF_#15977+Jf{SS@$Zn|9ElS$Z> zv=4!*&|m&dmXQoky|eGgzM-pYTPpBV(X6%O_ZHCDblG+7RRsYKx(&Xm96y&&3P+boE)xX2Bp4nY zC`dmqCK4zl$$M!kMP6z@55rd%20O6$yx>!D&o+~EW)2P+n0Nh{oxY)=bxELp)ZR21DIrf@3>T_7gqO2=Hdz!Y8A@dEio`x1{m|82uB2N@PADCMe(x z-b)tsA$UQ}+eOQm;So``di(D;@5$K-y#EAC^8DW;E z9@kbmzp~j@JG+s^rpX&2%Ag$C%3%=7;?qLhc^_QfreO+NQMUybEw&ncb<+N+fnN6A zVKPF6*k?~%wI$-Cr{h-Rxmf?o%MUZWb=h+c`VcY4p2#gg}fo$N-V4*0P@kd z++@Vy6le*VKQ3~3V79#?q!@BM=d`2y4+7mJ$%@SF%k4c+&Xt*?T2sbu_v34WX){B( zc2aALD!ZYOp;74Vo)>F0J_4iEfk!KjA=`rkRaCBJUksV)WZGiBJQ2PaQ|#s6A+l^A zM!jPNQL^bU!0yVGgN-)=JI`yS!uV@()wmHk?Ba6w+2}D@A0^TXLIoPm?aTzRF6HGQ zAZHGnLHA1U`FiHR0+ATc9w#RF_EmHvgdv*?WX-Wnsw2N zrKxvTHTIy}RX?1BXf?auCpKzVG=lkPpE9yl$g@2D+h$)Va?z5nwR7(k0It3&-nuCj znx1-$^&;BAkagDNWug)?!7xx322i)<#%Jzb+a_X=QfDW$5Qhzl3)25k{HINiDIIbP~fL)G9|=1h0w%T7XE zd<}PzXpF^zvuH@9V-Jqrvox~bDfz_VEL#nfIpp2Dcgnz->^j5(iCh}wT-UGDKCN$b za=;_B#S$5Zitj%+m>W2O1pP=jJ!nl{17?fIIqS^IJRn8 zf1Q<56g!;6eyLw6tux^0!fRhGnxp+yUs)NYqL03ib94nJAco(W1AfTc+nw)MVu84* zYzLIb4n9-{PfrmTt+fJ#o8`eOUHWWmE4z|r?TlIs(*Fv$BeNtq-Lr#=z=RN+Q%6f~ zxvGY`qKxm510}n&D<=aI#x*rxVd}qgz8of>6(;S~-R&`XyWJZ^qKikLT>gB)wJmY~ zrv-QisEAKQ@`~{bz_^9B{_aArxa^Qk`5seX6C1;o(~r;Uj}63jlW%ZPP~nD|vuB4l zA9R&Sg|A6l>?dLd22?~a!ARWh-E)&Rg}nI4|7oc_b@}o$iRQu|LJ-n1lvMtr{IqSD zLV+q&ywPeXR^M7%PKC1jRBuHNw%AFs(fjqAcQCKaRkABp8~-@C@BK~Rbc!Jk^Z zT(xv`={fe0Z{(*9>&M`Yk|k(lGK6&BAj=eR>kezXgg#a z(t-NeJDa@Let1+{|2XBi-Pj%N0}fqkw13o_(D=&-pFOv>UYzvd<&qg6t*whAJ73%! 
zayc@z{E}*D#KJRcyGA%zL>k5!ZVrh&He~*B&)tKr&MFR=dh?%laeMwcvdz`?<)#69 zj8yG4N5{Soe;$79t%d*l3Gd&YUSAXLe`m8My!soT)qbVdx3Aw$K;X%-)=GElYOQ6i z5Ck3p3Z3yEgdwz&ECOY3ZZo9A#p`CQg90y2+1CjI6M^D2L z^Vm<@6Fyx^5Wa;?hANwSh{lj}vMySULDyqNiT&Z|si~s-Ng^z>=>% zJ=X08OaK-}X?b~SXYQBLAj`Jq@ErGc`TJx;1lr`#CBhhA!=*L%#g(PQl;RKfX#TbE zYHyONkg{O@4E%N-7aEsMS!GJeha^~qTKcVf9QgOQT;M_%94wUM)< zY`qQ?jkB<{e8aXzgU>)!3-1k^EQZ-sXc12ZpCK^&{|WB?Bv)?Q^e2+3b7ym^71U`J zV-mjf?tjF2t-<9^^ntkLRbBnQIpo}}L(_4qzk#&BAc&it4!G_Z*uVeR)Xuf^TjT+V z{w%~`C}vag8O!bW6>xWLX}{Pq{ATLa7^=?Z6|uM&+GL*@Y2E}0{F8EL&i7r-?A|xs z%?JdYw%Sh8pPN2e5id?Me5N!U`Sjt_J{dA>z1J>^Clf$(AMUrx6CT$5rouX@KY$`DG`$k@ z%AT1=2lVCa1r(0g(WfNCX3Ho0pJjny#hh~c$iSjU^S+XKjzCnD2wbKKGE4Q*BYjaw z%Bhl&uO0BS?QaAE5~RQ$<_EapgUq&>jF>#639h(eI)D8$irZtcTC-J4#)%N$C zOCEK(ylBXT-_==+`J1k%=kfZ7c+@y4Nr3Cot8Ko{nvXrP_y~+*=l@n`&iCGHt~f0i ze{i?4YQ-qu_zgXJIhTzD;N(?gxMliFJM{5%Rj7>2(f57-xUzo8qxmm;H63xN=kTMC zdj}G&XJPTLn&)UHO|fSnACil+!*D5y;F3E6ah~E84bc1BL`ljPp-p?$#;2;XaxzQ_ z4GQLj{FISz9)4TKr~&L(V0;pYj}{9l(=H%JvD~4aa0Dk)?ATG|l~G-6yA-4jYCie| zPq|;cdevy)aAoC&D8WUoNG)NvuP8 zC;NeaM!cu`?34V z8yoW(!P2fnhoUDn+e$=L*E7ej?P4*t!T9eai@L@8)EZ;WAxDVU*i(7x#EBCtqO~0> z&l*i~s2$y8S*SsRje3!c%Y>~Zzse~uAKwT&qK$v&+$hQ^)k%+x6(tH7&euKBthBum zV^37}+j1L6`$u<5`DGu1#u%Ug=3Rn#m^kycxVSHPtqd5z;&_lvL+5ptgA{qSBUjM! z6&)=rig9qN`KHNGFODW1^&cmv=H8f=Z>;zWIA!sQNo?>wHwQ?Ol3wTp2Qa`5hV8Z_S{=Zrrmoa_rGgeHf zRJI`z6zmoiVqEX~?=#$Qd-O;)rH%p7qD5>@uuez>-&Ujr=#EI;VbNzMD`X#C3caDc3?D z`ihCG8-7n07`fMkE*O^XL5PX2Ogygm5U^d_4IY!kP%FTMsbC~h6f;e>v|TL0NLZ`k_rGFOl3{YCkKZI ze{Zi{@7)jrCKK#WZ^-dX1U|F41=eK`RaFf}<;_H+US}q3U7(k`k>y3eQyv}@nGdB&+IehsD4DdPd67Y$$^pQ=?J|CM9F6z!91WLOPEL-H zOx5_Av{pHTslTnLm`9CSO>NlHwb5CTUm$s?E$-q-JuQ?e8tB0tTX>hFA#&Ji3*R0e z3uDtJmza(F8+y26Dh1;2i60)ei4G)0%5QkN)?QKLqQimHuwJEgCOqGqYI zKTHiH5yVgP^XIOQ{jE+TI^KTy$+UN>xv-)O26e2CDfoSG+`&Lh7f7NyMn(R18F#&H z?#HJ!KR4ry5#E%_=wW*Lz}@$roFDGYx60-P6%7R3J!}{{;9!y7FeR_Eq+N+Op z0F>lHoOkE#&1)RE@Q_3;q~?V&g5g$1P6WSm=TpMk$OW7-@Nh?`2cCm@d_rqCbC^;V z;4BRJp+qyu&R?*3bL7GzJb(hjLaCA-a3r;O42uO`jwA(LPVT^(8i&Fj)niqotmEP-bVHgU`X~jAkgB|O0RTX!TE**KNOhpG; zy`X`9j=R_`lIkmB4a6~lv{}t_!4kL#J8m4&K@nC_X0disQmXOs^eGxA#62}yChaJa zu)_;3{wo3{s;c+Z+fVYZn>FjBAPO*QPm57Ld%oEwyVd7*{k|!|d+>T|>wX7|E}(mY z?k?R}x)H#d&MR25cDQWlIwY^J1hr%Y(ZID8^>vaMC-5m>;yP`AmklGXkM?_u_gFd* zHa0wF(8@mAV}A~7^xH}caWb^4L(1$a_5~jkokO@NYFCZDx}DI$o!K^fIm8i@;MeKv zSX<`WTq~0zxItjX%fY^G<=_1@H@Cns`rD-kKVmxmiqw{x=u^pfeKYs!aHzQnk8{G8 z4&!mTC**2Z>le`8`or5kEXWzuhq6i znZ{NEg_3Upyd#uCL}Vlm&kO14RV^KQX7e;U-QCS<1(cvZj<>#2O}s~T{I__ANz9-+ zSn0%%_41GQn)&Yh8=l|+(FSquBeLA%?%q_`uc9Kk6M9VC)&++eprv@C`Tf)XG>$mn zMJmS*4rK@;)Rzp_uKX5TpBL&Q8L%ufY<{mPR^`^mD%XHM&9rto_K05dSKG+lJaG{w zNQ4*YS*en4*sr935pg?OnuZKh0uMfS=T1)rhIaI*IYvAZ=VzRadcVGXr;Jn0H&5!V z;o63#97($!XjYd**wx=VerF zn;>rnHO8Onm2X{uvUj*ql>KJ~t(@;CYiaHU;sC=c9c3K`&$Betpkt>_{o|^&6&+AhDO(G>7HigPsCn6R@Vh%MzCs9i0Dbh)s`KlzLaw?Q; zlS4;2luVIBDn*3v<6isy6Thxq*DhD#^Lc;X@7I0b&--~;otfOuJp;$yQ~%&@|0?}{ zl8?i{?_PI*99O*8t8(yg=~wtuNDmK6Z~i#4Yoo<$=BRJ^I}Wjmn3!A3UDMaBW|xpz zVoi`%$GenF?}w8gN(N(A*Z1dE%zy2^OY)0KP;9?O5-v51)?h{Vwi@+vsn7%t}r1XEPmJ_g> z)uKhSAQtmK&5iq`eqbvJ`GU5gpt3-}?wf%6gxudRT{;vJ0<~giNDU8(b=tMewtMln zva_SVU&$Mp0;69`%eVUQ_hcrY0CWB^CqxDQ1dQ%t=cIu{hG3eW`96RB_JSSTx9i_s zz0y=`k=o}Iq z!7PRGmiO-m2ESB!njS4LtcKnH8AhTajy)$30k&yWfjg@^Uk|#v;pOo^tDeF);OP97 zb6t*jc5-1e+GGlk1mCrz7|s2t*;SPW|Y zv#)P4?el!C_qq0L`3?Rys@4yK$3nL%*&H$cOwo2_@2kS|9L`wkN^*!7#)}khpVs4y zaC)$sU&)Jfboe6Fm zcz%S~3Ts3A;LZiw188Y{Ykv*DwI-pEPGi`ZjafZ5)4_Rrd0CS06<)=2DIY&SUQ$=^ z^Rg>V^KWlYrKF-Ij~O%g(P>LvC)@$#)8$`*d6=q&25}tWbG#e94x?rWzXwY5hM$ic z_z&wWwsDwen8!AN@g{56()Jb&1~MW@|M+$FXwy+SnlbEL#*NEIq?){^;9%Kfy~h$2 
zhsvFoE^L3Sd)!N(acO7)@BUEvy{)Zn=DVo%P&ev)iq6C}1P!v;T;kd|$d6GyO0A9@ z7IAO<(PGkx+|nY~k)-up`7?1a&IsVhSqRZfs?+h1&t8*Y%a2HZL^j9*op@sf*{^Y! zQ8G}(UIXiIdj02wDm?qKDmhd-F%L_-ifj;^f$m!3Jfdt?Ii7gz)z#T?*{5L$ zV-wl7_#;0yuh6XsZK$iOD>V55fhYUzCjye@IhY_5Ilk%jHwbPK^(QWw+GAcMYdW;xctFb~4G=!8Ku^ZTa*>fHhis-Vl-&aqc#OQV!4|mh zvJP6z*vV5MgEDphCWN|R8MDEt{NF`Z>Eny3MOQ(ej}Un%kD(~aSw+!d8zi~itoYJ3 z^~hu(3n=Hzy&JwXI^8gsX0dmHc7XAuqkoMkEV)0RBC3O}QkKJ{>rgAWdScck)=|(`#1Y#d%3L?iljm6IyZkB{ z=0Je_7KS@@StnbMjkDCnxJm#axKR+zAH=>Hq^vw5=k2rV>PkGAZ?Iz%Ii_q8_*_8s zw?5C99)EUZJBxUkP~}^pP^B71HsuVkctX|WDd0b>MpSMlaXD$lBo9ky!$^(n&h7AT z{3-Zt?Sygr{08K~U}giCwatmqA*uq`3WJJGZx*KHECtHu5Qg8{^K|>YS6~j-5mzs) zW7bi5Ju21Dv*tYgzb$tUbFqNm;G*3aTNCw zn?iSas>IPCVIBC+pzRIF*`gX#N?dQUh@53gY(QYlRxMHfJq(L`-T`uxuBcSn(`IgT z!jh`MeI?0f&YTG_`{qXI?TX^@C7})h2Fa;w)K;8Zdb!^M?Spq>FVhummCu;FYZkOZ z*_L5tFV5rta>G%COr-SLO+376JVAnWZ|rESpyN&i!4bDQsPl8V^Cb?Mnaul-#%mU# zMElxvt8gpVOujP{)PY7#Htm;EQtWAHw(9#L2dZSzc(+HSh3B(|hQ4AxHh5xpynp7g zITT-D;$mo_c-%MCnpsG=qRr3MpMx_!6`daYYQ(V~NL|hnU??&_s*AY8YEGZ&=Qo`b zL!cSa`og_73ft14mGIGQ7XuYr(gY=`U4qHYz*0YgGVXH$-4TkhB5Cb8%sk02z;`{p(PZeOS7G?jT4 zoJvbmXDGKWe(&wr1uY1t1(EOWhtbAk$%&|>Ddr<$s6{bFkywNvo?EYHCuqtw(5Mjn_J zmW}n2J7!7XAmkC2S6?Us0eC`%MLBv6xRv$%7JOl7R9Wnt#G8xTaZSv(5#>AE0^8l; zHp~J|8wjh>)Kp^AKrcm>BdrmYWC6QIPZtev8{x(()ZlH3+R`6#_T9q5nMjgo!zwQ} zP`#9MJ(>f%S_0kPCg=d#2=Ud;!j9?h5gG5oX@!G7T3dTDpjniG_bx`CSuXd_o}zng zUJ~DRongDo=Io6(v^Kp$ZP3{iPMF%>BOt;NYAL#(J9XU?AZHFxFbM2SThdO>n2G); zD$+|#Pd{bXTd`Ww#VEP&TO8`4ZO z{u%pe2GGGXFPx@K3(GY2?AhJjyLZ-qti z3ywjXp*fJnT`~KWS>Um%_00}lQ2c~K{*j>e&AFuqSB=*RynGpGbyy)_k3I>1QSk43gPdm>O4|ksn`Ws%pDI%;xL@~-b zW7AQ{trouNHWwPg%-g~T=HbhBk{YMf{b)$dso>$K#mdsG-X4IaTa9Yhi(ZoX4D2D~ zFL<&D{HVb&k}r?mw=;xT)7kuf%zeI0`)z4l?8%dxMR>>}!t;*`PNd!n9Vl0HPPczq zm3Z+YGeYXU0+^nKn0q;K&PAD8aRcRJ%RR2NYpnb*x=&#iwXtgttZnSol=Z+lsrdPc zKyx?^%G(c*;~dQL34}d(>`^Y_L^JL%Ev*+zj~TVqM0?dgoCx6|e0Kja^LkDy>ToF@ z#^Hsiu?So*HLV41rh#kj;i0zhM7S~DUd@L?*>-7(rRB`b_VSk@H34ZS;}V*Z`_Ku8 z-1>Ea{=4qMfftyBWPB>W;u(D0_k~`JD?CI=TY}L7YkI zk8KK;NDq+6FJ`MC5k-f{wyVnij?5w?jN?{Z5+z(=py>Ma!+@}z7y+lScELK#7rK}Q}O$DeNsR(+x|;}B(CFn|%Sh^bN!!XvP#@1DsG{S+J$mN66bIx!&h#wTS3 z ztEdOT9ZbY`VBR(3i(A;ew6WT!h=htDM?oSEggpr^WuDuwbp4+v9K)(aHHlDWHjK{3 z@TAEIVs_89k9m1?^6C6F36_*+P7dDZ@PJl381$+)r%s-Hkcc2t)0P{Z@*DN^c9H<5 z-e$uzaiGZQ49E?$B2kB6alnC^j7JDl1;sS(Y#oVYq!>1|9tQyDR4judkU^Va09Q?P zDAmuNDVDNulS>be5IqT=yEj4jGZLF20~bq0uq&X-^{s&3CO7cA3a! z>Ra0`2N(oyKUyTlJRP8?)dQ4|l9Yx4a=D zgC~~yJD0veIvn;*wiFIGi&Eu|=rdoPX#aZR#6?H`fL!2N(#S9rRGckxGbWj!%=e03 z9JJ=8<8UB>uk?!8swe0kOn&m&aL{i+`QD(Ug1&~(-t1nKejb_G)$x*afh0HkB0!<( z3p=jfX#s%Da!z{6`xqbpi}#r;s&2Eqj02o87BI8-@P z3;ql3*Y*-uAKgE<#pRb9=7Fw2c5E29b`gyLT(Owffp31OYv0v-X{?oaQ3f50XjErl z3Mz8?u>7VsI1rmvf13iM@5&O_tw~s>+u{GA)dNo8w5S2gwGWD+Xe&HMs01Ud>Rsr} z=_hgM?*RxyueIG9Q@D}j)`;i;n*zUd3s4%4EZIGn7!q$CZCUd-puEYwc$yj6j-dHv zKQlYU#mY5u{-`X>pCvn*S1Jt>D2B=91@Nj$pPLc#*HaUT%lOq9V{`6LpFTQ2ycGC= zxq`ilDcv6xl8H%78<-aCNfJS-zJujuN=iz$$GzmEuo`;{e^BN%a9FH>ynj|K3uv?c zb@BBz3DVkh_^bvm!-{L$bz3s8lg&K-X7wa=9-20}fK=AXs~65`)tY~*HzWCY!7q?_ z!g6v<(+ETrawx3ACL10Mv`520wPJK6(;5j_S-RgKikbLn!OxeUl$<=3bN_m7Zp`XS z-EuudOn@w>*m39kiiph2Ol8$8{_Q?7p?4p)`=jM}+ z@TI~v?hwWuW3yfV$#^w-11>!}Ru3mf<_jPVz zXc1-#AE;aSl*YI#V-CDuyVB>;)bbHsU0@*LiYuF*zUCCqj`+&OU|iZ?`S~5oa~(j~ zvO49c4R>WWRr*c?D_-mH|2h&u4V%t9WPRh0NY8 zW17~1>Yt|Jy3-5)ZHT<@^p>T%Y&smz1NlT$0uX0}3rTOL7<;zW%N>WfEM}SnrgOuV zGS8^Rwr|%O;}iiDXbOv8unfSp%Kh_(Ly{Q9<;9SDqHnVko$j#yM$O9bzJe(Q!D?{? 
z`N;-SRK;Q-$;VbX!+EI7BaMal!)0XH{Qha(F3W_y#?|&?-rv~86W>cw(ZtenpK|;< zjcpZ?>uf?k`{WZD(X|iuhDXwI@p9ODFtnf>Yp!$FmXF_fBZXfi)>t$Nm;!jV9(jM$ zLmdL3kWO7ollwCG;K}^cm3?88pD*Dd2+iHNUk7c)Lx0Ef0|1ZVj4HNN(BsoYiTMQc zu0C;&W}%DxUWkF73=TXHsbk%~8qybWAzl*J=0Pr{IstuP6wUa^f7HBpGWu%GRPU09-SM-?X!W3L4W!`o4koj`oS@CZQ18}rY(b> zm-rrjKN0K9U^BWEC^p4>;*60^QJl86j0$V(8zjr)Ov!3kz{Lw=@*QfD@{U58h+B za#O`b2&!N+<4Y#H-Z%z&!h(V;ukNG&FrXsYA%5N5ZaYo8la}$fzt5Df&!c2QipOsj zj-o(Q2?;7R2Fv$`makeTBdD2KsC!qv#j6`0>KsOrz{7SKLXglr5F6NCzvZFMu1#iV zx*wa%8qce%r&-&@r%Yu(2@92l^XbROrimK`w=xEw9t8S?tDCFmUWXF|$+F}A1jmT< zYo4PYd|lM>@+~bpK!WiT_;5#{YVv^Z;yRJ9kus*Fq{|3qIKYuD<<8{U5XS~Khr5SO zPrlUE-@(4Z!Hi*+P{Y{4V(lRTMwv8=1!q&aKs7c~*EmY~)2h|KXwKeYIcHAL$~ks}cW1z_61{Sv-jnPtBhJKoqSs>gC*otlz!rqKwVw5tp0Vp`{OQ)?N$1ATbB6wr7j5? z)>+b|G;$%pOz0p#ljbE`4x`vBr2Qwmwz&GV%I?*}J2&z1lMXvFv7~H$&%ero3T&#X zszy2}*PhSuw@`4evAb Date: Wed, 22 Aug 2018 21:37:33 -0700 Subject: [PATCH 043/160] upload key (#12301) --- KEYS | 60 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) diff --git a/KEYS b/KEYS index 41cfcfdc2c1a..03b3ed40ccf6 100644 --- a/KEYS +++ b/KEYS @@ -539,3 +539,63 @@ C0s/d2Nj+1o4aisymgm9e87IAkDc3eXCo2WMHPkR89nOs/vYpyMrlobKcyCvsDC/ hSr9ITOiZCc= =VCxR -----END PGP PUBLIC KEY BLOCK----- + +pub rsa4096 2018-08-23 [SC] + CB9164C76A803D861DE16E5B91052D922E28A38F +uid [ultimate] Sheng Zha (szha) +sig 3 91052D922E28A38F 2018-08-23 Sheng Zha (szha) +sub rsa4096 2018-08-23 [E] +sig 91052D922E28A38F 2018-08-23 Sheng Zha (szha) + +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBFt+Ey0BEAC1+bteK715FULfFz4ItsgifWAN1GkclMRvhcZxGl6dH6GraOlQ +b06ctlJk0g7uzvFCXIKvnE8GIN3ZH5zQzS1mXmeLWSicnA9sHNrwZQi6GCKsAuLF +HZC5WED7yEGRzMbeOc7oov7yROjgWEb+cq7qdcdR9ZfYU0genGLFraxw/buALASW +ohB0Iu7p0Hf/VQFQZJJOXDUSRJ9Bh9yrVizRGhHRy54Ex3spWCMvTadfui0qDUEq +c2O+FudCnZFWlP4VYZWWp3sPLBIcIiSLHutgsh7dmJNOSNChzxjwHrtclweaWdUQ +2PLtuDzeE7GIkuAWCl37w9pjbL+IIVTyN/4exTjhN8bFjbCc38reFKbJR6X5S7OP +ucozmZb5vqOous7RY5SLBfhseZLG0TJY0n0cl3enolt5sPWBDI/smL5mtCZN06mX +kDNbSMEBFzWnRUg7aJu041lU7W2KAKIydpKSDB3gQT59BDP8jaGc/UBBZGZNechs +bN6P/dJ34BONJbBAqWzk7H1PBdH4zgewtttA9ZMqx4GIlQpd8/J9eWGM+W5Be6X+ +uRBBps3jCX1K+BJYOcCijUTUrrApyt7eoPKzznK37NMqNN7mt+Bjc5cN5FXT5ChT +mJry/dkfw3ROeonk/kWy4FQLEfTUvOVy0vs2cxYPJanv1/l0X9xWw44+eQARAQAB +tCZTaGVuZyBaaGEgKHN6aGEpIDx6aGFzaGVuZ0BhcGFjaGUub3JnPokCTgQTAQgA +OBYhBMuRZMdqgD2GHeFuW5EFLZIuKKOPBQJbfhMtAhsDBQsJCAcCBhUKCQgLAgQW +AgMBAh4BAheAAAoJEJEFLZIuKKOPoK0QAJYmMaLCoDT93xbIVz247Y07Qs9isFJ5 +ZDaOOuJE8dhJbwl9rZ1gD3KUZuJ9hZk5kdHdE6NcV2+eSSdcIHgy6WT0iwLFe8KD +sd5nVjUBhQNjz4ecNwwAqBRWDnXq0Qf+fFeja2vrDkruQ0yHAAxdzf1fqh2xOuoS +yn8IKyAIOE2U0QrPo3otHdou5zEzlDuZqWhbn1mHwk22Y/D/AWgTjJCk9Mn4ZxNf +7kIESUt5ekzf6Woc9du2zpKD0Zt0RElC6xkLBBhnn1BcEio+bzxD3PQwBGVrVfxe +CLumDM9LqS7e20o9sYQiNbpDtpAV0iYiUL5lvekXEHCE0zaaRCWLjZsDzY2BgADZ +OaC87tep57Ug9FlSWeU9wtoo/78DXGL8f2M1teUmA1Em1D7Mxql0zNgmaHXBpkxK +9b45x9C9DvfKbVOVr14IA/jJzxpxsLDjynLc36SguGbiDRcHcL63K2u+leo7kH9X +a6UpTm+n6JXGgPlhTvznAz8ZQCitEv/8zQHhAVZS7A+lKdLbY/m0niyd9rKMFZnA +SWUTnJu0U0wQnxdP76otQ8d4UjfzLYANlI+CMyoZPbJDi9Yg2TsU/vqMDuFs9Vj+ +Dkh6wAVIakvrUzgPwy22jmiXA0r5PMhtMoi3YbGXcsjnf+gkThIDasE5X7qCmGnH +fnDgkTUMZ1mIuQINBFt+Ey0BEAC2BM64GDpmGGwrcPt1v4HFoISQhdv1s67IE0XY +rwV3jFuyXk0gYPbGnCW9QCphUA8/jes0iZHYRMcd0PI9maUQCYZJUI00qtRwVW8v +tVCwxh/SrdIo+HiCNsfQ7Y0byeeFVsDyzlg/VG8kjsjQsOEK7iZzvWczDcS1E7NF +leg/17jrU6Rpg2rJlLWah/aYHgB32naf2niH3kRQYvE3ZizdBXxCw8WCGE9ZPeZt +kE/yBz8vw1vYdCYtoPXBLO1N6ugjuxdhuOC+QBDcbwzKfO5EouxRIG49GMy4o9zC +PYwST7rXOoTacMRQm676OeXGSLiiR9ywTgkDPsYJkCI/yf10W2XHHvK7bPcsFbU9 
+zifr74WSWLC5UVQCdiEK30nqSrKmDTPHqgkDftXH6d+K+upPh4iNjUBroGZxORWo +aDmkXBvccUOCwfO9w9UTDg7lSxUZC8Uq13QQUK4oKAOlvdD3gmeORhxsdt+6FOtn +WXX0VyjUZyY1/oOGscNLs1RHKHKRdFtDCDogd3LpFcU5QTiYcRX0fEQD7i9zUAGz +5tXTbL6VWO9kwwyjbWsL2KxPGTj0tmbp/JtcS9wxye7KxWA4eJoMlXCNzQPEdUzh +SmMlh5AAgvb4eBp3yamBuPd4IVANPLbcbsvkDOiY8Ix/tbjYy8smrwxn7gTokZL1 +DQBXSwARAQABiQI2BBgBCAAgFiEEy5Fkx2qAPYYd4W5bkQUtki4oo48FAlt+Ey0C +GwwACgkQkQUtki4oo4/AZw//Xr9FOVnxzOtM7xKln3YwFUwr7Z4IR+3Oc6UtKfSO +C6VDyo0CVIHvTDwijsOZOay5OZqY9VjjctoTSDPnh/gJnSeT56I3VtPPiWuwuOqm +YD8HnwcOb0ZifeZr9GMgrVehJ4t9dMAIo1l5MWPP5/p+KHm5FZYkZk/5AtsCAxh5 +tBEK001fxWo85pEZfZx1zMAfchhKH3VncMCqAi964+/dOIjUlvpBJBDC29GrqW5Y +0LP0TsoQ766bWKJ261QyGhILZawu5EDdqwE7BBLT1NQFvQTEkJ8FwurgFTQVfLln +lAa2DDCPpV0omfUZAr8oHHhNLowIzbqXe3q1mac+Yr1l4L8zZl9gXWGbR/6fvwf/ +yUzaUrh285ZsgsIKACyjV4tsS/iKJv+dM2Hu39OtMtoRI9oKFNHb9HkElHj4MSqF +SBkyOtsGeqivwPlnZMfzLkicBpmyQHYmgOtfcmSuxhs5aCuespzXfZw8Nv2Aergc +TWhiJbJn7HPY21RJfLwOgo/o76LmdHhwn+PMhsvxox8pMYigLaTj4himBgk/W8bd +I2j8j2s5+rQ+LMbaRYUPeqP3/cBl59uIPAMigD8+ZHb4yCOVoBVRtG6LJoLHqRzd +a4LYL628Ksuv1Yxn/Uhb5nDPxU5RKRDeogn07wtaqSG12T9pcIwmExW1RN5kvXc2 +3Os= +=XL0V +-----END PGP PUBLIC KEY BLOCK----- From 72efe41c5e0f1c7bbf7cf328f17fc93edc4ce76b Mon Sep 17 00:00:00 2001 From: Kellen Sunderland Date: Thu, 23 Aug 2018 13:13:11 +0200 Subject: [PATCH 044/160] [MXNET-855] Update clang to version 6 (#12305) --- Jenkinsfile | 14 +++++++------- ci/docker/install/ubuntu_clang.sh | 8 ++++---- ci/docker/runtime_functions.sh | 12 ++++++------ 3 files changed, 17 insertions(+), 17 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 50b86ec71900..bbb40575426d 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -173,12 +173,12 @@ core_logic: { } } }, - 'CPU: Clang 5': { + 'CPU: Clang 6': { node(NODE_LINUX_CPU) { - ws('workspace/build-cpu-clang50') { + ws('workspace/build-cpu-clang60') { timeout(time: max_time, unit: 'MINUTES') { utils.init_git() - utils.docker_run('ubuntu_cpu', 'build_ubuntu_cpu_clang50', false) + utils.docker_run('ubuntu_cpu', 'build_ubuntu_cpu_clang60', false) } } } @@ -194,13 +194,13 @@ core_logic: { } } }, - 'CPU: Clang 5 MKLDNN': { + 'CPU: Clang 6 MKLDNN': { node(NODE_LINUX_CPU) { - ws('workspace/build-cpu-mkldnn-clang50') { + ws('workspace/build-cpu-mkldnn-clang60') { timeout(time: max_time, unit: 'MINUTES') { utils.init_git() - utils.docker_run('ubuntu_cpu', 'build_ubuntu_cpu_clang50_mkldnn', false) - utils.pack_lib('mkldnn_cpu_clang5', mx_mkldnn_lib) + utils.docker_run('ubuntu_cpu', 'build_ubuntu_cpu_clang60_mkldnn', false) + utils.pack_lib('mkldnn_cpu_clang6', mx_mkldnn_lib) } } } diff --git a/ci/docker/install/ubuntu_clang.sh b/ci/docker/install/ubuntu_clang.sh index 39a5600ce9d6..40761716933e 100755 --- a/ci/docker/install/ubuntu_clang.sh +++ b/ci/docker/install/ubuntu_clang.sh @@ -21,11 +21,11 @@ # the whole docker cache for the image set -ex -# Install clang 3.9 (the same version as in XCode 8.*) and 5.0 (latest major release) +# Install clang 3.9 (the same version as in XCode 8.*) and 6.0 (latest major release) wget -O - http://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add - && \ apt-add-repository "deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-3.9 main" && \ - apt-add-repository "deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-5.0 main" && \ + apt-add-repository "deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-6.0 main" && \ apt-get update && \ - apt-get install -y clang-3.9 clang-5.0 && \ + apt-get install -y clang-3.9 clang-6.0 && \ clang-3.9 --version && \ - clang-5.0 --version + clang-6.0 
--version diff --git a/ci/docker/runtime_functions.sh b/ci/docker/runtime_functions.sh index 35311396e34a..1e38ec48e6ce 100755 --- a/ci/docker/runtime_functions.sh +++ b/ci/docker/runtime_functions.sh @@ -349,11 +349,11 @@ build_ubuntu_cpu_clang39() { -j$(nproc) } -build_ubuntu_cpu_clang50() { +build_ubuntu_cpu_clang60() { set -ex - export CXX=clang++-5.0 - export CC=clang-5.0 + export CXX=clang++-6.0 + export CC=clang-6.0 build_ccache_wrappers @@ -381,11 +381,11 @@ build_ubuntu_cpu_clang39_mkldnn() { -j$(nproc) } -build_ubuntu_cpu_clang50_mkldnn() { +build_ubuntu_cpu_clang60_mkldnn() { set -ex - export CXX=clang++-5.0 - export CC=clang-5.0 + export CXX=clang++-6.0 + export CC=clang-6.0 build_ccache_wrappers From e1b22657db52e2748fffd38900201c88f17b4734 Mon Sep 17 00:00:00 2001 From: Kellen Sunderland Date: Thu, 23 Aug 2018 13:13:46 +0200 Subject: [PATCH 045/160] [MXNET-849] - Enable armv7 with pinned docker images. (#12272) * Revert "Temporarily disable ARMv7 builds (#12260)" This reverts commit a4aced7123ba8aba13f0525276c909f4a78a04ee. * [MXNET-849] Pin armv7 dockcross image Co-authored-by: Anton Chernov --- Jenkinsfile | 20 ++++++++++---------- ci/docker/Dockerfile.build.armv7 | 2 +- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index bbb40575426d..0e4aa199a6c7 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -363,16 +363,16 @@ core_logic: { } } }, - // 'ARMv7':{ - // node(NODE_LINUX_CPU) { - // ws('workspace/build-ARMv7') { - // timeout(time: max_time, unit: 'MINUTES') { - // utils.init_git() - // utils.docker_run('armv7', 'build_armv7', false) - // } - // } - // } - // }, + 'ARMv7':{ + node(NODE_LINUX_CPU) { + ws('workspace/build-ARMv7') { + timeout(time: max_time, unit: 'MINUTES') { + utils.init_git() + utils.docker_run('armv7', 'build_armv7', false) + } + } + } + }, 'ARMv6':{ node(NODE_LINUX_CPU) { ws('workspace/build-ARMv6') { diff --git a/ci/docker/Dockerfile.build.armv7 b/ci/docker/Dockerfile.build.armv7 index 6316270f9cf8..2ad3bea519ca 100755 --- a/ci/docker/Dockerfile.build.armv7 +++ b/ci/docker/Dockerfile.build.armv7 @@ -18,7 +18,7 @@ # # Dockerfile to build MXNet for Android ARMv7 -FROM dockcross/linux-armv7 +FROM mxnetci/dockcross-linux-armv7:08212018 ENV ARCH armv7l ENV HOSTCC gcc From 8ee8357375495fcfe54aac3f2960e46a6350ee93 Mon Sep 17 00:00:00 2001 From: Hao Jin Date: Thu, 23 Aug 2018 08:45:01 -0700 Subject: [PATCH 046/160] re-enable randomized test_ndarray_pickle (#12292) --- tests/python/unittest/test_ndarray.py | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/tests/python/unittest/test_ndarray.py b/tests/python/unittest/test_ndarray.py index c9bc0cd1e1e4..e354dcddf73e 100644 --- a/tests/python/unittest/test_ndarray.py +++ b/tests/python/unittest/test_ndarray.py @@ -240,20 +240,18 @@ def test_ndarray_scalar(): assert(np.sum(d.asnumpy()) < 1e-5) -@with_seed(0) +@with_seed() def test_ndarray_pickle(): maxdim = 5 - nrepeat = 10 - for repeat in range(nrepeat): - for dim in range(1, maxdim): - a = random_ndarray(dim) - b = mx.nd.empty(a.shape) - a[:] = np.random.uniform(-10, 10, a.shape) - b[:] = np.random.uniform(-10, 10, a.shape) - a = a + b - data = pkl.dumps(a) - a2 = pkl.loads(data) - assert np.sum(a.asnumpy() != a2.asnumpy()) == 0 + for dim in range(1, maxdim): + a = random_ndarray(dim) + b = mx.nd.empty(a.shape) + a[:] = np.random.uniform(-10, 10, a.shape) + b[:] = np.random.uniform(-10, 10, a.shape) + a = a + b + data = pkl.dumps(a) + a2 = pkl.loads(data) + assert np.sum(a.asnumpy() != 
a2.asnumpy()) == 0 @with_seed() From ac85d7a15f39df8cb413c4fb1cb69dd931b0e619 Mon Sep 17 00:00:00 2001 From: Hao Jin Date: Thu, 23 Aug 2018 09:32:27 -0700 Subject: [PATCH 047/160] re-enable randomized test_ndarray_elementwise (#12300) --- tests/python/unittest/test_ndarray.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/python/unittest/test_ndarray.py b/tests/python/unittest/test_ndarray.py index e354dcddf73e..e9eea43b1ea5 100644 --- a/tests/python/unittest/test_ndarray.py +++ b/tests/python/unittest/test_ndarray.py @@ -119,7 +119,7 @@ def test_ndarray_setitem(): assert same(x.asnumpy(), x_np) -@with_seed(0) +@with_seed() def test_ndarray_elementwise(): nrepeat = 10 maxdim = 4 From 43581a7cb3393b1e6660c36c5ef4d59a09b212dc Mon Sep 17 00:00:00 2001 From: Istvan Fehervari Date: Thu, 23 Aug 2018 11:15:40 -0700 Subject: [PATCH 048/160] Generalized broadcast_like operator (#11984) * Added input_axes and other_axes to broadcast_like See https://github.com/apache/incubator-mxnet/issues/11871 * Added a simple sanity test * Fixed linting * Fixed linting issues * Renamed parameters, added negative indexing, more testcases * Fixed linting * Replaced params with optionals Not specified axes will result into whole shape, empty tuples shall raise exception. Added tests * Re-added the default param values * Fixed indentation --- src/operator/tensor/broadcast_reduce_op.h | 71 ++++++++++++++++--- .../tensor/broadcast_reduce_op_value.cc | 5 ++ tests/python/unittest/test_ndarray.py | 19 +++++ tests/python/unittest/test_symbol.py | 3 +- 4 files changed, 87 insertions(+), 11 deletions(-) diff --git a/src/operator/tensor/broadcast_reduce_op.h b/src/operator/tensor/broadcast_reduce_op.h index 351315ab0c81..0944d255a45f 100644 --- a/src/operator/tensor/broadcast_reduce_op.h +++ b/src/operator/tensor/broadcast_reduce_op.h @@ -147,6 +147,17 @@ struct BroadcastToParam : public dmlc::Parameter { } }; +struct BroadcastLikeParam : public dmlc::Parameter { + dmlc::optional lhs_axes; + dmlc::optional rhs_axes; + DMLC_DECLARE_PARAMETER(BroadcastLikeParam) { + DMLC_DECLARE_FIELD(lhs_axes).set_default(dmlc::optional()) + .describe("Axes to perform broadcast on in the first input array"); + DMLC_DECLARE_FIELD(rhs_axes).set_default(dmlc::optional()) + .describe("Axes to copy from the second input array"); + } +}; + inline int CheckAxis(int axis, int ndim) { CHECK(axis < ndim && axis >= -ndim) << "axis " << axis << " exceeds the input dimension of " << ndim; @@ -350,20 +361,60 @@ inline bool BroadcastLikeShape(const nnvm::NodeAttrs& attrs, CHECK_EQ(out_attrs->size(), 1U); TShape& lhs_shape = (*in_attrs)[0]; TShape& rhs_shape = (*in_attrs)[1]; - TShape oshape = TShape(rhs_shape); - if (lhs_shape.ndim() == 0 || lhs_shape.ndim() == 0) return false; - CHECK_EQ(lhs_shape.ndim(), rhs_shape.ndim()) - << "Operand of shape " << lhs_shape << " cannot be broadcasted to " << rhs_shape; + if ((lhs_shape.ndim() == 0) || (lhs_shape.ndim() == 0)) { + return false; + } - for (index_t i = 0; i < lhs_shape.ndim(); ++i) { - if (rhs_shape[i] != 0) { - CHECK(lhs_shape[i] == rhs_shape[i] || lhs_shape[i] == 1) - << "Array cannot be broadcasted from " << lhs_shape << " to " << rhs_shape; - } else { - oshape[i] = lhs_shape[i]; + const BroadcastLikeParam& param = nnvm::get(attrs.parsed); + TShape oshape; + + // lhs or rhs or both params were not specified + if (!param.lhs_axes.has_value() || !param.rhs_axes.has_value()) { + CHECK_EQ(lhs_shape.ndim(), rhs_shape.ndim()) + << "Operand of shape " << lhs_shape << " cannot be 
broadcasted to " << rhs_shape; + + oshape = TShape(rhs_shape); + for (index_t i = 0; i < lhs_shape.ndim(); ++i) { + if (rhs_shape[i] != 0) { + CHECK(lhs_shape[i] == rhs_shape[i] || lhs_shape[i] == 1) + << "Array cannot be broadcasted from " << lhs_shape << " to " << rhs_shape; + } else { + oshape[i] = lhs_shape[i]; + } + } + } else { + auto lhs_axes = param.lhs_axes.value(); + auto rhs_axes = param.rhs_axes.value(); + + CHECK(rhs_axes.ndim() == lhs_axes.ndim()) + << "Input_axis and other_axis size does not match"; + + CHECK(lhs_axes.ndim() > 0) + << "Empty axes tuple is not allowed"; + + oshape = TShape(lhs_shape); + for (index_t i = 0; i < lhs_axes.ndim(); ++i) { + auto copyfrom = lhs_axes[i]; + if (copyfrom < 0) { + copyfrom = lhs_shape.ndim() + copyfrom; + } + CHECK(copyfrom >= 0 && copyfrom < oshape.ndim()) + << "Invalid dimension specified in lhs_axes: " << lhs_axes[i]; + + auto copyto = rhs_axes[i]; + if (copyto < 0) { + copyto = rhs_shape.ndim() + copyto; + } + CHECK(copyto >= 0 && copyto < rhs_shape.ndim()) + << "Invalid dimension specified in rhs_axes: " << rhs_axes[i]; + + CHECK(lhs_shape[copyfrom] == 1) << "Input axis " << lhs_axes[i] + << " at dimension " << i << " cannot be broadcasted to " << rhs_shape[copyto]; + oshape[copyfrom] = rhs_shape[copyto]; } } + SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape); return true; } diff --git a/src/operator/tensor/broadcast_reduce_op_value.cc b/src/operator/tensor/broadcast_reduce_op_value.cc index 929c3dfcf0a6..c3bc9cfd3f08 100644 --- a/src/operator/tensor/broadcast_reduce_op_value.cc +++ b/src/operator/tensor/broadcast_reduce_op_value.cc @@ -31,6 +31,7 @@ DMLC_REGISTER_PARAMETER(NormParam); DMLC_REGISTER_PARAMETER(ReduceAxisParam); DMLC_REGISTER_PARAMETER(BroadcastAxesParam); DMLC_REGISTER_PARAMETER(BroadcastToParam); +DMLC_REGISTER_PARAMETER(BroadcastLikeParam); inline std::string get_reduce_axes_description(const std::string& op_name, int line) { std::string doc = R"code(Computes the __op__ of array elements over given axes. 
@@ -309,7 +310,11 @@ For example:: broadcast_like([[1,2,3]], [[5,6,7],[7,8,9]]) = [[ 1., 2., 3.], [ 1., 2., 3.]]) + broadcast_like([9], [1,2,3,4,5], lhs_axes=(0,), rhs_axes=(-1,)) = [9,9,9,9,9] + )code" ADD_FILELINE) +.set_attr_parser(ParamParser) +.add_arguments(BroadcastLikeParam::__FIELDS__()) .set_attr("FInferShape", BroadcastLikeShape) .set_attr("FCompute", BroadcastCompute); diff --git a/tests/python/unittest/test_ndarray.py b/tests/python/unittest/test_ndarray.py index e9eea43b1ea5..071c770f55ef 100644 --- a/tests/python/unittest/test_ndarray.py +++ b/tests/python/unittest/test_ndarray.py @@ -549,8 +549,27 @@ def test_broadcast_like(): err = np.square(ndarray_ret - numpy_ret).mean() assert err < 1E-8 + def test_broadcast_like_axis(): + testcases = [ + # Lhs shape, rhs shape, lhs axis, rhs axis, result + [(1, 2, 1, 3), (5, 6, 7, 8), (0,2), (1,3), (6, 2, 8, 3)], + [(1,), (5,), (0,), (-1,), (5,)], + [(1, 7, 9, 1, 1), (9,), (-2,), (0,), (1, 7, 9, 9, 1)], + [(1, 7, 9, 1, 1), (9, 1), (-2, -1), (-2, -1), (1, 7, 9, 9, 1)], + [(2, 1), (1, 7, 9, 1, 1), (1,), (-3,), (2, 9)] + ] + + for test_data in testcases: + lhs = mx.nd.random.uniform(shape=test_data[0]) + rhs = mx.nd.random.uniform(shape=test_data[1]) + output = mx.nd.broadcast_like(lhs, rhs, lhs_axes=test_data[2], rhs_axes=test_data[3]) + + assert_exception(mx.nd.broadcast_like, mx.base.MXNetError, lhs, rhs, lhs_axes=(), rhs_axes=()) + assert output.shape == test_data[4] + test_broadcast_to() test_broadcast_like() + test_broadcast_like_axis() @with_seed() diff --git a/tests/python/unittest/test_symbol.py b/tests/python/unittest/test_symbol.py index aece9a378129..d022c68237a6 100644 --- a/tests/python/unittest/test_symbol.py +++ b/tests/python/unittest/test_symbol.py @@ -171,7 +171,7 @@ def test_symbol_infer_shape_var(): def test_symbol_fluent(): has_grad = set(['flatten', 'expand_dims', 'flip', 'tile', 'transpose', 'sum', 'nansum', 'prod', 'nanprod', 'mean', 'max', 'min', 'reshape', 'broadcast_to', 'split', - 'broadcast_axes', 'pad', 'swapaxes', 'slice', 'slice_axis', 'slice_like', + 'broadcast_axes', 'broadcast_like', 'pad', 'swapaxes', 'slice', 'slice_axis', 'slice_like', 'take', 'one_hot', 'pick', 'sort', 'topk', 'argsort', 'argmax', 'argmin', 'clip', 'abs', 'sign', 'sin', 'cos', 'tan', 'arcsin', 'arccos', 'arctan', 'degrees', 'radians', 'sinh', 'cosh', 'tanh', 'arcsinh', 'arccosh', 'arctanh', @@ -212,6 +212,7 @@ def check_fluent_regular(func, kwargs, shape=(5, 17, 1), equal_nan=False): check_fluent_regular('slice_like', {'axes': (0, -2), 'shape_like': mx.sym.zeros((3, 3))}) check_fluent_regular('clip', {'a_min': 0.25, 'a_max': 0.75}) check_fluent_regular('broadcast_axes', {'axis': (2,), 'size': (5,)}) + check_fluent_regular('broadcast_like', {'rhs': mx.sym.ones((1, 5)), 'lhs_axes': (0,), 'rhs_axes': (1,)}, shape=(1,9)) check_fluent_regular('pad', {'mode': 'constant', 'pad_width': (0,0,0,0,3,0,0,4)}, shape=(5, 17, 2, 3)) check_fluent_regular('reshape_like', {'rhs': mx.sym.ones((30, 17))}, shape=(5, 17, 2, 3)) From 4664a3005db259d9220fd843540d515a7d3d6036 Mon Sep 17 00:00:00 2001 From: cclauss Date: Thu, 23 Aug 2018 20:43:05 +0200 Subject: [PATCH 049/160] [MXNET-696] Define cmp() in Python 3 again (#12295) --- .../nightly/model_backwards_compatibility_check/common.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tests/nightly/model_backwards_compatibility_check/common.py b/tests/nightly/model_backwards_compatibility_check/common.py index 8950a9270839..d8ffca25a3f3 100644 --- 
a/tests/nightly/model_backwards_compatibility_check/common.py +++ b/tests/nightly/model_backwards_compatibility_check/common.py @@ -29,6 +29,13 @@ import re from mxnet.test_utils import assert_almost_equal +try: + cmp # Python 2 +except NameError: + # See: https://docs.python.org/3.0/whatsnew/3.0.html#ordering-comparisons + def cmp(x, y): # Python 3 + return (x > y) - (x < y) + # Set fixed random seeds. mx.random.seed(7) np.random.seed(7) From cc30fabe2f36278f2e251d49f72edee107eb5496 Mon Sep 17 00:00:00 2001 From: Roshani Nagmote Date: Thu, 23 Aug 2018 13:27:27 -0700 Subject: [PATCH 050/160] MXNet to ONNX export tutorial (#12297) * mxnet to onnx export tutorial added * test added * addressing review comment * comments addressed * few more fixes * addressing comments * addressing comments * retrigger build --- docs/api/python/contrib/onnx.md | 1 + docs/tutorials/onnx/export_mxnet_to_onnx.md | 134 ++++++++++++++++++++ tests/tutorials/test_tutorials.py | 3 + 3 files changed, 138 insertions(+) create mode 100644 docs/tutorials/onnx/export_mxnet_to_onnx.md diff --git a/docs/api/python/contrib/onnx.md b/docs/api/python/contrib/onnx.md index d7c34ec1e01f..449941459163 100644 --- a/docs/api/python/contrib/onnx.md +++ b/docs/api/python/contrib/onnx.md @@ -35,6 +35,7 @@ This document describes all the ONNX-MXNet APIs. :maxdepth: 1 /tutorials/onnx/super_resolution.md + /tutorials/onnx/export_mxnet_to_onnx.md /tutorials/onnx/inference_on_onnx_model.md /tutorials/onnx/fine_tuning_gluon.md ``` diff --git a/docs/tutorials/onnx/export_mxnet_to_onnx.md b/docs/tutorials/onnx/export_mxnet_to_onnx.md new file mode 100644 index 000000000000..a9c03bed8b12 --- /dev/null +++ b/docs/tutorials/onnx/export_mxnet_to_onnx.md @@ -0,0 +1,134 @@ + +# Exporting MXNet model to ONNX format + +[Open Neural Network Exchange (ONNX)](https://github.com/onnx/onnx) provides an open source format for AI models. It defines an extensible computation graph model, as well as definitions of built-in operators and standard data types. + +In this tutorial, we will show how you can save MXNet models to the ONNX format. + +MXNet-ONNX operators coverage and features are updated regularly. Visit the [ONNX operator coverage](https://cwiki.apache.org/confluence/display/MXNET/ONNX+Operator+Coverage) page for the latest information. + +In this tutorial, we will learn how to use MXNet to ONNX exporter on pre-trained models. + +## Prerequisites + +To run the tutorial you will need to have installed the following python modules: +- [MXNet >= 1.3.0](http://mxnet.incubator.apache.org/install/index.html) +- [onnx]( https://github.com/onnx/onnx#installation) v1.2.1 (follow the install guide) + +*Note:* MXNet-ONNX importer and exporter follows version 7 of ONNX operator set which comes with ONNX v1.2.1. + + +```python +import mxnet as mx +import numpy as np +from mxnet.contrib import onnx as onnx_mxnet +import logging +logging.basicConfig(level=logging.INFO) +``` + +## Downloading a model from the MXNet model zoo + +We download the pre-trained ResNet-18 [ImageNet](http://www.image-net.org/) model from the [MXNet Model Zoo](http://data.mxnet.io/models/imagenet/). +We will also download synset file to match labels. + +```python +# Download pre-trained resnet model - json and params by running following code. 
+path='http://data.mxnet.io/models/imagenet/' +[mx.test_utils.download(path+'resnet/18-layers/resnet-18-0000.params'), + mx.test_utils.download(path+'resnet/18-layers/resnet-18-symbol.json'), + mx.test_utils.download(path+'synset.txt')] +``` + +Now, we have downloaded ResNet-18 symbol, params and synset file on the disk. + +## MXNet to ONNX exporter API + +Let us describe the MXNet's `export_model` API. + +```python +help(onnx_mxnet.export_model) +``` + +```python +Help on function export_model in module mxnet.contrib.onnx.mx2onnx.export_model: + +export_model(sym, params, input_shape, input_type=, onnx_file_path=u'model.onnx', verbose=False) + Exports the MXNet model file, passed as a parameter, into ONNX model. + Accepts both symbol,parameter objects as well as json and params filepaths as input. + Operator support and coverage - https://cwiki.apache.org/confluence/display/MXNET/ONNX + + Parameters + ---------- + sym : str or symbol object + Path to the json file or Symbol object + params : str or symbol object + Path to the params file or params dictionary. (Including both arg_params and aux_params) + input_shape : List of tuple + Input shape of the model e.g [(1,3,224,224)] + input_type : data type + Input data type e.g. np.float32 + onnx_file_path : str + Path where to save the generated onnx file + verbose : Boolean + If true will print logs of the model conversion + + Returns + ------- + onnx_file_path : str + Onnx file path +``` + +`export_model` API can accept the MXNet model in one of the following two ways. + +1. MXNet sym, params objects: + * This is useful if we are training a model. At the end of training, we just need to invoke the `export_model` function and provide sym and params objects as inputs with other attributes to save the model in ONNX format. +2. MXNet's exported json and params files: + * This is useful if we have pre-trained models and we want to convert them to ONNX format. + +Since we have downloaded pre-trained model files, we will use the `export_model` API by passing the path for symbol and params files. + +## How to use MXNet to ONNX exporter API + +We will use the downloaded pre-trained model files (sym, params) and define input variables. + +```python +# Downloaded input symbol and params files +sym = './resnet-18-symbol.json' +params = './resnet-18-0000.params' + +# Standard Imagenet input - 3 channels, 224*224 +input_shape = (1,3,224,224) + +# Path of the output file +onnx_file = './mxnet_exported_resnet50.onnx' +``` + +We have defined the input parameters required for the `export_model` API. Now, we are ready to covert the MXNet model into ONNX format. + +```python +# Invoke export model API. It returns path of the converted onnx model +converted_model_path = onnx_mxnet.export_model(sym, params, [input_shape], np.float32, onnx_file) +``` + +This API returns path of the converted model which you can later use to import the model into other frameworks. + +## Check validity of ONNX model + +Now we can check validity of the converted ONNX model by using ONNX checker tool. The tool will validate the model by checking if the content contains valid protobuf: + +```python +from onnx import checker +import onnx + +# Load onnx model +model_proto = onnx.load(converted_model_path) + +# Check if converted ONNX protobuf is valid +checker.check_graph(model_proto.graph) +``` + +If the converted protobuf format doesn't qualify to ONNX proto specifications, the checker will throw errors, but in this case it successfully passes. 
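The same `checker` module can also validate the complete model rather than only its graph. The following is a minimal sketch, assuming the `model_proto` object loaded above; `check_model` covers model-level metadata such as the opset imports in addition to the graph:

```python
from onnx import checker
import onnx

# Reload the exported model (already loaded above; repeated here so the snippet is self-contained)
model_proto = onnx.load(converted_model_path)

# Validate the entire ModelProto, not just the GraphProto
checker.check_model(model_proto)
```

As with `check_graph`, a successful call simply returns; any violation of the ONNX proto specification raises an exception describing the problem.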
+ +This method confirms exported model protobuf is valid. Now, the model is ready to be imported in other frameworks for inference! + + diff --git a/tests/tutorials/test_tutorials.py b/tests/tutorials/test_tutorials.py index 22d00c181b64..2c8768228d71 100644 --- a/tests/tutorials/test_tutorials.py +++ b/tests/tutorials/test_tutorials.py @@ -124,6 +124,9 @@ def test_nlp_cnn(): def test_onnx_super_resolution(): assert _test_tutorial_nb('onnx/super_resolution') +def test_onnx_export_mxnet_to_onnx(): + assert _test_tutorial_nb('onnx/export_mxnet_to_onnx') + def test_onnx_fine_tuning_gluon(): assert _test_tutorial_nb('onnx/fine_tuning_gluon') From 490cf99f629c1effd6e1a4a0b5a425042bba1bac Mon Sep 17 00:00:00 2001 From: Chance Bair Date: Fri, 24 Aug 2018 16:27:49 +0200 Subject: [PATCH 051/160] Disable flaky test test_operator.test_dropout (#12330) --- tests/python/unittest/test_operator.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/python/unittest/test_operator.py b/tests/python/unittest/test_operator.py index e1e5c9e61c26..fc6b81454229 100644 --- a/tests/python/unittest/test_operator.py +++ b/tests/python/unittest/test_operator.py @@ -5728,6 +5728,7 @@ def test_stack(): @with_seed() +@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/12329") def test_dropout(): def zero_count(array, ratio): zeros = 0 From 5189495b0ea1b1c924bfec3adf7233149b098f01 Mon Sep 17 00:00:00 2001 From: Carin Meier Date: Fri, 24 Aug 2018 11:11:07 -0400 Subject: [PATCH 052/160] add initializer test (#12196) add profiler and lr-scheduler tests basic symbol test add more symbol tests re-enable deeper visualization graph example and add simple test running cljfmt add license - fix typo --- .../examples/tutorial/src/tutorial/symbol.clj | 1 - .../src/org/apache/clojure_mxnet/profiler.clj | 2 +- .../apache/clojure_mxnet/initializer_test.clj | 45 +++++++++++++++++++ .../clojure_mxnet/lr_scheduler_test.clj | 24 ++++++++++ .../apache/clojure_mxnet/profiler_test.clj | 31 +++++++++++++ .../org/apache/clojure_mxnet/symbol_test.clj | 32 ++++++++++++- .../clojure_mxnet/visualization_test.clj | 32 +++++++++++++ 7 files changed, 164 insertions(+), 3 deletions(-) create mode 100644 contrib/clojure-package/test/org/apache/clojure_mxnet/initializer_test.clj create mode 100644 contrib/clojure-package/test/org/apache/clojure_mxnet/lr_scheduler_test.clj create mode 100644 contrib/clojure-package/test/org/apache/clojure_mxnet/profiler_test.clj create mode 100644 contrib/clojure-package/test/org/apache/clojure_mxnet/visualization_test.clj diff --git a/contrib/clojure-package/examples/tutorial/src/tutorial/symbol.clj b/contrib/clojure-package/examples/tutorial/src/tutorial/symbol.clj index 0dd0e4daeb2e..bec71dee81f5 100644 --- a/contrib/clojure-package/examples/tutorial/src/tutorial/symbol.clj +++ b/contrib/clojure-package/examples/tutorial/src/tutorial/symbol.clj @@ -50,7 +50,6 @@ net ;=> #object[org.apache.mxnet.Symbol 0x5c78c8c2 "org.apache.mxnet.Symbol@5c78 (def b (sym/variable "b")) (def c (sym/+ a b)) - ;; Each symbol takes a (unique) string name. NDArray and Symbol both represent a single tensor. Operators represent the computation between tensors. Operators take symbol (or NDArray) as inputs and might also additionally accept other hyperparameters such as the number of hidden neurons (num_hidden) or the activation type (act_type) and produce the output. ;; We can view a symbol simply as a function taking several arguments. 
And we can retrieve those arguments with the following method call: diff --git a/contrib/clojure-package/src/org/apache/clojure_mxnet/profiler.clj b/contrib/clojure-package/src/org/apache/clojure_mxnet/profiler.clj index 48fd0414d82f..5b4f9b198131 100644 --- a/contrib/clojure-package/src/org/apache/clojure_mxnet/profiler.clj +++ b/contrib/clojure-package/src/org/apache/clojure_mxnet/profiler.clj @@ -36,7 +36,7 @@ ([state] (Profiler/profilerSetState state)) ([] - (profiler-set-state false))) + (profiler-set-state "stop"))) (defn dump-profile " Dump profile and stop profiler. Use this to save profile diff --git a/contrib/clojure-package/test/org/apache/clojure_mxnet/initializer_test.clj b/contrib/clojure-package/test/org/apache/clojure_mxnet/initializer_test.clj new file mode 100644 index 000000000000..288a41496f0b --- /dev/null +++ b/contrib/clojure-package/test/org/apache/clojure_mxnet/initializer_test.clj @@ -0,0 +1,45 @@ +;; +;; Licensed to the Apache Software Foundation (ASF) under one or more +;; contributor license agreements. See the NOTICE file distributed with +;; this work for additional information regarding copyright ownership. +;; The ASF licenses this file to You under the Apache License, Version 2.0 +;; (the "License"); you may not use this file except in compliance with +;; the License. You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. +;; + +(ns org.apache.clojure-mxnet.initializer-test + (:require [org.apache.clojure-mxnet.initializer :as initializer] + [org.apache.clojure-mxnet.ndarray :as ndarray] + [clojure.test :refer :all])) + +(defn exercise-initializer [init] + (-> init + (initializer/init-weight "test-weight" (ndarray/zeros [3 3]))) + + (is (number? + (-> init + (initializer/apply "test-weight" (ndarray/zeros [3 3])) + (ndarray/->vec) + (first))))) + +(deftest test-uniform + (exercise-initializer (initializer/uniform)) + (exercise-initializer (initializer/uniform 0.8))) + +(deftest test-normal + (exercise-initializer (initializer/normal)) + (exercise-initializer (initializer/normal 0.2))) + +(deftest test-xavier + (exercise-initializer (initializer/xavier)) + (exercise-initializer (initializer/xavier {:rand-type "gaussian" + :factor-type "in" + :magnitude 2}))) diff --git a/contrib/clojure-package/test/org/apache/clojure_mxnet/lr_scheduler_test.clj b/contrib/clojure-package/test/org/apache/clojure_mxnet/lr_scheduler_test.clj new file mode 100644 index 000000000000..c60389a87020 --- /dev/null +++ b/contrib/clojure-package/test/org/apache/clojure_mxnet/lr_scheduler_test.clj @@ -0,0 +1,24 @@ +;; +;; Licensed to the Apache Software Foundation (ASF) under one or more +;; contributor license agreements. See the NOTICE file distributed with +;; this work for additional information regarding copyright ownership. +;; The ASF licenses this file to You under the Apache License, Version 2.0 +;; (the "License"); you may not use this file except in compliance with +;; the License. 
You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. +;; + +(ns org.apache.clojure-mxnet.lr-scheduler-test + (:require [org.apache.clojure-mxnet.lr-scheduler :as lr-scheduler] + [clojure.test :refer :all])) + +(deftest test-factor-scheduler + ;; just excercising + (lr-scheduler/factor-scheduler 2 0.3)) diff --git a/contrib/clojure-package/test/org/apache/clojure_mxnet/profiler_test.clj b/contrib/clojure-package/test/org/apache/clojure_mxnet/profiler_test.clj new file mode 100644 index 000000000000..f4b74343fa1d --- /dev/null +++ b/contrib/clojure-package/test/org/apache/clojure_mxnet/profiler_test.clj @@ -0,0 +1,31 @@ +;; +;; Licensed to the Apache Software Foundation (ASF) under one or more +;; contributor license agreements. See the NOTICE file distributed with +;; this work for additional information regarding copyright ownership. +;; The ASF licenses this file to You under the Apache License, Version 2.0 +;; (the "License"); you may not use this file except in compliance with +;; the License. You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. 
+;; + +(ns org.apache.clojure-mxnet.profiler-test + (:require [org.apache.clojure-mxnet.profiler :as profiler] + [clojure.test :refer :all])) + +;; Just excercising the interop + +(deftest test-profiler + (do + (profiler/profiler-set-config {:filename "test-profile.json" + :profile-symbolic 1}) + (profiler/profiler-set-state "run") + (profiler/profiler-set-state "stop") + (profiler/profiler-set-state) + (profiler/dump-profile 0))) diff --git a/contrib/clojure-package/test/org/apache/clojure_mxnet/symbol_test.clj b/contrib/clojure-package/test/org/apache/clojure_mxnet/symbol_test.clj index 6df2a10f888a..89b51237d3a5 100644 --- a/contrib/clojure-package/test/org/apache/clojure_mxnet/symbol_test.clj +++ b/contrib/clojure-package/test/org/apache/clojure_mxnet/symbol_test.clj @@ -17,9 +17,12 @@ (ns org.apache.clojure-mxnet.symbol-test (:require [org.apache.clojure-mxnet.dtype :as dtype] + [org.apache.clojure-mxnet.executor :as executor] + [org.apache.clojure-mxnet.ndarray :as ndarray] [org.apache.clojure-mxnet.symbol :as sym] [org.apache.clojure-mxnet.util :as util] - [clojure.test :refer :all])) + [clojure.test :refer :all] + [org.apache.clojure-mxnet.context :as context])) (deftest test-compose (let [data (sym/variable "data") @@ -61,3 +64,30 @@ (let [data (sym/variable "data") data2 (sym/clone data)] (is (= (sym/to-json data) (sym/to-json data2))))) + +(deftest test-basic-bind + (let [a (sym/variable "a") + b (sym/variable "b") + c (sym/+ a b) + ex (sym/bind c {"a" (ndarray/ones [2 2]) "b" (ndarray/ones [2 2])})] + (is (= [2.0 2.0 2.0 2.0]) (-> (executor/forward ex) + (executor/outputs) + (first) + (ndarray/->vec))))) +(deftest test-simple-bind + (let [a (sym/ones [3]) + b (sym/ones [3]) + c (sym/+ a b) + ex (sym/simple-bind c (context/default-context))] + (is (= [2.0 2.0 2.0] (-> (executor/forward ex) + (executor/outputs) + (first) + (ndarray/->vec)))))) + +(deftest test-infer-shape + (let [a (sym/variable "a") + b (sym/variable "b") + c (sym/+ a b) + [arg-shapes out-shapes] (sym/infer-shape c {"a" [2 2] "b" [2 2]})] + (is (= [[2 2] [2 2]] arg-shapes)) + (is (= [[2 2]] out-shapes)))) diff --git a/contrib/clojure-package/test/org/apache/clojure_mxnet/visualization_test.clj b/contrib/clojure-package/test/org/apache/clojure_mxnet/visualization_test.clj new file mode 100644 index 000000000000..a2bea9478390 --- /dev/null +++ b/contrib/clojure-package/test/org/apache/clojure_mxnet/visualization_test.clj @@ -0,0 +1,32 @@ +;; +;; Licensed to the Apache Software Foundation (ASF) under one or more +;; contributor license agreements. See the NOTICE file distributed with +;; this work for additional information regarding copyright ownership. +;; The ASF licenses this file to You under the Apache License, Version 2.0 +;; (the "License"); you may not use this file except in compliance with +;; the License. You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. 
+;; + +(ns org.apache.clojure-mxnet.visualization-test + (:require [org.apache.clojure-mxnet.symbol :as sym] + [org.apache.clojure-mxnet.visualization :as viz] + [clojure.test :refer :all]) + (:import (org.apache.mxnet Visualization$Dot))) + +(deftest test-plot-network + (let [to-plot-sym (as-> (sym/variable "data") data + (sym/flatten "fl" {:data data}) + (sym/softmax-output "softmax" {:data data})) + dot (viz/plot-network to-plot-sym + {"data" [1 1 28 28]} + {:title "foo" + :node-attrs {:shape "oval" :fixedsize "false"}})] + (is (instance? Visualization$Dot dot)))) From 7bfe42786f79c3214b367aa9ef756f9e3f0eb132 Mon Sep 17 00:00:00 2001 From: Taliesin Beynon Date: Fri, 24 Aug 2018 18:00:13 +0200 Subject: [PATCH 053/160] Allow stop of arange to be inferred from dims. (#12064) * Allow stop of arange to be inferred from dims. Enabled via a flag. * modify NDArray/Symbol to add infer_range param * Add test for arange-with-inference. * Add a comment to readme about JDK 8. * Fix approx=. Include a test of this fix as well. --- contrib/clojure-package/README.md | 4 ++- .../src/org/apache/clojure_mxnet/ndarray.clj | 2 +- .../src/org/apache/clojure_mxnet/symbol.clj | 12 ++++++++- .../apache/clojure_mxnet/operator_test.clj | 11 ++++++++ .../org/apache/clojure_mxnet/test_util.clj | 6 +++-- .../org/apache/clojure_mxnet/util_test.clj | 8 ++++++ python/mxnet/ndarray/ndarray.py | 4 +-- python/mxnet/symbol/symbol.py | 4 +-- .../main/scala/org/apache/mxnet/NDArray.scala | 9 +++---- .../main/scala/org/apache/mxnet/Symbol.scala | 25 ++++++++++++++++--- src/operator/tensor/init_op.h | 10 +++++++- tests/python/unittest/test_operator.py | 8 ++++++ 12 files changed, 85 insertions(+), 18 deletions(-) diff --git a/contrib/clojure-package/README.md b/contrib/clojure-package/README.md index 5e7356caf647..ea678ccf2db4 100644 --- a/contrib/clojure-package/README.md +++ b/contrib/clojure-package/README.md @@ -107,7 +107,9 @@ The jars from maven with the needed MXNet native binaries in it. On startup, the ### Build from MXNET Source -Checkout the latest sha from the main package +First, ensure you have JDK 8 on your system. Later versions may produce cryptic build errors mentioning `scala.reflect.internal.MissingRequirementError`. + +Checkout the latest SHA from the main package: `git clone --recursive https://github.com/apache/incubator-mxnet.git ~/mxnet` `cd ~/mxnet` diff --git a/contrib/clojure-package/src/org/apache/clojure_mxnet/ndarray.clj b/contrib/clojure-package/src/org/apache/clojure_mxnet/ndarray.clj index e37a8bc8c98d..7ca4ede9733c 100644 --- a/contrib/clojure-package/src/org/apache/clojure_mxnet/ndarray.clj +++ b/contrib/clojure-package/src/org/apache/clojure_mxnet/ndarray.clj @@ -89,7 +89,7 @@ (NDArray/arange (float start) ($/option (float stop)) step repeat ctx dtype)) ([start stop] (arange start stop {}))) - + (defn slice "Return a sliced NDArray that shares memory with current one." 
([ndarray i] diff --git a/contrib/clojure-package/src/org/apache/clojure_mxnet/symbol.clj b/contrib/clojure-package/src/org/apache/clojure_mxnet/symbol.clj index 42ae034eb6d3..12135fb75cab 100644 --- a/contrib/clojure-package/src/org/apache/clojure_mxnet/symbol.clj +++ b/contrib/clojure-package/src/org/apache/clojure_mxnet/symbol.clj @@ -135,10 +135,20 @@ ([start stop {:keys [step repeat dtype] :or {step (float 1) repeat (int 1) dtype base/MX_REAL_TYPE} :as opts}] - (Symbol/arange (float start) ($/option (float stop)) step repeat nil dtype)) + (Symbol/arange (float start) ($/option (float stop)) step repeat false nil dtype)) ([start stop] (arange start stop {}))) +(defn arange-with-inference + "Behaves like arange operator, but infers the stop value from the output shape, + which must be known from the rest of the net." + ([start {:keys [step repeat dtype] + :or {step (float 1) repeat (int 1) dtype base/MX_REAL_TYPE} + :as opts}] + (Symbol/arange (float start) ($/option nil) step repeat true nil dtype)) + ([start] + (arange-with-inference start {}))) + ;;; manually defined because of a conflicting arity of 2 with the auto-gen (defn min ([sym-name kwargs-map symbol-list kwargs-map-1] diff --git a/contrib/clojure-package/test/org/apache/clojure_mxnet/operator_test.clj b/contrib/clojure-package/test/org/apache/clojure_mxnet/operator_test.clj index a71a312e1ae6..1b4b2ea2fbe3 100644 --- a/contrib/clojure-package/test/org/apache/clojure_mxnet/operator_test.clj +++ b/contrib/clojure-package/test/org/apache/clojure_mxnet/operator_test.clj @@ -222,6 +222,17 @@ (is (= 0 (count (executor/grad-arrays exec)))) (is (approx= 1e-4 result (-> (executor/outputs exec) (first)))))) +(deftest test-arange-with-inference + (let [arange (sym/arange-with-inference 0) + data (sym/variable "data") + added (sym/+ arange data) + result (range 0 4) + data-tmp (ndarray/zeros [4]) + exec (sym/bind added (context/default-context) {"data" data-tmp})] + (executor/forward exec) + (is (= 0 (count (executor/grad-arrays exec)))) + (is (approx= 1e-4 result (-> (executor/outputs exec) (first)))))) + (deftest test-scalar-pow (let [data (sym/variable "data") shape-vec [1 1] diff --git a/contrib/clojure-package/test/org/apache/clojure_mxnet/test_util.clj b/contrib/clojure-package/test/org/apache/clojure_mxnet/test_util.clj index dcdbea645796..ecd54ca72773 100644 --- a/contrib/clojure-package/test/org/apache/clojure_mxnet/test_util.clj +++ b/contrib/clojure-package/test/org/apache/clojure_mxnet/test_util.clj @@ -22,6 +22,8 @@ (if (and (number? x) (number? y)) (let [diff (Math/abs (- x y))] (< diff tolerance)) - (reduce (fn [x y] (and x y)) - (map #(approx= tolerance %1 %2) x y)))) + (and + (= (count x) (count y)) + (reduce (fn [x y] (and x y)) + (map #(approx= tolerance %1 %2) x y))))) diff --git a/contrib/clojure-package/test/org/apache/clojure_mxnet/util_test.clj b/contrib/clojure-package/test/org/apache/clojure_mxnet/util_test.clj index 5551fab435f6..de3480827ba4 100644 --- a/contrib/clojure-package/test/org/apache/clojure_mxnet/util_test.clj +++ b/contrib/clojure-package/test/org/apache/clojure_mxnet/util_test.clj @@ -21,6 +21,7 @@ [org.apache.clojure-mxnet.util :as util] [org.apache.clojure-mxnet.ndarray :as ndarray] [org.apache.clojure-mxnet.symbol :as sym] + [org.apache.clojure-mxnet.test-util :as test-util] [clojure.spec.alpha :as s]) (:import (org.apache.mxnet Shape NDArrayFuncReturn NDArray) (scala.collection Map Set) @@ -183,3 +184,10 @@ (deftest test-validate (is (nil? (util/validate! string? 
"foo" "Not a string!"))) (is (thrown-with-msg? Exception #"Not a string!" (util/validate! ::x 1 "Not a string!")))) + +(deftest test-approx= + (let [data1 [1 1 1 1] + data2 [1 1 1 1 9 9 9 9] + data3 [1 1 1 2]] + (is (not (test-util/approx= 1e-9 data1 data2))) + (is (test-util/approx= 2 data1 data3)))) \ No newline at end of file diff --git a/python/mxnet/ndarray/ndarray.py b/python/mxnet/ndarray/ndarray.py index 46b21a90d4c6..d6d619f30cab 100644 --- a/python/mxnet/ndarray/ndarray.py +++ b/python/mxnet/ndarray/ndarray.py @@ -2475,7 +2475,7 @@ def moveaxis(tensor, source, destination): # pylint: disable= no-member, protected-access, too-many-arguments, redefined-outer-name -def arange(start, stop=None, step=1.0, repeat=1, ctx=None, dtype=mx_real_t): +def arange(start, stop=None, step=1.0, repeat=1, infer_range=False, ctx=None, dtype=mx_real_t): """Returns evenly spaced values within a given interval. Values are generated within the half-open interval [`start`, `stop`). In other @@ -2519,7 +2519,7 @@ def arange(start, stop=None, step=1.0, repeat=1, ctx=None, dtype=mx_real_t): if ctx is None: ctx = current_context() return _internal._arange(start=start, stop=stop, step=step, repeat=repeat, - dtype=dtype, ctx=str(ctx)) + infer_range=infer_range, dtype=dtype, ctx=str(ctx)) # pylint: enable= no-member, protected-access, too-many-arguments diff --git a/python/mxnet/symbol/symbol.py b/python/mxnet/symbol/symbol.py index 5f6cbd6b6e14..da5533f36668 100644 --- a/python/mxnet/symbol/symbol.py +++ b/python/mxnet/symbol/symbol.py @@ -2886,7 +2886,7 @@ def full(shape, val, dtype=None, **kwargs): return _internal._full(shape=shape, dtype=dtype, value=float(val), **kwargs) # pylint: disable=redefined-outer-name -def arange(start, stop=None, step=1.0, repeat=1, name=None, dtype=None): +def arange(start, stop=None, step=1.0, repeat=1, infer_range=False, name=None, dtype=None): """Returns evenly spaced values within a given interval. Parameters @@ -2911,7 +2911,7 @@ def arange(start, stop=None, step=1.0, repeat=1, name=None, dtype=None): if dtype is None: dtype = _numpy.float32 return _internal._arange(start=start, stop=stop, step=step, repeat=repeat, - name=name, dtype=dtype) + infer_range=infer_range, name=name, dtype=dtype) def histogram(a, bins=10, range=None, **kwargs): """Compute the histogram of the input data. diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/NDArray.scala b/scala-package/core/src/main/scala/org/apache/mxnet/NDArray.scala index 548c30b73a14..8b5e1e010954 100644 --- a/scala-package/core/src/main/scala/org/apache/mxnet/NDArray.scala +++ b/scala-package/core/src/main/scala/org/apache/mxnet/NDArray.scala @@ -407,11 +407,10 @@ object NDArray extends NDArrayBase { * @param dType The data type of the `NDArray`. The default datatype is `DType.Float32`. * @return NDArray of evenly spaced values in the specified range. 
*/ - def arange(start: Float, stop: Option[Float] = None, step: Float = 1.0f, - repeat: Int = 1, ctx: Context = Context.defaultCtx, - dType: DType = Base.MX_REAL_TYPE): NDArray = { - val params = Map("start" -> start, "step" -> step, - "repeat" -> repeat, "ctx" -> ctx.toString, "dtype" -> dType.toString()) + def arange(start: Float, stop: Option[Float], step: Float, + repeat: Int, ctx: Context, dType: DType): NDArray = { + val params = Map("start" -> start, "step" -> step, "repeat" -> repeat, + "infer_range" -> false, "ctx" -> ctx.toString, "dtype" -> dType.toString()) val fParams = if (stop == None) params else params ++ Map("stop" -> stop.get) NDArray.genericNDArrayFunctionInvoke("_arange", Seq(), fParams)(0) } diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/Symbol.scala b/scala-package/core/src/main/scala/org/apache/mxnet/Symbol.scala index 194d3681523f..e3e1a320358e 100644 --- a/scala-package/core/src/main/scala/org/apache/mxnet/Symbol.scala +++ b/scala-package/core/src/main/scala/org/apache/mxnet/Symbol.scala @@ -955,9 +955,28 @@ object Symbol extends SymbolBase { * @return Symbol The created Symbol. */ def arange(start: Float, stop: Option[Float] = None, step: Float = 1.0f, - repeat: Int = 1, name: String = null, dType: DType = Base.MX_REAL_TYPE): Symbol = { - val params = Map("start" -> start, "step" -> step, - "repeat" -> repeat, "dtype" -> dType.toString()) + repeat: Int = 1, name: String = null, dType: DType = Base.MX_REAL_TYPE): Symbol = { + arange(start, stop, step, repeat, infer_range = false, name, dType) + } + + /** + * Returns evenly spaced values within a given interval. + * stop value can be infered from the output shape, + * which must be known from the rest of the net. + * @param start Start of interval. The default start value is 0. + * @param stop End of interval. + * @param step Spacing between values. The default step size is 1. + * @param repeat Number of times to repeat each element. The default repeat count is 1. + * @param infer_range Infer the stop value from output shape + * @param ctx Device context. Default context is the current default context. + * @param dType The data type of the `NDArray`. The default datatype is `DType.Float32`. + * @return NDArray of evenly spaced values in the specified range. + */ + def arange(start: Float, stop: Option[Float], step: Float, + repeat: Int, infer_range: Boolean, name: String, + dType: DType): Symbol = { + val params = Map("start" -> start, "step" -> step, "repeat" -> repeat, + "infer_range" -> infer_range, "dtype" -> dType.toString()) val fParams = if (stop == None) params else params ++ Map("stop" -> stop.get) createSymbolGeneral("_arange", name, null, Array.empty[Symbol], fParams) } diff --git a/src/operator/tensor/init_op.h b/src/operator/tensor/init_op.h index 4af3a40f42ab..304911a02a78 100644 --- a/src/operator/tensor/init_op.h +++ b/src/operator/tensor/init_op.h @@ -123,6 +123,7 @@ struct RangeParam : public dmlc::Parameter { dmlc::optional stop; double step; int repeat; + bool infer_range; std::string ctx; int dtype; DMLC_DECLARE_PARAMETER(RangeParam) { @@ -140,6 +141,10 @@ struct RangeParam : public dmlc::Parameter { .set_default(1) .describe("The repeating time of all elements." 
" E.g repeat=3, the element a will be repeated three times --> a, a, a."); + DMLC_DECLARE_FIELD(infer_range) + .set_default(false) + .describe("Whether to infer the stop position from the start, step, repeat, and output tensor" + "size."); DMLC_DECLARE_FIELD(ctx) .set_default("") .describe("Context of output, in format [cpu|gpu|cpu_pinned](n)." @@ -176,7 +181,7 @@ struct InitOpWithScalarParam : dmlc::Parameter { inline void RangeParamParser(nnvm::NodeAttrs* attrs) { RangeParam param; param.Init(attrs->dict); - if (!static_cast(param.stop)) { + if (!static_cast(param.infer_range) && !static_cast(param.stop)) { param.stop = param.start; param.start = 0; } @@ -471,6 +476,9 @@ inline bool RangeShape(const nnvm::NodeAttrs& attrs, << "Range does not support step=0, received " << param.step; CHECK(param.repeat > 0) << "Range only supports repeat > 0, received " << param.repeat; + if (param.infer_range && !param.stop.has_value()) { + return false; + } if (param.step > 0) { CHECK(param.start < param.stop.value()) << "Invalid range (start, stop, step) = " diff --git a/tests/python/unittest/test_operator.py b/tests/python/unittest/test_operator.py index fc6b81454229..fd60611add8c 100644 --- a/tests/python/unittest/test_operator.py +++ b/tests/python/unittest/test_operator.py @@ -3646,10 +3646,18 @@ def test_arange(): nd_out = mx.nd.arange(*config, repeat=repeats, dtype=dtype) assert_almost_equal(np_out, nd_out.asnumpy()) + def test_arange_inferstop(): + s = mx.sym.arange(start=0, stop=None, infer_range=True) + s = mx.sym.elemwise_add(s, mx.sym.zeros(shape=[5])) + exe = s.bind(ctx=mx.cpu(), args={}) + exe.forward() + assert_almost_equal(exe.outputs[0].asnumpy(), np.array([0,1,2,3,4])) + test_basic_val_init(mx.sym.zeros, np.zeros, (3, 4), np.float32) test_basic_val_init(mx.sym.ones, np.ones, 3, np.int32) test_basic_val_init(mx.sym.ones, np.ones, (2, 2, 3), np.float16) test_arange() + test_arange_inferstop() @with_seed() From ac10e95fc8e128c6f44c11947fe6571395a251fe Mon Sep 17 00:00:00 2001 From: Vishaal Kapoor <40836875+vishaalkapoor@users.noreply.github.com> Date: Fri, 24 Aug 2018 09:09:42 -0700 Subject: [PATCH 054/160] [MXAPPS-581] Disable a long test in the SD nightly. (#12326) * Disable a test that's taking longer than 10 minutes with the Python 2 interpreter in the Straight Dope Nightly. --- tests/nightly/straight_dope/test_notebooks_single_gpu.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/nightly/straight_dope/test_notebooks_single_gpu.py b/tests/nightly/straight_dope/test_notebooks_single_gpu.py index a60498c87868..555b8092b392 100644 --- a/tests/nightly/straight_dope/test_notebooks_single_gpu.py +++ b/tests/nightly/straight_dope/test_notebooks_single_gpu.py @@ -35,6 +35,7 @@ 'chapter02_supervised-learning/environment', 'chapter03_deep-neural-networks/kaggle-gluon-kfold', 'chapter04_convolutional-neural-networks/deep-cnns-alexnet', # > 10 mins. + 'chapter05_recurrent-neural-networks/rnns-gluon', # > 10 mins. 'chapter06_optimization/gd-sgd-scratch', # Overflow warning is intended. 'chapter06_optimization/gd-sgd-gluon', # Overflow warning is intended. 
'chapter07_distributed-learning/multiple-gpus-scratch', @@ -176,9 +177,6 @@ def test_lstm_scratch(self): def test_gru_scratch(self): assert _test_notebook('chapter05_recurrent-neural-networks/gru-scratch') - def test_rnns_gluon(self): - assert _test_notebook('chapter05_recurrent-neural-networks/rnns-gluon') - # Chapter 6 def test_optimization_intro(self): From 2276bb0e30b1fe601eb288cb4f1b673484892d4b Mon Sep 17 00:00:00 2001 From: cclauss Date: Fri, 24 Aug 2018 18:19:56 +0200 Subject: [PATCH 055/160] Tighten up PyLint directives again (#12322) * Tighten up PyLint directives again * Disable too-many-locals * Doubly disable too-many-locals --- ci/other/pylintrc | 20 +++++++------------- python/mxnet/base.py | 2 +- python/mxnet/executor_manager.py | 2 +- python/mxnet/gluon/rnn/rnn_cell.py | 14 ++++++-------- python/mxnet/image/detection.py | 3 +-- python/mxnet/model.py | 3 +-- python/mxnet/ndarray/register.py | 1 + python/mxnet/symbol/symbol.py | 1 + python/mxnet/util.py | 2 +- python/setup.py | 3 ++- 10 files changed, 22 insertions(+), 29 deletions(-) diff --git a/ci/other/pylintrc b/ci/other/pylintrc index db3da4cae57d..841a3bea13fa 100644 --- a/ci/other/pylintrc +++ b/ci/other/pylintrc @@ -83,24 +83,15 @@ enable=indexing-exception,old-raise-syntax,undefined-variable # no Warning level messages displayed, use"--disable=all --enable=classes # --disable=W" disable= - design, - similarities, no-self-use, attribute-defined-outside-init, - locally-disabled, - star-args, - pointless-except, bad-option-value, global-statement, fixme, - suppressed-message, - useless-suppression, - locally-enabled, no-member, no-name-in-module, import-error, unsubscriptable-object, - unbalanced-tuple-unpacking, protected-access, superfluous-parens, invalid-name, @@ -111,15 +102,18 @@ disable= chained-comparison, consider-using-dict-comprehension, consider-using-set-comprehension, - invalid-envvar-default, - singleton-comparison, try-except-raise, useless-object-inheritance, - useless-return, c-extension-no-member, deprecated-lambda, redefined-builtin, - unexpected-keyword-arg + too-few-public-methods, + too-many-arguments, + too-many-branches, + too-many-instance-attributes, + too-many-locals, + too-many-public-methods, + too-many-statements # disable=unicode-builtin,delslice-method,using-cmp-argument,setslice-method,dict-view-method,parameter-unpacking,range-builtin-not-iterating,print-statement,file-builtin,old-raise-syntax,basestring-builtin,execfile-builtin,indexing-exception,import-star-module-level,coerce-method,long-builtin,old-ne-operator,old-division,no-absolute-import,raw_input-builtin,old-octal-literal,oct-method,xrange-builtin,hex-method,unpacking-in-except,nonzero-method,raising-string,intern-builtin,reload-builtin,metaclass-assignment,cmp-method,filter-builtin-not-iterating,apply-builtin,map-builtin-not-iterating,next-method-called,unichr-builtin,buffer-builtin,dict-iter-method,input-builtin,coerce-builtin,getslice-method,useless-suppression,standarderror-builtin,zip-builtin-not-iterating,suppressed-message,cmp-builtin,backtick,long-suffix,reduce-builtin,round-builtin diff --git a/python/mxnet/base.py b/python/mxnet/base.py index 2bfcdd62eda0..89e1c9e087b5 100644 --- a/python/mxnet/base.py +++ b/python/mxnet/base.py @@ -574,7 +574,7 @@ def _get_op_name_prefix(op_name): return "" -# pylint: enable=too-many-locals, invalid-name +# pylint: enable=invalid-name def _init_op_module(root_namespace, module_name, make_op_func): """ Registers op functions created by `make_op_func` under diff --git 
a/python/mxnet/executor_manager.py b/python/mxnet/executor_manager.py index 825aa76e43ce..9a53562204b8 100644 --- a/python/mxnet/executor_manager.py +++ b/python/mxnet/executor_manager.py @@ -127,7 +127,7 @@ def _bind_exec(sym, ctx, input_shapes, param_names, need_grad=False, assert(arg_types is not None) arg_arrays = [] - grad_arrays = {} if need_grad != False else None + grad_arrays = {} if need_grad is not False else None arg_names = sym.list_arguments() diff --git a/python/mxnet/gluon/rnn/rnn_cell.py b/python/mxnet/gluon/rnn/rnn_cell.py index 21cc8043154e..557837c3fa51 100644 --- a/python/mxnet/gluon/rnn/rnn_cell.py +++ b/python/mxnet/gluon/rnn/rnn_cell.py @@ -252,14 +252,12 @@ def unroll(self, length, inputs, begin_state=None, layout='NTC', merge_outputs=N #pylint: disable=no-self-use def _get_activation(self, F, inputs, activation, **kwargs): """Get activation function. Convert if is string""" - if activation == 'tanh': - return F.tanh(inputs, **kwargs) - elif activation == 'sigmoid': - return F.sigmoid(inputs, **kwargs) - elif activation == 'relu': - return F.relu(inputs, **kwargs) - elif activation == 'softsign': - return F.softsign(inputs, **kwargs) + func = {'tanh': F.tanh, + 'relu': F.relu, + 'sigmoid': F.sigmoid, + 'softsign': F.softsign}.get(activation) + if func: + return func(inputs, **kwargs) elif isinstance(activation, string_types): return F.Activation(inputs, act_type=activation, **kwargs) elif isinstance(activation, LeakyReLU): diff --git a/python/mxnet/image/detection.py b/python/mxnet/image/detection.py index 63a44ab4643c..caaa4006302d 100644 --- a/python/mxnet/image/detection.py +++ b/python/mxnet/image/detection.py @@ -308,8 +308,7 @@ def _random_crop_proposal(self, label, height, width): h -= 1 w = int(round(h * ratio)) area = w * h - if (area < min_area or area > max_area or w > width or h > height \ - or w <= 0 or h <= 0): + if not (min_area <= area <= max_area and 0 <= w <= width and 0 <= h <= height): continue y = random.randint(0, max(0, height - h)) diff --git a/python/mxnet/model.py b/python/mxnet/model.py index 3a50553a615c..2666f8bbcd4f 100644 --- a/python/mxnet/model.py +++ b/python/mxnet/model.py @@ -132,7 +132,7 @@ def _update_params_on_kvstore_nccl(param_arrays, grad_arrays, kvstore, param_nam size = len(valid_grad_arrays) start = 0 # Use aggregation by default only with NCCL - default_batch = 16 + default_batch = '16' batch = int(os.getenv('MXNET_UPDATE_AGGREGATION_SIZE', default_batch)) while start < size: end = start + batch if start + batch < size else size @@ -378,7 +378,6 @@ def _train_multi_device(symbol, ctx, arg_names, param_names, aux_names, _multiple_callbacks(eval_end_callback, eval_end_params) eval_data.reset() # end of all epochs - return def save_checkpoint(prefix, epoch, symbol, arg_params, aux_params): diff --git a/python/mxnet/ndarray/register.py b/python/mxnet/ndarray/register.py index 48d5c01fb436..3b19a772411d 100644 --- a/python/mxnet/ndarray/register.py +++ b/python/mxnet/ndarray/register.py @@ -26,6 +26,7 @@ from ..base import mx_uint, check_call, _LIB, py_str, _init_op_module, _Null # pylint: disable=unused-import +# pylint: disable=too-many-locals def _generate_ndarray_function_code(handle, name, func_name, signature_only=False): """Generate function for ndarray op by handle and function name.""" real_name = ctypes.c_char_p() diff --git a/python/mxnet/symbol/symbol.py b/python/mxnet/symbol/symbol.py index da5533f36668..4864ce991632 100644 --- a/python/mxnet/symbol/symbol.py +++ b/python/mxnet/symbol/symbol.py @@ -1285,6 
+1285,7 @@ def _get_ndarray_inputs(arg_key, args, arg_names, allow_missing): raise TypeError('Only accept list of NDArrays or dict of str to NDArray') return c_array(NDArrayHandle, arg_handles), arg_arrays + # pylint: disable=too-many-locals def simple_bind(self, ctx, grad_req='write', type_dict=None, stype_dict=None, group2ctx=None, shared_arg_names=None, shared_exec=None, shared_buffer=None, **kwargs): diff --git a/python/mxnet/util.py b/python/mxnet/util.py index 57bc2bf76389..62c05d252828 100644 --- a/python/mxnet/util.py +++ b/python/mxnet/util.py @@ -27,4 +27,4 @@ def makedirs(d): from distutils.dir_util import mkpath mkpath(d) else: - os.makedirs(d, exist_ok=True) + os.makedirs(d, exist_ok=True) # pylint: disable=unexpected-keyword-arg diff --git a/python/setup.py b/python/setup.py index add5e6681fe6..915635398224 100644 --- a/python/setup.py +++ b/python/setup.py @@ -20,6 +20,8 @@ from __future__ import absolute_import import os import sys + +from setuptools import find_packages # need to use distutils.core for correct placement of cython dll kwargs = {} if "--inplace" in sys.argv: @@ -29,7 +31,6 @@ from setuptools import setup from setuptools.extension import Extension kwargs = {'install_requires': ['numpy<=1.15.0,>=1.8.2', 'requests<2.19.0,>=2.18.4', 'graphviz<0.9.0,>=0.8.1'], 'zip_safe': False} -from setuptools import find_packages with_cython = False if '--with-cython' in sys.argv: From 91730fdb6180ddcc3ab171df72c37f0ac15b59fd Mon Sep 17 00:00:00 2001 From: Lin Yuan Date: Fri, 24 Aug 2018 12:51:18 -0700 Subject: [PATCH 056/160] Fix a bug in where op with 1-D input (#12325) * Fix a bug in where op with 1-D input * Add unit test --- src/operator/tensor/control_flow_op.h | 1 + tests/python/unittest/test_operator.py | 9 +++++++++ 2 files changed, 10 insertions(+) diff --git a/src/operator/tensor/control_flow_op.h b/src/operator/tensor/control_flow_op.h index 94e65109c358..e9aa9f63faec 100644 --- a/src/operator/tensor/control_flow_op.h +++ b/src/operator/tensor/control_flow_op.h @@ -189,6 +189,7 @@ inline bool WhereOpShape(const nnvm::NodeAttrs& attrs, return true; } else if ((*in_attrs)[0].ndim() == 1) { CHECK_EQ((*in_attrs)[0].Size(), static_cast(tshape[0])); + return true; } return false; } diff --git a/tests/python/unittest/test_operator.py b/tests/python/unittest/test_operator.py index fd60611add8c..d0bc450415e9 100644 --- a/tests/python/unittest/test_operator.py +++ b/tests/python/unittest/test_operator.py @@ -4515,6 +4515,14 @@ def test_invalid_shape(): y=mx.nd.array([[8,9],[10,11],[12,13]]), condition=mx.nd.array([1,0])), MXNetError) + def test_1d_cond(): + cond = mx.nd.array([1, 0, 1]) + x = mx.nd.array([[2, 3], [4, 5], [6, 7]]) + y = mx.nd.array([[7, 8], [9, 10], [10, 11]]) + expect_out = np.array([[2, 3], [9, 10], [6, 7]]) + out = mx.nd.where(cond, x, y).asnumpy() + assert(expect_out.all() == out.all()) + test_where_helper((5, 9), True) test_where_helper((5, 9), False) test_where_helper((5, 7, 9), True) @@ -4526,6 +4534,7 @@ def test_invalid_shape(): test_where_numeric_gradient((5, 7, 9), True) test_where_numeric_gradient((5, 7, 9), False) test_invalid_shape() + test_1d_cond() @with_seed() def test_new_softmax(): From 4f8d39f5da7775d2ab1e36504b337cbee9575630 Mon Sep 17 00:00:00 2001 From: Anirudh Date: Fri, 24 Aug 2018 15:35:38 -0700 Subject: [PATCH 057/160] [MXNET-825] Fix CGAN R Example with MNIST dataset (#12283) * Fix CGAN R Tutorial with MNIST dataset * fix nit issues * format using formatR --- example/gan/CGAN_mnist_R/CGAN_mnist_setup.R | 121 -------- 
example/gan/CGAN_mnist_R/CGAN_train.R | 298 ++++++++++++++------ example/gan/CGAN_mnist_R/iterators.R | 64 +++-- 3 files changed, 240 insertions(+), 243 deletions(-) delete mode 100644 example/gan/CGAN_mnist_R/CGAN_mnist_setup.R diff --git a/example/gan/CGAN_mnist_R/CGAN_mnist_setup.R b/example/gan/CGAN_mnist_R/CGAN_mnist_setup.R deleted file mode 100644 index ad57bc541230..000000000000 --- a/example/gan/CGAN_mnist_R/CGAN_mnist_setup.R +++ /dev/null @@ -1,121 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -require("imager") -require("dplyr") -require("readr") -require("mxnet") - -source("iterators.R") - -###################################################### -### Data import and preperation -### First download MNIST train data at Kaggle: -### https://www.kaggle.com/c/digit-recognizer/data -###################################################### -train <- read_csv('data/train.csv') -train<- data.matrix(train) - -train_data <- train[,-1] -train_data <- t(train_data/255*2-1) -train_label <- as.integer(train[,1]) - -dim(train_data) <- c(28, 28, 1, ncol(train_data)) - -################################################## -#### Model parameters -################################################## -random_dim<- 96 -gen_features<- 96 -dis_features<- 32 -image_depth = 1 -fix_gamma<- T -no_bias<- T -eps<- 1e-5 + 1e-12 -batch_size<- 64 - - -################################################## -#### Generator Symbol -################################################## -data = mx.symbol.Variable('data') - -gen_rand<- mx.symbol.normal(loc=0, scale=1, shape=c(1, 1, random_dim, batch_size), name="gen_rand") -gen_concat<- mx.symbol.Concat(data = list(data, gen_rand), num.args = 2, name="gen_concat") - -g1 = mx.symbol.Deconvolution(gen_concat, name='g1', kernel=c(4,4), num_filter=gen_features*4, no_bias=T) -gbn1 = mx.symbol.BatchNorm(g1, name='gbn1', fix_gamma=fix_gamma, eps=eps) -gact1 = mx.symbol.Activation(gbn1, name='gact1', act_type='relu') - -g2 = mx.symbol.Deconvolution(gact1, name='g2', kernel=c(3,3), stride=c(2,2), pad=c(1,1), num_filter=gen_features*2, no_bias=no_bias) -gbn2 = mx.symbol.BatchNorm(g2, name='gbn2', fix_gamma=fix_gamma, eps=eps) -gact2 = mx.symbol.Activation(gbn2, name='gact2', act_type='relu') - -g3 = mx.symbol.Deconvolution(gact2, name='g3', kernel=c(4,4), stride=c(2,2), pad=c(1,1), num_filter=gen_features, no_bias=no_bias) -gbn3 = mx.symbol.BatchNorm(g3, name='gbn3', fix_gamma=fix_gamma, eps=eps) -gact3 = mx.symbol.Activation(gbn3, name='gact3', act_type='relu') - -g4 = mx.symbol.Deconvolution(gact3, name='g4', kernel=c(4,4), stride=c(2,2), pad=c(1,1), num_filter=image_depth, no_bias=no_bias) -G_sym = mx.symbol.Activation(g4, name='G_sym', act_type='tanh') - - -################################################## -#### Discriminator Symbol 
-################################################## -data = mx.symbol.Variable('data') -dis_digit = mx.symbol.Variable('digit') -label = mx.symbol.Variable('label') - -dis_digit<- mx.symbol.Reshape(data=dis_digit, shape=c(1,1,10,batch_size), name="digit_reshape") -dis_digit<- mx.symbol.broadcast_to(data=dis_digit, shape=c(28,28,10, batch_size), name="digit_broadcast") - -data_concat <- mx.symbol.Concat(list(data, dis_digit), num.args = 2, dim = 1, name='dflat_concat') - -d1 = mx.symbol.Convolution(data=data_concat, name='d1', kernel=c(3,3), stride=c(1,1), pad=c(0,0), num_filter=24, no_bias=no_bias) -dbn1 = mx.symbol.BatchNorm(d1, name='dbn1', fix_gamma=fix_gamma, eps=eps) -dact1 = mx.symbol.LeakyReLU(dbn1, name='dact1', act_type='elu', slope=0.25) -pool1 <- mx.symbol.Pooling(data=dact1, name="pool1", pool_type="max", kernel=c(2,2), stride=c(2,2), pad=c(0,0)) - -d2 = mx.symbol.Convolution(pool1, name='d2', kernel=c(3,3), stride=c(2,2), pad=c(0,0), num_filter=32, no_bias=no_bias) -dbn2 = mx.symbol.BatchNorm(d2, name='dbn2', fix_gamma=fix_gamma, eps=eps) -dact2 = mx.symbol.LeakyReLU(dbn2, name='dact2', act_type='elu', slope=0.25) - -d3 = mx.symbol.Convolution(dact2, name='d3', kernel=c(3,3), stride=c(1,1), pad=c(0,0), num_filter=64, no_bias=no_bias) -dbn3 = mx.symbol.BatchNorm(d3, name='dbn3', fix_gamma=fix_gamma, eps=eps) -dact3 = mx.symbol.LeakyReLU(dbn3, name='dact3', act_type='elu', slope=0.25) - -d4 = mx.symbol.Convolution(dact2, name='d3', kernel=c(4,4), stride=c(1,1), pad=c(0,0), num_filter=64, no_bias=no_bias) -dbn4 = mx.symbol.BatchNorm(d4, name='dbn4', fix_gamma=fix_gamma, eps=eps) -dact4 = mx.symbol.LeakyReLU(dbn4, name='dact4', act_type='elu', slope=0.25) - -# pool4 <- mx.symbol.Pooling(data=dact3, name="pool4", pool_type="avg", kernel=c(4,4), stride=c(1,1), pad=c(0,0)) - -dflat = mx.symbol.Flatten(dact4, name="dflat") - -dfc <- mx.symbol.FullyConnected(data=dflat, name="dfc", num_hidden=1, no_bias=F) -D_sym = mx.symbol.LogisticRegressionOutput(data=dfc, label=label, name='D_sym') - - -######################## -### Graph -######################## -input_shape_G<- c(1, 1, 10, batch_size) -input_shape_D<- c(28, 28, 1, batch_size) - -graph.viz(G_sym, type = "graph", direction = "LR") -graph.viz(D_sym, type = "graph", direction = "LR") - diff --git a/example/gan/CGAN_mnist_R/CGAN_train.R b/example/gan/CGAN_mnist_R/CGAN_train.R index 9c7649f3e269..7d3225483c02 100644 --- a/example/gan/CGAN_mnist_R/CGAN_train.R +++ b/example/gan/CGAN_mnist_R/CGAN_train.R @@ -15,81 +15,187 @@ # specific language governing permissions and limitations # under the License. 
-##################################################### +require("imager") +require("dplyr") +require("readr") +require("mxnet") + +source("iterators.R") + +### Data import and preperation +# First download MNIST train data at Kaggle: +# https://www.kaggle.com/c/digit-recognizer/data + +train <- read_csv("data/train.csv") +train <- data.matrix(train) + +train_data <- train[, -1] +train_data <- t(train_data/255 * 2 - 1) +train_label <- as.integer(train[, 1]) + +dim(train_data) <- c(28, 28, 1, ncol(train_data)) + +### Model parameters +random_dim <- 96 +gen_features <- 96 +dis_features <- 32 +image_depth <- 1 +fix_gamma <- T +no_bias <- T +eps <- 1e-05 + 1e-12 +batch_size <- 64 + + +### Generator Symbol +data <- mx.symbol.Variable("data") + +gen_rand <- mx.symbol.normal(loc = 0, scale = 1, shape = c(1, 1, random_dim, batch_size), + name = "gen_rand") +gen_concat <- mx.symbol.concat(data = list(data, gen_rand), num.args = 2, name = "gen_concat") + +g1 <- mx.symbol.Deconvolution(gen_concat, name = "g1", kernel = c(4, 4), num_filter = gen_features * + 4, no_bias = T) +gbn1 <- mx.symbol.BatchNorm(g1, name = "gbn1", fix_gamma = fix_gamma, eps = eps) +gact1 <- mx.symbol.Activation(gbn1, name = "gact1", act_type = "relu") + +g2 <- mx.symbol.Deconvolution(gact1, name = "g2", kernel = c(3, 3), stride = c(2, + 2), pad = c(1, 1), num_filter = gen_features * 2, no_bias = no_bias) +gbn2 <- mx.symbol.BatchNorm(g2, name = "gbn2", fix_gamma = fix_gamma, eps = eps) +gact2 <- mx.symbol.Activation(gbn2, name = "gact2", act_type = "relu") + +g3 <- mx.symbol.Deconvolution(gact2, name = "g3", kernel = c(4, 4), stride = c(2, + 2), pad = c(1, 1), num_filter = gen_features, no_bias = no_bias) +gbn3 <- mx.symbol.BatchNorm(g3, name = "gbn3", fix_gamma = fix_gamma, eps = eps) +gact3 <- mx.symbol.Activation(gbn3, name = "gact3", act_type = "relu") + +g4 <- mx.symbol.Deconvolution(gact3, name = "g4", kernel = c(4, 4), stride = c(2, + 2), pad = c(1, 1), num_filter = image_depth, no_bias = no_bias) +G_sym <- mx.symbol.Activation(g4, name = "G_sym", act_type = "tanh") + + +### Discriminator Symbol +data <- mx.symbol.Variable("data") +dis_digit <- mx.symbol.Variable("digit") +label <- mx.symbol.Variable("label") + +dis_digit <- mx.symbol.Reshape(data = dis_digit, shape = c(1, 1, 10, batch_size), + name = "digit_reshape") +dis_digit <- mx.symbol.broadcast_to(data = dis_digit, shape = c(28, 28, 10, batch_size), + name = "digit_broadcast") + +data_concat <- mx.symbol.concat(list(data, dis_digit), num.args = 2, dim = 1, name = "dflat_concat") + +d1 <- mx.symbol.Convolution(data = data_concat, name = "d1", kernel = c(3, 3), stride = c(1, + 1), pad = c(0, 0), num_filter = 24, no_bias = no_bias) +dbn1 <- mx.symbol.BatchNorm(d1, name = "dbn1", fix_gamma = fix_gamma, eps = eps) +dact1 <- mx.symbol.LeakyReLU(dbn1, name = "dact1", act_type = "elu", slope = 0.25) +pool1 <- mx.symbol.Pooling(data = dact1, name = "pool1", pool_type = "max", kernel = c(2, + 2), stride = c(2, 2), pad = c(0, 0)) + +d2 <- mx.symbol.Convolution(pool1, name = "d2", kernel = c(3, 3), stride = c(2, 2), + pad = c(0, 0), num_filter = 32, no_bias = no_bias) +dbn2 <- mx.symbol.BatchNorm(d2, name = "dbn2", fix_gamma = fix_gamma, eps = eps) +dact2 <- mx.symbol.LeakyReLU(dbn2, name = "dact2", act_type = "elu", slope = 0.25) + +d3 <- mx.symbol.Convolution(dact2, name = "d3", kernel = c(3, 3), stride = c(1, 1), + pad = c(0, 0), num_filter = 64, no_bias = no_bias) +dbn3 <- mx.symbol.BatchNorm(d3, name = "dbn3", fix_gamma = fix_gamma, eps = eps) +dact3 <- 
mx.symbol.LeakyReLU(dbn3, name = "dact3", act_type = "elu", slope = 0.25) + +d4 <- mx.symbol.Convolution(dact2, name = "d3", kernel = c(4, 4), stride = c(1, 1), + pad = c(0, 0), num_filter = 64, no_bias = no_bias) +dbn4 <- mx.symbol.BatchNorm(d4, name = "dbn4", fix_gamma = fix_gamma, eps = eps) +dact4 <- mx.symbol.LeakyReLU(dbn4, name = "dact4", act_type = "elu", slope = 0.25) + +# pool4 <- mx.symbol.Pooling(data=dact3, name='pool4', pool_type='avg', +# kernel=c(4,4), stride=c(1,1), pad=c(0,0)) + +dflat <- mx.symbol.Flatten(dact4, name = "dflat") + +dfc <- mx.symbol.FullyConnected(data = dflat, name = "dfc", num_hidden = 1, no_bias = F) +D_sym <- mx.symbol.LogisticRegressionOutput(data = dfc, label = label, name = "D_sym") + + +### Graph +input_shape_G <- c(1, 1, 10, batch_size) +input_shape_D <- c(28, 28, 1, batch_size) + +graph.viz(G_sym, type = "graph", direction = "LR") +graph.viz(D_sym, type = "graph", direction = "LR") + + ### Training module for GAN -##################################################### -devices<- mx.cpu() +# Change this to mx.gpu() when running on gpu machine. +devices <- mx.cpu() -data_shape_G<- c(1, 1, 10, batch_size) -data_shape_D<- c(28, 28, 1, batch_size) -digit_shape_D<- c(10, batch_size) +data_shape_G <- c(1, 1, 10, batch_size) +data_shape_D <- c(28, 28, 1, batch_size) +digit_shape_D <- c(10, batch_size) mx.metric.binacc <- mx.metric.custom("binacc", function(label, pred) { - res <- mean(label==round(pred)) + res <- mean(label == round(pred)) return(res) }) mx.metric.logloss <- mx.metric.custom("logloss", function(label, pred) { - res <- mean(label*log(pred)+(1-label)*log(1-pred)) + res <- mean(label * log(pred) + (1 - label) * log(1 - pred)) return(res) }) -############################################## ### Define iterators -iter_G<- G_iterator(batch_size = batch_size) -iter_D<- D_iterator(batch_size = batch_size) +iter_G <- G_iterator(batch_size = batch_size) +iter_D <- D_iterator(batch_size = batch_size) -exec_G<- mx.simple.bind(symbol = G_sym, data=data_shape_G, ctx = devices, grad.req = "write") -exec_D<- mx.simple.bind(symbol = D_sym, data=data_shape_D, digit=digit_shape_D, ctx = devices, grad.req = "write") +exec_G <- mx.simple.bind(symbol = G_sym, data = data_shape_G, ctx = devices, grad.req = "write") +exec_D <- mx.simple.bind(symbol = D_sym, data = data_shape_D, digit = digit_shape_D, + ctx = devices, grad.req = "write") ### initialize parameters - To Do - personalise each layer -initializer<- mx.init.Xavier(rnd_type = "gaussian", factor_type = "avg", magnitude = 3) +initializer <- mx.init.Xavier(rnd_type = "gaussian", factor_type = "avg", magnitude = 3) -arg_param_ini_G<- mx.init.create(initializer = initializer, shape.array = mx.symbol.infer.shape(G_sym, data=data_shape_G)$arg.shapes, ctx = mx.cpu()) -aux_param_ini_G<- mx.init.create(initializer = initializer, shape.array = mx.symbol.infer.shape(G_sym, data=data_shape_G)$aux.shapes, ctx = mx.cpu()) +arg_param_ini_G <- mx.init.create(initializer = initializer, shape.array = mx.symbol.infer.shape(G_sym, + data = data_shape_G)$arg.shapes, ctx = devices) +aux_param_ini_G <- mx.init.create(initializer = initializer, shape.array = mx.symbol.infer.shape(G_sym, + data = data_shape_G)$aux.shapes, ctx = devices) -arg_param_ini_D<- mx.init.create(initializer = initializer, shape.array = mx.symbol.infer.shape(D_sym, data=data_shape_D, digit=digit_shape_D)$arg.shapes, ctx = mx.cpu()) -aux_param_ini_D<- mx.init.create(initializer = initializer, shape.array = mx.symbol.infer.shape(D_sym, data=data_shape_D, 
digit=digit_shape_D)$aux.shapes, ctx = mx.cpu()) +arg_param_ini_D <- mx.init.create(initializer = initializer, shape.array = mx.symbol.infer.shape(D_sym, + data = data_shape_D, digit = digit_shape_D)$arg.shapes, ctx = devices) -mx.exec.update.arg.arrays(exec_G, arg_param_ini_G, match.name=TRUE) -mx.exec.update.aux.arrays(exec_G, aux_param_ini_G, match.name=TRUE) +aux_param_ini_D <- mx.init.create(initializer = initializer, shape.array = mx.symbol.infer.shape(D_sym, + data = data_shape_D, digit = digit_shape_D)$aux.shapes, ctx = devices) -mx.exec.update.arg.arrays(exec_D, arg_param_ini_D, match.name=TRUE) -mx.exec.update.aux.arrays(exec_D, aux_param_ini_D, match.name=TRUE) +mx.exec.update.arg.arrays(exec_G, arg_param_ini_G, match.name = TRUE) +mx.exec.update.aux.arrays(exec_G, aux_param_ini_G, match.name = TRUE) + +mx.exec.update.arg.arrays(exec_D, arg_param_ini_D, match.name = TRUE) +mx.exec.update.aux.arrays(exec_D, aux_param_ini_D, match.name = TRUE) input_names_G <- mxnet:::mx.model.check.arguments(G_sym) input_names_D <- mxnet:::mx.model.check.arguments(D_sym) -################################################### -#initialize optimizers -optimizer_G<-mx.opt.create(name = "adadelta", - rho=0.92, - epsilon = 1e-6, - wd=0, - rescale.grad=1/batch_size, - clip_gradient=1) +### initialize optimizers +optimizer_G <- mx.opt.create(name = "adadelta", rho = 0.92, epsilon = 1e-06, wd = 0, + rescale.grad = 1/batch_size, clip_gradient = 1) + +updater_G <- mx.opt.get.updater(optimizer = optimizer_G, weights = exec_G$ref.arg.arrays, + ctx = devices) -updater_G<- mx.opt.get.updater(optimizer = optimizer_G, weights = exec_G$ref.arg.arrays) +optimizer_D <- mx.opt.create(name = "adadelta", rho = 0.92, epsilon = 1e-06, wd = 0, + rescale.grad = 1/batch_size, clip_gradient = 1) -optimizer_D<-mx.opt.create(name = "adadelta", - rho=0.92, - epsilon = 1e-6, - wd=0, - rescale.grad=1/batch_size, - clip_gradient=1) -updater_D<- mx.opt.get.updater(optimizer = optimizer_D, weights = exec_D$ref.arg.arrays) +updater_D <- mx.opt.get.updater(optimizer = optimizer_D, weights = exec_D$ref.arg.arrays, + ctx = devices) -#################################### -#initialize metric -metric_G<- mx.metric.binacc -metric_G_value<- metric_G$init() +### initialize metric +metric_G <- mx.metric.binacc +metric_G_value <- metric_G$init() -metric_D<- mx.metric.binacc -metric_D_value<- metric_D$init() +metric_D <- mx.metric.binacc +metric_D_value <- metric_D$init() -iteration<- 1 +iteration <- 1 iter_G$reset() iter_D$reset() @@ -102,71 +208,81 @@ for (iteration in 1:2400) { ### Random input to Generator to produce fake sample G_values <- iter_G$value() G_data <- G_values[input_names_G] - mx.exec.update.arg.arrays(exec_G, arg.arrays = G_data, match.name=TRUE) - mx.exec.forward(exec_G, is.train=T) + mx.exec.update.arg.arrays(exec_G, arg.arrays = G_data, match.name = TRUE) + mx.exec.forward(exec_G, is.train = T) - ### Feed Discriminator with Concatenated Generator images and real images - ### Random input to Generator + ### Feed Discriminator with Concatenated Generator images and real images Random + ### input to Generator D_data_fake <- exec_G$ref.outputs$G_sym_output - D_digit_fake <- G_values$data %>% mx.nd.Reshape(shape=c(-1, batch_size)) + D_digit_fake <- G_values$data %>% mx.nd.Reshape(shape = c(-1, batch_size)) D_values <- iter_D$value() D_data_real <- D_values$data D_digit_real <- D_values$digit ### Train loop on fake - mx.exec.update.arg.arrays(exec_D, arg.arrays = list(data=D_data_fake, digit=D_digit_fake, label=mx.nd.array(rep(0, 
batch_size))), match.name=TRUE) - mx.exec.forward(exec_D, is.train=T) + mx.exec.update.arg.arrays(exec_D, arg.arrays = list(data = D_data_fake, digit = D_digit_fake, + label = mx.nd.array(rep(0, batch_size))), match.name = TRUE) + mx.exec.forward(exec_D, is.train = T) mx.exec.backward(exec_D) - update_args_D<- updater_D(weight = exec_D$ref.arg.arrays, grad = exec_D$ref.grad.arrays) - mx.exec.update.arg.arrays(exec_D, update_args_D, skip.null=TRUE) + update_args_D <- updater_D(weight = exec_D$ref.arg.arrays, grad = exec_D$ref.grad.arrays) + mx.exec.update.arg.arrays(exec_D, update_args_D, skip.null = TRUE) - metric_D_value <- metric_D$update(label = mx.nd.array(rep(0, batch_size)), exec_D$ref.outputs[["D_sym_output"]], metric_D_value) + metric_D_value <- metric_D$update(label = as.array(mx.nd.array(rep(0, batch_size))), + pred = as.array(exec_D$ref.outputs[["D_sym_output"]]), metric_D_value) ### Train loop on real - mx.exec.update.arg.arrays(exec_D, arg.arrays = list(data=D_data_real, digit=D_digit_real, label=mx.nd.array(rep(1, batch_size))), match.name=TRUE) - mx.exec.forward(exec_D, is.train=T) + mx.exec.update.arg.arrays(exec_D, arg.arrays = list(data = D_data_real, digit = D_digit_real, + label = mx.nd.array(rep(1, batch_size))), match.name = TRUE) + mx.exec.forward(exec_D, is.train = T) mx.exec.backward(exec_D) - update_args_D<- updater_D(weight = exec_D$ref.arg.arrays, grad = exec_D$ref.grad.arrays) - mx.exec.update.arg.arrays(exec_D, update_args_D, skip.null=TRUE) + update_args_D <- updater_D(weight = exec_D$ref.arg.arrays, grad = exec_D$ref.grad.arrays) + mx.exec.update.arg.arrays(exec_D, update_args_D, skip.null = TRUE) - metric_D_value <- metric_D$update(mx.nd.array(rep(1, batch_size)), exec_D$ref.outputs[["D_sym_output"]], metric_D_value) + metric_D_value <- metric_D$update(label = as.array(mx.nd.array(rep(1, batch_size))), + pred = as.array(exec_D$ref.outputs[["D_sym_output"]]), metric_D_value) ### Update Generator weights - use a seperate executor for writing data gradients - exec_D_back<- mxnet:::mx.symbol.bind(symbol = D_sym, arg.arrays = exec_D$arg.arrays, aux.arrays = exec_D$aux.arrays, grad.reqs = rep("write", length(exec_D$arg.arrays)), ctx = devices) - mx.exec.update.arg.arrays(exec_D_back, arg.arrays = list(data=D_data_fake, digit=D_digit_fake, label=mx.nd.array(rep(1, batch_size))), match.name=TRUE) - mx.exec.forward(exec_D_back, is.train=T) + exec_D_back <- mxnet:::mx.symbol.bind(symbol = D_sym, arg.arrays = exec_D$arg.arrays, + aux.arrays = exec_D$aux.arrays, grad.reqs = rep("write", length(exec_D$arg.arrays)), + ctx = devices) + mx.exec.update.arg.arrays(exec_D_back, arg.arrays = list(data = D_data_fake, + digit = D_digit_fake, label = mx.nd.array(rep(1, batch_size))), match.name = TRUE) + mx.exec.forward(exec_D_back, is.train = T) mx.exec.backward(exec_D_back) - D_grads<- exec_D_back$ref.grad.arrays$data - mx.exec.backward(exec_G, out_grads=D_grads) + D_grads <- exec_D_back$ref.grad.arrays$data + mx.exec.backward(exec_G, out_grads = D_grads) - update_args_G<- updater_G(weight = exec_G$ref.arg.arrays, grad = exec_G$ref.grad.arrays) - mx.exec.update.arg.arrays(exec_G, update_args_G, skip.null=TRUE) + update_args_G <- updater_G(weight = exec_G$ref.arg.arrays, grad = exec_G$ref.grad.arrays) + mx.exec.update.arg.arrays(exec_G, update_args_G, skip.null = TRUE) - ### Update metrics - #metric_G_value <- metric_G$update(values[[label_name]], exec_G$ref.outputs[[output_name]], metric_G_value) + ### Update metrics metric_G_value <- metric_G$update(values[[label_name]], + ### 
exec_G$ref.outputs[[output_name]], metric_G_value) - if (iteration %% 25==0){ + if (iteration%%25 == 0) { D_metric_result <- metric_D$get(metric_D_value) - cat(paste0("[", iteration, "] ", D_metric_result$name, ": ", D_metric_result$value, "\n")) + cat(paste0("[", iteration, "] ", D_metric_result$name, ": ", D_metric_result$value, + "\n")) } - if (iteration==1 | iteration %% 100==0){ + if (iteration == 1 | iteration%%100 == 0) { - metric_D_value<- metric_D$init() + metric_D_value <- metric_D$init() - par(mfrow=c(3,3), mar=c(0.1,0.1,0.1,0.1)) + par(mfrow = c(3, 3), mar = c(0.1, 0.1, 0.1, 0.1)) for (i in 1:9) { - img <- as.array(exec_G$ref.outputs$G_sym_output)[,,,i] - plot(as.cimg(img), axes=F) + img <- as.array(exec_G$ref.outputs$G_sym_output)[, , , i] + plot(as.cimg(img), axes = F) } - + print(as.numeric(as.array(G_values$digit))) print(as.numeric(as.array(D_values$label))) } } +ifelse(!dir.exists(file.path(".", "models")), dir.create(file.path(".", "models")), + "Folder already exists") mx.symbol.save(D_sym, filename = "models/D_sym_model_v1.json") mx.nd.save(exec_D$arg.arrays, filename = "models/D_aux_params_v1.params") mx.nd.save(exec_D$aux.arrays, filename = "models/D_aux_params_v1.params") @@ -177,23 +293,23 @@ mx.nd.save(exec_G$aux.arrays, filename = "models/G_aux_params_v1.params") ### Inference -G_sym<- mx.symbol.load("models/G_sym_model_v1.json") -G_arg_params<- mx.nd.load("models/G_arg_params_v1.params") -G_aux_params<- mx.nd.load("models/G_aux_params_v1.params") +G_sym <- mx.symbol.load("models/G_sym_model_v1.json") +G_arg_params <- mx.nd.load("models/G_arg_params_v1.params") +G_aux_params <- mx.nd.load("models/G_aux_params_v1.params") -digit<- mx.nd.array(rep(9, times=batch_size)) -data<- mx.nd.one.hot(indices = digit, depth = 10) -data<- mx.nd.reshape(data = data, shape = c(1,1,-1, batch_size)) +digit <- mx.nd.array(rep(9, times = batch_size)) +data <- mx.nd.one.hot(indices = digit, depth = 10) +data <- mx.nd.reshape(data = data, shape = c(1, 1, -1, batch_size)) -exec_G<- mx.simple.bind(symbol = G_sym, data=data_shape_G, ctx = devices, grad.req = "null") -mx.exec.update.arg.arrays(exec_G, G_arg_params, match.name=TRUE) -mx.exec.update.arg.arrays(exec_G, list(data=data), match.name=TRUE) -mx.exec.update.aux.arrays(exec_G, G_aux_params, match.name=TRUE) +exec_G <- mx.simple.bind(symbol = G_sym, data = data_shape_G, ctx = devices, grad.req = "null") +mx.exec.update.arg.arrays(exec_G, G_arg_params, match.name = TRUE) +mx.exec.update.arg.arrays(exec_G, list(data = data), match.name = TRUE) +mx.exec.update.aux.arrays(exec_G, G_aux_params, match.name = TRUE) -mx.exec.forward(exec_G, is.train=F) +mx.exec.forward(exec_G, is.train = F) -par(mfrow=c(3,3), mar=c(0.1,0.1,0.1,0.1)) +par(mfrow = c(3, 3), mar = c(0.1, 0.1, 0.1, 0.1)) for (i in 1:9) { - img <- as.array(exec_G$ref.outputs$G_sym_output)[,,,i] - plot(as.cimg(img), axes=F) + img <- as.array(exec_G$ref.outputs$G_sym_output)[, , , i] + plot(as.cimg(img), axes = F) } diff --git a/example/gan/CGAN_mnist_R/iterators.R b/example/gan/CGAN_mnist_R/iterators.R index 6069296c24f2..dffe468ad2c7 100644 --- a/example/gan/CGAN_mnist_R/iterators.R +++ b/example/gan/CGAN_mnist_R/iterators.R @@ -16,64 +16,66 @@ # under the License. 
-G_iterator<- function(batch_size){ +G_iterator <- function(batch_size) { - batch<- 0 - batch_per_epoch<-5 + batch <- 0 + batch_per_epoch <- 5 - reset<- function(){ - batch<<- 0 + reset <- function() { + batch <<- 0 } - iter.next<- function(){ - batch<<- batch+1 - if (batch>batch_per_epoch) { + iter.next <- function() { + batch <<- batch + 1 + if (batch > batch_per_epoch) { return(FALSE) } else { return(TRUE) } } - value<- function(){ - set.seed(123+batch) - digit<- mx.nd.array(sample(0:9, size = batch_size, replace = T)) - data<- mx.nd.one.hot(indices = digit, depth = 10) - data<- mx.nd.reshape(data = data, shape = c(1,1,-1, batch_size)) - return(list(data=data, digit=digit)) + value <- function() { + set.seed(123 + batch) + digit <- mx.nd.array(sample(0:9, size = batch_size, replace = T)) + data <- mx.nd.one.hot(indices = digit, depth = 10) + data <- mx.nd.reshape(data = data, shape = c(1, 1, -1, batch_size)) + return(list(data = data, digit = digit)) } - return(list(reset=reset, iter.next=iter.next, value=value, batch_size=batch_size, batch=batch)) + return(list(reset = reset, iter.next = iter.next, value = value, batch_size = batch_size, + batch = batch)) } -D_iterator<- function(batch_size){ +D_iterator <- function(batch_size) { - batch<- 0 - batch_per_epoch<-5 + batch <- 0 + batch_per_epoch <- 5 - reset<- function(){ - batch<<- 0 + reset <- function() { + batch <<- 0 } - iter.next<- function(){ - batch<<- batch+1 - if (batch>batch_per_epoch) { + iter.next <- function() { + batch <<- batch + 1 + if (batch > batch_per_epoch) { return(FALSE) } else { return(TRUE) } } - value<- function(){ - set.seed(123+batch) - idx<- sample(length(train_label), size = batch_size, replace = T) - data<- train_data[,,,idx, drop=F] - label<- mx.nd.array(train_label[idx]) - digit<- mx.nd.one.hot(indices = label, depth = 10) + value <- function() { + set.seed(123 + batch) + idx <- sample(length(train_label), size = batch_size, replace = T) + data <- train_data[, , , idx, drop = F] + label <- mx.nd.array(train_label[idx]) + digit <- mx.nd.one.hot(indices = label, depth = 10) - return(list(data=mx.nd.array(data), digit=digit, label=label)) + return(list(data = mx.nd.array(data), digit = digit, label = label)) } - return(list(reset=reset, iter.next=iter.next, value=value, batch_size=batch_size, batch=batch)) + return(list(reset = reset, iter.next = iter.next, value = value, batch_size = batch_size, + batch = batch)) } From 4e6366cd896d8fe93472701ef773de67961d6df7 Mon Sep 17 00:00:00 2001 From: Hao Jin Date: Fri, 24 Aug 2018 15:36:42 -0700 Subject: [PATCH 058/160] set proper atol for check_with_uniform (#12313) --- tests/python/unittest/test_ndarray.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/python/unittest/test_ndarray.py b/tests/python/unittest/test_ndarray.py index 071c770f55ef..9d07deacb39b 100644 --- a/tests/python/unittest/test_ndarray.py +++ b/tests/python/unittest/test_ndarray.py @@ -56,9 +56,9 @@ def check_with_uniform(uf, arg_shapes, dim=None, npuf=None, rmin=-10, type_list= if isinstance(out1, mx.nd.NDArray): out1 = out1.asnumpy() if dtype == np.float16: - assert_almost_equal(out1, out2, rtol=2e-3) + assert_almost_equal(out1, out2, rtol=2e-3, atol=1e-5) else: - assert_almost_equal(out1, out2) + assert_almost_equal(out1, out2, atol=1e-5) def random_ndarray(dim): shape = tuple(np.random.randint(1, int(1000**(1.0/dim)), size=dim)) From d2495d11bcb8fed53d5ef7e3b03067eb4248f7ff Mon Sep 17 00:00:00 2001 From: Roshani Nagmote Date: Fri, 24 Aug 2018 16:27:01 -0700 Subject: 
[PATCH 059/160] [MXAPPS-581] Disable a long test in the SD nightly. (#12326) (#12339) * Disable a test that's taking longer than 10 minutes with the Python 2 interpreter in the Straight Dope Nightly. From 15e43c096a54329006e5e44c3723b60694ff1239 Mon Sep 17 00:00:00 2001 From: Luobao Date: Sat, 25 Aug 2018 07:50:35 +0800 Subject: [PATCH 060/160] Fall back when sparse arrays are passed to MKLDNN-enabled operators (#11664) * softmax_fallbach * Fallback Amend This is the final rectify for fallback problem(functions call) * Lint amend * test_try * Patch for test fail * Pooling amend * Delete non_rectified_operation_test * fallback_normal * Fixed_dispatch * activation-amend * activation second * activation backward * activate_try * activation_debug * Act change. * test_random * mkldnn choice * format_modify * rebase --- src/operator/nn/activation.cc | 57 +++--------- src/operator/nn/batch_norm.cc | 3 +- src/operator/nn/convolution.cc | 43 ++++----- src/operator/nn/deconvolution.cc | 36 +++----- src/operator/nn/lrn.cc | 36 +++----- src/operator/nn/mkldnn/mkldnn_base.cc | 4 +- src/operator/nn/pooling.cc | 42 +++------ src/operator/nn/softmax.cc | 21 ++--- tests/python/mkl/test_mkldnn.py | 121 +++++++++++++++++++++++++- 9 files changed, 198 insertions(+), 165 deletions(-) diff --git a/src/operator/nn/activation.cc b/src/operator/nn/activation.cc index 277ca8e3013f..b8c2045fba12 100644 --- a/src/operator/nn/activation.cc +++ b/src/operator/nn/activation.cc @@ -31,6 +31,8 @@ #include "./mkldnn/mkldnn_base-inl.h" #include "./mkldnn/mkldnn_ops-inl.h" #endif // MXNET_USE_MKLDNN +#include "../operator_common.h" +#include "../../common/utils.h" namespace mxnet { namespace op { @@ -101,6 +103,7 @@ void ActivationGradComputeExCPU(const nnvm::NodeAttrs& attrs, } #endif +#if MXNET_USE_MKLDNN == 1 inline static bool ActivationStorageType(const nnvm::NodeAttrs& attrs, const int dev_mask, DispatchMode* dispatch_mode, @@ -108,20 +111,9 @@ inline static bool ActivationStorageType(const nnvm::NodeAttrs& attrs, std::vector *out_attrs) { CHECK_EQ(in_attrs->size(), 1); CHECK_EQ(out_attrs->size(), 1); - bool ret = ElemwiseStorageType<1, 1, false, false, false>(attrs, dev_mask, - dispatch_mode, - in_attrs, out_attrs); -#if MXNET_USE_MKLDNN == 1 const ActivationParam& param = nnvm::get(attrs.parsed); - if (dev_mask == mshadow::cpu::kDevMask && SupportMKLDNNAct(param)) { - *dispatch_mode = DispatchMode::kFComputeEx; - } - if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet()) { - *dispatch_mode = DispatchMode::kFComputeFallback; - return ret; - } -#endif - return ret; + return MKLDNNStorageType(attrs, dev_mask, SupportMKLDNNAct(param), + dispatch_mode, in_attrs, out_attrs); } inline static bool BackwardActStorageType(const nnvm::NodeAttrs& attrs, @@ -129,46 +121,17 @@ inline static bool BackwardActStorageType(const nnvm::NodeAttrs& attrs, DispatchMode* dispatch_mode, std::vector *in_attrs, std::vector *out_attrs) { - bool ret = false; const ActivationParam& param = nnvm::get(attrs.parsed); -#if (MXNET_USE_CUDNN == 1 || MXNET_USE_MKLDNN == 1) if (param.act_type != activation::kReLU) { CHECK_EQ(in_attrs->size(), 3U); - ret = ElemwiseStorageType<3, 1, false, false, false>(attrs, dev_mask, - dispatch_mode, - in_attrs, out_attrs); } else { // for ReLU activation, the backward pass only needs ograd and output CHECK_EQ(in_attrs->size(), 2U); - ret = ElemwiseStorageType<2, 1, false, false, false>(attrs, dev_mask, - dispatch_mode, - in_attrs, out_attrs); - } -#else - if (param.act_type == activation::kSoftSign) { - 
CHECK_EQ(in_attrs->size(), 3U); - ret = ElemwiseStorageType<3, 1, false, false, false>(attrs, dev_mask, - dispatch_mode, - in_attrs, out_attrs); - } else { - CHECK_EQ(in_attrs->size(), 2U); - ret = ElemwiseStorageType<2, 1, false, false, false>(attrs, dev_mask, - dispatch_mode, - in_attrs, out_attrs); } -#endif - CHECK_EQ(out_attrs->size(), 1U); -#if MXNET_USE_MKLDNN == 1 - if (dev_mask == mshadow::cpu::kDevMask && SupportMKLDNNAct(param)) { - *dispatch_mode = DispatchMode::kFComputeEx; - } - if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet()) { - *dispatch_mode = DispatchMode::kFComputeFallback; - return ret; - } -#endif - return ret; + return MKLDNNStorageType(attrs, dev_mask, SupportMKLDNNAct(param), + dispatch_mode, in_attrs, out_attrs); } +#endif MXNET_OPERATOR_REGISTER_UNARY(Activation) .describe(R"code(Applies an activation function element-wise to the input. @@ -183,7 +146,9 @@ The following activation functions are supported: )code" ADD_FILELINE) .set_attr_parser(ParamParser) +#if MXNET_USE_MKLDNN == 1 .set_attr("FInferStorageType", ActivationStorageType) +#endif .set_attr("FListOutputNames", [](const NodeAttrs& attrs) { return std::vector{"output"}; @@ -204,7 +169,9 @@ NNVM_REGISTER_OP(_backward_Activation) }) .set_num_outputs(1) .set_attr("TIsBackward", true) +#if MXNET_USE_MKLDNN == 1 .set_attr("FInferStorageType", BackwardActStorageType) +#endif .set_attr("FInferShape", ElemwiseShape<3, 1>) .set_attr("FInferType", ElemwiseType<3, 1>) .set_attr("FInplaceOption", [](const NodeAttrs& attrs){ diff --git a/src/operator/nn/batch_norm.cc b/src/operator/nn/batch_norm.cc index c7b1b609990e..b15f84e107e0 100644 --- a/src/operator/nn/batch_norm.cc +++ b/src/operator/nn/batch_norm.cc @@ -27,6 +27,7 @@ #include "batch_norm-inl.h" #include #include "../elemwise_op_common.h" +#include "../operator_common.h" #if MXNET_USE_MKLDNN == 1 #include "./mkldnn/mkldnn_batch_norm-inl.h" #endif @@ -544,7 +545,7 @@ Both *mean* and *var* returns a scalar by treating the input as a vector. Assume the input has size *k* on axis 1, then both ``gamma`` and ``beta`` have shape *(k,)*. If ``output_mean_var`` is set to be true, then outputs both ``data_mean`` and -the inverse of ``data_var``, which are needed for the backward pass. Note that gradient of these +the inverse of ``data_var``, which are needed for the backward pass. Note that gradient of these two outputs are blocked. 
Besides the inputs and the outputs, this operator accepts two auxiliary diff --git a/src/operator/nn/convolution.cc b/src/operator/nn/convolution.cc index 18c0132023d3..8f25cf0dcbb1 100644 --- a/src/operator/nn/convolution.cc +++ b/src/operator/nn/convolution.cc @@ -26,11 +26,14 @@ #include "./convolution-inl.h" #include "../elemwise_op_common.h" -#include "./mkldnn/mkldnn_ops-inl.h" -#include "./mkldnn/mkldnn_base-inl.h" +#include "../operator_common.h" #if MXNET_USE_NNPACK == 1 #include "../nnpack/nnpack_pooling-inl.h" #endif // MXNET_USE_NNPACK +#if MXNET_USE_MKLDNN == 1 +#include "./mkldnn/mkldnn_base-inl.h" +#include "./mkldnn/mkldnn_ops-inl.h" +#endif // MXNET_USE_MKLDNN namespace mxnet { namespace op { @@ -288,27 +291,19 @@ static bool ConvolutionType(const nnvm::NodeAttrs& attrs, return true; } +#if MXNET_USE_MKLDNN == 1 inline static bool ConvStorageType(const nnvm::NodeAttrs& attrs, const int dev_mask, DispatchMode* dispatch_mode, - std::vector *in_attrs, - std::vector *out_attrs) { + std::vector* in_attrs, + std::vector* out_attrs) { const ConvolutionParam& param = nnvm::get(attrs.parsed); uint32_t in_expected = param.no_bias ? 2 : 3; CHECK_EQ(in_attrs->size(), in_expected); CHECK_EQ(out_attrs->size(), 1); - DispatchMode wanted_mode; -#if MXNET_USE_MKLDNN == 1 - if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet()) - wanted_mode = DispatchMode::kFComputeFallback; - else if (dev_mask == mshadow::cpu::kDevMask) - wanted_mode = DispatchMode::kFComputeEx; - else -#endif - wanted_mode = DispatchMode::kFCompute; - return storage_type_assign(out_attrs, mxnet::kDefaultStorage, - dispatch_mode, wanted_mode); + return MKLDNNStorageType(attrs, dev_mask, true, dispatch_mode, in_attrs, + out_attrs); } inline static bool BackwardConvStorageType(const nnvm::NodeAttrs& attrs, @@ -322,18 +317,10 @@ inline static bool BackwardConvStorageType(const nnvm::NodeAttrs& attrs, CHECK_EQ(in_attrs->size(), in_expected); CHECK_EQ(out_attrs->size(), out_expected); - DispatchMode wanted_mode; -#if MXNET_USE_MKLDNN == 1 - if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet()) - wanted_mode = DispatchMode::kFComputeFallback; - else if (dev_mask == mshadow::cpu::kDevMask) - wanted_mode = DispatchMode::kFComputeEx; - else -#endif - wanted_mode = DispatchMode::kFCompute; - return storage_type_assign(out_attrs, mxnet::kDefaultStorage, - dispatch_mode, wanted_mode); + return MKLDNNStorageType(attrs, dev_mask, true, dispatch_mode, in_attrs, + out_attrs); } +#endif void ConvolutionParamParser(nnvm::NodeAttrs* attrs) { using namespace mshadow; @@ -492,7 +479,9 @@ There are other options to tune the performance. }) .set_attr("FInferShape", ConvolutionShape) .set_attr("FInferType", ConvolutionType) +#if MXNET_USE_MKLDNN == 1 .set_attr("FInferStorageType", ConvStorageType) +#endif .set_attr("FCompute", ConvolutionCompute) #if MXNET_USE_MKLDNN == 1 .set_attr("FComputeEx", ConvolutionComputeExCPU) @@ -512,7 +501,9 @@ NNVM_REGISTER_OP(_backward_Convolution) return params.no_bias ? 
2 : 3; }) .set_attr("TIsBackward", true) +#if MXNET_USE_MKLDNN == 1 .set_attr("FInferStorageType", BackwardConvStorageType) +#endif .set_attr("FResourceRequest", [](const NodeAttrs& n) { return std::vector{ResourceRequest::kTempSpace}; }) diff --git a/src/operator/nn/deconvolution.cc b/src/operator/nn/deconvolution.cc index 54b77aafda0f..a4be1a0c56a0 100644 --- a/src/operator/nn/deconvolution.cc +++ b/src/operator/nn/deconvolution.cc @@ -25,8 +25,12 @@ */ #include "./deconvolution-inl.h" +#include "../operator_common.h" +#include "../../common/utils.h" +#if MXNET_USE_MKLDNN == 1 #include "./mkldnn/mkldnn_ops-inl.h" #include "./mkldnn/mkldnn_base-inl.h" +#endif namespace mxnet { namespace op { @@ -256,6 +260,7 @@ static bool DeconvolutionType(const nnvm::NodeAttrs& attrs, return true; } +#if MXNET_USE_MKLDNN == 1 inline static bool DeconvStorageType(const nnvm::NodeAttrs& attrs, const int dev_mask, DispatchMode* dispatch_mode, @@ -266,17 +271,8 @@ inline static bool DeconvStorageType(const nnvm::NodeAttrs& attrs, CHECK_EQ(in_attrs->size(), in_expected); CHECK_EQ(out_attrs->size(), 1); - DispatchMode wanted_mode; -#if MXNET_USE_MKLDNN == 1 - if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet()) - wanted_mode = DispatchMode::kFComputeFallback; - else if (dev_mask == mshadow::cpu::kDevMask) - wanted_mode = DispatchMode::kFComputeEx; - else -#endif - wanted_mode = DispatchMode::kFCompute; - return storage_type_assign(out_attrs, mxnet::kDefaultStorage, - dispatch_mode, wanted_mode); + return MKLDNNStorageType(attrs, dev_mask, true, dispatch_mode, in_attrs, + out_attrs); } inline static bool BackwardDeconvStorageType(const nnvm::NodeAttrs& attrs, @@ -289,20 +285,10 @@ inline static bool BackwardDeconvStorageType(const nnvm::NodeAttrs& attrs, CHECK_EQ(in_attrs->size(), param.no_bias ? 3U : 4U); CHECK_EQ(out_attrs->size(), out_expected); - DispatchMode wanted_mode; -#if MXNET_USE_MKLDNN == 1 - if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet()) - wanted_mode = DispatchMode::kFComputeFallback; - else if (dev_mask == mshadow::cpu::kDevMask) - wanted_mode = DispatchMode::kFComputeEx; - else -#endif - wanted_mode = DispatchMode::kFCompute; - return storage_type_assign(out_attrs, mxnet::kDefaultStorage, - dispatch_mode, wanted_mode); + return MKLDNNStorageType(attrs, dev_mask, true, dispatch_mode, in_attrs, + out_attrs); } -#if MXNET_USE_MKLDNN == 1 static void DeconvolutionComputeExCPU(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector& inputs, @@ -419,7 +405,9 @@ NNVM_REGISTER_OP(Deconvolution) }) .set_attr("FInferShape", DeconvolutionShape) .set_attr("FInferType", DeconvolutionType) +#if MXNET_USE_MKLDNN == 1 .set_attr("FInferStorageType", DeconvStorageType) +#endif .set_attr("FResourceRequest", [](const NodeAttrs& n) { return std::vector{ResourceRequest::kTempSpace}; }) @@ -440,7 +428,9 @@ NNVM_REGISTER_OP(_backward_Deconvolution) return params.no_bias ? 
2 : 3; }) .set_attr("TIsBackward", true) +#if MXNET_USE_MKLDNN == 1 .set_attr("FInferStorageType", BackwardDeconvStorageType) +#endif .set_attr("FResourceRequest", [](const NodeAttrs& n) { return std::vector{ResourceRequest::kTempSpace}; }) diff --git a/src/operator/nn/lrn.cc b/src/operator/nn/lrn.cc index 4433519df81b..587cf930920e 100644 --- a/src/operator/nn/lrn.cc +++ b/src/operator/nn/lrn.cc @@ -28,6 +28,7 @@ #include "../operator_common.h" #if MXNET_USE_MKLDNN == 1 #include "./mkldnn/mkldnn_lrn-inl.h" +#include "./mkldnn/mkldnn_base-inl.h" #endif namespace mxnet { @@ -81,24 +82,16 @@ struct LRNGrad { } }; +#if MXNET_USE_MKLDNN == 1 bool LRNForwardInferStorageType(const nnvm::NodeAttrs& attrs, const int dev_mask, DispatchMode* dispatch_mode, std::vector *in_attrs, std::vector *out_attrs) { CHECK(!in_attrs->empty()); -#if MXNET_USE_MKLDNN == 1 - if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet()) { - return storage_type_assign(out_attrs, mxnet::kDefaultStorage, - dispatch_mode, DispatchMode::kFComputeFallback); - } else if (dev_mask == mshadow::cpu::kDevMask) { - return storage_type_assign(out_attrs, mxnet::kDefaultStorage, - dispatch_mode, DispatchMode::kFComputeEx); - } -#endif - storage_type_assign(out_attrs, mxnet::kDefaultStorage, - dispatch_mode, DispatchMode::kFCompute); - return true; + + return MKLDNNStorageType(attrs, dev_mask, true, dispatch_mode, in_attrs, + out_attrs); } bool LRNBackwardInferStorageType(const nnvm::NodeAttrs& attrs, @@ -107,20 +100,11 @@ bool LRNBackwardInferStorageType(const nnvm::NodeAttrs& attrs, std::vector *in_attrs, std::vector *out_attrs) { CHECK(!in_attrs->empty()); -#if MXNET_USE_MKLDNN == 1 - if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet()) { - return storage_type_assign(out_attrs, mxnet::kDefaultStorage, - dispatch_mode, DispatchMode::kFComputeFallback); - } else if (dev_mask == mshadow::cpu::kDevMask) { - return storage_type_assign(out_attrs, mxnet::kDefaultStorage, - dispatch_mode, DispatchMode::kFComputeEx); - } -#endif - return storage_type_assign(out_attrs, mxnet::kDefaultStorage, - dispatch_mode, DispatchMode::kFCompute); + + return MKLDNNStorageType(attrs, dev_mask, true, dispatch_mode, in_attrs, + out_attrs); } -#if MXNET_USE_MKLDNN == 1 void LRNComputeExCPU(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector &inputs, @@ -183,7 +167,9 @@ number of kernels in the layer. .set_attr_parser(ParamParser) .set_attr("FInferShape", LRNShape) .set_attr("FInferType", LRNType) +#if MXNET_USE_MKLDNN == 1 .set_attr("FInferStorageType", LRNForwardInferStorageType) +#endif .set_attr("FListInputNames", [](const NodeAttrs& attrs) { return std::vector{"data"}; @@ -203,7 +189,9 @@ number of kernels in the layer. 
NNVM_REGISTER_OP(_backward_LRN) .set_num_outputs(1) .set_attr_parser(ParamParser) +#if MXNET_USE_MKLDNN == 1 .set_attr("FInferStorageType", LRNBackwardInferStorageType) +#endif .set_attr("TIsBackward", true) #if MXNET_USE_MKLDNN == 1 .set_attr("FComputeEx", LRNGradComputeExCPU) diff --git a/src/operator/nn/mkldnn/mkldnn_base.cc b/src/operator/nn/mkldnn/mkldnn_base.cc index 27c574deae53..f3facd966aa7 100644 --- a/src/operator/nn/mkldnn/mkldnn_base.cc +++ b/src/operator/nn/mkldnn/mkldnn_base.cc @@ -536,7 +536,9 @@ bool MKLDNNStorageType(const nnvm::NodeAttrs &attrs, DispatchMode wanted_mode; #if MXNET_USE_MKLDNN == 1 - if (dev_mask == mshadow::cpu::kDevMask && support_mkldnn) + if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet()) + wanted_mode = DispatchMode::kFComputeFallback; + else if (dev_mask == mshadow::cpu::kDevMask && support_mkldnn) wanted_mode = DispatchMode::kFComputeEx; else #endif diff --git a/src/operator/nn/pooling.cc b/src/operator/nn/pooling.cc index 7cb14503b1ca..2d118142bc79 100644 --- a/src/operator/nn/pooling.cc +++ b/src/operator/nn/pooling.cc @@ -30,8 +30,8 @@ #endif // MXNET_USE_NNPACK #if MXNET_USE_MKLDNN == 1 #include "./mkldnn/mkldnn_pooling-inl.h" +#include "./mkldnn/mkldnn_base-inl.h" #endif // MXNET_USE_MKLDNN - namespace mxnet { namespace op { @@ -284,7 +284,6 @@ void PoolingGradComputeExCPU(const nnvm::NodeAttrs &attrs, const OpContext &ctx, } FallBackCompute(PoolingGradCompute, attrs, ctx, inputs, req, outputs); } -#endif inline static bool PoolingStorageType(const nnvm::NodeAttrs &attrs, const int dev_mask, @@ -292,21 +291,11 @@ inline static bool PoolingStorageType(const nnvm::NodeAttrs &attrs, std::vector *in_attrs, std::vector *out_attrs) { CHECK_EQ(in_attrs->size(), 1); - -#if MXNET_USE_MKLDNN == 1 const PoolingParam ¶m = nnvm::get(attrs.parsed); - if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet()) { - return storage_type_assign(out_attrs, mxnet::kDefaultStorage, - dispatch_mode, DispatchMode::kFComputeFallback); - } else if (dev_mask == mshadow::cpu::kDevMask && SupportMKLDNNPooling(param)) { - return storage_type_assign(out_attrs, mxnet::kDefaultStorage, - dispatch_mode, DispatchMode::kFComputeEx); - } -#else - CHECK_EQ(out_attrs->size(), 1); -#endif - return storage_type_assign(out_attrs, mxnet::kDefaultStorage, - dispatch_mode, DispatchMode::kFCompute); + bool support_mkldnn_pool = SupportMKLDNNPooling(param); + + return MKLDNNStorageType(attrs, dev_mask, support_mkldnn_pool, + dispatch_mode, in_attrs, out_attrs); } inline static bool BackwardPoolingStorageType(const nnvm::NodeAttrs &attrs, @@ -317,21 +306,12 @@ inline static bool BackwardPoolingStorageType(const nnvm::NodeAttrs &attrs, const PoolingParam ¶m = nnvm::get(attrs.parsed); CHECK_EQ(in_attrs->size(), GetNumBackInputs(param)); CHECK_EQ(out_attrs->size(), 1); + bool support_mkldnn_pool = SupportMKLDNNPooling(param); -#if MXNET_USE_MKLDNN == 1 - if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet()) { - return storage_type_assign(out_attrs, mxnet::kDefaultStorage, - dispatch_mode, DispatchMode::kFComputeFallback); - } else if (dev_mask == mshadow::cpu::kDevMask && SupportMKLDNNPooling(param)) { - return storage_type_assign(out_attrs, mxnet::kDefaultStorage, - dispatch_mode, DispatchMode::kFComputeEx); - } -#else - CHECK_EQ(in_attrs->size(), 3); -#endif - return storage_type_assign(out_attrs, mxnet::kDefaultStorage, - dispatch_mode, DispatchMode::kFCompute); + return MKLDNNStorageType(attrs, dev_mask, support_mkldnn_pool, + dispatch_mode, in_attrs, out_attrs); } +#endif 
DMLC_REGISTER_PARAMETER(PoolingParam); @@ -408,7 +388,9 @@ For each window ``X``, the mathematical expression for Lp pooling is: return std::vector{"output"}; }) .set_attr_parser(PoolingParamParser) +#if MXNET_USE_MKLDNN == 1 .set_attr("FInferStorageType", PoolingStorageType) +#endif .set_attr("FInferType", PoolingType) .set_attr("FInferShape", PoolingShape) .set_attr("FCompute", PoolingCompute) @@ -437,9 +419,9 @@ NNVM_REGISTER_OP(_backward_Pooling) .set_attr("FResourceRequest", [](const NodeAttrs& n) { return std::vector{ResourceRequest::kTempSpace}; }) -#endif .set_attr("FInferStorageType", BackwardPoolingStorageType) +#endif .set_attr_parser(PoolingParamParser) #if MXNET_USE_MKLDNN == 1 .set_attr("FComputeEx", PoolingGradComputeExCPU) diff --git a/src/operator/nn/softmax.cc b/src/operator/nn/softmax.cc index c58f382bbade..0fad3d6e6951 100644 --- a/src/operator/nn/softmax.cc +++ b/src/operator/nn/softmax.cc @@ -25,8 +25,11 @@ #include "./softmax-inl.h" #include "../tensor/elemwise_unary_op.h" #include "../tensor/elemwise_binary_op.h" +#include "../operator_common.h" +#if MXNET_USE_MKLDNN == 1 #include "mkldnn/mkldnn_base-inl.h" #include "mkldnn/mkldnn_ops-inl.h" +#endif namespace mxnet { namespace op { @@ -50,7 +53,6 @@ static void SoftmaxComputeExCPU(const nnvm::NodeAttrs& attrs, FallBackCompute(SoftmaxCompute, attrs, ctx, inputs, req, outputs); } -#endif inline static bool SoftmaxStorageType(const nnvm::NodeAttrs& attrs, const int dev_mask, @@ -60,19 +62,10 @@ inline static bool SoftmaxStorageType(const nnvm::NodeAttrs& attrs, CHECK_EQ(in_attrs->size(), 1); CHECK_EQ(out_attrs->size(), 1); - DispatchMode wanted_mode; -#if MXNET_USE_MKLDNN == 1 - // We only run MKLDNN op if it runs on CPU. - if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet()) - wanted_mode = DispatchMode::kFComputeFallback; - else if (dev_mask == mshadow::cpu::kDevMask) - wanted_mode = DispatchMode::kFComputeEx; - else -#endif - wanted_mode = DispatchMode::kFCompute; - return storage_type_assign(out_attrs, static_cast((*in_attrs)[0]), - dispatch_mode, wanted_mode); + return MKLDNNStorageType(attrs, dev_mask, true, dispatch_mode, in_attrs, + out_attrs); } +#endif MXNET_OPERATOR_REGISTER_UNARY(softmax) .describe(R"code(Applies the softmax function. 
@@ -106,8 +99,8 @@ Example:: .set_attr("FCompute", SoftmaxCompute) #if MXNET_USE_MKLDNN == 1 .set_attr("FComputeEx", SoftmaxComputeExCPU) -#endif .set_attr("FInferStorageType", SoftmaxStorageType) +#endif .set_attr("FGradient", ElemwiseGradUseOut{"_backward_softmax"}) .add_arguments(SoftmaxParam::__FIELDS__()); diff --git a/tests/python/mkl/test_mkldnn.py b/tests/python/mkl/test_mkldnn.py index 03f3c76bb65b..6287bfc96fab 100644 --- a/tests/python/mkl/test_mkldnn.py +++ b/tests/python/mkl/test_mkldnn.py @@ -93,7 +93,7 @@ def __getitem__(self, key): # below line triggers different execution thread for _ in loader: y = net(mx.nd.array(np.ones(X))).asnumpy() - # output should be 016711406 (non-mkldnn mode output) + # output should be 016711406 (non-mkldnn mode output) assert_almost_equal(y[0, 0, 0, 0], 0.016711406) break @@ -242,6 +242,125 @@ def check_batchnorm_training(stype): check_batchnorm_training(stype) +@with_seed() +def test_softmax(): + def check_softmax_training(stype): + for shape in [(2, 3), (2, 3, 2, 2)]: + data_tmp = np.random.normal(-0.1, 0.1, size=shape) + + data = mx.symbol.Variable('data', stype=stype) + in_location = [mx.nd.array(data_tmp).tostype(stype)] + + test = mx.symbol.softmax(data, axis=-1) + check_numeric_gradient(test, in_location, numeric_eps=1e-2, rtol=0.16, atol=1e-4) + + stypes = ['row_sparse', 'default'] + for stype in stypes: + check_softmax_training(stype) + + +@with_seed() +def test_pooling(): + def check_pooling_training(stype): + for shape in [(3, 3, 10), (3, 3, 20, 20)]: + data_tmp = np.random.normal(-0.1, 0.1, size=shape) + data = mx.symbol.Variable('data', stype=stype) + in_location = [mx.nd.array(data_tmp).tostype(stype)] + + if np.array(shape).shape[0] == 3: + test = mx.symbol.Pooling(data=data, kernel=(3,), stride=(2), pool_type='avg') + elif np.array(shape).shape[0] == 4: + test = mx.symbol.Pooling(data=data, kernel=(3, 3), stride=(2, 2), pool_type='avg') + else: + return 0 + check_numeric_gradient(test, in_location, numeric_eps=1e-2, rtol=0.16, atol=1e-4) + + stypes = ['row_sparse', 'default'] + for stype in stypes: + check_pooling_training(stype) + + +@with_seed() +def test_activation(): + def check_activation_training(stype): + for shape in [(2, 3, 3), (2, 3, 2, 2)]: + data_tmp = np.random.normal(-0.1, 1, size=shape) + + data = mx.symbol.Variable('data', stype=stype) + in_location = [mx.nd.array(data_tmp).tostype(stype)] + + test = mx.symbol.Activation(data, act_type="relu") + check_numeric_gradient(test, in_location, numeric_eps=1e-2, rtol=0.16, atol=1e-4) + + stypes = ['row_sparse', 'default'] + for stype in stypes: + check_activation_training(stype) + + +def test_convolution(): + def check_convolution_training(stype): + for shape in [(3, 3, 10), (3, 3, 10, 10)]: + data_tmp = np.random.normal(-0.1, 1, size=shape) + data = mx.symbol.Variable('data', stype=stype) + + if np.array(shape).shape[0] == 3: + test = mx.symbol.Convolution(data=data, kernel=(3,), stride=(2), num_filter=4) + weight_tmp = np.random.normal(-0.1, 0.1, size=(4, 3, 3)) + elif np.array(shape).shape[0] == 4: + test = mx.symbol.Convolution(data=data, kernel=(3, 3), stride=(2, 2), num_filter=4) + weight_tmp = np.random.normal(-0.1, 0.1, size=(4, 3, 3, 3)) + else: + return 0 + bias_tmp = np.random.normal(0.1, 0.1, size=(4,)) + in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(weight_tmp).tostype(stype), + mx.nd.array(bias_tmp).tostype(stype)] + check_numeric_gradient(test, in_location, numeric_eps=1e-2, rtol=0.16, atol=1e-4) + + stypes = ['row_sparse', 'default'] + 
for stype in stypes: + check_convolution_training(stype) + + +def test_Deconvolution(): + def check_Deconvolution_training(stype): + for shape in [(3, 3, 10), (3, 3, 10, 10)]: + data_tmp = np.random.randint(256, size=shape) + data = mx.symbol.Variable('data', stype=stype) + + if np.array(shape).shape[0] == 3: + test = mx.symbol.Deconvolution(data=data, kernel=(3,), stride=(2), num_filter=4) + weight_tmp = np.random.normal(-0.1, 0.1, size=(3, 4, 3)) + elif np.array(shape).shape[0] == 4: + test = mx.symbol.Deconvolution(data=data, kernel=(3, 3), stride=(2, 2), num_filter=4) + weight_tmp = np.random.normal(-0.1, 0.1, size=(3, 4, 3, 3)) + else: + return 0 + bias_tmp = np.random.normal(0.1, 0.1, size=(4,)) + in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(weight_tmp).tostype(stype), + mx.nd.array(bias_tmp).tostype(stype)] + check_numeric_gradient(test, in_location, numeric_eps=1e-2, rtol=0.16, atol=1e-4) + + stypes = ['row_sparse', 'default'] + for stype in stypes: + check_Deconvolution_training(stype) + + +@with_seed() +def test_LRN(): + def check_LRN_training(stype): + for shape in [(3, 4, 5, 5)]: + data_tmp = np.random.normal(-0.1, 0.1, size=shape) + data = mx.symbol.Variable('data', stype=stype) + in_location = [mx.nd.array(data_tmp).tostype(stype)] + + test = mx.symbol.LRN(data, nsize=3) + check_numeric_gradient(test, in_location, numeric_eps=1e-2, rtol=0.16, atol=1e-4) + + stypes = ['row_sparse', 'default'] + for stype in stypes: + check_LRN_training(stype) + + @with_seed() def test_fullyconnected(): def check_fullyconnected_training(stype): From 5b37cf64d629a61b39d49a557ce0d656d6d42abc Mon Sep 17 00:00:00 2001 From: Yizhi Liu Date: Fri, 24 Aug 2018 20:20:32 -0700 Subject: [PATCH 061/160] explain the details for Scala Experimental (#12348) --- .../src/main/scala/org/apache/mxnet/NDArrayCollector.scala | 4 ++++ .../main/scala/org/apache/mxnet/annotation/Experimental.scala | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/NDArrayCollector.scala b/scala-package/core/src/main/scala/org/apache/mxnet/NDArrayCollector.scala index 3952b73cfb06..0b7f9af705f1 100644 --- a/scala-package/core/src/main/scala/org/apache/mxnet/NDArrayCollector.scala +++ b/scala-package/core/src/main/scala/org/apache/mxnet/NDArrayCollector.scala @@ -133,6 +133,10 @@ class NDArrayCollector private(private val autoDispose: Boolean = true, * If the return type of scope is NDArray or NDArrayFuncReturn, * it is smart enough NOT to collect or dispose the returned NDArray.
* However in other cases, it is users' responsibility NOT to leak allocated NDArrays outside. + *
+ * We might switch to try -with-resources statement (by AutoCloseable in Java 1.7+) + * and deprecate this method later, thus it is marked as Experimental. + * * @param codeBlock code block to be executed within the scope. * @tparam T return type of the function codeBlock. * @return The result of function codeBlock. diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/annotation/Experimental.scala b/scala-package/core/src/main/scala/org/apache/mxnet/annotation/Experimental.scala index 147d651fb04f..d63194d48bc5 100644 --- a/scala-package/core/src/main/scala/org/apache/mxnet/annotation/Experimental.scala +++ b/scala-package/core/src/main/scala/org/apache/mxnet/annotation/Experimental.scala @@ -21,7 +21,7 @@ import java.lang.annotation.{ElementType, Retention, Target, _} /** * Experimental: there is a comparably high chance that - * the API will undergo some kind of changes + * the API will be changed or removed. */ @Retention(RetentionPolicy.RUNTIME) @Target(Array(ElementType.TYPE, ElementType.FIELD, ElementType.METHOD, ElementType.PARAMETER, From 39d0d06ad936e254e0d20999ecc6dca4a7aadaaf Mon Sep 17 00:00:00 2001 From: Carin Meier Date: Sat, 25 Aug 2018 05:37:20 -0400 Subject: [PATCH 062/160] Add cloverage codecov report to CI for clojure (#12335) --- contrib/clojure-package/ci-test.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/contrib/clojure-package/ci-test.sh b/contrib/clojure-package/ci-test.sh index eda3919f5ce0..ba2d258e12d0 100755 --- a/contrib/clojure-package/ci-test.sh +++ b/contrib/clojure-package/ci-test.sh @@ -21,3 +21,4 @@ set -evx MXNET_HOME=${PWD} cd ${MXNET_HOME}/contrib/clojure-package lein test +lein cloverage --codecov From ad34e05bc970ab2e09cbf809c2d2b5fdda8abff3 Mon Sep 17 00:00:00 2001 From: Chance Bair Date: Sat, 25 Aug 2018 11:39:56 +0200 Subject: [PATCH 063/160] Disable flaky test test_ndarray.test_order (#12311) --- tests/python/unittest/test_ndarray.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/python/unittest/test_ndarray.py b/tests/python/unittest/test_ndarray.py index 9d07deacb39b..c48801ec1cec 100644 --- a/tests/python/unittest/test_ndarray.py +++ b/tests/python/unittest/test_ndarray.py @@ -639,6 +639,7 @@ def test_arange(): assert_almost_equal(pred, gt) @with_seed() +@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/12310") def test_order(): ctx = default_context() dat_size = 5 From 54d5777dc2ff949c5f566fbe29a946a20cc3d3f7 Mon Sep 17 00:00:00 2001 From: JackieWu Date: Sun, 26 Aug 2018 03:40:37 +0800 Subject: [PATCH 064/160] add activation information for mxnet.gluon.nn._Conv (#12354) --- python/mxnet/gluon/nn/conv_layers.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/python/mxnet/gluon/nn/conv_layers.py b/python/mxnet/gluon/nn/conv_layers.py index 96ecc21c81b3..5f20d20c02ab 100644 --- a/python/mxnet/gluon/nn/conv_layers.py +++ b/python/mxnet/gluon/nn/conv_layers.py @@ -153,6 +153,8 @@ def __repr__(self): s += ', groups={num_group}' if self.bias is None: s += ', bias=False' + if self.act: + s += ', {}'.format(self.act) s += ')' shape = self.weight.shape return s.format(name=self.__class__.__name__, From 7230bb9b5f2f8caabf7bb64689e49ef6a5529b66 Mon Sep 17 00:00:00 2001 From: zhiyuan-huang Date: Sun, 26 Aug 2018 13:33:23 +0800 Subject: [PATCH 065/160] MKLDNN Forward FullyConnected op cache (#11611) * Enable primitive allocation cache for FullyConnected * Enable primitive allocation cache for FullyConnected * fix indent and pass in_data as last argument for CreateMKLDNNMem * fix indent and pass 
in_data as last argument for CreateMKLDNNMem --- src/operator/nn/fully_connected-inl.h | 17 +++ .../nn/mkldnn/mkldnn_fully_connected.cc | 118 ++++++++++++++++-- 2 files changed, 123 insertions(+), 12 deletions(-) diff --git a/src/operator/nn/fully_connected-inl.h b/src/operator/nn/fully_connected-inl.h index 2338f8974aae..2b75419d2a81 100644 --- a/src/operator/nn/fully_connected-inl.h +++ b/src/operator/nn/fully_connected-inl.h @@ -61,6 +61,11 @@ struct FullyConnectedParam : public dmlc::Parameter { DMLC_DECLARE_FIELD(flatten).set_default(true) .describe("Whether to collapse all but the first axis of the input data tensor."); } + bool operator==(const FullyConnectedParam& other) const { + return this->num_hidden == other.num_hidden && + this->no_bias == other.no_bias && + this->flatten == other.flatten; + } }; template @@ -228,4 +233,16 @@ void FullyConnectedGradCompute(const nnvm::NodeAttrs& attrs, } // namespace op } // namespace mxnet +namespace std { +template<> +struct hash { + size_t operator()(const mxnet::op::FullyConnectedParam& val) { + size_t ret = 0; + ret = dmlc::HashCombine(ret, val.num_hidden); + ret = dmlc::HashCombine(ret, val.no_bias); + ret = dmlc::HashCombine(ret, val.flatten); + return ret; + } +}; +} // namespace std #endif // MXNET_OPERATOR_NN_FULLY_CONNECTED_INL_H_ diff --git a/src/operator/nn/mkldnn/mkldnn_fully_connected.cc b/src/operator/nn/mkldnn/mkldnn_fully_connected.cc index f86f8dbefa2b..5f672cd51fd5 100644 --- a/src/operator/nn/mkldnn/mkldnn_fully_connected.cc +++ b/src/operator/nn/mkldnn/mkldnn_fully_connected.cc @@ -82,6 +82,100 @@ inline static mkldnn::inner_product_backward_weights::primitive_desc GetIPBwdWei } } +class MKLDNNFullyConnectForward { + std::shared_ptr data; + std::shared_ptr weight; + std::shared_ptr out; + std::shared_ptr bias; + std::shared_ptr ipFwd; + + public: + mkldnn::inner_product_forward::primitive_desc ipFwd_pd; + + MKLDNNFullyConnectForward(const FullyConnectedParam ¶m, bool is_train, + const NDArray &data, const NDArray &weight, + const NDArray *bias, + const mkldnn::memory::desc &output) + : ipFwd_pd(GetIPFwd(data, weight, bias, output, is_train)) {} + + void SetNewMem(const mkldnn::memory &data, const mkldnn::memory &weight, + const mkldnn::memory *bias, const mkldnn::memory &output) { + if (this->data == nullptr) + this->data = std::shared_ptr(new mkldnn::memory( + ipFwd_pd.src_primitive_desc(), data.get_data_handle())); + else + this->data->set_data_handle(data.get_data_handle()); + + if (this->weight == nullptr) + this->weight = std::shared_ptr(new mkldnn::memory( + ipFwd_pd.weights_primitive_desc(), weight.get_data_handle())); + else + this->weight->set_data_handle(weight.get_data_handle()); + + if (this->out == nullptr) + this->out = std::shared_ptr(new mkldnn::memory( + ipFwd_pd.dst_primitive_desc(), output.get_data_handle())); + else + this->out->set_data_handle(output.get_data_handle()); + + if (bias != nullptr) { + if (this->bias == nullptr) + this->bias = std::shared_ptr(new mkldnn::memory( + ipFwd_pd.bias_primitive_desc(), bias->get_data_handle())); + else + this->bias->set_data_handle(bias->get_data_handle()); + if (this->ipFwd == nullptr) + this->ipFwd = std::shared_ptr( + new mkldnn::inner_product_forward( + ipFwd_pd, mkldnn::primitive::at(*this->data), + mkldnn::primitive::at(*this->weight), + mkldnn::primitive::at(*this->bias), *this->out)); + } else if (this->ipFwd == nullptr) { + this->ipFwd = std::shared_ptr( + new mkldnn::inner_product_forward( + ipFwd_pd, mkldnn::primitive::at(*this->data), + 
mkldnn::primitive::at(*this->weight), *this->out)); + } + } + const mkldnn::inner_product_forward &GetIpFwd() const { + return *ipFwd; + } +}; + +typedef ParamOpSign MKLDNNFullyconSignature; + +static inline MKLDNNFullyConnectForward &GetFCFwd( + const nnvm::NodeAttrs &attrs, const NDArray &data, const NDArray &weight, + const NDArray *bias, const mkldnn::memory::desc &output, + const bool is_train) { +#if DMLC_CXX11_THREAD_LOCAL + static thread_local std::unordered_map fcFwds; +#else + static MX_THREAD_LOCAL std::unordered_map fcFwds; +#endif + const FullyConnectedParam& param = nnvm::get(attrs.parsed); + MKLDNNFullyconSignature key(param); + key.AddSign(data); + key.AddSign(weight); + key.AddSign(is_train); + + if (bias) + key.AddSign(*bias); + + auto it = fcFwds.find(key); + if (it == fcFwds.end()) { + MKLDNNFullyConnectForward fcFwd(param, is_train, data, weight, bias, + output); + auto ins_ret = fcFwds.insert( + std::pair(key, fcFwd)); + CHECK(ins_ret.second); + it = ins_ret.first; + } + return it->second; +} + void MKLDNNFCForward(const nnvm::NodeAttrs& attrs, const OpContext &ctx, const std::vector &in_data, const std::vector &req, @@ -112,21 +206,21 @@ void MKLDNNFCForward(const nnvm::NodeAttrs& attrs, const OpContext &ctx, out_md = mkldnn::memory::desc(out_dims, get_mkldnn_type(out_data[fullc::kOut].dtype()), mkldnn::memory::format::any); } - - mkldnn::inner_product_forward::primitive_desc ipFwd_pd = GetIPFwd(data, weight, - param.no_bias ? nullptr : &in_data[fullc::kBias], out_md, ctx.is_train); - auto data_mem = data.GetMKLDNNDataReorder(ipFwd_pd.src_primitive_desc()); - auto weight_mem = weight.GetMKLDNNDataReorder(ipFwd_pd.weights_primitive_desc()); + MKLDNNFullyConnectForward &FCFwd = + GetFCFwd(attrs, data, weight, param.no_bias ? nullptr : &in_data[fullc::kBias], + out_md, ctx.is_train); + auto data_mem = data.GetMKLDNNDataReorder(FCFwd.ipFwd_pd.src_primitive_desc()); + auto weight_mem = weight.GetMKLDNNDataReorder(FCFwd.ipFwd_pd.weights_primitive_desc()); auto out_mem = CreateMKLDNNMem(out_data[fullc::kOut], - ipFwd_pd.dst_primitive_desc(), req[fullc::kOut]); - if (param.no_bias) { - MKLDNNStream::Get()->RegisterPrim(mkldnn::inner_product_forward( - ipFwd_pd, *data_mem, *weight_mem, *out_mem.second)); + FCFwd.ipFwd_pd.dst_primitive_desc(), req[fullc::kOut], &data); + if (!param.no_bias) { + auto bias_mem = in_data[fullc::kBias].GetMKLDNNDataReorder( + FCFwd.ipFwd_pd.bias_primitive_desc()); + FCFwd.SetNewMem(*data_mem, *weight_mem, bias_mem, *out_mem.second); } else { - auto bias_mem = in_data[fullc::kBias].GetMKLDNNDataReorder(ipFwd_pd.bias_primitive_desc()); - MKLDNNStream::Get()->RegisterPrim(mkldnn::inner_product_forward(ipFwd_pd, - *data_mem, *weight_mem, *bias_mem, *out_mem.second)); + FCFwd.SetNewMem(*data_mem, *weight_mem, nullptr, *out_mem.second); } + MKLDNNStream::Get()->RegisterPrim(FCFwd.GetIpFwd()); CommitOutput(out_data[fullc::kOut], out_mem); MKLDNNStream::Get()->Submit(); } From c88b8ee18c4b3d25435cf97ee8124d36543b6981 Mon Sep 17 00:00:00 2001 From: Alexander Zai Date: Sun, 26 Aug 2018 09:26:07 -0700 Subject: [PATCH 066/160] fix flaky test: test_broadcast_binary_op (#11875) * cast inputs to f32 * retrigger * retrigger * remove extra cast * remove commented out function * retrigger --- tests/python/unittest/test_operator.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/tests/python/unittest/test_operator.py b/tests/python/unittest/test_operator.py index d0bc450415e9..38c90e6747ef 100644 --- 
a/tests/python/unittest/test_operator.py +++ b/tests/python/unittest/test_operator.py @@ -1844,7 +1844,6 @@ def test_bmod(a, b): #c = a % b c = mx.sym.cast(a, dtype='float64') % mx.sym.cast(b, dtype='float64') # '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32. - #check_binary_op_forward(c, lambda a, b: np.float32(a) % np.float32(b), gen_binary_data) check_binary_op_forward(c, lambda a, b: np.float32(a) % np.float32(b), gen_binary_data, rtol=0, atol=0) check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data) @@ -1913,10 +1912,16 @@ def test_bdiv(a, b): check_binary_op_forward(c, lambda a, b: a / b, gen_broadcast_data, mx_nd_func=mx.nd.divide) check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_broadcast_data) - def test_bmod(a, b): + def test_bmod(a_, b_): + # Python and numpy operate only in double so to avoid numerical errors we have to use + # doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044 + a = mx.sym.cast(a_, dtype='float64') + b = mx.sym.cast(b_, dtype='float64') + # '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32. c = mx.sym.broadcast_mod(a, b) check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data, atol=1, mx_nd_func=mx.nd.modulo) - check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out * (a // b)), gen_broadcast_data, atol=1) + check_binary_op_backward(c, + lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data) def test_bmod_int(a, b): c = mx.sym.broadcast_mod(mx.sym.cast(a, dtype='int32'), mx.sym.cast(b, dtype='int32')) @@ -1974,13 +1979,7 @@ def test_bxor(a, b): test_bminus(a, b) test_bmul(a, b) test_bdiv(a, b) - ''' - Flaky Test Disabled due to master build failure: - http://jenkins.mxnet-ci.amazon-ml.com/blue/organizations/jenkins/incubator-mxnet/detail/master/1248/pipeline - Github Issue: https://github.com/apache/incubator-mxnet/issues/11838 - test_bmod(a, b) - ''' test_bmod_int(a, b) test_bpow(a, b) test_bequal(a, b) From 84665e3e5132f02ac99a1922ab41d4a78dbb5ca1 Mon Sep 17 00:00:00 2001 From: Indu Bharathi Date: Sun, 26 Aug 2018 09:35:08 -0700 Subject: [PATCH 067/160] [MXNET-422] Distributed training tutorial (#10955) * First draft * Python syntax highlighting * Polishing * Add distributed MNIST * rename * Polishing * Add images * Add images * Link to the example python file. Minor edits. * Minor changes * Use images from web-data * Rename folder * Remove images from example folder * Add license header * Use png image instead of svg * Add distributed training tutorial to tutorials index * Use CIFAR-10 instead of MNIST. 
* Fix language errors + * Add a sample output from distributed training + * Add the output of store.num_workers and store.rank --- docs/tutorials/index.md | 1 + example/distributed_training/README.md | 255 +++++++++++++++++++ example/distributed_training/cifar10_dist.py | 176 +++++++++++++ 3 files changed, 432 insertions(+) create mode 100644 example/distributed_training/README.md create mode 100644 example/distributed_training/cifar10_dist.py diff --git a/docs/tutorials/index.md b/docs/tutorials/index.md index ae0851425be0..32c4a16a8e0d 100644 --- a/docs/tutorials/index.md +++ b/docs/tutorials/index.md @@ -40,6 +40,7 @@ Select API:  * Practitioner Guides * [Multi-GPU training](http://gluon.mxnet.io/chapter07_distributed-learning/multiple-gpus-gluon.html) External link * [Checkpointing and Model Serialization (a.k.a. saving and loading)](/tutorials/gluon/save_load_params.html) External link ([Alternative](http://gluon.mxnet.io/chapter03_deep-neural-networks/serialization.html)) + * [Distributed Training](https://github.com/apache/incubator-mxnet/tree/master/example/distributed_training) * [Inference using an ONNX model](/tutorials/onnx/inference_on_onnx_model.html) * [Fine-tuning an ONNX model on Gluon](/tutorials/onnx/fine_tuning_gluon.html) * [Visualizing Decisions of Convolutional Neural Networks](/tutorials/vision/cnn_visualization.html) diff --git a/example/distributed_training/README.md b/example/distributed_training/README.md new file mode 100644 index 000000000000..b0b0447725b5 --- /dev/null +++ b/example/distributed_training/README.md @@ -0,0 +1,255 @@ +# Distributed Training using Gluon + +Deep learning models are usually trained using GPUs because GPUs can do a lot more computations in parallel than CPUs. But even with modern GPUs, it could take several days to train big models. Training can be done faster by using multiple GPUs, as described in [this](https://gluon.mxnet.io/chapter07_distributed-learning/multiple-gpus-gluon.html) tutorial. However, only a certain number of GPUs can be attached to one host (typically 8 or 16). To make the training even faster, we can use multiple GPUs attached to multiple hosts. + +In this tutorial, we will show how to train a model faster using multi-host distributed training. + +![Multiple GPUs connected to multiple hosts](https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/example/distributed_training/distributed_training.png) + +We will use data parallelism to distribute the training, which involves splitting the training data across GPUs attached to multiple hosts. Since the hosts are working with different subsets of the training data in parallel, the training completes a lot faster. + +In this tutorial, we will train a ResNet18 network on the CIFAR-10 dataset using two hosts, each having four GPUs. + +## Distributed Training Architecture: + +Multi-host distributed training involves working with three different types of processes - worker, parameter server and scheduler. + +![Distributed training architecture](https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/example/distributed_training/dist_train_arch.png) + +### Parameter Server: +The parameters of the model need to be shared with all hosts since multiple hosts are working together to train one model. To make this sharing efficient, the parameters are split across multiple hosts. A parameter server in each host stores a subset of parameters. In the figure above, parameters are split evenly between the two hosts. 
At the end of every iteration, each host communicates with every other host to update all parameters of the model. + +### Worker: +Each host has a worker process which, in each iteration, fetches a batch of data, runs the forward and backward passes on all GPUs in the host, computes the parameter updates and sends those updates to the parameter servers in each host. Since we have multiple workers to train the model, each worker only needs to process 1/N of the training data, where N is the number of workers. + +### Scheduler: +The scheduler is responsible for scheduling the workers and parameter servers. There is only one scheduler in the entire cluster. + +## Moving to distributed training: + +[cifar10_dist.py](cifar10_dist.py) contains code that trains a ResNet18 network using distributed training. In this section, we'll walk through the parts of that file that are unique to distributed training. + +### Step 1: Use a distributed key-value store: + +As mentioned above, in distributed training, parameters are split into N parts and distributed across N hosts. This is done automatically by the [distributed key-value store](https://mxnet.incubator.apache.org/tutorials/python/kvstore.html). The user only needs to create the distributed kv store and ask the `Trainer` to use the created store. + +```python +store = mxnet.kv.create('dist') +``` + +It is the job of the trainer to take the gradients computed in the backward pass and update the parameters of the model. We'll tell the trainer to store and update the parameters in the distributed kv store we just created instead of doing it in GPU or CPU memory. For example, + +```python +trainer = gluon.Trainer(net.collect_params(), + 'adam', {'learning_rate': .001}, + kvstore=store) +``` + +## Step 2: Split the training data: + +In distributed training (using data parallelism), training data is split into equal parts across all workers and each worker uses its subset of the training data for training. For example, if we had two machines, each running a worker managing four GPUs, we'll split the data as shown below. Note that we don't split the data depending on the number of GPUs but split it depending on the number of workers. + +![Splitting data](https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/example/distributed_training/split_data.png) + +Each worker can find out the total number of workers in the cluster and its own rank, which is an integer between 0 and N-1, where N is the number of workers. + +```python +store = kv.create('dist') +print("Total number of workers: %d" % store.num_workers) +print("This worker's rank: %d" % store.rank) +``` + +``` +Total number of workers: 2 +This worker's rank: 0 +``` + +Knowing the number of workers and a particular worker's rank, it is easy to split the dataset into partitions and pick one partition to train on, depending on the rank of the worker. Here is a sampler that does exactly that. 
+ +```python +class SplitSampler(gluon.data.sampler.Sampler): + """ Split the dataset into `num_parts` parts and sample from the part with index `part_index` + Parameters + ---------- + length: int + Number of examples in the dataset + num_parts: int + Partition the data into multiple parts + part_index: int + The index of the part to read from + """ + def __init__(self, length, num_parts=1, part_index=0): + # Compute the length of each partition + self.part_len = length // num_parts + # Compute the start index for this partition + self.start = self.part_len * part_index + # Compute the end index for this partition + self.end = self.start + self.part_len + + def __iter__(self): + # Extract examples between `start` and `end`, shuffle and return them. + indices = list(range(self.start, self.end)) + random.shuffle(indices) + return iter(indices) + + def __len__(self): + return self.part_len +``` + +We can then create a `DataLoader` using the `SplitSampler` as shown below: + +```python +# Load the training data +train_data = gluon.data.DataLoader(gluon.data.vision.CIFAR10(train=True, transform=transform), + batch_size, + sampler=SplitSampler(50000, store.num_workers, store.rank)) +``` + +## Step 3: Training with multiple GPUs + +Note that we didn't split the dataset by the number of GPUs. We split it by the number of workers, which usually translates to the number of machines. It is the worker's responsibility to split its partition across the GPUs it has and run the training in parallel across those GPUs. + +To train with multiple GPUs, we first need to specify the list of GPUs we want to use for training: + +```python +ctx = [mx.gpu(i) for i in range(gpus_per_machine)] +``` + +We can then train a batch as shown below: + +```python +# Train a batch using multiple GPUs +def train_batch(batch, ctx, net, trainer): + + # Split and load data into multiple GPUs + data = batch[0] + data = gluon.utils.split_and_load(data, ctx) + + # Split and load label into multiple GPUs + label = batch[1] + label = gluon.utils.split_and_load(label, ctx) + + # Run the forward and backward pass + forward_backward(net, data, label) + + # Update the parameters + this_batch_size = batch[0].shape[0] + trainer.step(this_batch_size) +``` + +Here is the code that runs the forward (computing loss) and backward (computing gradients) pass on multiple GPUs: + +```python +# We'll use cross entropy loss since we are doing multiclass classification +loss = gluon.loss.SoftmaxCrossEntropyLoss() + +# Run one forward and backward pass on multiple GPUs +def forward_backward(net, data, label): + + # Ask autograd to remember the forward pass + with autograd.record(): + # Compute the loss on all GPUs + losses = [loss(net(X), Y) for X, Y in zip(data, label)] + + # Run the backward pass (calculate gradients) on all GPUs + for l in losses: + l.backward() +``` + +Given `train_batch`, training an epoch is simple: + +```python +for batch in train_data: + # Train the batch using multiple GPUs + train_batch(batch, ctx, net, trainer) +``` + +## Final Step: Launching the distributed training + +Note that there are several processes that need to be launched on multiple machines to do distributed training. One worker and one parameter server need to be launched on each host. The scheduler needs to be launched on one of the hosts. While this can be done manually, MXNet provides the [`launch.py`](https://github.com/apache/incubator-mxnet/blob/master/tools/launch.py) tool to make this easy. A rough sketch of what a manual launch would involve is shown below for reference. 
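For readers curious about what `launch.py` automates, the sketch below starts the scheduler, server and worker processes by hand on a single machine. It is only an illustration added for clarity: the `DMLC_*` environment variable names are the ones read by MXNet's distributed kvstore, but the single-host layout, the port number and the process counts here are assumptions, not part of the example above.

```python
# Illustrative sketch only: launch.py normally does this (plus the ssh/rsync work)
# for you across multiple hosts.
import os
import subprocess

base_env = dict(os.environ,
                DMLC_PS_ROOT_URI='127.0.0.1',  # address of the host running the scheduler
                DMLC_PS_ROOT_PORT='9091',
                DMLC_NUM_SERVER='1',
                DMLC_NUM_WORKER='2')

def launch(role):
    # Every process runs the same training script; its role ('scheduler',
    # 'server' or 'worker') is read from the environment by MXNet.
    return subprocess.Popen(['python', 'cifar10_dist.py'],
                            env=dict(base_env, DMLC_ROLE=role))

processes = [launch('scheduler'), launch('server'), launch('worker'), launch('worker')]
for p in processes:
    p.wait()
```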
+ +For example, the following command launches distributed training on two machines: + +``` +python ~/mxnet/tools/launch.py -n 2 -s 2 -H hosts \ + --sync-dst-dir /home/ubuntu/cifar10_dist \ + --launcher ssh \ + "python /home/ubuntu/cifar10_dist/cifar10_dist.py" +``` + +- `-n 2` specifies the number of workers that must be launched +- `-s 2` specifies the number of parameter servers that must be launched. +- `--sync-dst-dir` specifies a destination location where the contents of the current directory will be rsync'd +- `--launcher ssh` tells `launch.py` to use ssh to login on each machine in the cluster and launch processes. +- `"python /home/ubuntu/dist/dist.py"` is the command that will get executed in each of the launched processes. +- Finally, `-H hosts` specifies the list of hosts in the cluster to be used for distributed training. + +Let's take a look at the `hosts` file. + +``` +~/dist$ cat hosts +d1 +d2 +``` + +'d1' and 'd2' are the hostnames of the hosts we want to run distributed training using. `launch.py` should be able to ssh into these hosts by providing just the hostname on the command line. For example: + +``` +~/dist$ ssh d1 +Welcome to Ubuntu 16.04.3 LTS (GNU/Linux 4.4.0-1049-aws x86_64) + + * Documentation: https://help.ubuntu.com + * Management: https://landscape.canonical.com + * Support: https://ubuntu.com/advantage + + Get cloud support with Ubuntu Advantage Cloud Guest: + http://www.ubuntu.com/business/services/cloud + +0 packages can be updated. +0 updates are security updates. + + +Last login: Wed Jan 31 18:06:45 2018 from 72.21.198.67 +``` + +Note that no authentication information was provided to login to the host. This can be done using multiple methods. One easy way is to specify the ssh certificates in `~/.ssh/config`. Example: + +``` +~$ cat ~/.ssh/config +Host d1 + HostName ec2-34-201-108-233.compute-1.amazonaws.com + port 22 + user ubuntu + IdentityFile /home/ubuntu/my_key.pem + IdentitiesOnly yes + +Host d2 + HostName ec2-34-238-232-97.compute-1.amazonaws.com + port 22 + user ubuntu + IdentityFile /home/ubuntu/my_key.pem + IdentitiesOnly yes +``` + +A better way is to use ssh agent forwarding. Check [this](https://aws.amazon.com/blogs/security/securely-connect-to-linux-instances-running-in-a-private-amazon-vpc/) article for more details. + +Here is a sample output from running distributed training: + +``` +$ python ~/mxnet/tools/launch.py -n 2 -s 2 -H hosts --sync-dst-dir /home/ubuntu/cifar10_dist --launcher ssh "python /home/ubuntu/cifar10_dist/cifar10_dist.py" +2018-06-03 05:30:05,609 INFO rsync /home/ubuntu/cifar10_dist/ -> a1:/home/ubuntu/cifar10_dist +2018-06-03 05:30:05,879 INFO rsync /home/ubuntu/cifar10_dist/ -> a2:/home/ubuntu/cifar10_dist +Epoch 0: Test_acc 0.467400 +Epoch 0: Test_acc 0.466800 +Epoch 1: Test_acc 0.568500 +Epoch 1: Test_acc 0.571300 +Epoch 2: Test_acc 0.586300 +Epoch 2: Test_acc 0.594000 +Epoch 3: Test_acc 0.659200 +Epoch 3: Test_acc 0.653300 +Epoch 4: Test_acc 0.681200 +Epoch 4: Test_acc 0.687900 +``` + +Note that the output from all hosts are merged and printed to the console. + diff --git a/example/distributed_training/cifar10_dist.py b/example/distributed_training/cifar10_dist.py new file mode 100644 index 000000000000..506afbbe081a --- /dev/null +++ b/example/distributed_training/cifar10_dist.py @@ -0,0 +1,176 @@ +#!/usr/bin/env python + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from __future__ import print_function +import random, sys + +import mxnet as mx +from mxnet import autograd, gluon, kv, nd +from mxnet.gluon.model_zoo import vision + +import numpy as np + +# Create a distributed key-value store +store = kv.create('dist') + +# Classify the images into one of the 10 classes +num_outputs = 10 + +# 64 images in a batch +batch_size_per_gpu = 64 +# How many epochs to run the training +epochs = 5 + +# How many GPUs per machine +gpus_per_machine = 4 +# Effective batch size across all GPUs +batch_size = batch_size_per_gpu * gpus_per_machine + +# Create the context (a list of all GPUs to be used for training) +ctx = [mx.gpu(i) for i in range(gpus_per_machine)] + +# Convert to float 32 +# Having channel as the first dimension makes computation more efficient. Hence the (2,0,1) transpose. +# Dividing by 255 normalizes the input between 0 and 1 +def transform(data, label): + return nd.transpose(data.astype(np.float32), (2,0,1))/255, label.astype(np.float32) + +class SplitSampler(gluon.data.sampler.Sampler): + """ Split the dataset into `num_parts` parts and sample from the part with index `part_index` + + Parameters + ---------- + length: int + Number of examples in the dataset + num_parts: int + Partition the data into multiple parts + part_index: int + The index of the part to read from + """ + def __init__(self, length, num_parts=1, part_index=0): + # Compute the length of each partition + self.part_len = length // num_parts + # Compute the start index for this partition + self.start = self.part_len * part_index + # Compute the end index for this partition + self.end = self.start + self.part_len + + def __iter__(self): + # Extract examples between `start` and `end`, shuffle and return them. + indices = list(range(self.start, self.end)) + random.shuffle(indices) + return iter(indices) + + def __len__(self): + return self.part_len + +# Load the training data +train_data = gluon.data.DataLoader(gluon.data.vision.CIFAR10(train=True, transform=transform), + batch_size, + sampler=SplitSampler(50000, store.num_workers, store.rank)) + +# Load the test data +test_data = gluon.data.DataLoader(gluon.data.vision.CIFAR10(train=False, transform=transform), + batch_size, shuffle=False) + +# Use ResNet from model zoo +net = vision.resnet18_v1() + +# Initialize the parameters with Xavier initializer +net.collect_params().initialize(mx.init.Xavier(), ctx=ctx) + +# SoftmaxCrossEntropy is the most common choice of loss function for multiclass classification +softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss() + +# Use Adam optimizer. Ask the trainer to use the distributed kv store. 
+trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': .001}, kvstore=store) + +# Evaluate accuracy of the given network using the given data +def evaluate_accuracy(data_iterator, net): + + acc = mx.metric.Accuracy() + + # Iterate through data and label + for i, (data, label) in enumerate(data_iterator): + + # Get the data and label into the GPU + data = data.as_in_context(ctx[0]) + label = label.as_in_context(ctx[0]) + + # Get network's output which is a probability distribution + # Apply argmax on the probability distribution to get network's classification. + output = net(data) + predictions = nd.argmax(output, axis=1) + + # Give network's prediction and the correct label to update the metric + acc.update(preds=predictions, labels=label) + + # Return the accuracy + return acc.get()[1] + +# We'll use cross entropy loss since we are doing multiclass classification +loss = gluon.loss.SoftmaxCrossEntropyLoss() + +# Run one forward and backward pass on multiple GPUs +def forward_backward(net, data, label): + + # Ask autograd to remember the forward pass + with autograd.record(): + # Compute the loss on all GPUs + losses = [loss(net(X), Y) for X, Y in zip(data, label)] + + # Run the backward pass (calculate gradients) on all GPUs + for l in losses: + l.backward() + +# Train a batch using multiple GPUs +def train_batch(batch, ctx, net, trainer): + + # Split and load data into multiple GPUs + data = batch[0] + data = gluon.utils.split_and_load(data, ctx) + + # Split and load label into multiple GPUs + label = batch[1] + label = gluon.utils.split_and_load(label, ctx) + + # Run the forward and backward pass + forward_backward(net, data, label) + + # Update the parameters + this_batch_size = batch[0].shape[0] + trainer.step(this_batch_size) + +# Run as many epochs as required +for epoch in range(epochs): + + # Iterate through batches and run training using multiple GPUs + batch_num = 1 + for batch in train_data: + + # Train the batch using multiple GPUs + train_batch(batch, ctx, net, trainer) + + batch_num += 1 + + # Print test accuracy after every epoch + test_accuracy = evaluate_accuracy(test_data, net) + print("Epoch %d: Test_acc %f" % (epoch, test_accuracy)) + sys.stdout.flush() + From 48d215534c560dfce2a761f7253fd1aba615ed4c Mon Sep 17 00:00:00 2001 From: Rahul Huilgol Date: Sun, 26 Aug 2018 09:47:31 -0700 Subject: [PATCH 068/160] [MXNET-535] Fix bugs in LR Schedulers and add warmup (#11234) * Add warmup and fix inconsistencies with learning rate schedulers * add comments * remove assert --- example/image-classification/common/fit.py | 3 +- python/mxnet/lr_scheduler.py | 147 ++++++++++++++++++--- tests/python/unittest/test_optimizer.py | 52 ++++++++ 3 files changed, 183 insertions(+), 19 deletions(-) diff --git a/example/image-classification/common/fit.py b/example/image-classification/common/fit.py index b3b13053addf..5775f30bd885 100755 --- a/example/image-classification/common/fit.py +++ b/example/image-classification/common/fit.py @@ -49,7 +49,8 @@ def _get_lr_scheduler(args, kv): steps = [epoch_size * (x - begin_epoch) for x in step_epochs if x - begin_epoch > 0] if steps: - return (lr, mx.lr_scheduler.MultiFactorScheduler(step=steps, factor=args.lr_factor)) + return (lr, mx.lr_scheduler.MultiFactorScheduler(step=steps, factor=args.lr_factor, + base_lr=args.lr)) else: return (lr, None) diff --git a/python/mxnet/lr_scheduler.py b/python/mxnet/lr_scheduler.py index 963560d17853..436085620a2e 100644 --- a/python/mxnet/lr_scheduler.py +++ b/python/mxnet/lr_scheduler.py @@ 
-17,6 +17,7 @@ """Scheduling learning rate.""" import logging +from math import cos, pi class LRScheduler(object): """Base class of a learning rate scheduler. @@ -28,9 +29,41 @@ class LRScheduler(object): ---------- base_lr : float, optional The initial learning rate. + warmup_steps: int + number of warmup steps used before this scheduler starts decay + warmup_begin_lr: float + if using warmup, the learning rate from which it starts warming up + warmup_mode: string + warmup can be done in two modes. + 'linear' mode gradually increases lr with each step in equal increments + 'constant' mode keeps lr at warmup_begin_lr for warmup_steps """ - def __init__(self, base_lr=0.01): + def __init__(self, base_lr=0.01, + warmup_steps=0, warmup_begin_lr=0, warmup_mode='linear'): self.base_lr = base_lr + assert isinstance(warmup_steps, int) + self.warmup_steps = warmup_steps + + self.warmup_final_lr = base_lr + self.warmup_begin_lr = warmup_begin_lr + if self.warmup_begin_lr > self.warmup_final_lr: + raise ValueError("Base lr has to be higher than warmup_begin_lr") + if self.warmup_steps < 0: + raise ValueError("Warmup steps has to be positive or 0") + if warmup_mode not in ['linear', 'constant']: + raise ValueError("Supports only linear and constant modes of warmup") + self.warmup_mode = warmup_mode + + def get_warmup_lr(self, num_update): + assert num_update < self.warmup_steps + if self.warmup_mode == 'linear': + increase = (self.warmup_final_lr - self.warmup_begin_lr) \ + * float(num_update) / float(self.warmup_steps) + return self.warmup_begin_lr + increase + elif self.warmup_mode == 'constant': + return self.warmup_begin_lr + else: + raise ValueError("Invalid warmup mode %s"%self.warmup_mode) def __call__(self, num_update): """Return a new learning rate. @@ -66,8 +99,9 @@ class FactorScheduler(LRScheduler): stop_factor_lr : float, optional Stop updating the learning rate if it is less than this value. """ - def __init__(self, step, factor=1, stop_factor_lr=1e-8): - super(FactorScheduler, self).__init__() + def __init__(self, step, factor=1, stop_factor_lr=1e-8, base_lr=0.01, + warmup_steps=0, warmup_begin_lr=0, warmup_mode='linear'): + super(FactorScheduler, self).__init__(base_lr, warmup_steps, warmup_begin_lr, warmup_mode) if step < 1: raise ValueError("Schedule step must be greater or equal than 1 round") if factor > 1.0: @@ -78,6 +112,9 @@ def __init__(self, step, factor=1, stop_factor_lr=1e-8): self.count = 0 def __call__(self, num_update): + if num_update < self.warmup_steps: + return self.get_warmup_lr(num_update) + # NOTE: use while rather than if (for continuing training via load_epoch) while num_update > self.count + self.step: self.count += self.step @@ -108,9 +145,19 @@ class MultiFactorScheduler(LRScheduler): The list of steps to schedule a change factor: float The factor to change the learning rate. + warmup_steps: int + number of warmup steps used before this scheduler starts decay + warmup_begin_lr: float + if using warmup, the learning rate from which it starts warming up + warmup_mode: string + warmup can be done in two modes. 
+ 'linear' mode gradually increases lr with each step in equal increments + 'constant' mode keeps lr at warmup_begin_lr for warmup_steps """ - def __init__(self, step, factor=1): - super(MultiFactorScheduler, self).__init__() + def __init__(self, step, factor=1, base_lr=0.01, warmup_steps=0, warmup_begin_lr=0, + warmup_mode='linear'): + super(MultiFactorScheduler, self).__init__(base_lr, warmup_steps, + warmup_begin_lr, warmup_mode) assert isinstance(step, list) and len(step) >= 1 for i, _step in enumerate(step): if i != 0 and step[i] <= step[i-1]: @@ -125,6 +172,9 @@ def __init__(self, step, factor=1): self.count = 0 def __call__(self, num_update): + if num_update < self.warmup_steps: + return self.get_warmup_lr(num_update) + # NOTE: use while rather than if (for continuing training via load_epoch) while self.cur_step_ind <= len(self.step)-1: if num_update > self.step[self.cur_step_ind]: @@ -138,33 +188,94 @@ def __call__(self, num_update): return self.base_lr class PolyScheduler(LRScheduler): - """ Reduce the learning rate by given a list of steps. + """ Reduce the learning rate according to a polynomial of given power. - Calculate the new learning rate by:: + Calculate the new learning rate, after warmup if any, by:: - base_lr * (1-nup/max_nup)^pwr + final_lr + (start_lr - final_lr) * (1-nup/max_nup)^pwr if nup < max_nup, 0 otherwise. Parameters ---------- - max_update: maximum number of updates before the decay reaches 0. - base_lr: base learning rate - pwr: power of the decay term as a funtion of the current number of updates. - + max_update: int + maximum number of updates before the decay reaches final learning rate. + base_lr: float + base learning rate to start from + pwr: int + power of the decay term as a function of the current number of updates. + final_lr: float + final learning rate after all steps + warmup_steps: int + number of warmup steps used before this scheduler starts decay + warmup_begin_lr: float + if using warmup, the learning rate from which it starts warming up + warmup_mode: string + warmup can be done in two modes. + 'linear' mode gradually increases lr with each step in equal increments + 'constant' mode keeps lr at warmup_begin_lr for warmup_steps """ - def __init__(self, max_update, base_lr=0.01, pwr=2): - super(PolyScheduler, self).__init__(base_lr) + def __init__(self, max_update, base_lr=0.01, pwr=2, final_lr=0, + warmup_steps=0, warmup_begin_lr=0, warmup_mode='linear'): + super(PolyScheduler, self).__init__(base_lr, warmup_steps, warmup_begin_lr, warmup_mode) assert isinstance(max_update, int) if max_update < 1: raise ValueError("maximum number of updates must be strictly positive") + self.power = pwr self.base_lr_orig = self.base_lr self.max_update = max_update - self.power = pwr - self.base_lr = self.base_lr_orig + self.final_lr = final_lr + self.max_steps = self.max_update - self.warmup_steps + + def __call__(self, num_update): + if num_update < self.warmup_steps: + return self.get_warmup_lr(num_update) + if num_update <= self.max_update: + self.base_lr = self.final_lr + (self.base_lr_orig - self.final_lr) * \ + pow(1 - float(num_update - self.warmup_steps) / float(self.max_steps), self.power) + return self.base_lr + +class CosineScheduler(LRScheduler): + """ Reduce the learning rate according to a cosine function + + Calculate the new learning rate by:: + + final_lr + (start_lr - final_lr) * (1+cos(pi * nup/max_nup))/2 + if nup < max_nup, 0 otherwise. 
+ + Parameters + ---------- + max_update: int + maximum number of updates before the decay reaches 0 + base_lr: float + base learning rate + final_lr: float + final learning rate after all steps + warmup_steps: int + number of warmup steps used before this scheduler starts decay + warmup_begin_lr: float + if using warmup, the learning rate from which it starts warming up + warmup_mode: string + warmup can be done in two modes. + 'linear' mode gradually increases lr with each step in equal increments + 'constant' mode keeps lr at warmup_begin_lr for warmup_steps + """ + + def __init__(self, max_update, base_lr=0.01, final_lr=0, + warmup_steps=0, warmup_begin_lr=0, warmup_mode='linear'): + super(CosineScheduler, self).__init__(base_lr, warmup_steps, warmup_begin_lr, warmup_mode) + assert isinstance(max_update, int) + if max_update < 1: + raise ValueError("maximum number of updates must be strictly positive") + self.base_lr_orig = base_lr + self.max_update = max_update + self.final_lr = final_lr + self.max_steps = self.max_update - self.warmup_steps def __call__(self, num_update): + if num_update < self.warmup_steps: + return self.get_warmup_lr(num_update) if num_update <= self.max_update: - self.base_lr = self.base_lr_orig * pow(1.0 - float(num_update) / float(self.max_update), - self.power) + self.base_lr = self.final_lr + (self.base_lr_orig - self.final_lr) * \ + (1 + cos(pi * (num_update - self.warmup_steps) / self.max_steps)) / 2 return self.base_lr diff --git a/tests/python/unittest/test_optimizer.py b/tests/python/unittest/test_optimizer.py index 449cdb423466..496a61f356b3 100644 --- a/tests/python/unittest/test_optimizer.py +++ b/tests/python/unittest/test_optimizer.py @@ -1040,6 +1040,58 @@ def test_adagrad(): g_stype='row_sparse') +def test_factor_scheduler(): + base_lr = 1 + step = 100 + factor = 0.1 + sched = mx.lr_scheduler.FactorScheduler(step, factor, stop_factor_lr=1e-4, base_lr=base_lr, + warmup_steps=20, warmup_begin_lr=0.1, warmup_mode='constant') + + assert (sched(0) == 0.1) + np.testing.assert_almost_equal(sched(10), 0.1) + assert (sched(21) == base_lr), sched(21) + np.testing.assert_almost_equal(sched(101), base_lr * factor) + np.testing.assert_almost_equal(sched(201), base_lr * factor * factor) + np.testing.assert_almost_equal(sched(1000), 1e-4) + +def test_multifactor_scheduler(): + base_lr = 0.1 + steps = [15, 25] + factor = 0.1 + sched = mx.lr_scheduler.MultiFactorScheduler(steps, factor, base_lr=base_lr, + warmup_steps=10, warmup_begin_lr=0.05, warmup_mode='linear') + + assert sched(0) == 0.05 + np.testing.assert_almost_equal(sched(5), 0.05 + (base_lr - 0.05)/2) + np.testing.assert_almost_equal(sched(15), base_lr) + np.testing.assert_almost_equal(sched(16), base_lr * factor) + np.testing.assert_almost_equal(sched(20), base_lr * factor) + np.testing.assert_almost_equal(sched(26), base_lr * factor * factor) + np.testing.assert_almost_equal(sched(100), base_lr * factor * factor) + +def test_poly_scheduler(): + base_lr = 3 + final_lr = 0 + steps = 1000 + poly_sched = mx.lr_scheduler.PolyScheduler(steps, base_lr=base_lr, pwr=2, final_lr=final_lr, + warmup_steps=100, warmup_begin_lr=0, warmup_mode='linear') + + np.testing.assert_almost_equal(poly_sched(0), 0) + np.testing.assert_almost_equal(poly_sched(50), float(base_lr)/2) + np.testing.assert_almost_equal(poly_sched(100), base_lr) + assert (poly_sched(101) < poly_sched(100)) + assert (poly_sched(500) < 1.6) + np.testing.assert_almost_equal(poly_sched(steps), final_lr) + +def test_cosine_scheduler(): + # also tests case 
without warmup + base_lr = 3 + final_lr = 0.1 + steps = 1000 + cosine_sched = mx.lr_scheduler.CosineScheduler(steps, base_lr=base_lr, final_lr=final_lr) + np.testing.assert_almost_equal(cosine_sched(0), base_lr) + np.testing.assert_almost_equal(cosine_sched(steps), final_lr) + assert (cosine_sched(500) > 1.5) if __name__ == '__main__': import nose From 308ada1e412a56343e012f1ef7a4aa4fbe243032 Mon Sep 17 00:00:00 2001 From: Leonard Lausen Date: Sun, 26 Aug 2018 21:04:59 -0700 Subject: [PATCH 069/160] Make check_isfinite, check_scale optional in clip_global_norm (#12042) * Make check_isfinite, check_scale optional in clip_global_norm If both are set to false, clip_global_norm does not force any synchronization and throughput can be increased. * Add tests * Remove check_scale * Document return type * Fix test_gluon_gpu --- python/mxnet/gluon/utils.py | 38 ++++++++++++++++++++++------- tests/python/gpu/test_gluon_gpu.py | 16 +++++++----- tests/python/unittest/test_gluon.py | 11 +++++---- 3 files changed, 45 insertions(+), 20 deletions(-) diff --git a/python/mxnet/gluon/utils.py b/python/mxnet/gluon/utils.py index f04479d23716..d5a14a6859a7 100644 --- a/python/mxnet/gluon/utils.py +++ b/python/mxnet/gluon/utils.py @@ -115,8 +115,23 @@ def split_and_load(data, ctx_list, batch_axis=0, even_split=True): return [i.as_in_context(ctx) for i, ctx in zip(slices, ctx_list)] -def clip_global_norm(arrays, max_norm): +def clip_global_norm(arrays, max_norm, check_isfinite=True): """Rescales NDArrays so that the sum of their 2-norm is smaller than `max_norm`. + + Parameters + ---------- + arrays : list of NDArray + max_norm : float + check_isfinite : bool, default True + If True, check that the total_norm is finite (not nan or inf). This + requires a blocking .asscalar() call. + + Returns + ------- + NDArray or float + Total norm. Return type is NDArray of shape (1,) if check_isfinite is + False. Otherwise a float is returned. + """ def _norm(array): if array.stype == 'default': @@ -126,15 +141,20 @@ def _norm(array): assert len(arrays) > 0 ctx = arrays[0].context total_norm = ndarray.add_n(*[_norm(arr).as_in_context(ctx) for arr in arrays]) - total_norm = ndarray.sqrt(total_norm).asscalar() - if not np.isfinite(total_norm): - warnings.warn(UserWarning('nan or inf is detected. Clipping results will be undefined.'), - stacklevel=2) + total_norm = ndarray.sqrt(total_norm) + if check_isfinite: + if not np.isfinite(total_norm.asscalar()): + warnings.warn( + UserWarning('nan or inf is detected. 
' + 'Clipping results will be undefined.'), stacklevel=2) scale = max_norm / (total_norm + 1e-8) - if scale < 1.0: - for arr in arrays: - arr *= scale - return total_norm + scale = ndarray.min(ndarray.concat(scale, ndarray.ones(1, ctx=ctx), dim=0)) + for arr in arrays: + arr *= scale.as_in_context(arr.context) + if check_isfinite: + return total_norm.asscalar() + else: + return total_norm def _indent(s_, numSpaces): diff --git a/tests/python/gpu/test_gluon_gpu.py b/tests/python/gpu/test_gluon_gpu.py index 42d65dab5fdc..69375afdfe0a 100644 --- a/tests/python/gpu/test_gluon_gpu.py +++ b/tests/python/gpu/test_gluon_gpu.py @@ -111,12 +111,16 @@ def test_gluon_ctc_consistency(): @with_seed() def test_global_norm_clip_multi_device(): - x1 = mx.nd.ones((3,3), ctx=mx.gpu(0)) - x2 = mx.nd.ones((4,4), ctx=mx.cpu(0)) - norm = gluon.utils.clip_global_norm([x1, x2], 1.0) - assert norm == 5.0 - assert_almost_equal(x1.asnumpy(), np.ones((3,3))/5) - assert_almost_equal(x2.asnumpy(), np.ones((4,4))/5) + for check_isfinite in [True, False]: + x1 = mx.nd.ones((3,3), ctx=mx.gpu(0)) + x2 = mx.nd.ones((4,4), ctx=mx.cpu(0)) + norm = gluon.utils.clip_global_norm([x1, x2], 1.0, check_isfinite=check_isfinite) + if check_isfinite: + assert norm == 5.0 + else: + assert norm.asscalar() == 5.0 + assert_almost_equal(x1.asnumpy(), np.ones((3, 3)) / 5) + assert_almost_equal(x2.asnumpy(), np.ones((4, 4)) / 5) def _check_batchnorm_result(input, num_devices=1, cuda=False): diff --git a/tests/python/unittest/test_gluon.py b/tests/python/unittest/test_gluon.py index 61b441a5f842..bf9f5a77c844 100644 --- a/tests/python/unittest/test_gluon.py +++ b/tests/python/unittest/test_gluon.py @@ -735,10 +735,10 @@ def test_sequential_warning(): @with_seed() def test_global_norm_clip(): stypes = ['default', 'row_sparse'] - def check_global_norm_clip(stype): + def check_global_norm_clip(stype, check_isfinite): x1 = mx.nd.ones((3,3)).tostype(stype) x2 = mx.nd.ones((4,4)).tostype(stype) - norm = gluon.utils.clip_global_norm([x1, x2], 1.0) + norm = gluon.utils.clip_global_norm([x1, x2], 1.0, check_isfinite=check_isfinite) assert norm == 5.0 assert_almost_equal(x1.asnumpy(), np.ones((3,3))/5) assert_almost_equal(x2.asnumpy(), np.ones((4,4))/5) @@ -746,11 +746,12 @@ def check_global_norm_clip(stype): x3 = mx.nd.array([1.0, 2.0, float('nan')]).tostype(stype) with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") - gluon.utils.clip_global_norm([x1, x3], 2.0) - assert len(w) == 1 + gluon.utils.clip_global_norm([x1, x3], 2.0, check_isfinite=check_isfinite) + assert len(w) == check_isfinite for stype in stypes: - check_global_norm_clip(stype) + for check_isfinite in [True, False]: + check_global_norm_clip(stype, check_isfinite) @with_seed() def test_embedding(): From 2f7395877b8271ded36dbf9155437b613dd8df8f Mon Sep 17 00:00:00 2001 From: Kellen Sunderland Date: Mon, 27 Aug 2018 13:18:13 +0200 Subject: [PATCH 070/160] [MXNET-859] Add a clang-tidy stage to CI (#12282) --- .clang-tidy | 61 +++++++++++++++++++++++++++++++ Jenkinsfile | 10 +++++ ci/docker/install/ubuntu_clang.sh | 7 +++- ci/docker/runtime_functions.sh | 26 +++++++++++++ src/c_api/.clang-tidy | 19 ++++++++++ 5 files changed, 122 insertions(+), 1 deletion(-) create mode 100644 .clang-tidy create mode 100644 src/c_api/.clang-tidy diff --git a/.clang-tidy b/.clang-tidy new file mode 100644 index 000000000000..993656e12766 --- /dev/null +++ b/.clang-tidy @@ -0,0 +1,61 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license 
agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# The checks defined here will be run and will display by default as warnings. +Checks: > + -*, cppcoreguidelines-c-copy-assignment-signature, + cppcoreguidelines-interfaces-global-init, cppcoreguidelines-no-malloc, + cppcoreguidelines-pro-bounds-constant-array-index, cppcoreguidelines-pro-type-const-cast, + cppcoreguidelines-pro-type-cstyle-cast, cppcoreguidelines-pro-type-member-init, + cppcoreguidelines-pro-type-static-cast-downcast, cppcoreguidelines-pro-type-union-access, + cppcoreguidelines-pro-type-vararg, cppcoreguidelines-slicing, + cppcoreguidelines-special-member-functions, clang-analyzer-security.FloatLoopCounter, + clang-analyzer-security.insecureAPI.*, clang-analyzer-core.CallAndMessage, + clang-analyzer-core.DivideZero, clang-analyzer-core.DynamicTypePropagation, + clang-analyzer-core.NonNullParamChecker, clang-analyzer-core.NullDereference, + clang-analyzer-core.StackAddressEscape, clang-analyzer-core.UndefinedBinaryOperatorResult, + clang-analyzer-core.VLASize, clang-analyzer-core.builtin.BuiltinFunctions, + clang-analyzer-core.builtin.NoReturnFunctions, clang-analyzer-core.uninitialized.ArraySubscript, + clang-analyzer-core.uninitialized.Assign, clang-analyzer-core.uninitialized.Branch, + clang-analyzer-core.uninitialized.CapturedBlockVariable, + clang-analyzer-core.uninitialized.UndefReturn, clang-analyzer-cplusplus.NewDelete, + clang-analyzer-cplusplus.NewDeleteLeaks, clang-analyzer-cplusplus.SelfAssignment, + clang-analyzer-deadcode.DeadStores, modernize-avoid-bind, modernize-deprecated-headers, + modernize-loop-convert, modernize-make-shared, modernize-pass-by-value, + modernize-raw-string-literal, modernize-redundant-void-arg, modernize-replace-auto-ptr, + modernize-replace-random-shuffle, modernize-return-braced-init-list, modernize-shrink-to-fit, + modernize-unary-static-assert, modernize-use-bool-literals, modernize-use-default-member-init, + modernize-use-emplace, modernize-use-equals-default, modernize-use-equals-delete, + modernize-use-noexcept, modernize-use-nullptr, modernize-use-override, + modernize-use-transparent-functors, modernize-use-using, performance-* + +# cppcoreguidelines checks not enabled: +# cppcoreguidelines-pro-bounds-pointer-arithmetic +# cppcoreguidelines-pro-bounds-array-to-pointer-decay +# cppcoreguidelines-pro-type-reinterpret-cast + +# modernize checks not enabled: +# modernize-use-auto +# modernize-make-unique (C++14 and newer only) + +# In order to trigger an error, you must have a rule defined both in checks and in this section. +WarningsAsErrors: > + cppcoreguidelines-no-malloc + +# Todo: define a better regex match that includes most project headers, but excludes third party +# code. 
+HeaderFilterRegex: '^src/.*' diff --git a/Jenkinsfile b/Jenkinsfile index 0e4aa199a6c7..6a93fd586414 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -183,6 +183,16 @@ core_logic: { } } }, + 'CPU: Clang Tidy': { + node(NODE_LINUX_CPU) { + ws('workspace/build-cpu-clang60_tidy') { + timeout(time: max_time, unit: 'MINUTES') { + utils.init_git() + utils.docker_run('ubuntu_cpu', 'build_ubuntu_cpu_clang_tidy', false) + } + } + } + }, 'CPU: Clang 3.9 MKLDNN': { node(NODE_LINUX_CPU) { ws('workspace/build-cpu-mkldnn-clang39') { diff --git a/ci/docker/install/ubuntu_clang.sh b/ci/docker/install/ubuntu_clang.sh index 40761716933e..cb0f234a1c15 100755 --- a/ci/docker/install/ubuntu_clang.sh +++ b/ci/docker/install/ubuntu_clang.sh @@ -26,6 +26,11 @@ wget -O - http://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add - && \ apt-add-repository "deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-3.9 main" && \ apt-add-repository "deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-6.0 main" && \ apt-get update && \ - apt-get install -y clang-3.9 clang-6.0 && \ + apt-get install -y clang-3.9 clang-6.0 clang-tidy-6.0 && \ clang-3.9 --version && \ clang-6.0 --version + +# Use llvm's master version of run-clang-tidy.py. This version has mostly minor updates, but +# importantly will properly return a non-zero exit code when an error is reported in clang-tidy. +# Please remove the below if we install a clang version higher than 6.0. +wget https://raw.githubusercontent.com/llvm-mirror/clang-tools-extra/7654135f0cbd155c285fd2a37d87e27e4fff3071/clang-tidy/tool/run-clang-tidy.py -O /usr/lib/llvm-6.0/share/clang/run-clang-tidy.py diff --git a/ci/docker/runtime_functions.sh b/ci/docker/runtime_functions.sh index 1e38ec48e6ce..a124e105838d 100755 --- a/ci/docker/runtime_functions.sh +++ b/ci/docker/runtime_functions.sh @@ -365,6 +365,32 @@ build_ubuntu_cpu_clang60() { -j$(nproc) } +build_ubuntu_cpu_clang_tidy() { + set -ex + + export CXX=clang++-6.0 + export CC=clang-6.0 + export CLANG_TIDY=/usr/lib/llvm-6.0/share/clang/run-clang-tidy.py + + pushd . + cd /work/build + cmake \ + -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \ + -DCMAKE_C_COMPILER_LAUNCHER=ccache \ + -DUSE_CUDA=OFF \ + -DUSE_MKL_IF_AVAILABLE=OFF \ + -DUSE_OPENCV=ON \ + -DCMAKE_BUILD_TYPE=Debug \ + -G Ninja \ + -DCMAKE_EXPORT_COMPILE_COMMANDS=ON \ + /work/mxnet + + ninja -v + cd /work/mxnet + $CLANG_TIDY -p /work/build -j $(nproc) -clang-tidy-binary clang-tidy-6.0 /work/mxnet/src + popd +} + build_ubuntu_cpu_clang39_mkldnn() { set -ex diff --git a/src/c_api/.clang-tidy b/src/c_api/.clang-tidy new file mode 100644 index 000000000000..2af4b0d7f526 --- /dev/null +++ b/src/c_api/.clang-tidy @@ -0,0 +1,19 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# Disable most clang-tidy checks in the c_api folder. 
+Checks: -*,readability-non-const-parameter From 6a7bfe905bafe94a33eccd0cb8bc42f0d667f606 Mon Sep 17 00:00:00 2001 From: Pedro Larroy <928489+larroy@users.noreply.github.com> Date: Mon, 27 Aug 2018 15:00:00 +0200 Subject: [PATCH 071/160] Separate refactoring from #12276 in a prior PR (#12296) * Separate minor refactoring from #12276 in a prior PR * Address CR comments --- ci/build.py | 164 ++++++++++++++++++--------------- ci/docker/runtime_functions.sh | 3 + ci/docker_cache.py | 5 +- ci/util.py | 71 ++++++++++++++ 4 files changed, 165 insertions(+), 78 deletions(-) diff --git a/ci/build.py b/ci/build.py index a9d6a63537f2..f1a5e99e2d0e 100755 --- a/ci/build.py +++ b/ci/build.py @@ -23,31 +23,33 @@ """ __author__ = 'Marco de Abreu, Kellen Sunderland, Anton Chernov, Pedro Larroy' -__version__ = '0.1' +__version__ = '0.2' import argparse import glob import logging -import os import re import shutil import subprocess import sys import tempfile -import platform from copy import deepcopy from itertools import chain -from subprocess import call, check_call +from subprocess import call, check_call, check_output from typing import * from util import * +import pprint +import requests + CCACHE_MAXSIZE = '500G' -def under_ci() -> bool: - """:return: True if we run in Jenkins.""" - return 'JOB_NAME' in os.environ -def get_platforms(path: Optional[str] = "docker"): +def get_dockerfiles_path(): + return "docker" + + +def get_platforms(path: str = get_dockerfiles_path()) -> List[str]: """Get a list of architectures given our dockerfiles""" dockerfiles = glob.glob(os.path.join(path, "Dockerfile.build.*")) dockerfiles = list(filter(lambda x: x[-1] != '~', dockerfiles)) @@ -57,10 +59,11 @@ def get_platforms(path: Optional[str] = "docker"): def get_docker_tag(platform: str, registry: str) -> str: + """:return: docker tag to be used for the container""" return "{0}/build.{1}".format(registry, platform) -def get_dockerfile(platform: str, path="docker") -> str: +def get_dockerfile(platform: str, path=get_dockerfiles_path()) -> str: return os.path.join(path, "Dockerfile.build.{0}".format(platform)) @@ -68,18 +71,18 @@ def get_docker_binary(use_nvidia_docker: bool) -> str: return "nvidia-docker" if use_nvidia_docker else "docker" -def build_docker(platform: str, docker_binary: str, registry: str, num_retries: int) -> None: +def build_docker(platform: str, docker_binary: str, registry: str, num_retries: int, use_cache: bool) -> str: """ Build a container for the given platform :param platform: Platform :param docker_binary: docker binary to use (docker/nvidia-docker) :param registry: Dockerhub registry name :param num_retries: Number of retries to build the docker image + :param use_cache: will pass cache_from to docker to use the previously pulled tag :return: Id of the top level image """ - tag = get_docker_tag(platform=platform, registry=registry) - logging.info("Building container tagged '%s' with %s", tag, docker_binary) + logging.info("Building docker container tagged '%s' with %s", tag, docker_binary) # # We add a user with the same group as the executing non-root user so files created in the # container match permissions of the local user. Same for the group. 
@@ -90,41 +93,29 @@ def build_docker(platform: str, docker_binary: str, registry: str, num_retries: # cache-from is needed so we use the cached images tagged from the remote via # docker pull see: docker_cache.load_docker_cache # + # This also prevents using local layers for caching: https://github.com/moby/moby/issues/33002 + # So to use local caching, we should omit the cache-from by using --no-dockerhub-cache argument to this + # script. + # # This doesn't work with multi head docker files. - # - - for i in range(num_retries): - logging.info('%d out of %d tries to build the docker image.', i + 1, num_retries) - - cmd = [docker_binary, "build", - "-f", get_dockerfile(platform), - "--build-arg", "USER_ID={}".format(os.getuid()), - "--build-arg", "GROUP_ID={}".format(os.getgid()), - "--cache-from", tag, - "-t", tag, - "docker"] + # + cmd = [docker_binary, "build", + "-f", get_dockerfile(platform), + "--build-arg", "USER_ID={}".format(os.getuid()), + "--build-arg", "GROUP_ID={}".format(os.getgid())] + if use_cache: + cmd.extend(["--cache-from", tag]) + cmd.extend(["-t", tag, get_dockerfiles_path()]) + + @retry(subprocess.CalledProcessError, tries=num_retries) + def run_cmd(): logging.info("Running command: '%s'", ' '.join(cmd)) - try: - check_call(cmd) - # Docker build was successful. Call break to break out of the retry mechanism - break - except subprocess.CalledProcessError as e: - saved_exception = e - logging.error('Failed to build docker image') - # Building the docker image failed. Call continue to trigger the retry mechanism - continue - else: - # Num retries exceeded - logging.exception('Exception during build of docker image', saved_exception) - logging.fatal('Failed to build the docker image, aborting...') - sys.exit(1) + check_call(cmd) + run_cmd() # Get image id by reading the tag. It's guaranteed (except race condition) that the tag exists. 
Otherwise, the # check_call would have failed - image_id = _get_local_image_id(docker_binary=docker_binary, docker_tag=tag) - if not image_id: - raise FileNotFoundError('Unable to find docker image id matching with {}'.format(tag)) - return image_id + return _get_local_image_id(docker_binary=docker_binary, docker_tag=tag) def _get_local_image_id(docker_binary, docker_tag): @@ -134,14 +125,17 @@ def _get_local_image_id(docker_binary, docker_tag): :return: Image id as string or None if tag does not exist """ cmd = [docker_binary, "images", "-q", docker_tag] - image_id_b = subprocess.check_output(cmd) + image_id_b = check_output(cmd) image_id = image_id_b.decode('utf-8').strip() + if not image_id: + raise RuntimeError('Unable to find docker image id matching with tag {}'.format(docker_tag)) return image_id def buildir() -> str: return os.path.join(get_mxnet_root(), "build") + def default_ccache_dir() -> str: # Share ccache across containers if 'CCACHE_DIR' in os.environ: @@ -152,6 +146,7 @@ def default_ccache_dir() -> str: except PermissionError: logging.info('Unable to make dirs at %s, falling back to local temp dir', ccache_dir) # In osx tmpdir is not mountable by default + import platform if platform.system() == 'Darwin': ccache_dir = "/tmp/_mxnet_ccache" os.makedirs(ccache_dir, exist_ok=True) @@ -166,7 +161,7 @@ def container_run(platform: str, local_ccache_dir: str, command: List[str], dry_run: bool = False, - interactive: bool = False) -> str: + interactive: bool = False) -> int: tag = get_docker_tag(platform=platform, registry=docker_registry) mx_root = get_mxnet_root() local_build_folder = buildir() @@ -193,15 +188,13 @@ def container_run(platform: str, logging.info("Executing:\n%s\n", cmd) ret = call(runlist) - docker_run_cmd = ' '.join(runlist) if not dry_run and interactive: into_cmd = deepcopy(runlist) # -ti can't be after the tag, as is interpreted as a command so hook it up after the -u argument idx = into_cmd.index('-u') + 2 into_cmd[idx:idx] = ['-ti'] - cmd = '\\\n\t'.join(into_cmd) + cmd = ' \\\n\t'.join(into_cmd) logging.info("Executing:\n%s\n", cmd) - docker_run_cmd = ' '.join(into_cmd) ret = call(into_cmd) if not dry_run and not interactive and ret != 0: @@ -209,11 +202,12 @@ def container_run(platform: str, logging.error("You can get into the container by adding the -i option") raise subprocess.CalledProcessError(ret, cmd) - return docker_run_cmd + return ret def list_platforms() -> str: - print("\nSupported platforms:\n{}".format('\n'.join(get_platforms()))) + return "\nSupported platforms:\n{}".format('\n'.join(get_platforms())) + def load_docker_cache(tag, docker_registry) -> None: if docker_registry: @@ -221,24 +215,34 @@ def load_docker_cache(tag, docker_registry) -> None: import docker_cache logging.info('Docker cache download is enabled from registry %s', docker_registry) docker_cache.load_docker_cache(registry=docker_registry, docker_tag=tag) + # noinspection PyBroadException except Exception: logging.exception('Unable to retrieve Docker cache. Continue without...') else: logging.info('Distributed docker cache disabled') -def main() -> int: - # We need to be in the same directory than the script so the commands in the dockerfiles work as - # expected. 
But the script can be invoked from a different path - base = os.path.split(os.path.realpath(__file__))[0] - os.chdir(base) - logging.getLogger().setLevel(logging.INFO) +def log_environment(): + instance_id = ec2_instance_id_hostname() + if instance_id: + logging.info("EC2 Instance id: %s", instance_id) + pp = pprint.PrettyPrinter(indent=4) + logging.debug("Build environment: %s", pp.pformat(dict(os.environ))) + - def script_name() -> str: - return os.path.split(sys.argv[0])[1] +def script_name() -> str: + return os.path.split(sys.argv[0])[1] + +def main() -> int: + logging.getLogger().setLevel(logging.INFO) + logging.getLogger("requests").setLevel(logging.WARNING) logging.basicConfig(format='{}: %(asctime)-15s %(message)s'.format(script_name())) + logging.info("MXNet container based build tool.") + log_environment() + chdir_to_script_directory() + parser = argparse.ArgumentParser(description="""Utility for building and testing MXNet on docker containers""", epilog="") parser.add_argument("-p", "--platform", @@ -284,8 +288,10 @@ def script_name() -> str: default=1, type=int) - parser.add_argument("-c", "--cache", action="store_true", - help="Enable docker registry cache") + parser.add_argument("-c", "--no-dockerhub-cache", action="store_true", + help="Disables use of --cache-from option on docker build, allowing docker" + " to use local layers for caching. If absent, we use the cache from dockerhub" + " which is the default.") parser.add_argument("command", help="command to run in the container", @@ -297,35 +303,36 @@ def script_name() -> str: type=str) args = parser.parse_args() + def use_cache(): - return args.cache or under_ci() + return not args.no_dockerhub_cache or under_ci() command = list(chain(*args.command)) docker_binary = get_docker_binary(args.nvidiadocker) - shared_memory_size = args.shared_memory_size - num_docker_build_retires = args.docker_build_retries if args.list: - list_platforms() + print(list_platforms()) elif args.platform: platform = args.platform tag = get_docker_tag(platform=platform, registry=args.docker_registry) if use_cache(): load_docker_cache(tag=tag, docker_registry=args.docker_registry) - build_docker(platform, docker_binary, registry=args.docker_registry, num_retries=num_docker_build_retires) + build_docker(platform=platform, docker_binary=docker_binary, registry=args.docker_registry, + num_retries=args.docker_build_retries, use_cache=use_cache()) if args.build_only: logging.warning("Container was just built. 
Exiting due to build-only.") return 0 if command: - container_run(platform=platform, docker_binary=docker_binary, shared_memory_size=shared_memory_size, + container_run(platform=platform, docker_binary=docker_binary, shared_memory_size=args.shared_memory_size, command=command, docker_registry=args.docker_registry, local_ccache_dir=args.ccache_dir, interactive=args.interactive) elif args.print_docker_run: - print(container_run(platform=platform, docker_binary=docker_binary, shared_memory_size=shared_memory_size, - command=[], dry_run=True, docker_registry=args.docker_registry, local_ccache_dir=args.ccache_dir)) + print(container_run(platform=platform, docker_binary=docker_binary, shared_memory_size=args.shared_memory_size, + command=[], dry_run=True, docker_registry=args.docker_registry, + local_ccache_dir=args.ccache_dir)) elif args.interactive: - container_run(platform=platform, docker_binary=docker_binary, shared_memory_size=shared_memory_size, + container_run(platform=platform, docker_binary=docker_binary, shared_memory_size=args.shared_memory_size, command=command, docker_registry=args.docker_registry, local_ccache_dir=args.ccache_dir, interactive=args.interactive) @@ -334,7 +341,7 @@ def use_cache(): assert not args.interactive, "when running with -i must provide a command" cmd = ["/work/mxnet/ci/docker/runtime_functions.sh", "build_{}".format(platform)] logging.info("No command specified, trying default build: %s", ' '.join(cmd)) - container_run(platform=platform, docker_binary=docker_binary, shared_memory_size=shared_memory_size, + container_run(platform=platform, docker_binary=docker_binary, shared_memory_size=args.shared_memory_size, command=cmd, docker_registry=args.docker_registry, local_ccache_dir=args.ccache_dir) @@ -346,15 +353,20 @@ def use_cache(): tag = get_docker_tag(platform=platform, registry=args.docker_registry) if use_cache(): load_docker_cache(tag=tag, docker_registry=args.docker_registry) - build_docker(platform, docker_binary, args.docker_registry, num_retries=num_docker_build_retires) + build_docker(platform, docker_binary, args.docker_registry, num_retries=args.docker_build_retries, + use_cache=use_cache()) if args.build_only: continue - build_platform = "build_{}".format(platform) - cmd = ["/work/mxnet/ci/docker/runtime_functions.sh", build_platform] shutil.rmtree(buildir(), ignore_errors=True) - container_run(platform=platform, docker_binary=docker_binary, shared_memory_size=shared_memory_size, - command=cmd, docker_registry=args.docker_registry, local_ccache_dir=args.ccache_dir) - plat_buildir = os.path.join(get_mxnet_root(), build_platform) + build_platform = "build_{}".format(platform) + plat_buildir = os.path.abspath(os.path.join(get_mxnet_root(), '..', + "mxnet_{}".format(build_platform))) + if os.path.exists(plat_buildir): + logging.warning("{} already exists, skipping".format(plat_buildir)) + continue + command = ["/work/mxnet/ci/docker/runtime_functions.sh", build_platform] + container_run(platform=platform, docker_binary=docker_binary, shared_memory_size=args.shared_memory_size, + command=command, docker_registry=args.docker_registry, local_ccache_dir=args.ccache_dir) shutil.move(buildir(), plat_buildir) logging.info("Built files left in: %s", plat_buildir) @@ -383,7 +395,7 @@ def use_cache(): ./build.py -a - Builds for all platforms and leaves artifacts in build_ + Builds for all platforms and leaves artifacts in build_. 
""") diff --git a/ci/docker/runtime_functions.sh b/ci/docker/runtime_functions.sh index a124e105838d..0b6a42c3cda0 100755 --- a/ci/docker/runtime_functions.sh +++ b/ci/docker/runtime_functions.sh @@ -593,6 +593,9 @@ build_ubuntu_gpu_cmake() { ninja -v } +build_ubuntu_blc() { + echo "pass" +} # Testing diff --git a/ci/docker_cache.py b/ci/docker_cache.py index 7a6d1106d38d..bebcb25fb8f8 100755 --- a/ci/docker_cache.py +++ b/ci/docker_cache.py @@ -30,6 +30,7 @@ import sys import subprocess import json +from typing import * import build as build_util @@ -59,7 +60,7 @@ def build_save_containers(platforms, registry, load_cache) -> int: return 1 if is_error else 0 -def _build_save_container(platform, registry, load_cache) -> str: +def _build_save_container(platform, registry, load_cache) -> Optional[str]: """ Build image for passed platform and upload the cache to the specified S3 bucket :param platform: Platform @@ -77,7 +78,7 @@ def _build_save_container(platform, registry, load_cache) -> str: logging.debug('Building %s as %s', platform, docker_tag) try: # Increase the number of retries for building the cache. - image_id = build_util.build_docker(docker_binary='docker', platform=platform, registry=registry, num_retries=10) + image_id = build_util.build_docker(docker_binary='docker', platform=platform, registry=registry, num_retries=10, use_cache=True) logging.info('Built %s as %s', docker_tag, image_id) # Push cache to registry diff --git a/ci/util.py b/ci/util.py index 22631f30435f..98605bedf765 100644 --- a/ci/util.py +++ b/ci/util.py @@ -17,6 +17,7 @@ import os import contextlib +import requests def get_mxnet_root() -> str: curpath = os.path.abspath(os.path.dirname(__file__)) @@ -41,3 +42,73 @@ def remember_cwd(): finally: os.chdir(curdir) +def retry(target_exception, tries=4, delay_s=1, backoff=2): + """Retry calling the decorated function using an exponential backoff. + + http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/ + original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry + + :param target_exception: the exception to check. may be a tuple of + exceptions to check + :type target_exception: Exception or tuple + :param tries: number of times to try (not retry) before giving up + :type tries: int + :param delay_s: initial delay between retries in seconds + :type delay_s: int + :param backoff: backoff multiplier e.g. value of 2 will double the delay + each retry + :type backoff: int + """ + import time + from functools import wraps + + def decorated_retry(f): + @wraps(f) + def f_retry(*args, **kwargs): + mtries, mdelay = tries, delay_s + while mtries > 1: + try: + return f(*args, **kwargs) + except target_exception as e: + logging.warning("Exception: %s, Retrying in %d seconds...", str(e), mdelay) + time.sleep(mdelay) + mtries -= 1 + mdelay *= backoff + return f(*args, **kwargs) + + return f_retry # true decorator + + return decorated_retry + + +# noinspection SyntaxError +def under_ci() -> bool: + """:return: True if we run in Jenkins.""" + return 'JOB_NAME' in os.environ + + +def ec2_instance_id_hostname() -> str: + if under_ci(): + result = [] + try: + r = requests.get("http://instance-data/latest/meta-data/instance-id") + if r.status_code == 200: + result.append(r.content.decode()) + r = requests.get("http://instance-data/latest/meta-data/public-hostname") + if r.status_code == 200: + result.append(r.content.decode()) + return ' '.join(result) + except ConnectionError: + pass + return '?' 
+ else: + return '' + + +def chdir_to_script_directory(): + # We need to be in the same directory as the script so the commands in the dockerfiles work as + # expected. But the script can be invoked from a different path + base = os.path.split(os.path.realpath(__file__))[0] + os.chdir(base) + + From 4e03087d7d9ffc340cfb47146f5d8b85bd96f6ac Mon Sep 17 00:00:00 2001 From: Aaron Markham Date: Mon, 27 Aug 2018 11:53:54 -0700 Subject: [PATCH 072/160] adding apache conf promo to home page (#12347) --- docs/_static/mxnet-theme/index.html | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/docs/_static/mxnet-theme/index.html b/docs/_static/mxnet-theme/index.html index 3647e23a736c..c8417ef023cf 100644 --- a/docs/_static/mxnet-theme/index.html +++ b/docs/_static/mxnet-theme/index.html @@ -1,8 +1,8 @@

-        Introducing the Scala Inference API
-        A model loading and inference API is now available for Scala developers. Try out the examples for single shot detection and loading models for image classification.
-        Learn More
+        Introducing the Scala Inference API
+        A model loading and inference API is now available for Scala developers. Try out the examples for single shot detection and loading models for image classification.
+        Learn More
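
[Editor's note on the CI build-tooling patch above: the `retry` decorator added to `ci/util.py` is what replaces the hand-rolled retry loop removed from `build_docker` in `ci/build.py`. The sketch below is a minimal, hypothetical usage example only; the image tag, dockerfile path, and the `from util import retry` import are illustrative assumptions and are not part of the patch.]

```python
import logging
import subprocess
from subprocess import check_call

# Assumption: ci/util.py is importable as `util` when running from the ci/ directory.
from util import retry


@retry(subprocess.CalledProcessError, tries=3)
def build_image(tag: str) -> None:
    # check_call raises CalledProcessError on a non-zero exit code; the decorator
    # catches it and re-runs the build with exponentially increasing delays.
    cmd = ["docker", "build", "--cache-from", tag, "-t", tag, "docker"]
    logging.info("Running command: '%s'", " ".join(cmd))
    check_call(cmd)


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    build_image("example/mxnet-build:ubuntu_cpu")  # placeholder tag, not from the patch
```
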
From 9e48f704d2ebb3a8041bc4105e6db5d4bcd37e02 Mon Sep 17 00:00:00 2001 From: Da Zheng Date: Mon, 27 Aug 2018 11:59:13 -0700 Subject: [PATCH 073/160] Add a tutorial for control flow operators. (#12340) * the first version. * fix. * add to test. * fix. * fix. * fix * fix. * fix. * add title. * add link * fix. --- .../control_flow/ControlFlowTutorial.md | 388 ++++++++++++++++++ docs/tutorials/index.md | 1 + tests/tutorials/test_tutorials.py | 3 + 3 files changed, 392 insertions(+) create mode 100644 docs/tutorials/control_flow/ControlFlowTutorial.md diff --git a/docs/tutorials/control_flow/ControlFlowTutorial.md b/docs/tutorials/control_flow/ControlFlowTutorial.md new file mode 100644 index 000000000000..9e4c66f8521d --- /dev/null +++ b/docs/tutorials/control_flow/ControlFlowTutorial.md @@ -0,0 +1,388 @@ +# Hybridize Gluon models with control flows. + +MXNet currently provides three control flow operators: `cond`, `foreach` and `while_loop`. Like other MXNet operators, they all have a version for NDArray and a version for Symbol. These two versions have exactly the same semantics. We can take advantage of this and use them in Gluon to hybridize models. + +In this tutorial, we use a few examples to demonstrate the use of control flow operators in Gluon and show how a model that requires control flow is hybridized. + +## Prepare running the code + + +```python +import mxnet as mx +from mxnet.gluon import HybridBlock +``` + +## foreach +`foreach` is a for loop that iterates over the first dimension of the input data (it can be an array or a list of arrays). It is defined with the following signature: + +```python +foreach(body, data, init_states, name) => (outputs, states) +``` + +It runs the Python function defined in `body` for every slice from the input arrays. The signature of the `body` function is defined as follows: + +```python +body(data, states) => (outputs, states) +``` + +The inputs of the `body` function have two parts: `data` is a slice of an array (if there is only one input array in `foreach`) or a list of slices (if there are a list of input arrays); `states` are the arrays from the previous iteration. The outputs of the `body` function also have two parts: `outputs` is an array or a list of arrays; `states` is the computation states of the current iteration. `outputs` from all iterations are concatenated as the outputs of `foreach`. + +The following pseudocode illustrates the execution of `foreach`. + +```python +def foreach(body, data, init_states): + states = init_states + outs = [] + + for i in range(data.shape[0]): + s = data[i] + out, states = body(s, states) + outs.append(out) + outs = mx.nd.stack(*outs) + return outs, states +``` + +### Example 1: `foreach` works like map +`foreach` can work like a map function of a functional language. In this case, the states of `foreach` can be an empty list, which means the computation doesn't carry computation states across iterations. + +In this example, we use `foreach` to increase each element's value of an array by one. + + +```python +data = mx.nd.arange(5) +print(data) +``` + + + [ 0. 1. 2. 3. 4.] + + + + +```python +def add1(data, _): + return data + 1, [] + +class Map(HybridBlock): + def hybrid_forward(self, F, data): + out, _ = F.contrib.foreach(add1, data, []) + return out + +map_layer = Map() +out = map_layer(data) +print(out) +``` + + + [[ 1.] + [ 2.] + [ 3.] + [ 4.] + [ 5.]] + + + +We can hybridize the block and run the computation again. It should generate the same result. 
+ + +```python +map_layer.hybridize() +out = map_layer(data) +print(out) +``` + + + [[ 1.] + [ 2.] + [ 3.] + [ 4.] + [ 5.]] + + + +### Example 2: `foreach` works like scan +`foreach` can work like a scan function in a functional language. In this case, the outputs of the Python function is an empty list. + + +```python +def sum(data, state): + return [], state + data + +class Scan(HybridBlock): + def hybrid_forward(self, F, data): + _, state = F.contrib.foreach(sum, data, F.zeros((1))) + return state +scan_layer = Scan() +state = scan_layer(data) +print(data) +print(state) +``` + + + [ 0. 1. 2. 3. 4.] + + + [ 10.] + + + + +```python +scan_layer.hybridize() +state = scan_layer(data) +print(state) +``` + + + [ 10.] + + + +### Example 3: `foreach` with both outputs and states +This is probably the most common use case of `foreach`. We extend the previous scan example and return both output and states. + + +```python +def sum(data, state): + return state + data, state + data + +class ScanV2(HybridBlock): + def hybrid_forward(self, F, data): + out, state = F.contrib.foreach(sum, data, F.zeros((1))) + return out, state +scan_layer = ScanV2() +out, state = scan_layer(data) +print(out) +print(state) +``` + + + [[ 0.] + [ 1.] + [ 3.] + [ 6.] + [ 10.]] + + + [ 10.] + + + + +```python +scan_layer.hybridize() +out, state = scan_layer(data) +print(out) +print(state) +``` + + + [[ 0.] + [ 1.] + [ 3.] + [ 6.] + [ 10.]] + + + [ 10.] + + + +### Example 4: use `foreach` to run an RNN on a variable-length sequence +Previous examples illustrate `foreach` with simple use cases. Here we show an example of processing variable-length sequences with `foreach`. The same idea is used by `dynamic_rnn` in TensorFlow for processing variable-length sequences. + + +```python +class DynamicRNNLayer(HybridBlock): + def __init__(self, cell, prefix=None, params=None): + super(DynamicRNNLayer, self).__init__(prefix=prefix, params=params) + self.cell = cell + def hybrid_forward(self, F, inputs, begin_state, valid_length): + states = begin_state + zeros = [] + for s in states: + zeros.append(F.zeros_like(s)) + # the last state is the iteration number. + states.append(F.zeros((1))) + def loop_body(inputs, states): + cell_states = states[:-1] + # Get the iteration number from the states. + iter_no = states[-1] + out, new_states = self.cell(inputs, cell_states) + # Copy the old state if we have reached the end of a sequence. + for i, state in enumerate(cell_states): + new_states[i] = F.where(F.broadcast_greater(valid_length, iter_no), + new_states[i], state) + new_states.append(iter_no + 1) + return out, new_states + + outputs, states = F.contrib.foreach(loop_body, inputs, states) + outputs = F.SequenceMask(outputs, sequence_length=valid_length, + use_sequence_length=True, axis=0) + # the last state is the iteration number. We don't need it. + return outputs, states[:-1] + + +seq_len = 10 +batch_size = 2 +input_size = 5 +hidden_size = 6 + +rnn_data = mx.nd.normal(loc=0, scale=1, shape=(seq_len, batch_size, input_size)) +init_states = [mx.nd.normal(loc=0, scale=1, shape=(batch_size, hidden_size)) for i in range(2)] +valid_length = mx.nd.round(mx.nd.random.uniform(low=1, high=10, shape=(batch_size))) + +lstm = DynamicRNNLayer(mx.gluon.rnn.LSTMCell(hidden_size)) +lstm.initialize() +res, states = lstm(rnn_data, [x for x in init_states], valid_length) + +lstm.hybridize() +res, states = lstm(rnn_data, [x for x in init_states], valid_length) +``` + +## while_loop +`while_loop` defines a while loop. 
It has the following signature: + +```python +while_loop(cond, body, loop_vars, max_iterations, name) => (outputs, states) +``` + +Instead of running over the first dimension of an array, `while_loop` checks a condition function in every iteration and runs a `body` function for computation. The signature of the `body` function is defined as follows: + +```python +body(state1, state2, ...) => (outputs, states) +``` + +The inputs of the `body` function in `while_loop` are a little different from the one in `foreach`. It has a variable number of input arguments. Each input argument is a loop variable and the number of arguments is determined by the number of loop variables. The outputs of the `body` function also have two parts: `outputs` is an array or a list of arrays; `states` are loop variables and will be passed to the next iteration as inputs of `body`. Like `foreach`, both `outputs` and `states` can be an empty list. `outputs` from all iterations are concatenated as the outputs of `while_loop`. + +### Example 5: scan with while_loop +`while_loop` is more general than `foreach`. We can also use it to iterate over an array and sum all of its values together. In this example, instead of summing over the entire array, we only sum over the first 4 elements. + +**Note**: the output arrays of the current implementation of `while_loop` is determined by `max_iterations`. As such, even though the while loop in this example runs 4 iterations, it still outputs an array of 5 elements. The last element in the output array is actually filled with an arbitrary value. + + +```python +class ScanV2(HybridBlock): + def hybrid_forward(self, F, data): + def sum(state, i): + s = state + data[i] + return s, [s, i + 1] + + def sum_cond(state, i): + return i < 4 + + out, state = F.contrib.while_loop(sum_cond, sum, + [F.zeros((1)), F.zeros((1))], max_iterations=5) + return out, state +scan_layer = ScanV2() +out, state = scan_layer(data) +print(out) +print(state) +``` + + + [[ 0.] + [ 1.] + [ 3.] + [ 6.] + [ 0.]] + + [ + [ 6.] + , + [ 4.] + ] + + +## cond +`cond` defines an if condition. It has the following signature: + +```python +cond(pred, then_func, else_func, name) +``` + +`cond` checks `pred`, which is a symbol or an NDArray with one element. If its value is true, it calls `then_func`. Otherwise, it calls `else_func`. The signature of `then_func` and `else_func` are as follows: + +```python +func() => [outputs] +``` + +`cond` requires all outputs from `then_func` and `else_func` have the same number of Symbols/NDArrays with the same shapes and data types. + +### Example 6: skip RNN computation with cond +Example 4 shows how to process a batch with sequences of different lengths. It performs computation for all steps but discards some of the computation results. + +In this example, we show how to skip computation after we have reached the end of a sequence, whose length is indicated by `length`. The code below only works for a batch with one sequence. 
+ + +```python +class SkipRNNCell(HybridBlock): + def __init__(self, cell, prefix=None, params=None): + super(SkipRNNCell, self).__init__(prefix=prefix, params=params) + self.cell = cell + def hybrid_forward(self, F, i, length, data, states): + def run_rnn(): + return self.cell(data, states) + + def copy_states(): + return F.zeros_like(data), states + out, state = F.contrib.cond(i < length, run_rnn, copy_states) + return out, state + +class RNNLayer(HybridBlock): + def __init__(self, cell, prefix=None, params=None): + super(RNNLayer, self).__init__(prefix=prefix, params=params) + self.cell = SkipRNNCell(cell) + def hybrid_forward(self, F, length, data, init_states): + def body(data, states): + i = states[0] + out, states = self.cell(i, length, data, states[1]) + return out, [i + 1, states] + print() + out, state = F.contrib.foreach(body, data, [F.zeros((1)), init_states]) + return out, state + + +seq_len = 5 +batch_size = 1 +input_size = 3 +hidden_size = 3 + +rnn_data = mx.nd.normal(loc=0, scale=1, shape=(seq_len, batch_size, input_size)) +init_states = [mx.nd.normal(loc=0, scale=1, shape=(batch_size, hidden_size)) for i in range(2)] + +cell = mx.gluon.rnn.LSTMCell(hidden_size) +layer = RNNLayer(cell) +layer.initialize() + +out, states = layer(mx.nd.array([3]), rnn_data, init_states) +print(rnn_data) +print(out) +``` + + () + + [[[-1.25296438 0.387312 -0.41055229]] + + [[ 1.28453672 0.21001032 -0.08666432]] + + [[ 1.46422136 -1.30581355 0.9344402 ]] + + [[ 0.5380863 -0.16038011 0.84187603]] + + [[-1.00553632 3.13221502 -0.4358989 ]]] + + + [[[-0.02620504 0.1605694 0.29636264]] + + [[-0.00474182 0.08719197 0.17757624]] + + [[ 0.00631597 0.04674901 0.12468992]] + + [[ 0. 0. 0. ]] + + [[ 0. 0. 0. ]]] + + + + diff --git a/docs/tutorials/index.md b/docs/tutorials/index.md index 32c4a16a8e0d..3632388b82a9 100644 --- a/docs/tutorials/index.md +++ b/docs/tutorials/index.md @@ -97,6 +97,7 @@ Select API:  * [Fine-Tuning a pre-trained ImageNet model with a new dataset](/faq/finetune.html) * [Large-Scale Multi-Host Multi-GPU Image Classification](/tutorials/vision/large_scale_classification.html) * [Importing an ONNX model into MXNet](/tutorials/onnx/super_resolution.html) + * [Hybridize Gluon models with control flows](/tutorials/control_flow/ControlFlowTutorial.html) * API Guides * Core APIs * NDArray diff --git a/tests/tutorials/test_tutorials.py b/tests/tutorials/test_tutorials.py index 2c8768228d71..503df017ffe0 100644 --- a/tests/tutorials/test_tutorials.py +++ b/tests/tutorials/test_tutorials.py @@ -183,3 +183,6 @@ def test_vision_large_scale_classification(): def test_vision_cnn_visualization(): assert _test_tutorial_nb('vision/cnn_visualization') + +def test_control_flow(): + assert _test_tutorial_nb('control_flow/ControlFlowTutorial') From 3643b276c1f74e45afee16405b6cbb617e01ff86 Mon Sep 17 00:00:00 2001 From: Vishaal Kapoor <40836875+vishaalkapoor@users.noreply.github.com> Date: Mon, 27 Aug 2018 13:08:42 -0700 Subject: [PATCH 074/160] [MXAPPS-581] Disable an additional long test in the SD nightly (#12343) * Disable an additional test in the SD nightly that also takes over the timeout. 
--- tests/nightly/straight_dope/test_notebooks_single_gpu.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/nightly/straight_dope/test_notebooks_single_gpu.py b/tests/nightly/straight_dope/test_notebooks_single_gpu.py index 555b8092b392..5eeb52f516e1 100644 --- a/tests/nightly/straight_dope/test_notebooks_single_gpu.py +++ b/tests/nightly/straight_dope/test_notebooks_single_gpu.py @@ -41,6 +41,7 @@ 'chapter07_distributed-learning/multiple-gpus-scratch', 'chapter07_distributed-learning/multiple-gpus-gluon', 'chapter07_distributed-learning/training-with-multiple-machines', + 'chapter08_computer-vision/visual-question-answer', # > 10 mins. 'chapter11_recommender-systems/intro-recommender-systems', # Early draft, non-working. 'chapter12_time-series/intro-forecasting-gluon', 'chapter12_time-series/intro-forecasting-2-gluon', @@ -226,9 +227,6 @@ def test_object_detection(self): def test_fine_tuning(self): assert _test_notebook('chapter08_computer-vision/fine-tuning') - def test_visual_question_answer(self): - assert _test_notebook('chapter08_computer-vision/visual-question-answer') - # Chapter 9 def test_tree_lstm(self): From cb7dc7f39014e9cb43fd53ee7ad93c961f013545 Mon Sep 17 00:00:00 2001 From: Aaron Markham Date: Mon, 27 Aug 2018 13:24:17 -0700 Subject: [PATCH 075/160] Update ONNX API docs references (#12317) * update onnx API references * update descriptions --- docs/api/python/contrib/onnx.md | 22 +++++++++---------- .../contrib/onnx/mx2onnx/export_model.py | 2 +- .../contrib/onnx/onnx2mx/import_model.py | 3 ++- 3 files changed, 14 insertions(+), 13 deletions(-) diff --git a/docs/api/python/contrib/onnx.md b/docs/api/python/contrib/onnx.md index 449941459163..f8210ad6a001 100644 --- a/docs/api/python/contrib/onnx.md +++ b/docs/api/python/contrib/onnx.md @@ -22,10 +22,9 @@ This document describes all the ONNX-MXNet APIs. .. autosummary:: :nosignatures: - mxnet.contrib.onnx.import_model - mxnet.contrib.onnx.get_model_metadata - mxnet.contrib.onnx.import_to_gluon - mxnet.contrib.onnx.export_model + mxnet.contrib.onnx.onnx2mx.import_model + mxnet.contrib.onnx.onnx2mx.import_to_gluon + mxnet.contrib.onnx.mx2onnx.export_model ``` ## ONNX Tutorials @@ -33,7 +32,7 @@ This document describes all the ONNX-MXNet APIs. ```eval_rst .. toctree:: :maxdepth: 1 - + /tutorials/onnx/super_resolution.md /tutorials/onnx/export_mxnet_to_onnx.md /tutorials/onnx/inference_on_onnx_model.md @@ -43,7 +42,7 @@ This document describes all the ONNX-MXNet APIs. ## ONNX Examples * Face Recognition with [ArcFace](https://github.com/onnx/models/tree/master/models/face_recognition/ArcFace) -* Image Classification with [MobileNet](https://github.com/onnx/models/tree/master/models/image_classification/mobilenet), [ResNet](https://github.com/onnx/models/tree/master/models/image_classification/resnet), [SqueezeNet](https://github.com/onnx/models/tree/master/models/image_classification/squeezenet), [VGG](https://github.com/onnx/models/tree/master/models/image_classification/vgg) +* Image Classification with [MobileNet](https://github.com/onnx/models/tree/master/models/image_classification/mobilenet), [ResNet](https://github.com/onnx/models/tree/master/models/image_classification/resnet), [SqueezeNet](https://github.com/onnx/models/tree/master/models/image_classification/squeezenet), [VGG](https://github.com/onnx/models/tree/master/models/image_classification/vgg) ## API Reference @@ -51,11 +50,12 @@ This document describes all the ONNX-MXNet APIs. ```eval_rst -.. 
automodule:: mxnet.contrib.onnx.import_model -.. automodule:: mxnet.contrib.onnx.get_model_metadata -.. automodule:: mxnet.contrib.onnx.import_to_gluon -.. automodule:: mxnet.contrib.onnx.export_model - +.. automodule:: mxnet.contrib.onnx.onnx2mx.import_model + :members: import_model, get_model_metadata +.. automodule:: mxnet.contrib.onnx.onnx2mx.import_to_gluon + :members: import_to_gluon +.. automodule:: mxnet.contrib.onnx.mx2onnx.export_model + :members: export_model ``` diff --git a/python/mxnet/contrib/onnx/mx2onnx/export_model.py b/python/mxnet/contrib/onnx/mx2onnx/export_model.py index 0dbfdc1d7b92..33292bf664a7 100644 --- a/python/mxnet/contrib/onnx/mx2onnx/export_model.py +++ b/python/mxnet/contrib/onnx/mx2onnx/export_model.py @@ -18,7 +18,7 @@ # coding: utf-8 #pylint: disable-msg=too-many-arguments -"""export function""" +"""Exports an MXNet model to the ONNX model format""" from __future__ import absolute_import from __future__ import division from __future__ import print_function diff --git a/python/mxnet/contrib/onnx/onnx2mx/import_model.py b/python/mxnet/contrib/onnx/onnx2mx/import_model.py index 4e4d78637557..e190c3bdadc0 100644 --- a/python/mxnet/contrib/onnx/onnx2mx/import_model.py +++ b/python/mxnet/contrib/onnx/onnx2mx/import_model.py @@ -16,7 +16,7 @@ # under the License. # coding: utf-8 -"""import function""" +"""Functions for importing ONNX models to MXNet and for checking metadata""" # pylint: disable=no-member from .import_onnx import GraphProto @@ -72,6 +72,7 @@ def get_model_metadata(model_file): 'output_tensor_data' : } + """ graph = GraphProto() try: From d9ea96ad3c367a2b93d68d3e0edb63ea63c6841c Mon Sep 17 00:00:00 2001 From: Haibin Lin Date: Mon, 27 Aug 2018 13:25:57 -0700 Subject: [PATCH 076/160] Documentation update related to sparse support (#12367) * Update sparse.md * Update sparse.md * Update csr.md * Update row_sparse.md * Update train.md --- docs/api/python/ndarray/sparse.md | 10 +++------- docs/api/python/symbol/sparse.md | 7 +++---- docs/tutorials/sparse/csr.md | 4 +--- docs/tutorials/sparse/row_sparse.md | 7 +------ docs/tutorials/sparse/train.md | 2 +- 5 files changed, 9 insertions(+), 21 deletions(-) diff --git a/docs/api/python/ndarray/sparse.md b/docs/api/python/ndarray/sparse.md index 85d33b193a6b..2ade059a70c9 100644 --- a/docs/api/python/ndarray/sparse.md +++ b/docs/api/python/ndarray/sparse.md @@ -16,7 +16,7 @@ This document lists the routines of the *n*-dimensional sparse array package: ``` The `CSRNDArray` and `RowSparseNDArray` API, defined in the `ndarray.sparse` package, provides -imperative sparse tensor operations on **CPU**. +imperative sparse tensor operations. An `CSRNDArray` inherits from `NDArray`, and represents a two-dimensional, fixed-size array in compressed sparse row format. @@ -63,16 +63,13 @@ A detailed tutorial is available at ```eval_rst -.. note:: ``mxnet.ndarray.sparse.RowSparseNDArray`` and ``mxnet.ndarray.sparse.CSRNDArray`` DO NOT support the ``mxnet.gluon`` high-level interface yet. - .. note:: ``mxnet.ndarray.sparse`` is similar to ``mxnet.ndarray`` in some aspects. But the differences are not negligible. For instance: - - Only a subset of operators in ``mxnet.ndarray`` have specialized implementations in ``mxnet.ndarray.sparse``. - Operators such as Convolution and broadcasting do not have sparse implementations yet. + - Only a subset of operators in ``mxnet.ndarray`` have efficient sparse implementations in ``mxnet.ndarray.sparse``. 
+ - If an operator do not occur in the ``mxnet.ndarray.sparse`` namespace, that means the operator does not have an efficient sparse implementation yet. If sparse inputs are passed to such an operator, it will convert inputs to the dense format and fallback to the already available dense implementation. - The storage types (``stype``) of sparse operators' outputs depend on the storage types of inputs. By default the operators not available in ``mxnet.ndarray.sparse`` infer "default" (dense) storage type for outputs. Please refer to the [API Reference](#api-reference) section for further details on specific operators. - - GPU support for ``mxnet.ndarray.sparse`` is experimental. Only a few sparse operators are supported on GPU such as ``sparse.dot``. .. note:: ``mxnet.ndarray.sparse.CSRNDArray`` is similar to ``scipy.sparse.csr_matrix`` in some aspects. But they differ in a few aspects: @@ -559,7 +556,6 @@ We summarize the interface for each class in the following sections. sgd_update sgd_mom_update adam_update - ftrl_update adagrad_update ``` diff --git a/docs/api/python/symbol/sparse.md b/docs/api/python/symbol/sparse.md index d26ba07853de..cd8272cedd7d 100644 --- a/docs/api/python/symbol/sparse.md +++ b/docs/api/python/symbol/sparse.md @@ -16,7 +16,7 @@ This document lists the routines of the sparse symbolic expression package: ``` The `Sparse Symbol` API, defined in the `symbol.sparse` package, provides -sparse neural network graphs and auto-differentiation on CPU. +sparse neural network graphs and auto-differentiation. The storage type of a variable is speficied by the `stype` attribute of the variable. The storage type of a symbolic expression is inferred based on the storage types of the variables and the operators. @@ -43,12 +43,11 @@ array([ 1., 1.], .. note:: most operators provided in ``mxnet.symbol.sparse`` are similar to those in ``mxnet.symbol`` although there are few differences: - - Only a subset of operators in ``mxnet.symbol`` have specialized implementations in ``mxnet.symbol.sparse``. - Operators such as reduction and broadcasting do not have sparse implementations yet. + - Only a subset of operators in ``mxnet.symbol`` have efficient sparse implementations in ``mxnet.symbol.sparse``. + - If an operator do not occur in the ``mxnet.symbol.sparse`` namespace, that means the operator does not have an efficient sparse implementation yet. If sparse inputs are passed to such an operator, it will convert inputs to the dense format and fallback to the already available dense implementation. - The storage types (``stype``) of sparse operators' outputs depend on the storage types of inputs. By default the operators not available in ``mxnet.symbol.sparse`` infer "default" (dense) storage type for outputs. Please refer to the API reference section for further details on specific operators. - - GPU support for ``mxnet.symbol.sparse`` is experimental. ``` diff --git a/docs/tutorials/sparse/csr.md b/docs/tutorials/sparse/csr.md index c2842ac16bd9..0aede1ab4313 100644 --- a/docs/tutorials/sparse/csr.md +++ b/docs/tutorials/sparse/csr.md @@ -512,9 +512,7 @@ Note that in the file the column indices are expected to be sorted in ascending ### GPU Support -By default, `CSRNDArray` operators are executed on CPU. In MXNet, GPU support for `CSRNDArray` is experimental with only a few sparse operators such as [dot](https://mxnet.incubator.apache.org/api/python/ndarray/sparse.html#mxnet.ndarray.sparse.dot). 
- -To create a `CSRNDArray` on a GPU, we need to explicitly specify the context: +By default, `CSRNDArray` operators are executed on CPU. To create a `CSRNDArray` on a GPU, we need to explicitly specify the context: **Note** If a GPU is not available, an error will be reported in the following section. In order to execute it a cpu, set `gpu_device` to `mx.cpu()`. diff --git a/docs/tutorials/sparse/row_sparse.md b/docs/tutorials/sparse/row_sparse.md index c4cab75df543..27cc0d3d903e 100644 --- a/docs/tutorials/sparse/row_sparse.md +++ b/docs/tutorials/sparse/row_sparse.md @@ -541,12 +541,7 @@ Note that only [mxnet.optimizer.SGD](https://mxnet.incubator.apache.org/api/pyth ### GPU Support -By default, RowSparseNDArray operators are executed on CPU. In MXNet, GPU support for RowSparseNDArray is limited -to a few sparse operators such as [sgd_update](https://mxnet.incubator.apache.org/api/python/ndarray/sparse.html#mxnet.ndarray.sparse.sgd_update), -[dot](https://mxnet.incubator.apache.org/api/python/ndarray/sparse.html#mxnet.ndarray.sparse.dot) and -[Embedding](https://mxnet.incubator.apache.org/api/python/ndarray/ndarray.html#mxnet.ndarray.Embedding). - -To create a RowSparseNDArray on gpu, we need to explicitly specify the context: +By default, RowSparseNDArray operators are executed on CPU. To create a RowSparseNDArray on gpu, we need to explicitly specify the context: **Note** If a GPU is not available, an error will be reported in the following section. In order to execute it on a cpu, set gpu_device to mx.cpu(). diff --git a/docs/tutorials/sparse/train.md b/docs/tutorials/sparse/train.md index 7472fcd14ca3..fde4c0e65521 100644 --- a/docs/tutorials/sparse/train.md +++ b/docs/tutorials/sparse/train.md @@ -314,7 +314,7 @@ assert metric.get()[1] < 1, "Achieved MSE (%f) is larger than expected (1.0)" % ### Training the model with multiple machines or multiple devices -To train a sparse model with multiple machines, you need to call `prepare` before `forward`, or `save_checkpoint`. +Distributed training with `row_sparse` weights and gradients are supported in MXNet, which significantly reduces communication cost for large models. To train a sparse model with multiple machines, you need to call `prepare` before `forward`, or `save_checkpoint`. Please refer to the example in [mxnet/example/sparse/linear_classification](https://github.com/apache/incubator-mxnet/tree/master/example/sparse/linear_classification) for more details. 
From d234b32300d198cb60e6b821292b6f64723dec80 Mon Sep 17 00:00:00 2001 From: Anirudh Date: Mon, 27 Aug 2018 21:08:34 -0700 Subject: [PATCH 077/160] [MXNET-690] Add tests for initializers in R (#12360) * Add tests for intializer * lint and indent --- R-package/tests/testthat/get_data.R | 91 +++--- R-package/tests/testthat/test_img_seg.R | 155 ++++----- R-package/tests/testthat/test_initializer.R | 114 +++++++ R-package/tests/testthat/test_io.R | 47 +-- R-package/tests/testthat/test_model.R | 333 ++++++++------------ R-package/tests/testthat/test_ndarray.R | 143 +++++---- R-package/tests/testthat/test_optimizer.R | 272 +++++++--------- R-package/tests/testthat/test_random.R | 20 +- R-package/tests/testthat/test_symbol.R | 88 +++--- 9 files changed, 642 insertions(+), 621 deletions(-) create mode 100644 R-package/tests/testthat/test_initializer.R diff --git a/R-package/tests/testthat/get_data.R b/R-package/tests/testthat/get_data.R index 2676b20fa80b..0e27894498b0 100644 --- a/R-package/tests/testthat/get_data.R +++ b/R-package/tests/testthat/get_data.R @@ -3,13 +3,11 @@ GetMNIST_ubyte <- function() { if (!dir.exists("data")) { dir.create("data/") } - if (!file.exists('data/train-images-idx3-ubyte') | - !file.exists('data/train-labels-idx1-ubyte') | - !file.exists('data/t10k-images-idx3-ubyte') | - !file.exists('data/t10k-labels-idx1-ubyte')) { - download.file('http://data.mxnet.io/mxnet/data/mnist.zip', destfile = 'data/mnist.zip') - unzip('data/mnist.zip', exdir = 'data/') - file.remove('data/mnist.zip') + if (!file.exists("data/train-images-idx3-ubyte") | !file.exists("data/train-labels-idx1-ubyte") | + !file.exists("data/t10k-images-idx3-ubyte") | !file.exists("data/t10k-labels-idx1-ubyte")) { + download.file("http://data.mxnet.io/mxnet/data/mnist.zip", destfile = "data/mnist.zip") + unzip("data/mnist.zip", exdir = "data/") + file.remove("data/mnist.zip") } } @@ -17,12 +15,11 @@ GetMNIST_csv <- function() { if (!dir.exists("data")) { dir.create("data/") } - if (!file.exists('data/train.csv') | - !file.exists('data/test.csv')) { - download.file('https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/R/data/mnist_csv.zip', - destfile = 'data/mnist_csv.zip') - unzip('data/mnist_csv.zip', exdir = 'data/') - file.remove('data/mnist_csv.zip') + if (!file.exists("data/train.csv") | !file.exists("data/test.csv")) { + download.file("https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/R/data/mnist_csv.zip", + destfile = "data/mnist_csv.zip") + unzip("data/mnist_csv.zip", exdir = "data/") + file.remove("data/mnist_csv.zip") } } @@ -30,14 +27,11 @@ GetCifar10 <- function() { if (!dir.exists("data")) { dir.create("data/") } - if (!file.exists('data/cifar/train.rec') | - !file.exists('data/cifar/test.rec') | - !file.exists('data/cifar/train.lst') | - !file.exists('data/cifar/test.lst')) { - download.file('http://data.mxnet.io/mxnet/data/cifar10.zip', - destfile = 'data/cifar10.zip') - unzip('data/cifar10.zip', exdir = 'data/') - file.remove('data/cifar10.zip') + if (!file.exists("data/cifar/train.rec") | !file.exists("data/cifar/test.rec") | + !file.exists("data/cifar/train.lst") | !file.exists("data/cifar/test.lst")) { + download.file("http://data.mxnet.io/mxnet/data/cifar10.zip", destfile = "data/cifar10.zip") + unzip("data/cifar10.zip", exdir = "data/") + file.remove("data/cifar10.zip") } } @@ -45,13 +39,13 @@ GetInception <- function() { if (!dir.exists("model")) { dir.create("model/") } - if (!file.exists('model/Inception-BN-0126.params')) { - 
download.file('http://data.dmlc.ml/models/imagenet/inception-bn/Inception-BN-0126.params', - destfile = 'model/Inception-BN-0126.params') + if (!file.exists("model/Inception-BN-0126.params")) { + download.file("http://data.dmlc.ml/models/imagenet/inception-bn/Inception-BN-0126.params", + destfile = "model/Inception-BN-0126.params") } - if (!file.exists('model/Inception-BN-symbol.json')) { - download.file('http://data.dmlc.ml/models/imagenet/inception-bn/Inception-BN-symbol.json', - destfile = 'model/Inception-BN-symbol.json') + if (!file.exists("model/Inception-BN-symbol.json")) { + download.file("http://data.dmlc.ml/models/imagenet/inception-bn/Inception-BN-symbol.json", + destfile = "model/Inception-BN-symbol.json") } } @@ -59,12 +53,11 @@ GetCatDog <- function() { if (!dir.exists("data")) { dir.create("data/") } - if (!file.exists('data/cats_dogs/cats_dogs_train.rec') | - !file.exists('data/cats_dogs/cats_dogs_val.rec')) { - download.file('https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/R/data/cats_dogs.zip', - destfile = 'data/cats_dogs.zip') - unzip('data/cats_dogs.zip', exdir = 'data/') - file.remove('data/cats_dogs.zip') + if (!file.exists("data/cats_dogs/cats_dogs_train.rec") | !file.exists("data/cats_dogs/cats_dogs_val.rec")) { + download.file("https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/R/data/cats_dogs.zip", + destfile = "data/cats_dogs.zip") + unzip("data/cats_dogs.zip", exdir = "data/") + file.remove("data/cats_dogs.zip") } } @@ -72,11 +65,11 @@ GetMovieLens <- function() { if (!dir.exists("data")) { dir.create("data/") } - if (!file.exists('data/ml-100k/u.data')) { - download.file('http://files.grouplens.org/datasets/movielens/ml-100k.zip', - destfile = 'data/ml-100k.zip') - unzip('data/ml-100k.zip', exdir = 'data/') - file.remove('data/ml-100k.zip') + if (!file.exists("data/ml-100k/u.data")) { + download.file("http://files.grouplens.org/datasets/movielens/ml-100k.zip", + destfile = "data/ml-100k.zip") + unzip("data/ml-100k.zip", exdir = "data/") + file.remove("data/ml-100k.zip") } } @@ -84,12 +77,11 @@ GetISBI_data <- function() { if (!dir.exists("data")) { dir.create("data/") } - if (!file.exists('data/ISBI/train-volume.tif') | - !file.exists('data/ISBI/train-labels.tif')) { - download.file('https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/R/data/ISBI.zip', - destfile = 'data/ISBI.zip') - unzip('data/ISBI.zip', exdir = 'data/') - file.remove('data/ISBI.zip') + if (!file.exists("data/ISBI/train-volume.tif") | !file.exists("data/ISBI/train-labels.tif")) { + download.file("https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/R/data/ISBI.zip", + destfile = "data/ISBI.zip") + unzip("data/ISBI.zip", exdir = "data/") + file.remove("data/ISBI.zip") } } @@ -97,11 +89,10 @@ GetCaptcha_data <- function() { if (!dir.exists("data")) { dir.create("data/") } - if (!file.exists('data/captcha_example/captcha_train.rec') | - !file.exists('data/captcha_example/captcha_test.rec')) { - download.file('https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/R/data/captcha_example.zip', - destfile = 'data/captcha_example.zip') - unzip('data/captcha_example.zip', exdir = 'data/') - file.remove('data/captcha_example.zip') + if (!file.exists("data/captcha_example/captcha_train.rec") | !file.exists("data/captcha_example/captcha_test.rec")) { + download.file("https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/R/data/captcha_example.zip", + destfile = "data/captcha_example.zip") + unzip("data/captcha_example.zip", exdir = "data/") + 
file.remove("data/captcha_example.zip") } } diff --git a/R-package/tests/testthat/test_img_seg.R b/R-package/tests/testthat/test_img_seg.R index b3400cd3bbc6..9b63f5078fa1 100644 --- a/R-package/tests/testthat/test_img_seg.R +++ b/R-package/tests/testthat/test_img_seg.R @@ -2,7 +2,8 @@ require(mxnet) source("get_data.R") -if (Sys.getenv("R_GPU_ENABLE") != "" & as.integer(Sys.getenv("R_GPU_ENABLE")) == 1) { +if (Sys.getenv("R_GPU_ENABLE") != "" & as.integer(Sys.getenv("R_GPU_ENABLE")) == + 1) { mx.ctx.default(new = mx.gpu()) message("Using GPU for testing.") } @@ -12,76 +13,89 @@ print_inferred_shape <- function(net) { print(slist$out.shapes) } -convolution_module <- function(net, kernel_size, pad_size, filter_count, - stride = c(1, 1), work_space = 2048, batch_norm = TRUE, - down_pool = FALSE, up_pool = FALSE, act_type = "relu", - convolution = TRUE) { +convolution_module <- function(net, kernel_size, pad_size, filter_count, stride = c(1, + 1), work_space = 2048, batch_norm = TRUE, down_pool = FALSE, up_pool = FALSE, + act_type = "relu", convolution = TRUE) { if (up_pool) { - net = mx.symbol.Deconvolution(net, kernel = c(2, 2), pad = c(0, 0), - stride = c(2, 2), num_filter = filter_count, - workspace = work_space) - net = mx.symbol.BatchNorm(net) + net <- mx.symbol.Deconvolution(net, kernel = c(2, 2), pad = c(0, 0), stride = c(2, + 2), num_filter = filter_count, workspace = work_space) + net <- mx.symbol.BatchNorm(net) if (act_type != "") { - net = mx.symbol.Activation(net, act_type = act_type) + net <- mx.symbol.Activation(net, act_type = act_type) } } if (convolution) { - conv = mx.symbol.Convolution(data = net, kernel = kernel_size, stride = stride, - pad = pad_size, num_filter = filter_count, - workspace = work_space) - net = conv + conv <- mx.symbol.Convolution(data = net, kernel = kernel_size, stride = stride, + pad = pad_size, num_filter = filter_count, workspace = work_space) + net <- conv } if (batch_norm) { - net = mx.symbol.BatchNorm(net) + net <- mx.symbol.BatchNorm(net) } if (act_type != "") { - net = mx.symbol.Activation(net, act_type = act_type) + net <- mx.symbol.Activation(net, act_type = act_type) } if (down_pool) { - pool = mx.symbol.Pooling(net, pool_type = "max", kernel = c(2, 2), stride = c(2, 2)) - net = pool + pool <- mx.symbol.Pooling(net, pool_type = "max", kernel = c(2, 2), stride = c(2, + 2)) + net <- pool } print_inferred_shape(net) return(net) } get_unet <- function() { - data = mx.symbol.Variable('data') - kernel_size = c(3, 3) - pad_size = c(1, 1) - filter_count = 32 - pool1 = convolution_module(data, kernel_size, pad_size, filter_count = filter_count, down_pool = TRUE) - net = pool1 - pool2 = convolution_module(net, kernel_size, pad_size, filter_count = filter_count * 2, down_pool = TRUE) - net = pool2 - pool3 = convolution_module(net, kernel_size, pad_size, filter_count = filter_count * 4, down_pool = TRUE) - net = pool3 - pool4 = convolution_module(net, kernel_size, pad_size, filter_count = filter_count * 4, down_pool = TRUE) - net = pool4 - net = mx.symbol.Dropout(net) - pool5 = convolution_module(net, kernel_size, pad_size, filter_count = filter_count * 8, down_pool = TRUE) - net = pool5 - net = convolution_module(net, kernel_size, pad_size, filter_count = filter_count * 4, up_pool = TRUE) - net = convolution_module(net, kernel_size, pad_size = c(2, 2), filter_count = filter_count * 4, up_pool = TRUE) - net = mx.symbol.Crop(net, pool3, num.args = 2) - net = mx.symbol.concat(c(pool3, net), num.args = 2) - net = mx.symbol.Dropout(net) - net = 
convolution_module(net, kernel_size, pad_size, filter_count = filter_count * 4) - net = convolution_module(net, kernel_size, pad_size, filter_count = filter_count * 4, up_pool = TRUE) + data <- mx.symbol.Variable("data") + kernel_size <- c(3, 3) + pad_size <- c(1, 1) + filter_count <- 32 + pool1 <- convolution_module(data, kernel_size, pad_size, filter_count = filter_count, + down_pool = TRUE) + net <- pool1 + pool2 <- convolution_module(net, kernel_size, pad_size, filter_count = filter_count * + 2, down_pool = TRUE) + net <- pool2 + pool3 <- convolution_module(net, kernel_size, pad_size, filter_count = filter_count * + 4, down_pool = TRUE) + net <- pool3 + pool4 <- convolution_module(net, kernel_size, pad_size, filter_count = filter_count * + 4, down_pool = TRUE) + net <- pool4 + net <- mx.symbol.Dropout(net) + pool5 <- convolution_module(net, kernel_size, pad_size, filter_count = filter_count * + 8, down_pool = TRUE) + net <- pool5 + net <- convolution_module(net, kernel_size, pad_size, filter_count = filter_count * + 4, up_pool = TRUE) + net <- convolution_module(net, kernel_size, pad_size = c(2, 2), filter_count = filter_count * + 4, up_pool = TRUE) + net <- mx.symbol.Crop(net, pool3, num.args = 2) + net <- mx.symbol.concat(c(pool3, net), num.args = 2) + net <- mx.symbol.Dropout(net) + net <- convolution_module(net, kernel_size, pad_size, filter_count = filter_count * + 4) + net <- convolution_module(net, kernel_size, pad_size, filter_count = filter_count * + 4, up_pool = TRUE) - net = mx.symbol.Concat(c(pool2, net), num.args = 2) - net = mx.symbol.Dropout(net) - net = convolution_module(net, kernel_size, pad_size, filter_count = filter_count * 4) - net = convolution_module(net, kernel_size, pad_size, filter_count = filter_count * 4, up_pool = TRUE) - convolution_module(net, kernel_size, pad_size, filter_count = filter_count * 4) - net = mx.symbol.Concat(c(pool1, net), num.args = 2) - net = mx.symbol.Dropout(net) - net = convolution_module(net, kernel_size, pad_size, filter_count = filter_count * 2) - net = convolution_module(net, kernel_size, pad_size, filter_count = filter_count * 2, up_pool = TRUE) - net = convolution_module(net, kernel_size, pad_size, filter_count = 1, batch_norm = FALSE, act_type = "") - net = mx.symbol.SoftmaxOutput(data = net, name = 'sm') + net <- mx.symbol.Concat(c(pool2, net), num.args = 2) + net <- mx.symbol.Dropout(net) + net <- convolution_module(net, kernel_size, pad_size, filter_count = filter_count * + 4) + net <- convolution_module(net, kernel_size, pad_size, filter_count = filter_count * + 4, up_pool = TRUE) + convolution_module(net, kernel_size, pad_size, filter_count = filter_count * + 4) + net <- mx.symbol.Concat(c(pool1, net), num.args = 2) + net <- mx.symbol.Dropout(net) + net <- convolution_module(net, kernel_size, pad_size, filter_count = filter_count * + 2) + net <- convolution_module(net, kernel_size, pad_size, filter_count = filter_count * + 2, up_pool = TRUE) + net <- convolution_module(net, kernel_size, pad_size, filter_count = 1, batch_norm = FALSE, + act_type = "") + net <- mx.symbol.SoftmaxOutput(data = net, name = "sm") return(net) } @@ -89,47 +103,46 @@ context("Image segmentation") test_that("UNET", { list.of.packages <- c("imager") - new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])] - if(length(new.packages)) install.packages(new.packages, repos = "https://cloud.r-project.org/") + new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[, + "Package"])] + if 
(length(new.packages)) + install.packages(new.packages, repos = "https://cloud.r-project.org/") GetISBI_data() library(imager) IMG_SIZE <- 168 files <- list.files(path = "data/ISBI/train-volume/") - a = 'data/ISBI/train-volume/' - filess = paste(a, files, sep = '') - list_of_images = lapply(filess, function(x) { + a <- "data/ISBI/train-volume/" + filess <- paste(a, files, sep = "") + list_of_images <- lapply(filess, function(x) { x <- load.image(x) y <- resize(x, size_x = IMG_SIZE, size_y = IMG_SIZE) }) - train.x = do.call('cbind', lapply(list_of_images, as.vector)) + train.x <- do.call("cbind", lapply(list_of_images, as.vector)) train.array <- train.x dim(train.array) <- c(IMG_SIZE, IMG_SIZE, 1, 30) files <- list.files(path = "data/ISBI/train-labels") - b = 'data/ISBI/train-labels/' - filess = paste(b, files, sep = '') - list_of_images = lapply(filess, function(x) { + b <- "data/ISBI/train-labels/" + filess <- paste(b, files, sep = "") + list_of_images <- lapply(filess, function(x) { x <- load.image(x) y <- resize(x, size_x = IMG_SIZE, size_y = IMG_SIZE) }) - train.y = do.call('cbind', lapply(list_of_images, as.vector)) + train.y <- do.call("cbind", lapply(list_of_images, as.vector)) - train.y[which(train.y < 0.5)] = 0 - train.y[which(train.y > 0.5)] = 1 - train.y.array = train.y - dim(train.y.array) = c(IMG_SIZE, IMG_SIZE, 1, 30) + train.y[which(train.y < 0.5)] <- 0 + train.y[which(train.y > 0.5)] <- 1 + train.y.array <- train.y + dim(train.y.array) <- c(IMG_SIZE, IMG_SIZE, 1, 30) devices <- mx.ctx.default() mx.set.seed(0) net <- get_unet() - model <- mx.model.FeedForward.create(net, X = train.array, y = train.y.array, - ctx = devices, num.round = 2, - initializer = mx.init.normal(sqrt(2 / 576)), - learning.rate = 0.05, - momentum = 0.99, - array.batch.size = 2) + model <- mx.model.FeedForward.create(net, X = train.array, y = train.y.array, + ctx = devices, num.round = 2, initializer = mx.init.normal(sqrt(2/576)), + learning.rate = 0.05, momentum = 0.99, array.batch.size = 2) }) diff --git a/R-package/tests/testthat/test_initializer.R b/R-package/tests/testthat/test_initializer.R new file mode 100644 index 000000000000..c005244d02b0 --- /dev/null +++ b/R-package/tests/testthat/test_initializer.R @@ -0,0 +1,114 @@ +require(mxnet) + +context("initializer") + +testthat("mx.init.uniform", { + uniform_init <- mx.init.uniform(scale = 1) + expect_equal(typeof(uniform_init), "closure") + + X_bias <- uniform_init("X_bias", c(1, 10000), ctx = mx.ctx.default()) + expect_equal(X_bias, mx.nd.zeros(c(1, 10000))) + + X_weight <- uniform_init("X_weight", c(5, 10, 10000), ctx = mx.ctx.default()) + expect_equal(X_weight >= -1, mx.nd.ones(c(5, 10, 10000))) + expect_equal(X_weight <= 1, mx.nd.ones(c(5, 10, 10000))) + mean_weight <- mean(as.array(X_weight)) + expect_equal(mean_weight, 0, tolerance = 0.01) +}) + +testthat("mx.init.normal", { + normal_init <- mx.init.normal(sd = 0.1) + expect_equal(typeof(normal_init), "closure") + + X_bias <- normal_init("X_bias", c(1, 10000), ctx = mx.ctx.default()) + expect_equal(X_bias, mx.nd.zeros(c(1, 10000))) + + X_weight <- normal_init("X_weight", c(5, 10, 10000), ctx = mx.ctx.default()) + weight_mean <- mean(as.array(X_weight)) + weight_sd <- sd(as.array(X_weight)) + expect_equal(weight_mean, 0, tolerance = 0.01) + expect_equal(weight_sd, 0.1, tolerance = 0.01) +}) + +testthat("mx.init.Xavier", { + xavier_init <- mx.init.Xavier() + expect_equal(typeof(xavier_init), "closure") + + # default parameters + shape <- c(2, 3, 324, 324) + fan_out <- shape[length(shape)] + 
fan_in <- prod(shape[-length(shape)]) + + X_bias <- xavier_init("X_bias", shape = shape, ctx = mx.ctx.default()) + expect_equal(X_bias, mx.nd.zeros(shape)) + + X_weight <- xavier_init("X_weight", shape = shape, ctx = mx.ctx.default()) + scale <- sqrt(3/((fan_in + fan_out)/2)) + expect_equal(X_weight >= -scale, mx.nd.ones(shape)) + expect_equal(X_weight <= scale, mx.nd.ones(shape)) + weight_mean <- mean(as.array(X_weight)) + expect_equal(weight_mean, 0, tolerance = 0.01) + + for (dist_type in c("gaussian", "uniform")) { + for (factor_type in c("in", "out", "avg")) { + xavier_init <- mx.init.Xavier(rnd_type = dist_type, factor_type = factor_type, + magnitude = 200) + expect_equal(typeof(xavier_init), "closure") + + X_weight <- xavier_init("X_weight", shape = shape, ctx = mx.ctx.default()) + factor_val <- switch(factor_type, avg = (fan_in + fan_out)/2, `in` = fan_in, + out = fan_out) + scale <- sqrt(200/factor_val) + + if (dist_type == "gaussian") { + weight_mean <- mean(as.array(X_weight)) + weight_sd <- sd(as.array(X_weight)) + expect_equal(weight_mean, 0, tolerance = 0.01) + expect_equal(weight_sd, scale, tolerance = 0.01) + } else { + expect_equal(X_weight >= -scale, mx.nd.ones(shape)) + expect_equal(X_weight <= scale, mx.nd.ones(shape)) + weight_mean <- mean(as.array(X_weight)) + expect_equal(weight_mean, 0, tolerance = 0.01) + } + } + } +}) + +testthat("mx.init.internal.default", { + sample_bias <- mxnet:::mx.init.internal.default("X_bias", c(5, 10, 100), ctx = mx.ctx.default()) + expect_equal(sample_bias, mx.nd.zeros(c(5, 10, 100))) + + sample_gamma <- mxnet:::mx.init.internal.default("X_gamma", c(5, 10, 100), ctx = mx.ctx.default()) + expect_equal(sample_gamma, mx.nd.ones(c(5, 10, 100))) + + sample_beta <- mxnet:::mx.init.internal.default("X_beta", c(5, 10, 100), ctx = mx.ctx.default()) + expect_equal(sample_beta, mx.nd.zeros(c(5, 10, 100))) + + sample_moving_mean <- mxnet:::mx.init.internal.default("X_moving_mean", c(5, + 10, 100), ctx = mx.ctx.default()) + expect_equal(sample_moving_mean, mx.nd.zeros(c(5, 10, 100))) + + sample_moving_var <- mxnet:::mx.init.internal.default("X_moving_var", c(5, 10, + 100), ctx = mx.ctx.default()) + expect_equal(sample_moving_var, mx.nd.ones(c(5, 10, 100))) + + expect_error(mxnet:::mx.init.internal.default("X", c(5, 10, 100), ctx = mx.ctx.default()), + "Unkown initialization pattern for X") +}) + +testthat("mx.init.create", { + uniform_init <- mx.init.uniform(scale = 1) + expect_equal(typeof(uniform_init), "closure") + arrs <- setNames(as.list(c(50000, 100)), c("X_weight", "X_bias")) + arr_init <- mx.init.create(uniform_init, arrs, ctx = mx.ctx.default()) + + X_bias <- arr_init$X_bias + expect_equal(X_bias, mx.nd.zeros(c(100))) + + X_weight <- arr_init$X_weight + expect_equal(X_weight >= -1, mx.nd.ones(c(50000))) + expect_equal(X_weight <= 1, mx.nd.ones(c(50000))) + mean_weight <- mean(as.array(X_weight)) + expect_equal(mean_weight, 0, tolerance = 0.01) +}) diff --git a/R-package/tests/testthat/test_io.R b/R-package/tests/testthat/test_io.R index d619856cbb99..32f6c58d3cb5 100644 --- a/R-package/tests/testthat/test_io.R +++ b/R-package/tests/testthat/test_io.R @@ -7,22 +7,15 @@ source("get_data.R") test_that("MNISTIter", { GetMNIST_ubyte() batch.size <- 100 - train_dataiter <- mx.io.MNISTIter( - image = "data/train-images-idx3-ubyte", - label = "data/train-labels-idx1-ubyte", - data.shape = c(784), - batch.size = batch.size, - shuffle = TRUE, - flat = TRUE, - silent = 0, - seed = 10 - ) + train_dataiter <- mx.io.MNISTIter(image = 
"data/train-images-idx3-ubyte", label = "data/train-labels-idx1-ubyte", + data.shape = c(784), batch.size = batch.size, shuffle = TRUE, flat = TRUE, + silent = 0, seed = 10) train_dataiter$reset() - batch_count = 0 + batch_count <- 0 while (train_dataiter$iter.next()) { - batch_count = batch_count + 1 + batch_count <- batch_count + 1 } - nbatch = 60000 / batch.size + nbatch <- 60000/batch.size expect_equal(batch_count, nbatch) train_dataiter$reset() train_dataiter$iter.next() @@ -39,21 +32,15 @@ test_that("MNISTIter", { test_that("Cifar10Rec", { GetCifar10() - dataiter <- mx.io.ImageRecordIter( - path.imgrec = "./data/cifar/train.rec", - path.imglist = "./data/cifar/train.lst", - mean.img = "./data/cifar/cifar10_mean.bin", - batch.size = 100, - data.shape = c(28, 28, 3), - rand.crop = TRUE, - rand.mirror = TRUE - ) - labelcount = rep(0, 10) + dataiter <- mx.io.ImageRecordIter(path.imgrec = "./data/cifar/train.rec", path.imglist = "./data/cifar/train.lst", + mean.img = "./data/cifar/cifar10_mean.bin", batch.size = 100, data.shape = c(28, + 28, 3), rand.crop = TRUE, rand.mirror = TRUE) + labelcount <- rep(0, 10) dataiter$reset() while (dataiter$iter.next()) { - label = as.array(dataiter$value()$label) + label <- as.array(dataiter$value()$label) for (i in label) { - labelcount[i + 1] = labelcount[i + 1] + 1 + labelcount[i + 1] <- labelcount[i + 1] + 1 } } @@ -65,20 +52,20 @@ test_that("mx.io.arrayiter", { y <- c(1:100) dataiter <- mx.io.arrayiter(X, y, batch.size = 20, shuffle = FALSE) dataiter$reset() - batch_count = 0 + batch_count <- 0 while (dataiter$iter.next()) { - batch_count = batch_count + 1 + batch_count <- batch_count + 1 } - expect_equal(batch_count, 100 / 20) + expect_equal(batch_count, 100/20) - y <- round(y / 10) + y <- round(y/10) dataiter <- mx.io.arrayiter(X, y, batch.size = 30, shuffle = FALSE) labelcount <- rep(0, 11) dataiter$reset() while (dataiter$iter.next()) { label <- as.array(dataiter$value()$label) for (i in label) { - labelcount[i + 1] = labelcount[i + 1] + 1 + labelcount[i + 1] <- labelcount[i + 1] + 1 } } diff --git a/R-package/tests/testthat/test_model.R b/R-package/tests/testthat/test_model.R index 6167ed66c414..f4be49d5fdd1 100644 --- a/R-package/tests/testthat/test_model.R +++ b/R-package/tests/testthat/test_model.R @@ -4,76 +4,64 @@ source("get_data.R") context("models") -if (Sys.getenv("R_GPU_ENABLE") != "" & as.integer(Sys.getenv("R_GPU_ENABLE")) == 1) { +if (Sys.getenv("R_GPU_ENABLE") != "" & as.integer(Sys.getenv("R_GPU_ENABLE")) == + 1) { mx.ctx.default(new = mx.gpu()) message("Using GPU for testing.") } test_that("MNIST", { -# # Network configuration - GetMNIST_ubyte() - batch.size <- 100 - data <- mx.symbol.Variable("data") - fc1 <- mx.symbol.FullyConnected(data, name="fc1", num_hidden=128) - act1 <- mx.symbol.Activation(fc1, name="relu1", act_type="relu") - fc2 <- mx.symbol.FullyConnected(act1, name = "fc2", num_hidden = 64) - act2 <- mx.symbol.Activation(fc2, name="relu2", act_type="relu") - fc3 <- mx.symbol.FullyConnected(act2, name="fc3", num_hidden=10) - softmax <- mx.symbol.Softmax(fc3, name = "sm") - - dtrain = mx.io.MNISTIter( - image="data/train-images-idx3-ubyte", - label="data/train-labels-idx1-ubyte", - data.shape=c(784), - batch.size=batch.size, - shuffle=TRUE, - flat=TRUE, - silent=0, - seed=10) - - dtest = mx.io.MNISTIter( - image="data/t10k-images-idx3-ubyte", - label="data/t10k-labels-idx1-ubyte", - data.shape=c(784), - batch.size=batch.size, - shuffle=FALSE, - flat=TRUE, - silent=0) - - mx.set.seed(0) - - # create the model - model 
<- mx.model.FeedForward.create(softmax, X=dtrain, eval.data=dtest, - ctx = mx.ctx.default(), num.round=1, - learning.rate=0.1, momentum=0.9, - initializer=mx.init.uniform(0.07), - epoch.end.callback=mx.callback.save.checkpoint("chkpt"), - batch.end.callback=mx.callback.log.train.metric(100)) - - # do prediction - pred <- predict(model, dtest) - label <- mx.io.extract(dtest, "label") - dataX <- mx.io.extract(dtest, "data") - # Predict with R's array - pred2 <- predict(model, X=dataX) - - accuracy <- function(label, pred) { - ypred = max.col(t(as.array(pred))) - return(sum((as.array(label) + 1) == ypred) / length(label)) - } - - expect_equal(accuracy(label, pred), accuracy(label, pred2)) - - file.remove("chkpt-0001.params") - file.remove("chkpt-symbol.json") + # # Network configuration + GetMNIST_ubyte() + batch.size <- 100 + data <- mx.symbol.Variable("data") + fc1 <- mx.symbol.FullyConnected(data, name = "fc1", num_hidden = 128) + act1 <- mx.symbol.Activation(fc1, name = "relu1", act_type = "relu") + fc2 <- mx.symbol.FullyConnected(act1, name = "fc2", num_hidden = 64) + act2 <- mx.symbol.Activation(fc2, name = "relu2", act_type = "relu") + fc3 <- mx.symbol.FullyConnected(act2, name = "fc3", num_hidden = 10) + softmax <- mx.symbol.Softmax(fc3, name = "sm") + + dtrain <- mx.io.MNISTIter(image = "data/train-images-idx3-ubyte", label = "data/train-labels-idx1-ubyte", + data.shape = c(784), batch.size = batch.size, shuffle = TRUE, flat = TRUE, + silent = 0, seed = 10) + + dtest <- mx.io.MNISTIter(image = "data/t10k-images-idx3-ubyte", label = "data/t10k-labels-idx1-ubyte", + data.shape = c(784), batch.size = batch.size, shuffle = FALSE, flat = TRUE, + silent = 0) + + mx.set.seed(0) + + # create the model + model <- mx.model.FeedForward.create(softmax, X = dtrain, eval.data = dtest, + ctx = mx.ctx.default(), num.round = 1, learning.rate = 0.1, momentum = 0.9, + initializer = mx.init.uniform(0.07), epoch.end.callback = mx.callback.save.checkpoint("chkpt"), + batch.end.callback = mx.callback.log.train.metric(100)) + + # do prediction + pred <- predict(model, dtest) + label <- mx.io.extract(dtest, "label") + dataX <- mx.io.extract(dtest, "data") + # Predict with R's array + pred2 <- predict(model, X = dataX) + + accuracy <- function(label, pred) { + ypred <- max.col(t(as.array(pred))) + return(sum((as.array(label) + 1) == ypred)/length(label)) + } + + expect_equal(accuracy(label, pred), accuracy(label, pred2)) + + file.remove("chkpt-0001.params") + file.remove("chkpt-symbol.json") }) test_that("Regression", { data(BostonHousing, package = "mlbench") train.ind <- seq(1, 506, 3) - train.x <- data.matrix(BostonHousing[train.ind,-14]) + train.x <- data.matrix(BostonHousing[train.ind, -14]) train.y <- BostonHousing[train.ind, 14] - test.x <- data.matrix(BostonHousing[-train.ind,-14]) + test.x <- data.matrix(BostonHousing[-train.ind, -14]) test.y <- BostonHousing[-train.ind, 14] data <- mx.symbol.Variable("data") fc1 <- mx.symbol.FullyConnected(data, num_hidden = 1) @@ -81,16 +69,13 @@ test_that("Regression", { demo.metric.mae <- mx.metric.custom("mae", function(label, pred) { pred <- mx.nd.reshape(pred, shape = 0) - res <- mx.nd.mean(mx.nd.abs(label-pred)) + res <- mx.nd.mean(mx.nd.abs(label - pred)) return(as.array(res)) }) mx.set.seed(0) - model <- mx.model.FeedForward.create(lro, X = train.x, y = train.y, - ctx = mx.ctx.default(), num.round = 5, - array.batch.size = 20, - learning.rate = 2e-6, - momentum = 0.9, - eval.metric = demo.metric.mae) + model <- mx.model.FeedForward.create(lro, X = train.x, 
y = train.y, ctx = mx.ctx.default(), + num.round = 5, array.batch.size = 20, learning.rate = 2e-06, momentum = 0.9, + eval.metric = demo.metric.mae) train.x <- data.matrix(BostonHousing[train.ind, -(13:14)]) train.y <- BostonHousing[train.ind, c(13:14)] @@ -98,18 +83,14 @@ test_that("Regression", { test.y <- BostonHousing[-train.ind, c(13:14)] data <- mx.symbol.Variable("data") - fc2 <- mx.symbol.FullyConnected(data, num_hidden=2) + fc2 <- mx.symbol.FullyConnected(data, num_hidden = 2) lro2 <- mx.symbol.LinearRegressionOutput(fc2) mx.set.seed(0) - train_iter = mx.io.arrayiter(data = t(train.x), label = t(train.y)) - - model <- mx.model.FeedForward.create(lro2, X = train_iter, - ctx = mx.ctx.default(), - num.round = 50, - array.batch.size = 20, - learning.rate = 2e-6, - momentum = 0.9) + train_iter <- mx.io.arrayiter(data = t(train.x), label = t(train.y)) + + model <- mx.model.FeedForward.create(lro2, X = train_iter, ctx = mx.ctx.default(), + num.round = 50, array.batch.size = 20, learning.rate = 2e-06, momentum = 0.9) }) @@ -122,23 +103,18 @@ test_that("Classification", { test.x <- data.matrix(Sonar[-train.ind, 1:60]) test.y <- Sonar[-train.ind, 61] mx.set.seed(0) - model <- mx.mlp(train.x, train.y, hidden_node = 10, - out_node = 2, out_activation = "softmax", - num.round = 5, array.batch.size = 15, - learning.rate = 0.07, - momentum = 0.9, - eval.metric = mx.metric.accuracy) + model <- mx.mlp(train.x, train.y, hidden_node = 10, out_node = 2, out_activation = "softmax", + num.round = 5, array.batch.size = 15, learning.rate = 0.07, momentum = 0.9, + eval.metric = mx.metric.accuracy) }) test_that("Fine-tune", { GetInception() GetCatDog() - train_iter <- mx.io.ImageRecordIter(path.imgrec = "./data/cats_dogs/cats_dogs_train.rec", - batch.size = 8, data.shape = c(224, 224, 3), - rand.crop = TRUE, rand.mirror = TRUE) - val_iter <- mx.io.ImageRecordIter(path.imgrec = "./data/cats_dogs/cats_dogs_val.rec", - batch.size = 8, data.shape = c(224, 224, 3), - rand.crop = FALSE, rand.mirror = FALSE) + train_iter <- mx.io.ImageRecordIter(path.imgrec = "./data/cats_dogs/cats_dogs_train.rec", + batch.size = 8, data.shape = c(224, 224, 3), rand.crop = TRUE, rand.mirror = TRUE) + val_iter <- mx.io.ImageRecordIter(path.imgrec = "./data/cats_dogs/cats_dogs_val.rec", + batch.size = 8, data.shape = c(224, 224, 3), rand.crop = FALSE, rand.mirror = FALSE) inception_bn <- mx.model.load("./model/Inception-BN", iteration = 126) symbol <- inception_bn$symbol internals <- symbol$get.internals() @@ -148,11 +124,8 @@ test_that("Fine-tune", { new_fc <- mx.symbol.FullyConnected(data = flatten, num_hidden = 2, name = "fc1") new_soft <- mx.symbol.SoftmaxOutput(data = new_fc, name = "softmax") - arg_params_new <- mx.model.init.params(symbol = new_soft, - input.shape = list("data" = c(224, 224, 3, 8)), - output.shape = NULL, - initializer = mx.init.uniform(0.1), - ctx = mx.cpu())$arg.params + arg_params_new <- mx.model.init.params(symbol = new_soft, input.shape = list(data = c(224, + 224, 3, 8)), output.shape = NULL, initializer = mx.init.uniform(0.1), ctx = mx.cpu())$arg.params fc1_weights_new <- arg_params_new[["fc1_weight"]] fc1_bias_new <- arg_params_new[["fc1_bias"]] @@ -160,25 +133,22 @@ test_that("Fine-tune", { arg_params_new[["fc1_weight"]] <- fc1_weights_new arg_params_new[["fc1_bias"]] <- fc1_bias_new - - #model <- mx.model.FeedForward.create(symbol = new_soft, X = train_iter, eval.data = val_iter, - # ctx = mx.ctx.default(), eval.metric = mx.metric.accuracy, - # num.round = 2, learning.rate = 0.05, momentum = 0.9, - 
# wd = 0.00001, kvstore = "local", - # batch.end.callback = mx.callback.log.train.metric(50), - # initializer = mx.init.Xavier(factor_type = "in", magnitude = 2.34), - # optimizer = "sgd", - # arg.params = arg_params_new, - # aux.params = inception_bn$aux.params) -}) + + # model <- mx.model.FeedForward.create(symbol = new_soft, X = train_iter, + # eval.data = val_iter, ctx = mx.ctx.default(), eval.metric = mx.metric.accuracy, + # num.round = 2, learning.rate = 0.05, momentum = 0.9, wd = 0.00001, kvstore = + # 'local', batch.end.callback = mx.callback.log.train.metric(50), initializer = + # mx.init.Xavier(factor_type = 'in', magnitude = 2.34), optimizer = 'sgd', + # arg.params = arg_params_new, aux.params = inception_bn$aux.params) +}) test_that("Matrix Factorization", { # Use fake random data instead of GetMovieLens() to remove external dependency set.seed(123) - user <- sample(943, size = 100000, replace = T) - item <- sample(1682, size = 100000, replace = T) - score <- sample(5, size = 100000, replace = T) + user <- sample(943, size = 1e+05, replace = T) + item <- sample(1682, size = 1e+05, replace = T) + score <- sample(5, size = 1e+05, replace = T) DF <- data.frame(user, item, score) max_user <- max(DF$user) @@ -189,95 +159,74 @@ test_that("Matrix Factorization", { user <- mx.symbol.Variable("user") item <- mx.symbol.Variable("item") score <- mx.symbol.Variable("score") - user1 <- mx.symbol.Embedding(data = mx.symbol.BlockGrad(user), input_dim = max_user, - output_dim = k, name = "user1") - item1 <- mx.symbol.Embedding(data = mx.symbol.BlockGrad(item), input_dim = max_item, - output_dim = k, name = "item1") + user1 <- mx.symbol.Embedding(data = mx.symbol.BlockGrad(user), input_dim = max_user, + output_dim = k, name = "user1") + item1 <- mx.symbol.Embedding(data = mx.symbol.BlockGrad(item), input_dim = max_item, + output_dim = k, name = "item1") pred <- user1 * item1 pred1 <- mx.symbol.sum_axis(pred, axis = 1, name = "pred1") pred2 <- mx.symbol.Flatten(pred1, name = "pred2") pred3 <- mx.symbol.LinearRegressionOutput(data = pred2, label = score, name = "pred3") - + mx.set.seed(123) - CustomIter <- setRefClass( "CustomIter", fields = c("iter1", "iter2"), - contains = "Rcpp_MXArrayDataIter", - methods = list( - initialize = function(iter1, iter2) { - .self$iter1 <- iter1 - .self$iter2 <- iter2 - .self - }, - value = function() { - user <- .self$iter1$value()$data - item <- .self$iter2$value()$data - score <- .self$iter1$value()$label - list(user = user, - item = item, - score = score) - }, - iter.next = function() { - .self$iter1$iter.next() - .self$iter2$iter.next() - }, - reset = function() { - .self$iter1$reset() - .self$iter2$reset() - }, - num.pad = function() { - .self$iter1$num.pad() - }, - finalize = function() { - .self$iter1$finalize() - .self$iter2$finalize() - } - ) - ) - - user_iter = mx.io.arrayiter(data = DF[, 1], label = DF[, 3], batch.size = k) - - item_iter = mx.io.arrayiter(data = DF[, 2], label = DF[, 3], batch.size = k) + CustomIter <- setRefClass("CustomIter", fields = c("iter1", "iter2"), contains = "Rcpp_MXArrayDataIter", + methods = list(initialize = function(iter1, iter2) { + .self$iter1 <- iter1 + .self$iter2 <- iter2 + .self + }, value = function() { + user <- .self$iter1$value()$data + item <- .self$iter2$value()$data + score <- .self$iter1$value()$label + list(user = user, item = item, score = score) + }, iter.next = function() { + .self$iter1$iter.next() + .self$iter2$iter.next() + }, reset = function() { + .self$iter1$reset() + .self$iter2$reset() + }, 
num.pad = function() { + .self$iter1$num.pad() + }, finalize = function() { + .self$iter1$finalize() + .self$iter2$finalize() + })) + + user_iter <- mx.io.arrayiter(data = DF[, 1], label = DF[, 3], batch.size = k) + + item_iter <- mx.io.arrayiter(data = DF[, 2], label = DF[, 3], batch.size = k) train_iter <- CustomIter$new(user_iter, item_iter) - model <- mx.model.FeedForward.create(pred3, X = train_iter, ctx = mx.ctx.default(), - num.round = 5, initializer = mx.init.uniform(0.07), - learning.rate = 0.07, - eval.metric = mx.metric.rmse, - momentum = 0.9, - epoch.end.callback = mx.callback.log.train.metric(1), - input.names = c("user", "item"), - output.names = "score") + model <- mx.model.FeedForward.create(pred3, X = train_iter, ctx = mx.ctx.default(), + num.round = 5, initializer = mx.init.uniform(0.07), learning.rate = 0.07, + eval.metric = mx.metric.rmse, momentum = 0.9, epoch.end.callback = mx.callback.log.train.metric(1), + input.names = c("user", "item"), output.names = "score") }) test_that("Captcha", { GetCaptcha_data() data.shape <- c(80, 30, 3) batch_size <- 40 - train <- mx.io.ImageRecordIter( - path.imgrec = "./data/captcha_example/captcha_train.rec", - path.imglist = "./data/captcha_example/captcha_train.lst", - batch.size = batch_size, - label.width = 4, - data.shape = data.shape, - mean.img = "mean.bin") - - val <- mx.io.ImageRecordIter( - path.imgrec = "./data/captcha_example/captcha_test.rec", - path.imglist = "./data/captcha_example/captcha_test.lst", - batch.size = batch_size, - label.width = 4, - data.shape = data.shape, - mean.img = "mean.bin") + train <- mx.io.ImageRecordIter(path.imgrec = "./data/captcha_example/captcha_train.rec", + path.imglist = "./data/captcha_example/captcha_train.lst", batch.size = batch_size, + label.width = 4, data.shape = data.shape, mean.img = "mean.bin") + + val <- mx.io.ImageRecordIter(path.imgrec = "./data/captcha_example/captcha_test.rec", + path.imglist = "./data/captcha_example/captcha_test.lst", batch.size = batch_size, + label.width = 4, data.shape = data.shape, mean.img = "mean.bin") data <- mx.symbol.Variable("data") label <- mx.symbol.Variable("label") conv1 <- mx.symbol.Convolution(data = data, kernel = c(5, 5), num_filter = 32) - pool1 <- mx.symbol.Pooling(data = conv1, pool_type = "max", kernel = c(2, 2), stride = c(1, 1)) + pool1 <- mx.symbol.Pooling(data = conv1, pool_type = "max", kernel = c(2, 2), + stride = c(1, 1)) relu1 <- mx.symbol.Activation(data = pool1, act_type = "relu") conv2 <- mx.symbol.Convolution(data = relu1, kernel = c(5, 5), num_filter = 32) - pool2 <- mx.symbol.Pooling(data = conv2, pool_type = "avg", kernel = c(2, 2), stride = c(1, 1)) + pool2 <- mx.symbol.Pooling(data = conv2, pool_type = "avg", kernel = c(2, 2), + stride = c(1, 1)) relu2 <- mx.symbol.Activation(data = pool2, act_type = "relu") flatten <- mx.symbol.Flatten(data = relu2) @@ -292,8 +241,8 @@ test_that("Captcha", { captcha_net <- mx.symbol.SoftmaxOutput(data = fc2, label = label, name = "softmax") mx.metric.acc2 <- mx.metric.custom("accuracy", function(label, pred) { - label = as.array(label) - pred = as.array(pred) + label <- as.array(label) + pred <- as.array(pred) ypred <- max.col(t(pred)) - 1 ypred <- matrix(ypred, nrow = nrow(label), ncol = ncol(label), byrow = TRUE) return(sum(colSums(label == ypred) == 4)/ncol(label)) @@ -305,26 +254,20 @@ test_that("Captcha", { train$iter.next() input.names <- "data" - input.shape <- sapply(input.names, function(n){dim(train$value()[[n]])}, simplify = FALSE) + input.shape <- sapply(input.names, 
function(n) { + dim(train$value()[[n]]) + }, simplify = FALSE) arg_names <- arguments(captcha_net) output.names <- "label" - output.shape <- sapply(output.names, function(n){dim(train$value()[[n]])}, simplify = FALSE) - params <- mx.model.init.params(captcha_net, input.shape, output.shape, - mx.init.Xavier(factor_type = "in", magnitude = 2.34), - mx.cpu()) - - #model <- mx.model.FeedForward.create( - # X = train, - # eval.data = val, - # ctx = mx.ctx.default(), - # symbol = captcha_net, - # eval.metric = mx.metric.acc2, - # num.round = 1, - # learning.rate = 1e-04, - # momentum = 0.9, - # wd = 1e-05, - # batch.end.callback = mx.callback.log.train.metric(50), - # initializer = mx.init.Xavier(factor_type = "in", magnitude = 2.34), - # optimizer = "sgd", - # clip_gradient = 10) + output.shape <- sapply(output.names, function(n) { + dim(train$value()[[n]]) + }, simplify = FALSE) + params <- mx.model.init.params(captcha_net, input.shape, output.shape, mx.init.Xavier(factor_type = "in", + magnitude = 2.34), mx.cpu()) + + # model <- mx.model.FeedForward.create( X = train, eval.data = val, ctx = + # mx.ctx.default(), symbol = captcha_net, eval.metric = mx.metric.acc2, num.round + # = 1, learning.rate = 1e-04, momentum = 0.9, wd = 1e-05, batch.end.callback = + # mx.callback.log.train.metric(50), initializer = mx.init.Xavier(factor_type = + # 'in', magnitude = 2.34), optimizer = 'sgd', clip_gradient = 10) }) diff --git a/R-package/tests/testthat/test_ndarray.R b/R-package/tests/testthat/test_ndarray.R index 326ea6ca7f30..4850823e29d2 100644 --- a/R-package/tests/testthat/test_ndarray.R +++ b/R-package/tests/testthat/test_ndarray.R @@ -2,45 +2,46 @@ require(mxnet) context("ndarray") -if (Sys.getenv("R_GPU_ENABLE") != "" & as.integer(Sys.getenv("R_GPU_ENABLE")) == 1) { +if (Sys.getenv("R_GPU_ENABLE") != "" & as.integer(Sys.getenv("R_GPU_ENABLE")) == + 1) { mx.ctx.default(new = mx.gpu()) message("Using GPU for testing.") } test_that("element-wise calculation for vector", { - x = 1:10 - mat = mx.nd.array(as.array(x), mx.ctx.default()) + x <- 1:10 + mat <- mx.nd.array(as.array(x), mx.ctx.default()) expect_equal(x, as.array(mat)) expect_equal(x + 1, as.array(mat + 1)) expect_equal(x - 10, as.array(mat - 10)) expect_equal(x * 20, as.array(mat * 20)) - expect_equal(x / 3, as.array(mat / 3), tolerance = 1e-5) + expect_equal(x/3, as.array(mat/3), tolerance = 1e-05) expect_equal(-1 - x, as.array(-1 - mat)) - expect_equal(-5 / x, as.array(-5 / mat), tolerance = 1e-5) + expect_equal(-5/x, as.array(-5/mat), tolerance = 1e-05) expect_equal(x + x, as.array(mat + mat)) - expect_equal(x / x, as.array(mat / mat)) + expect_equal(x/x, as.array(mat/mat)) expect_equal(x * x, as.array(mat * mat)) expect_equal(x - x, as.array(mat - mat)) expect_equal(as.array(1 - mat), as.array(1 - mat)) - x <- runif(10,-10, 10) - nd = mx.nd.array(as.array(x)) - expect_equal(sqrt(abs(x)), as.array(mx.nd.sqrt(mx.nd.abs(nd))), tolerance = 1e-6) - expect_equal(x ^ 2, as.array(mx.nd.square(nd)), tolerance = 1e-6) + x <- runif(10, -10, 10) + nd <- mx.nd.array(as.array(x)) + expect_equal(sqrt(abs(x)), as.array(mx.nd.sqrt(mx.nd.abs(nd))), tolerance = 1e-06) + expect_equal(x^2, as.array(mx.nd.square(nd)), tolerance = 1e-06) }) test_that("element-wise calculation for matrix", { - x = matrix(1:4, 2, 2) - mat = mx.nd.array(as.array(x), mx.ctx.default()) + x <- matrix(1:4, 2, 2) + mat <- mx.nd.array(as.array(x), mx.ctx.default()) expect_equal(x, as.array(mat)) expect_equal(x + 1, as.array(mat + 1)) expect_equal(x - 10, as.array(mat - 10)) 
expect_equal(x * 20, as.array(mat * 20)) - expect_equal(x / 3, as.array(mat / 3), tolerance = 1e-5) + expect_equal(x/3, as.array(mat/3), tolerance = 1e-05) expect_equal(-1 - x, as.array(-1 - mat)) - expect_equal(-5 / x, as.array(-5 / mat), tolerance = 1e-5) + expect_equal(-5/x, as.array(-5/mat), tolerance = 1e-05) expect_equal(x + x, as.array(mat + mat)) - expect_equal(x / x, as.array(mat / mat)) + expect_equal(x/x, as.array(mat/mat)) expect_equal(x * x, as.array(mat * mat)) expect_equal(x - x, as.array(mat - mat)) expect_equal(as.array(1 - mat), as.array(1 - mat)) @@ -51,20 +52,24 @@ test_that("ndarray ones, zeros, save and load", { expect_equal(matrix(0, 10, 5), as.array(mx.nd.zeros(c(10, 5)))) expect_equal(rep(1, 10), as.array(mx.nd.ones(10))) expect_equal(matrix(1, 10, 5), as.array(mx.nd.ones(c(10, 5)))) - mat = mx.nd.array(1:20) - mx.nd.save(mat, 'temp.mat') - mat2 = mx.nd.load('temp.mat') + mat <- mx.nd.array(1:20) + mx.nd.save(mat, "temp.mat") + mat2 <- mx.nd.load("temp.mat") expect_true(is.mx.ndarray(mat2[[1]])) expect_equal(as.array(mat), as.array(mat2[[1]])) - file.remove('temp.mat') + file.remove("temp.mat") }) test_that("ndarray concatenate", { shapes <- matrix(c(2, 3, 4, 2, 2, 2, 4, 2, 2, 1, 4, 2), nrow = 3, byrow = TRUE) - array_r <- apply(shapes, 2, function(s) { runif(s, -10, 10) }) - array_nd <- apply(array_r, 1, function(s) { mx.nd.array(matrix(s, nrow = 1)) }) + array_r <- apply(shapes, 2, function(s) { + runif(s, -10, 10) + }) + array_nd <- apply(array_r, 1, function(s) { + mx.nd.array(matrix(s, nrow = 1)) + }) array_nd_concat <- mx.nd.concat(data = array_nd, num_args = 3, dim = 1) - expect_equal(array_r, as.matrix(array_nd_concat), tolerance = 1e-6) + expect_equal(array_r, as.matrix(array_nd_concat), tolerance = 1e-06) x1 <- mx.nd.array(c(1:24)) x2 <- mx.nd.array(c(25:48)) @@ -74,7 +79,8 @@ test_that("ndarray concatenate", { x1 <- array(1:24, dim = c(4, 3, 2)) x2 <- array(25:48, dim = c(4, 3, 2)) - x3 <- c(1:4, 25:28, 5:8, 29:32, 9:12, 33:36, 13:16, 37:40, 17:20, 41:44, 21:24, 45:48) + x3 <- c(1:4, 25:28, 5:8, 29:32, 9:12, 33:36, 13:16, 37:40, 17:20, 41:44, 21:24, + 45:48) y1 <- mx.nd.array(x1) y2 <- mx.nd.array(x2) y3 <- mx.nd.concat(data = c(y1, y2), num_args = 2, dim = 2) @@ -83,8 +89,8 @@ test_that("ndarray concatenate", { }) test_that("ndarray clip", { - nd <- mx.nd.array(runif(10,-10, 10)) - nd2 <- mx.nd.clip(nd,-2, 3) + nd <- mx.nd.array(runif(10, -10, 10)) + nd2 <- mx.nd.clip(nd, -2, 3) arr <- as.array(nd2) expect_equal(arr >= -2 | arr <= 3, rep(TRUE, length(arr))) }) @@ -98,7 +104,7 @@ test_that("ndarray dot", { B <- mx.nd.array(t(b)) C <- mx.nd.dot(A, B) - expect_equal(c, t(as.matrix(C)), tolerance = 1e-6) + expect_equal(c, t(as.matrix(C)), tolerance = 1e-06) }) test_that("ndarray crop", { @@ -107,9 +113,10 @@ test_that("ndarray crop", { expect_equal(array(1, dim = c(2, 1, 3)), as.array(y)) z <- mx.nd.zeros(c(2, 1, 3)) - x <- mxnet:::mx.nd.internal.crop.assign(x, z, begin = c(0, 0, 0), end = c(2, 1, 3)) + x <- mxnet:::mx.nd.internal.crop.assign(x, z, begin = c(0, 0, 0), end = c(2, + 1, 3)) arr_x <- array(1, dim = dim(x)) - arr_x[c(1:2), 1 , c(1:3)] <- 0 + arr_x[c(1:2), 1, c(1:3)] <- 0 expect_equal(as.array(x), arr_x) }) @@ -118,77 +125,77 @@ test_that("ndarray negate", { arr <- array(runif(24, -10, 10), dim = c(2, 3, 4)) nd <- mx.nd.array(arr) - expect_equal(arr, as.array(nd), tolerance = 1e-6) - expect_equal(-arr, as.array(-nd), tolerance = 1e-6) - expect_equal(arr, as.array(nd), tolerance = 1e-6) + expect_equal(arr, as.array(nd), tolerance = 1e-06) + 
expect_equal(-arr, as.array(-nd), tolerance = 1e-06) + expect_equal(arr, as.array(nd), tolerance = 1e-06) }) test_that("ndarray equal", { x <- mx.nd.zeros(c(2, 3)) y <- mx.nd.ones(c(2, 3)) - z = x == y - expect_equal(as.array(z), array(0, c(2,3))) + z <- x == y + expect_equal(as.array(z), array(0, c(2, 3))) - z = 0 == x - expect_equal(as.array(z), array(1, c(2,3))) + z <- 0 == x + expect_equal(as.array(z), array(1, c(2, 3))) }) test_that("ndarray not equal", { x <- mx.nd.zeros(c(2, 3)) y <- mx.nd.ones(c(2, 3)) - z = x != y - expect_equal(as.array(z), array(1, c(2,3))) + z <- x != y + expect_equal(as.array(z), array(1, c(2, 3))) - z = 0 != x - expect_equal(as.array(z), array(0, c(2,3))) + z <- 0 != x + expect_equal(as.array(z), array(0, c(2, 3))) }) test_that("ndarray greater", { x <- mx.nd.zeros(c(2, 3)) y <- mx.nd.ones(c(2, 3)) - z = x > y - expect_equal(as.array(z), array(0, c(2,3))) + z <- x > y + expect_equal(as.array(z), array(0, c(2, 3))) - z = y > 0 - expect_equal(as.array(z), array(1, c(2,3))) + z <- y > 0 + expect_equal(as.array(z), array(1, c(2, 3))) - z = 0 > y - expect_equal(as.array(z), array(0, c(2,3))) + z <- 0 > y + expect_equal(as.array(z), array(0, c(2, 3))) - z = x >= y - expect_equal(as.array(z), array(0, c(2,3))) + z <- x >= y + expect_equal(as.array(z), array(0, c(2, 3))) - z = y >= 0 - expect_equal(as.array(z), array(1, c(2,3))) + z <- y >= 0 + expect_equal(as.array(z), array(1, c(2, 3))) - z = 0 >= y - expect_equal(as.array(z), array(0, c(2,3))) + z <- 0 >= y + expect_equal(as.array(z), array(0, c(2, 3))) - z = y >= 1 - expect_equal(as.array(z), array(1, c(2,3))) + z <- y >= 1 + expect_equal(as.array(z), array(1, c(2, 3))) }) test_that("ndarray lesser", { x <- mx.nd.zeros(c(2, 3)) y <- mx.nd.ones(c(2, 3)) - z = x < y - expect_equal(as.array(z), array(1, c(2,3))) + z <- x < y + expect_equal(as.array(z), array(1, c(2, 3))) - z = y < 0 - expect_equal(as.array(z), array(0, c(2,3))) + z <- y < 0 + expect_equal(as.array(z), array(0, c(2, 3))) - z = 0 < y - expect_equal(as.array(z), array(1, c(2,3))) + z <- 0 < y + expect_equal(as.array(z), array(1, c(2, 3))) - z = x <= y - expect_equal(as.array(z), array(1, c(2,3))) + z <- x <= y + expect_equal(as.array(z), array(1, c(2, 3))) - z = y <= 0 - expect_equal(as.array(z), array(0, c(2,3))) + z <- y <= 0 + expect_equal(as.array(z), array(0, c(2, 3))) - z = 0 <= y - expect_equal(as.array(z), array(1, c(2,3))) + z <- 0 <= y + expect_equal(as.array(z), array(1, c(2, 3))) - z = y <= 1 - expect_equal(as.array(z), array(1, c(2,3))) -}) \ No newline at end of file + z <- y <= 1 + expect_equal(as.array(z), array(1, c(2, 3))) +}) diff --git a/R-package/tests/testthat/test_optimizer.R b/R-package/tests/testthat/test_optimizer.R index c6dacaa728bd..a02a9edf524a 100644 --- a/R-package/tests/testthat/test_optimizer.R +++ b/R-package/tests/testthat/test_optimizer.R @@ -1,204 +1,168 @@ context("optimizer") test_that("sgd", { - - data = mx.symbol.Variable('data') - label = mx.symbol.Variable('label') - fc_weight = mx.symbol.Variable('fc_weight') - fc = mx.symbol.FullyConnected(data = data, weight = fc_weight, no.bias = T, name = 'fc1', num_hidden = 1) - loss = mx.symbol.LinearRegressionOutput(data = fc, label = label, name = 'loss') - - x <- mx.nd.array(array(1:6, dim=2:3)) + + data <- mx.symbol.Variable("data") + label <- mx.symbol.Variable("label") + fc_weight <- mx.symbol.Variable("fc_weight") + fc <- mx.symbol.FullyConnected(data = data, weight = fc_weight, no.bias = T, + name = "fc1", num_hidden = 1) + loss <- 
mx.symbol.LinearRegressionOutput(data = fc, label = label, name = "loss") + + x <- mx.nd.array(array(1:6, dim = 2:3)) y <- mx.nd.array(c(5, 11, 16)) - w1 <- mx.nd.array(array(c(1.1, 1.8), dim = c(2,1))) - - exec <- mxnet:::mx.symbol.bind(symbol = loss, - ctx = mx.cpu(), - arg.arrays = list(data = x, - fc1_weight = w1, - label = y), - aux.arrays = NULL, - grad.reqs = c("null", "write", "null")) - - optimizer <- mx.opt.create("sgd", - learning.rate = 1, - momentum = 0, - wd = 0, - rescale.grad = 1, - clip_gradient = -1) - + w1 <- mx.nd.array(array(c(1.1, 1.8), dim = c(2, 1))) + + exec <- mxnet:::mx.symbol.bind(symbol = loss, ctx = mx.cpu(), arg.arrays = list(data = x, + fc1_weight = w1, label = y), aux.arrays = NULL, grad.reqs = c("null", "write", + "null")) + + optimizer <- mx.opt.create("sgd", learning.rate = 1, momentum = 0, wd = 0, rescale.grad = 1, + clip_gradient = -1) + updaters <- mx.opt.get.updater(optimizer, exec$ref.arg.arrays, ctx = mx.cpu()) - + mx.exec.forward(exec, is.train = T) mx.exec.backward(exec) - + arg.blocks <- updaters(exec$ref.arg.arrays, exec$ref.grad.arrays) mx.exec.update.arg.arrays(exec, arg.blocks, skip.null = TRUE) - - expect_equal(as.array(arg.blocks[[2]]), array(c(1.4, 2.6), dim = c(2,1)), tolerance = 1e-1) - + + expect_equal(as.array(arg.blocks[[2]]), array(c(1.4, 2.6), dim = c(2, 1)), tolerance = 0.1) + }) test_that("rmsprop", { - - data = mx.symbol.Variable('data') - label = mx.symbol.Variable('label') - fc_weight = mx.symbol.Variable('fc_weight') - fc = mx.symbol.FullyConnected(data = data, weight = fc_weight, no.bias = T, name = 'fc1', num_hidden = 1) - loss = mx.symbol.LinearRegressionOutput(data = fc, label = label, name = 'loss') - - x <- mx.nd.array(array(1:6, dim=2:3)) + + data <- mx.symbol.Variable("data") + label <- mx.symbol.Variable("label") + fc_weight <- mx.symbol.Variable("fc_weight") + fc <- mx.symbol.FullyConnected(data = data, weight = fc_weight, no.bias = T, + name = "fc1", num_hidden = 1) + loss <- mx.symbol.LinearRegressionOutput(data = fc, label = label, name = "loss") + + x <- mx.nd.array(array(1:6, dim = 2:3)) y <- mx.nd.array(c(5, 11, 16)) - w1 <- mx.nd.array(array(c(1.1, 1.8), dim = c(2,1))) - - exec <- mxnet:::mx.symbol.bind(symbol = loss, - ctx = mx.cpu(), - arg.arrays = list(data = x, - fc1_weight = w1, - label = y), - aux.arrays = NULL, - grad.reqs = c("null", "write", "null")) - - optimizer <- mx.opt.create("rmsprop", learning.rate = 1, - centered = TRUE, - gamma1 = 0.95, - gamma2 = 0.9, - epsilon = 1e-4, - wd = 0, - rescale.grad = 1, - clip_gradient = -1) - + w1 <- mx.nd.array(array(c(1.1, 1.8), dim = c(2, 1))) + + exec <- mxnet:::mx.symbol.bind(symbol = loss, ctx = mx.cpu(), arg.arrays = list(data = x, + fc1_weight = w1, label = y), aux.arrays = NULL, grad.reqs = c("null", "write", + "null")) + + optimizer <- mx.opt.create("rmsprop", learning.rate = 1, centered = TRUE, gamma1 = 0.95, + gamma2 = 0.9, epsilon = 1e-04, wd = 0, rescale.grad = 1, clip_gradient = -1) + updaters <- mx.opt.get.updater(optimizer, exec$ref.arg.arrays, ctx = mx.cpu()) - + mx.exec.forward(exec, is.train = T) mx.exec.backward(exec) - + arg.blocks <- updaters(exec$ref.arg.arrays, exec$ref.grad.arrays) mx.exec.update.arg.arrays(exec, arg.blocks, skip.null = TRUE) - - expect_equal(as.array(arg.blocks[[2]]), array(c(5.64, 6.38), dim = c(2,1)), tolerance = 1e-1) - + + expect_equal(as.array(arg.blocks[[2]]), array(c(5.64, 6.38), dim = c(2, 1)), + tolerance = 0.1) + }) test_that("adam", { - - data = mx.symbol.Variable('data') - label = 
mx.symbol.Variable('label') - fc_weight = mx.symbol.Variable('fc_weight') - fc = mx.symbol.FullyConnected(data = data, weight = fc_weight, no.bias = T, name = 'fc1', num_hidden = 1) - loss = mx.symbol.LinearRegressionOutput(data = fc, label = label, name = 'loss') - - x <- mx.nd.array(array(1:6, dim=2:3)) + + data <- mx.symbol.Variable("data") + label <- mx.symbol.Variable("label") + fc_weight <- mx.symbol.Variable("fc_weight") + fc <- mx.symbol.FullyConnected(data = data, weight = fc_weight, no.bias = T, + name = "fc1", num_hidden = 1) + loss <- mx.symbol.LinearRegressionOutput(data = fc, label = label, name = "loss") + + x <- mx.nd.array(array(1:6, dim = 2:3)) y <- mx.nd.array(c(5, 11, 16)) - w1 <- mx.nd.array(array(c(1.1, 1.8), dim = c(2,1))) - - exec <- mxnet:::mx.symbol.bind(symbol = loss, - ctx = mx.cpu(), - arg.arrays = list(data = x, - fc1_weight = w1, - label = y), - aux.arrays = NULL, - grad.reqs = c("null", "write", "null")) - - optimizer <- mx.opt.create("adam", - learning.rate = 1, - beta1 = 0.9, - beta2 = 0.999, - epsilon = 1e-8, - wd = 0, - rescale.grad = 1, - clip_gradient = -1) - + w1 <- mx.nd.array(array(c(1.1, 1.8), dim = c(2, 1))) + + exec <- mxnet:::mx.symbol.bind(symbol = loss, ctx = mx.cpu(), arg.arrays = list(data = x, + fc1_weight = w1, label = y), aux.arrays = NULL, grad.reqs = c("null", "write", + "null")) + + optimizer <- mx.opt.create("adam", learning.rate = 1, beta1 = 0.9, beta2 = 0.999, + epsilon = 1e-08, wd = 0, rescale.grad = 1, clip_gradient = -1) + updaters <- mx.opt.get.updater(optimizer, exec$ref.arg.arrays, ctx = mx.cpu()) - + mx.exec.forward(exec, is.train = T) mx.exec.backward(exec) - + arg.blocks <- updaters(exec$ref.arg.arrays, exec$ref.grad.arrays) mx.exec.update.arg.arrays(exec, arg.blocks, skip.null = TRUE) - - expect_equal(as.array(arg.blocks[[2]]), array(c(4.26, 4.96), dim = c(2,1)), tolerance = 1e-1) - + + expect_equal(as.array(arg.blocks[[2]]), array(c(4.26, 4.96), dim = c(2, 1)), + tolerance = 0.1) + }) test_that("adagrad", { - - data = mx.symbol.Variable('data') - label = mx.symbol.Variable('label') - fc_weight = mx.symbol.Variable('fc_weight') - fc = mx.symbol.FullyConnected(data = data, weight = fc_weight, no.bias = T, name = 'fc1', num_hidden = 1) - loss = mx.symbol.LinearRegressionOutput(data = fc, label = label, name = 'loss') - - x <- mx.nd.array(array(1:6, dim=2:3)) + + data <- mx.symbol.Variable("data") + label <- mx.symbol.Variable("label") + fc_weight <- mx.symbol.Variable("fc_weight") + fc <- mx.symbol.FullyConnected(data = data, weight = fc_weight, no.bias = T, + name = "fc1", num_hidden = 1) + loss <- mx.symbol.LinearRegressionOutput(data = fc, label = label, name = "loss") + + x <- mx.nd.array(array(1:6, dim = 2:3)) y <- mx.nd.array(c(5, 11, 16)) - w1 <- mx.nd.array(array(c(1.1, 1.8), dim = c(2,1))) - - exec <- mxnet:::mx.symbol.bind(symbol = loss, - ctx = mx.cpu(), - arg.arrays = list(data = x, - fc1_weight = w1, - label = y), - aux.arrays = NULL, - grad.reqs = c("null", "write", "null")) - - optimizer <- mx.opt.create("adagrad", - learning.rate = 1, - epsilon = 1e-8, - wd = 0, - rescale.grad = 1, - clip_gradient = -1) - + w1 <- mx.nd.array(array(c(1.1, 1.8), dim = c(2, 1))) + + exec <- mxnet:::mx.symbol.bind(symbol = loss, ctx = mx.cpu(), arg.arrays = list(data = x, + fc1_weight = w1, label = y), aux.arrays = NULL, grad.reqs = c("null", "write", + "null")) + + optimizer <- mx.opt.create("adagrad", learning.rate = 1, epsilon = 1e-08, wd = 0, + rescale.grad = 1, clip_gradient = -1) + updaters <- mx.opt.get.updater(optimizer, 
exec$ref.arg.arrays, ctx = mx.cpu()) - + mx.exec.forward(exec, is.train = T) mx.exec.backward(exec) - + arg.blocks <- updaters(exec$ref.arg.arrays, exec$ref.grad.arrays) mx.exec.update.arg.arrays(exec, arg.blocks, skip.null = TRUE) - - expect_equal(as.array(arg.blocks[[2]]), array(c(2.1, 2.8), dim = c(2,1)), tolerance = 1e-1) - + + expect_equal(as.array(arg.blocks[[2]]), array(c(2.1, 2.8), dim = c(2, 1)), tolerance = 0.1) + }) test_that("adadelta", { - - data = mx.symbol.Variable('data') - label = mx.symbol.Variable('label') - fc_weight = mx.symbol.Variable('fc_weight') - fc = mx.symbol.FullyConnected(data = data, weight = fc_weight, no.bias = T, name = 'fc1', num_hidden = 1) - loss = mx.symbol.LinearRegressionOutput(data = fc, label = label, name = 'loss') - - x <- mx.nd.array(array(1:6, dim=2:3)) + + data <- mx.symbol.Variable("data") + label <- mx.symbol.Variable("label") + fc_weight <- mx.symbol.Variable("fc_weight") + fc <- mx.symbol.FullyConnected(data = data, weight = fc_weight, no.bias = T, + name = "fc1", num_hidden = 1) + loss <- mx.symbol.LinearRegressionOutput(data = fc, label = label, name = "loss") + + x <- mx.nd.array(array(1:6, dim = 2:3)) y <- mx.nd.array(c(5, 11, 16)) - w1 <- mx.nd.array(array(c(1.1, 1.8), dim = c(2,1))) - - exec <- mxnet:::mx.symbol.bind(symbol = loss, - ctx = mx.cpu(), - arg.arrays = list(data = x, - fc1_weight = w1, - label = y), - aux.arrays = NULL, - grad.reqs = c("null", "write", "null")) - - optimizer <- mx.opt.create("adadelta", - rho = 0.90, - epsilon = 1e-5, - wd = 0, - rescale.grad = 1, - clip_gradient = -1) - + w1 <- mx.nd.array(array(c(1.1, 1.8), dim = c(2, 1))) + + exec <- mxnet:::mx.symbol.bind(symbol = loss, ctx = mx.cpu(), arg.arrays = list(data = x, + fc1_weight = w1, label = y), aux.arrays = NULL, grad.reqs = c("null", "write", + "null")) + + optimizer <- mx.opt.create("adadelta", rho = 0.9, epsilon = 1e-05, wd = 0, rescale.grad = 1, + clip_gradient = -1) + updaters <- mx.opt.get.updater(optimizer, exec$ref.arg.arrays, ctx = mx.cpu()) - + mx.exec.forward(exec, is.train = T) mx.exec.backward(exec) - + arg.blocks <- updaters(exec$ref.arg.arrays, exec$ref.grad.arrays) mx.exec.update.arg.arrays(exec, arg.blocks, skip.null = TRUE) - - expect_equal(as.array(arg.blocks[[2]]), array(c(1.11, 1.81), dim = c(2,1)), tolerance = 1e-1) - + + expect_equal(as.array(arg.blocks[[2]]), array(c(1.11, 1.81), dim = c(2, 1)), + tolerance = 0.1) + }) diff --git a/R-package/tests/testthat/test_random.R b/R-package/tests/testthat/test_random.R index 411d0c768a6e..e90011dadb2e 100644 --- a/R-package/tests/testthat/test_random.R +++ b/R-package/tests/testthat/test_random.R @@ -3,17 +3,17 @@ require(mxnet) context("random") test_that("mx.runif", { - X <- mx.runif(shape=50000, min=0, max=1, ctx=mx.ctx.default()) - expect_equal(X>=0, mx.nd.ones(50000)) - expect_equal(X<=1, mx.nd.ones(50000)) - sample_mean = mean(as.array(X)) - expect_equal(sample_mean, 0.5, tolerance=1e-2) + X <- mx.runif(shape = 50000, min = 0, max = 1, ctx = mx.ctx.default()) + expect_equal(X >= 0, mx.nd.ones(50000)) + expect_equal(X <= 1, mx.nd.ones(50000)) + sample_mean <- mean(as.array(X)) + expect_equal(sample_mean, 0.5, tolerance = 0.01) }) test_that("mx.rnorm", { - X <- mx.rnorm(shape=50000, mean=5, sd=0.1, ctx=mx.ctx.default()) - sample_mean = mean(as.array(X)) - sample_sd = sd(as.array(X)) - expect_equal(sample_mean, 5, tolerance=1e-2) - expect_equal(sample_sd, 0.1, tolerance=1e-2) + X <- mx.rnorm(shape = 50000, mean = 5, sd = 0.1, ctx = mx.ctx.default()) + sample_mean <- mean(as.array(X)) 
+ sample_sd <- sd(as.array(X)) + expect_equal(sample_mean, 5, tolerance = 0.01) + expect_equal(sample_sd, 0.1, tolerance = 0.01) }) diff --git a/R-package/tests/testthat/test_symbol.R b/R-package/tests/testthat/test_symbol.R index 656d146cd87c..4a253fbd3e7c 100644 --- a/R-package/tests/testthat/test_symbol.R +++ b/R-package/tests/testthat/test_symbol.R @@ -3,71 +3,73 @@ require(mxnet) context("symbol") test_that("basic symbol operation", { - data = mx.symbol.Variable('data') - net1 = mx.symbol.FullyConnected(data = data, name = 'fc1', num_hidden = 10) - net1 = mx.symbol.FullyConnected(data = net1, name = 'fc2', num_hidden = 100) + data <- mx.symbol.Variable("data") + net1 <- mx.symbol.FullyConnected(data = data, name = "fc1", num_hidden = 10) + net1 <- mx.symbol.FullyConnected(data = net1, name = "fc2", num_hidden = 100) - expect_equal(arguments(net1), c('data', 'fc1_weight', 'fc1_bias', 'fc2_weight', 'fc2_bias')) - expect_equal(outputs(net1), 'fc2_output') + expect_equal(arguments(net1), c("data", "fc1_weight", "fc1_bias", "fc2_weight", + "fc2_bias")) + expect_equal(outputs(net1), "fc2_output") - net2 = mx.symbol.FullyConnected(name = 'fc3', num_hidden = 10) - net2 = mx.symbol.Activation(data = net2, act_type = 'relu') - net2 = mx.symbol.FullyConnected(data = net2, name = 'fc4', num_hidden = 20) + net2 <- mx.symbol.FullyConnected(name = "fc3", num_hidden = 10) + net2 <- mx.symbol.Activation(data = net2, act_type = "relu") + net2 <- mx.symbol.FullyConnected(data = net2, name = "fc4", num_hidden = 20) - composed = mx.apply(net2, fc3_data = net1, name = 'composed') + composed <- mx.apply(net2, fc3_data = net1, name = "composed") - expect_equal(arguments(composed), c('data', 'fc1_weight', 'fc1_bias', 'fc2_weight', 'fc2_bias', 'fc3_weight', 'fc3_bias', 'fc4_weight', 'fc4_bias')) - expect_equal(outputs(composed), 'composed_output') + expect_equal(arguments(composed), c("data", "fc1_weight", "fc1_bias", "fc2_weight", + "fc2_bias", "fc3_weight", "fc3_bias", "fc4_weight", "fc4_bias")) + expect_equal(outputs(composed), "composed_output") - multi_out = mx.symbol.Group(c(composed, net1)) - expect_equal(outputs(multi_out), c('composed_output', 'fc2_output')) + multi_out <- mx.symbol.Group(c(composed, net1)) + expect_equal(outputs(multi_out), c("composed_output", "fc2_output")) }) test_that("symbol internal", { - data = mx.symbol.Variable('data') - oldfc = mx.symbol.FullyConnected(data = data, name = 'fc1', num_hidden = 10) - net1 = mx.symbol.FullyConnected(data = oldfc, name = 'fc2', num_hidden = 100) + data <- mx.symbol.Variable("data") + oldfc <- mx.symbol.FullyConnected(data = data, name = "fc1", num_hidden = 10) + net1 <- mx.symbol.FullyConnected(data = oldfc, name = "fc2", num_hidden = 100) - expect_equal(arguments(net1), c("data", "fc1_weight", "fc1_bias", "fc2_weight", "fc2_bias")) + expect_equal(arguments(net1), c("data", "fc1_weight", "fc1_bias", "fc2_weight", + "fc2_bias")) - internal = internals(net1) - fc1 = internal[[match("fc1_output", internal$outputs)]] + internal <- internals(net1) + fc1 <- internal[[match("fc1_output", internal$outputs)]] expect_equal(arguments(fc1), arguments(oldfc)) }) test_that("symbol children", { - data = mx.symbol.Variable('data') - oldfc = mx.symbol.FullyConnected(data = data, - name = 'fc1', - num_hidden = 10) - net1 = mx.symbol.FullyConnected(data = oldfc, name = 'fc2', num_hidden = 100) + data <- mx.symbol.Variable("data") + oldfc <- mx.symbol.FullyConnected(data = data, name = "fc1", num_hidden = 10) + net1 <- mx.symbol.FullyConnected(data = oldfc, name = 
"fc2", num_hidden = 100) - expect_equal(outputs(children(net1)), c('fc1_output', 'fc2_weight', 'fc2_bias')) - expect_equal(outputs(children(children(net1))), c('data', 'fc1_weight', 'fc1_bias')) + expect_equal(outputs(children(net1)), c("fc1_output", "fc2_weight", "fc2_bias")) + expect_equal(outputs(children(children(net1))), c("data", "fc1_weight", "fc1_bias")) - net2 = net1$get.children() - expect_equal(net2[[match('fc2_weight', net2$outputs)]]$arguments, 'fc2_weight') + net2 <- net1$get.children() + expect_equal(net2[[match("fc2_weight", net2$outputs)]]$arguments, "fc2_weight") - data = mx.symbol.Variable('data') - sliced = mx.symbol.SliceChannel(data, num_outputs = 3, name = 'slice') - expect_equal(outputs(children(sliced)), 'data') + data <- mx.symbol.Variable("data") + sliced <- mx.symbol.SliceChannel(data, num_outputs = 3, name = "slice") + expect_equal(outputs(children(sliced)), "data") }) test_that("symbol infer type", { - num_hidden = 128 - num_dim = 64 - num_sample = 10 + num_hidden <- 128 + num_dim <- 64 + num_sample <- 10 - data = mx.symbol.Variable('data') - prev = mx.symbol.Variable('prevstate') - x2h = mx.symbol.FullyConnected(data = data, name = 'x2h', num_hidden = num_hidden) - h2h = mx.symbol.FullyConnected(data = prev, name = 'h2h', num_hidden = num_hidden) + data <- mx.symbol.Variable("data") + prev <- mx.symbol.Variable("prevstate") + x2h <- mx.symbol.FullyConnected(data = data, name = "x2h", num_hidden = num_hidden) + h2h <- mx.symbol.FullyConnected(data = prev, name = "h2h", num_hidden = num_hidden) - out = mx.symbol.Activation(data = mx.symbol.elemwise_add(x2h, h2h), name = 'out', act_type = 'relu') + out <- mx.symbol.Activation(data = mx.symbol.elemwise_add(x2h, h2h), name = "out", + act_type = "relu") # shape inference will fail because information is not available for h2h - ret = mx.symbol.infer.shape(out, data = c(num_dim, num_sample)) + ret <- mx.symbol.infer.shape(out, data = c(num_dim, num_sample)) expect_equal(ret, NULL) }) @@ -77,7 +79,7 @@ test_that("symbol save/load", { fc1 <- mx.symbol.FullyConnected(data, num_hidden = 1) lro <- mx.symbol.LinearRegressionOutput(fc1) mx.symbol.save(lro, "tmp_r_sym.json") - data2 = mx.symbol.load("tmp_r_sym.json") + data2 <- mx.symbol.load("tmp_r_sym.json") expect_equal(data2$as.json(), lro$as.json()) file.remove("tmp_r_sym.json") @@ -85,12 +87,12 @@ test_that("symbol save/load", { test_that("symbol attributes access", { str <- "(1, 1, 1, 1)" - x = mx.symbol.Variable('x') + x <- mx.symbol.Variable("x") x$attributes <- list(`__shape__` = str) expect_equal(x$attributes$`__shape__`, str) - y = mx.symbol.Variable('y') + y <- mx.symbol.Variable("y") y$attributes$`__shape__` <- str expect_equal(y$attributes$`__shape__`, str) From ae5d60fa830090f4882a433d9b88c53c26c42b4f Mon Sep 17 00:00:00 2001 From: Lin Yuan Date: Tue, 28 Aug 2018 09:51:39 -0700 Subject: [PATCH 078/160] Update PyPI version number (#11773) * Update issue templates * Update PyPi version number * Update version number to 0.1.3 * Update issue templates * Remove files added by mistake --- tools/coreml/pip_package/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/coreml/pip_package/setup.py b/tools/coreml/pip_package/setup.py index 18c601d38166..35614271bfdd 100644 --- a/tools/coreml/pip_package/setup.py +++ b/tools/coreml/pip_package/setup.py @@ -40,7 +40,7 @@ def readme(): return f.read() setup(name='mxnet-to-coreml', - version='0.1.0', + version='0.1.3', description='Tool to convert MXNet models into Apple CoreML model format.', 
long_description=readme(),
       classifiers=[

From e2a3eef349cb6643c08a7840d8cbd43b38fedfd5 Mon Sep 17 00:00:00 2001
From: Pedro Larroy <928489+larroy@users.noreply.github.com>
Date: Tue, 28 Aug 2018 21:16:31 +0200
Subject: [PATCH 079/160] A solution to prevent zombie containers locally and in CI (#12381)

Fix pylint, mypy, and pycharm code inspection warnings
---
 ci/README.md |  14 +++
 ci/build.py  | 304 ++++++++++++++++++++++++++++++++++++++------------
 2 files changed, 243 insertions(+), 75 deletions(-)

diff --git a/ci/README.md b/ci/README.md
index 548e9cb9b042..693087569434 100644
--- a/ci/README.md
+++ b/ci/README.md
@@ -59,6 +59,20 @@ To work inside a container with a shell you can do:
 When building, the artifacts are located in the build/ directory in the project root. In case
 `build.py -a` is invoked, the artifacts are located in build./

+# Docker container cleanup (Zombie containers)
+Docker has a client-server architecture, so when the program that is executing the docker client
+dies or receives a signal, the container keeps running as it's started by the docker daemon.
+We implement signal handlers that catch sigterm and sigint and cleanup containers before exit. In
+Jenkins there's not enough time between sigterm and sigkill so we guarantee that containers are not
+left running by propagating environment variables used by the Jenkins process tree killer to
+identify which process to kill when the job is stopped. This has the effect of stopping the
+container given that the process inside the container is terminated.
+
+How to test this is working properly: On the console you can hit ^C while a container is running
+(not just building) and see that the container is stopped by running `docker ps` on another
+terminal. In Jenkins this has been tested by stopping the job which has containers running and
+verifying that the container stops shortly afterwards by running docker ps.
+ ## Add a platform To add a platform, you should add the appropriate dockerfile in diff --git a/ci/build.py b/ci/build.py index f1a5e99e2d0e..df9e97bdb5fd 100755 --- a/ci/build.py +++ b/ci/build.py @@ -23,26 +23,67 @@ """ __author__ = 'Marco de Abreu, Kellen Sunderland, Anton Chernov, Pedro Larroy' -__version__ = '0.2' +__version__ = '0.3' import argparse import glob import logging +import os import re import shutil import subprocess import sys import tempfile -from copy import deepcopy from itertools import chain -from subprocess import call, check_call, check_output +from subprocess import check_call, check_output from typing import * from util import * +import docker +import docker.models +import docker.errors +import signal +import atexit import pprint -import requests -CCACHE_MAXSIZE = '500G' +class Cleanup: + """A class to cleanup containers""" + def __init__(self): + self.containers = set() + self.docker_stop_timeout = 3 + + def add_container(self, container: docker.models.containers.Container): + assert isinstance(container, docker.models.containers.Container) + self.containers.add(container) + + def remove_container(self, container: docker.models.containers.Container): + assert isinstance(container, docker.models.containers.Container) + self.containers.remove(container) + + def _cleanup_containers(self): + if self.containers: + logging.warning("Cleaning up containers") + else: + return + # noinspection PyBroadException + try: + stop_timeout = int(os.environ.get("DOCKER_STOP_TIMEOUT", self.docker_stop_timeout)) + except Exception: + stop_timeout = 3 + for container in self.containers: + try: + container.stop(timeout=stop_timeout) + logging.info("☠: stopped container %s", trim_container_id(container.id)) + container.remove() + logging.info("🚽: removed container %s", trim_container_id(container.id)) + except Exception as e: + logging.exception(e) + self.containers.clear() + logging.info("Cleaning up containers finished.") + + def __call__(self): + """Perform cleanup""" + self._cleanup_containers() def get_dockerfiles_path(): @@ -115,7 +156,10 @@ def run_cmd(): run_cmd() # Get image id by reading the tag. It's guaranteed (except race condition) that the tag exists. 
Otherwise, the # check_call would have failed - return _get_local_image_id(docker_binary=docker_binary, docker_tag=tag) + image_id = _get_local_image_id(docker_binary=docker_binary, docker_tag=tag) + if not image_id: + raise FileNotFoundError('Unable to find docker image id matching with {}'.format(tag)) + return image_id def _get_local_image_id(docker_binary, docker_tag): @@ -137,10 +181,11 @@ def buildir() -> str: def default_ccache_dir() -> str: + """:return: ccache directory for the current platform""" # Share ccache across containers if 'CCACHE_DIR' in os.environ: + ccache_dir = os.path.realpath(os.environ['CCACHE_DIR']) try: - ccache_dir = os.path.realpath(os.environ['CCACHE_DIR']) os.makedirs(ccache_dir, exist_ok=True) return ccache_dir except PermissionError: @@ -154,14 +199,41 @@ def default_ccache_dir() -> str: return os.path.join(tempfile.gettempdir(), "ci_ccache") +def trim_container_id(cid): + """:return: trimmed container id""" + return cid[:12] + + def container_run(platform: str, - docker_binary: str, + nvidia_runtime: bool, docker_registry: str, shared_memory_size: str, local_ccache_dir: str, command: List[str], - dry_run: bool = False, - interactive: bool = False) -> int: + cleanup: Cleanup, + dry_run: bool = False) -> int: + """Run command in a container""" + container_wait_s = 600 + # + # Environment setup + # + environment = { + 'CCACHE_MAXSIZE': '500G', + 'CCACHE_TEMPDIR': '/tmp/ccache', # temp dir should be local and not shared + 'CCACHE_DIR': '/work/ccache', # this path is inside the container as /work/ccache is + # mounted + 'CCACHE_LOGFILE': '/tmp/ccache.log', # a container-scoped log, useful for ccache + # verification. + } + # These variables are passed to the container to the process tree killer can find runaway + # process inside the container + # https://wiki.jenkins.io/display/JENKINS/ProcessTreeKiller + # https://github.com/jenkinsci/jenkins/blob/578d6bacb33a5e99f149de504c80275796f0b231/core/src/main/java/hudson/model/Run.java#L2393 + # + jenkins_env_vars = ['BUILD_NUMBER', 'BUILD_ID', 'BUILD_TAG'] + environment.update({k: os.environ[k] for k in jenkins_env_vars if k in os.environ}) + environment.update({k: os.environ[k] for k in ['CCACHE_MAXSIZE'] if k in os.environ}) + tag = get_docker_tag(platform=platform, registry=docker_registry) mx_root = get_mxnet_root() local_build_folder = buildir() @@ -169,39 +241,107 @@ def container_run(platform: str, os.makedirs(local_build_folder, exist_ok=True) os.makedirs(local_ccache_dir, exist_ok=True) logging.info("Using ccache directory: %s", local_ccache_dir) - runlist = [docker_binary, 'run', '--rm', '-t', - '--shm-size={}'.format(shared_memory_size), - '-v', "{}:/work/mxnet".format(mx_root), # mount mxnet root - '-v', "{}:/work/build".format(local_build_folder), # mount mxnet/build for storing build artifacts - '-v', "{}:/work/ccache".format(local_ccache_dir), - '-u', '{}:{}'.format(os.getuid(), os.getgid()), - '-e', 'CCACHE_MAXSIZE={}'.format(CCACHE_MAXSIZE), - '-e', 'CCACHE_TEMPDIR=/tmp/ccache', # temp dir should be local and not shared - '-e', "CCACHE_DIR=/work/ccache", # this path is inside the container as /work/ccache is mounted - '-e', "CCACHE_LOGFILE=/tmp/ccache.log", # a container-scoped log, useful for ccache verification. 
- tag] - runlist.extend(command) - cmd = '\\\n\t'.join(runlist) + docker_client = docker.from_env() + # Equivalent command + docker_cmd_list = [ + get_docker_binary(nvidia_runtime), + 'run', + '--rm', + '--shm-size={}'.format(shared_memory_size), + # mount mxnet root + '-v', "{}:/work/mxnet".format(mx_root), + # mount mxnet/build for storing build + '-v', "{}:/work/build".format(local_build_folder), + '-v', "{}:/work/ccache".format(local_ccache_dir), + '-u', '{}:{}'.format(os.getuid(), os.getgid()), + '-e', 'CCACHE_MAXSIZE={}'.format(environment['CCACHE_MAXSIZE']), + # temp dir should be local and not shared + '-e', 'CCACHE_TEMPDIR={}'.format(environment['CCACHE_TEMPDIR']), + # this path is inside the container as /work/ccache is mounted + '-e', "CCACHE_DIR={}".format(environment['CCACHE_DIR']), + # a container-scoped log, useful for ccache verification. + '-e', "CCACHE_LOGFILE={}".format(environment['CCACHE_LOGFILE']), + '-ti', + tag] + docker_cmd_list.extend(command) + docker_cmd = ' \\\n\t'.join(docker_cmd_list) + logging.info("Running %s in container %s", command, tag) + logging.info("Executing the equivalent of:\n%s\n", docker_cmd) + # return code of the command inside docker ret = 0 - if not dry_run and not interactive: - logging.info("Running %s in container %s", command, tag) - logging.info("Executing:\n%s\n", cmd) - ret = call(runlist) - - if not dry_run and interactive: - into_cmd = deepcopy(runlist) - # -ti can't be after the tag, as is interpreted as a command so hook it up after the -u argument - idx = into_cmd.index('-u') + 2 - into_cmd[idx:idx] = ['-ti'] - cmd = ' \\\n\t'.join(into_cmd) - logging.info("Executing:\n%s\n", cmd) - ret = call(into_cmd) - - if not dry_run and not interactive and ret != 0: - logging.error("Running of command in container failed (%s):\n%s\n", ret, cmd) - logging.error("You can get into the container by adding the -i option") - raise subprocess.CalledProcessError(ret, cmd) + if not dry_run: + ############################# + # + signal.pthread_sigmask(signal.SIG_BLOCK, {signal.SIGINT, signal.SIGTERM}) + # noinspection PyShadowingNames + runtime = None + if nvidia_runtime: + # noinspection PyShadowingNames + # runc is default (docker info | grep -i runtime) + runtime = 'nvidia' + + container = docker_client.containers.run( + tag, + runtime=runtime, + detach=True, + command=command, + shm_size=shared_memory_size, + user='{}:{}'.format(os.getuid(), os.getgid()), + volumes={ + mx_root: + {'bind': '/work/mxnet', 'mode': 'rw'}, + local_build_folder: + {'bind': '/work/build', 'mode': 'rw'}, + local_ccache_dir: + {'bind': '/work/ccache', 'mode': 'rw'}, + }, + environment=environment) + logging.info("Started container: %s", trim_container_id(container.id)) + # Race condition: + # If the previous call is interrupted then it's possible that the container is not cleaned up + # We avoid by masking the signals temporarily + cleanup.add_container(container) + signal.pthread_sigmask(signal.SIG_UNBLOCK, {signal.SIGINT, signal.SIGTERM}) + # + ############################# + + stream = container.logs(stream=True, stdout=True, stderr=True) + sys.stdout.flush() + for chunk in stream: + sys.stdout.buffer.write(chunk) + sys.stdout.buffer.flush() + sys.stdout.flush() + stream.close() + try: + logging.info("Waiting for status of container %s for %d s.", + trim_container_id(container.id), + container_wait_s) + wait_result = container.wait(timeout=container_wait_s) + logging.info("Container exit status: %s", wait_result) + ret = wait_result.get('StatusCode', 200) + except 
Exception as e: + logging.exception(e) + ret = 150 + + # Stop + try: + logging.info("Stopping container: %s", trim_container_id(container.id)) + container.stop() + except Exception as e: + logging.exception(e) + ret = 151 + # Remove + try: + logging.info("Removing container: %s", trim_container_id(container.id)) + container.remove() + except Exception as e: + logging.exception(e) + ret = 152 + cleanup.remove_container(container) + containers = docker_client.containers.list() + if containers: + logging.info("Other running containers: %s", [trim_container_id(x.id) for x in containers]) return ret @@ -210,12 +350,13 @@ def list_platforms() -> str: def load_docker_cache(tag, docker_registry) -> None: + """Imports tagged container from the given docker registry""" if docker_registry: + # noinspection PyBroadException try: import docker_cache logging.info('Docker cache download is enabled from registry %s', docker_registry) docker_cache.load_docker_cache(registry=docker_registry, docker_tag=tag) - # noinspection PyBroadException except Exception: logging.exception('Unable to retrieve Docker cache. Continue without...') else: @@ -231,6 +372,7 @@ def log_environment(): def script_name() -> str: + """:returns: script name with leading paths removed""" return os.path.split(sys.argv[0])[1] @@ -274,10 +416,6 @@ def main() -> int: help="print docker run command for manual inspection", action='store_true') - parser.add_argument("-i", "--interactive", - help="go in a shell inside the container", - action='store_true') - parser.add_argument("-d", "--docker-registry", help="Dockerhub registry name to retrieve cache from. Default is 'mxnetci'", default='mxnetci', @@ -299,7 +437,7 @@ def main() -> int: parser.add_argument("--ccache-dir", default=default_ccache_dir(), - help="Ccache directory", + help="ccache directory", type=str) args = parser.parse_args() @@ -310,6 +448,20 @@ def use_cache(): command = list(chain(*args.command)) docker_binary = get_docker_binary(args.nvidiadocker) + # Cleanup on signals and exit + cleanup = Cleanup() + + def signal_handler(signum, _): + signal.pthread_sigmask(signal.SIG_BLOCK, {signum}) + logging.warning("Signal %d received, cleaning up...", signum) + cleanup() + logging.warning("done. Exiting with error.") + sys.exit(1) + + atexit.register(cleanup) + signal.signal(signal.SIGTERM, signal_handler) + signal.signal(signal.SIGINT, signal_handler) + if args.list: print(list_platforms()) elif args.platform: @@ -323,38 +475,42 @@ def use_cache(): logging.warning("Container was just built. 
Exiting due to build-only.") return 0 + # noinspection PyUnusedLocal + ret = 0 if command: - container_run(platform=platform, docker_binary=docker_binary, shared_memory_size=args.shared_memory_size, - command=command, docker_registry=args.docker_registry, - local_ccache_dir=args.ccache_dir, interactive=args.interactive) + ret = container_run( + platform=platform, nvidia_runtime=args.nvidiadocker, + shared_memory_size=args.shared_memory_size, command=command, docker_registry=args.docker_registry, + local_ccache_dir=args.ccache_dir, cleanup=cleanup) elif args.print_docker_run: - print(container_run(platform=platform, docker_binary=docker_binary, shared_memory_size=args.shared_memory_size, - command=[], dry_run=True, docker_registry=args.docker_registry, - local_ccache_dir=args.ccache_dir)) - elif args.interactive: - container_run(platform=platform, docker_binary=docker_binary, shared_memory_size=args.shared_memory_size, - command=command, docker_registry=args.docker_registry, - local_ccache_dir=args.ccache_dir, interactive=args.interactive) - + command = [] + ret = container_run( + platform=platform, nvidia_runtime=args.nvidiadocker, + shared_memory_size=args.shared_memory_size, command=command, docker_registry=args.docker_registry, + local_ccache_dir=args.ccache_dir, dry_run=True, cleanup=cleanup) else: # With no commands, execute a build function for the target platform - assert not args.interactive, "when running with -i must provide a command" - cmd = ["/work/mxnet/ci/docker/runtime_functions.sh", "build_{}".format(platform)] - logging.info("No command specified, trying default build: %s", ' '.join(cmd)) - container_run(platform=platform, docker_binary=docker_binary, shared_memory_size=args.shared_memory_size, - command=cmd, docker_registry=args.docker_registry, - local_ccache_dir=args.ccache_dir) + command = ["/work/mxnet/ci/docker/runtime_functions.sh", "build_{}".format(platform)] + logging.info("No command specified, trying default build: %s", ' '.join(command)) + ret = container_run( + platform=platform, nvidia_runtime=args.nvidiadocker, + shared_memory_size=args.shared_memory_size, command=command, docker_registry=args.docker_registry, + local_ccache_dir=args.ccache_dir, cleanup=cleanup) + + if ret != 0: + logging.critical("Execution of %s failed with status: %d", command, ret) + return ret elif args.all: platforms = get_platforms() - logging.info("Building for all architectures: {}".format(platforms)) + logging.info("Building for all architectures: %s", platforms) logging.info("Artifacts will be produced in the build/ directory.") for platform in platforms: tag = get_docker_tag(platform=platform, registry=args.docker_registry) if use_cache(): load_docker_cache(tag=tag, docker_registry=args.docker_registry) - build_docker(platform, docker_binary, args.docker_registry, num_retries=args.docker_build_retries, - use_cache=use_cache()) + build_docker(platform, docker_binary=docker_binary, registry=args.docker_registry, + num_retries=args.docker_build_retries, use_cache=use_cache()) if args.build_only: continue shutil.rmtree(buildir(), ignore_errors=True) @@ -362,11 +518,13 @@ def use_cache(): plat_buildir = os.path.abspath(os.path.join(get_mxnet_root(), '..', "mxnet_{}".format(build_platform))) if os.path.exists(plat_buildir): - logging.warning("{} already exists, skipping".format(plat_buildir)) + logging.warning("%s already exists, skipping", plat_buildir) continue command = ["/work/mxnet/ci/docker/runtime_functions.sh", build_platform] - container_run(platform=platform, 
docker_binary=docker_binary, shared_memory_size=args.shared_memory_size, - command=command, docker_registry=args.docker_registry, local_ccache_dir=args.ccache_dir) + container_run( + platform=platform, nvidia_runtime=args.nvidiadocker, + shared_memory_size=args.shared_memory_size, command=command, docker_registry=args.docker_registry, + local_ccache_dir=args.ccache_dir, cleanup=cleanup) shutil.move(buildir(), plat_buildir) logging.info("Built files left in: %s", plat_buildir) @@ -389,13 +547,9 @@ def use_cache(): Will print a docker run command to get inside the container in a shell -./build.py -p armv7 --interactive - - Will execute a shell into the container - ./build.py -a - Builds for all platforms and leaves artifacts in build_. + Builds for all platforms and leaves artifacts in build_ """) From 65c374db28941c9dc57e89b45c61779a55fd3025 Mon Sep 17 00:00:00 2001 From: Anton Chernov Date: Wed, 29 Aug 2018 17:03:42 +0200 Subject: [PATCH 080/160] Disabled flaky test: test_mkldnn.test_activation (#12378) * Disabled flaky test: test_mkldnn.test_activation * Revert accidental change --- tests/python/mkl/test_mkldnn.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/python/mkl/test_mkldnn.py b/tests/python/mkl/test_mkldnn.py index 6287bfc96fab..ba4cf3f0116a 100644 --- a/tests/python/mkl/test_mkldnn.py +++ b/tests/python/mkl/test_mkldnn.py @@ -22,6 +22,7 @@ import os import numpy as np import mxnet as mx +import unittest from mxnet.test_utils import rand_ndarray, assert_almost_equal from mxnet import gluon from mxnet.gluon import nn @@ -280,6 +281,7 @@ def check_pooling_training(stype): check_pooling_training(stype) +@unittest.skip("Flaky test: https://github.com/apache/incubator-mxnet/issues/12377") @with_seed() def test_activation(): def check_activation_training(stype): From 1f0d6ba7fd313bcaf145767274cbf9c96a0febc8 Mon Sep 17 00:00:00 2001 From: Sergey Sokolov Date: Wed, 29 Aug 2018 08:43:41 -0700 Subject: [PATCH 081/160] Add tutorial Gotchas using NumPy (#12007) * Add tutorial Gotchas using NumPy * Forcing build * Code review fix * Forcing build --- .../tutorials/gluon/gotchas_numpy_in_mxnet.md | 168 ++++++++++++++++++ docs/tutorials/index.md | 1 + tests/tutorials/test_tutorials.py | 3 + 3 files changed, 172 insertions(+) create mode 100644 docs/tutorials/gluon/gotchas_numpy_in_mxnet.md diff --git a/docs/tutorials/gluon/gotchas_numpy_in_mxnet.md b/docs/tutorials/gluon/gotchas_numpy_in_mxnet.md new file mode 100644 index 000000000000..c82c63edbc2b --- /dev/null +++ b/docs/tutorials/gluon/gotchas_numpy_in_mxnet.md @@ -0,0 +1,168 @@ + +# Gotchas using NumPy in Apache MXNet + +The goal of this tutorial is to explain some common misconceptions about using [NumPy](http://www.numpy.org/) arrays in Apache MXNet. We are going to explain why you need to minimize or completely remove usage of NumPy from your Apache MXNet code. We also going to show how to minimize NumPy performance impact, when you have to use NumPy. + +## Asynchronous and non-blocking nature of Apache MXNet + +Instead of using NumPy arrays Apache MXNet offers its own array implementation named [NDArray](https://mxnet.incubator.apache.org/api/python/ndarray/ndarray.html). `NDArray API` was intentionally designed to be similar to `NumPy`, but there are differences. + +One key difference is in the way calculations are executed. Every `NDArray` manipulation in Apache MXNet is done in asynchronous, non-blocking way. 
That means that when we write code like `c = a * b`, where both `a` and `b` are `NDArrays`, the function is pushed to the [Execution Engine](https://mxnet.incubator.apache.org/architecture/overview.html#execution-engine), which starts the calculation. The function returns immediately, and the user thread can continue execution, despite the fact that the calculation may not have been completed yet. + +`Execution Engine` builds the computation graph which may reorder or combine some calculations, but it honors dependency order: if there are other manipulations of `c` done later in the code, the `Execution Engine` will start doing them once the result of `c` is available. We don't need to write callbacks to start execution of subsequent code - the `Execution Engine` is going to do it for us. + +To get the result of the computation we only need to access the resulting variable, and the flow of the code will be blocked until the computation results are assigned to the resulting variable. This behavior allows increasing code performance while still supporting the imperative programming mode. + +Refer to the [intro tutorial to NDArray](https://mxnet.incubator.apache.org/tutorials/basic/ndarray.html) if you are new to Apache MXNet and would like to learn more about how to manipulate NDArrays. + +## Converting NDArray to NumPy Array blocks calculation + +Many people are familiar with NumPy and comfortable doing tensor manipulations with it. `NDArray API` offers a convenient [.asnumpy() method](https://mxnet.incubator.apache.org/api/python/ndarray/ndarray.html#mxnet.ndarray.NDArray.asnumpy) to cast `nd.array` to `np.array`. However, by doing this cast and using `np.array` for calculation, we cannot use all the goodness of the `Execution Engine`. All manipulations done on `np.array` are blocking. Moreover, the cast to `np.array` itself is a blocking operation (same as [.asscalar()](https://mxnet.incubator.apache.org/api/python/ndarray/ndarray.html#mxnet.ndarray.NDArray.asscalar), [.wait_to_read()](https://mxnet.incubator.apache.org/api/python/ndarray/ndarray.html#mxnet.ndarray.NDArray.wait_to_read) and [.waitall()](https://mxnet.incubator.apache.org/api/python/ndarray/ndarray.html#mxnet.ndarray.waitall)). + +That means that if we have a long computation graph and, at some point, we want to cast the result to `np.array`, it may feel like the casting takes a lot of time. But what really takes this time is the `Execution Engine`, which finishes all the async calculations we have pushed into it to get the final result, which will then be converted to `np.array`. + +Because of the blocking nature of the [.asnumpy() method](https://mxnet.incubator.apache.org/api/python/ndarray/ndarray.html#mxnet.ndarray.NDArray.asnumpy), using it reduces the execution performance, especially if the calculations are done on GPU: Apache MXNet has to copy data from GPU to CPU to return `np.array`. + +The best solution is to **make manipulations directly on NDArrays using the methods provided in [NDArray API](https://mxnet.incubator.apache.org/api/python/ndarray/ndarray.html)**. + +## NumPy operators vs. NDArray operators + +Despite the fact that [NDArray API](https://mxnet.incubator.apache.org/api/python/ndarray/ndarray.html) was specifically designed to be similar to `NumPy`, sometimes it is not easy to replace existing `NumPy` computations. The main reason is that not all operators that are available in `NumPy` are available in `NDArray API`. 
The list of currently available operators is available on the [NDArray class page](http://mxnet.incubator.apache.org/api/python/ndarray/ndarray.html#the-ndarray-class). + +If a required operator is missing from `NDArray API`, there are a few things you can do. + +### Combine a higher level operator using a few lower level operators + +There are situations when you can assemble a higher level operator using existing operators. An example of that is the [np.full_like()](https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.full_like.html) operator. This operator doesn't exist in `NDArray API`, but can be easily replaced with a combination of existing operators. + + +```python +from mxnet import nd +import numpy as np + +# NumPy has full_like() operator +np_y = np.full_like(a=np.arange(6, dtype=int), fill_value=10) + +# NDArray doesn't have it, but we can replace it with +# creating an array of ones and then multiplying by fill_value +nd_y = nd.ones(shape=(6,)) * 10 + +# To compare results we had to convert NDArray to NumPy +# But this is okay for that particular case +np.array_equal(np_y, nd_y.asnumpy()) +``` + + True + +### Find similar operator with different name and/or signature + +Some operators may have a slightly different name, but are similar in terms of functionality. For example [nd.ravel_multi_index()](https://mxnet.incubator.apache.org/api/python/ndarray/ndarray.html#mxnet.ndarray.ravel_multi_index) is similar to [np.ravel()](https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.ma.ravel.html#numpy.ma.ravel). In other cases some operators may have similar names, but different signatures. For example [np.split()](https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.split.html#numpy.split) and [nd.split()](https://mxnet.incubator.apache.org/api/python/ndarray/ndarray.html#mxnet.ndarray.split) are similar, but the former works with indices and the latter requires the number of splits to be provided. + +One particular example of different input requirements is [nd.pad()](https://mxnet.incubator.apache.org/api/python/ndarray/ndarray.html#mxnet.ndarray.pad). The trick is that it can only work with 4-dimensional tensors. If your input has fewer dimensions, then you need to expand its number of dimensions before using `nd.pad()`, as shown in the code block below: + + +```python +def pad_array(data, max_length): + # expand dimensions to 4, because nd.pad can work only with 4 dims + data_expanded = data.reshape(1, 1, 1, data.shape[0]) + + # pad all 4 dimensions with constant value of 0 + data_padded = nd.pad(data_expanded, + mode='constant', + pad_width=[0, 0, 0, 0, 0, 0, 0, max_length - data.shape[0]], + constant_value=0) + + # remove temporary dimensions + data_reshaped_back = data_padded.reshape(max_length) + return data_reshaped_back + +pad_array(nd.array([1, 2, 3]), max_length=10) +``` + + [ 1. 2. 3. 0. 0. 0. 0. 0. 0. 0.] + + + + +### Search for an operator on [Github](https://github.com/apache/incubator-mxnet/labels/Operator) + +The Apache MXNet community is responsive to requests, and everyone is welcome to contribute new operators. Keep in mind that there is always a lag between new operators being merged into the codebase and the release of the next stable version. For example, the [nd.diag()](https://github.com/apache/incubator-mxnet/pull/11643) operator was recently introduced to Apache MXNet, but at the time of writing this tutorial, it is not in any stable release. 
You can always get all the latest implementations by installing the [master version](https://mxnet.incubator.apache.org/install/index.html?version=master#) of Apache MXNet. + +## How to minimize the impact of blocking calls + +There are cases when you have to use either the `.asnumpy()` or `.asscalar()` methods. As explained before, this will force Apache MXNet to block the execution until the result can be retrieved. One common use case is printing a metric or a value of a loss function. + +You can minimize the impact of a blocking call by calling `.asnumpy()` or `.asscalar()` at the moment when you think the calculation of this value is already done. In the example below, we introduce the `LossBuffer` class. It is used to cache the previous value of a loss function. By doing so, we delay printing by one iteration in the hope that the `Execution Engine` will have finished the previous iteration and the blocking time will be minimized. + + +```python +from __future__ import print_function + +import mxnet as mx +from mxnet import gluon, nd, autograd +from mxnet.ndarray import NDArray +from mxnet.gluon import HybridBlock +import numpy as np + +class LossBuffer(object): + """ + Simple buffer for storing loss value + """ + def __init__(self): + self._loss = None + + def new_loss(self, loss): + ret = self._loss + self._loss = loss + return ret + + @property + def loss(self): + return self._loss + + +net = gluon.nn.Dense(10) +ce = gluon.loss.SoftmaxCELoss() +net.initialize() + +data = nd.random.uniform(shape=(1024, 100)) +label = nd.array(np.random.randint(0, 10, (1024,)), dtype='int32') +train_dataset = gluon.data.ArrayDataset(data, label) +train_data = gluon.data.DataLoader(train_dataset, batch_size=128, shuffle=True, num_workers=2) + +trainer = gluon.Trainer(net.collect_params(), optimizer='sgd') +loss_buffer = LossBuffer() + +for data, label in train_data: + with autograd.record(): + out = net(data) + # This call saves new loss and returns previous loss + prev_loss = loss_buffer.new_loss(ce(out, label)) + + loss_buffer.loss.backward() + trainer.step(data.shape[0]) + + if prev_loss is not None: + print("Loss: {}".format(np.mean(prev_loss.asnumpy()))) +``` + + Loss: 2.310760974884033 + + Loss: 2.334498643875122 + + Loss: 2.3244147300720215 + + Loss: 2.332686424255371 + + Loss: 2.321366310119629 + + Loss: 2.3236165046691895 + + Loss: 2.3178648948669434 + + +## Conclusion + +For performance reasons, it is better to use native `NDArray API` methods and avoid using NumPy altogether. In cases when you must use NumPy, you can use the convenient `.asnumpy()` method on `NDArray` to get a NumPy representation. By doing so, you block the whole computational process, and force data to be synced between CPU and GPU. If it is a necessary evil to do that, try to minimize the blocking time by calling `.asnumpy()` at a time when you expect the value to have already been computed. 
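As a minimal sketch of the asynchronous behavior described at the beginning of this tutorial (the array shapes here are arbitrary, and the measured times will vary by machine), you can time an operator call against `wait_to_read()`:

```python
import time
from mxnet import nd

# Arbitrary, illustrative shapes; large enough that the matrix product takes measurable time.
a = nd.random.uniform(shape=(2000, 2000))
b = nd.random.uniform(shape=(2000, 2000))

start = time.time()
c = nd.dot(a, b)  # pushed to the Execution Engine; returns almost immediately
print("After nd.dot():       {:.4f} s".format(time.time() - start))

start = time.time()
c.wait_to_read()  # blocks until the Execution Engine has finished computing c
print("After wait_to_read(): {:.4f} s".format(time.time() - start))
```

The first elapsed time is typically close to zero because `nd.dot()` only enqueues the work with the `Execution Engine`, while the second elapsed time includes the actual computation.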
+ + \ No newline at end of file diff --git a/docs/tutorials/index.md b/docs/tutorials/index.md index 3632388b82a9..530d1302129b 100644 --- a/docs/tutorials/index.md +++ b/docs/tutorials/index.md @@ -38,6 +38,7 @@ Select API:  * [Word-level text generation with RNN, LSTM and GRU](http://gluon.mxnet.io/chapter05_recurrent-neural-networks/rnns-gluon.html) External link * [Visual Question Answering](http://gluon.mxnet.io/chapter08_computer-vision/visual-question-answer.html) External link * Practitioner Guides + * [Gotchas using NumPy](/tutorials/gluon/gotchas_numpy_in_mxnet.html) * [Multi-GPU training](http://gluon.mxnet.io/chapter07_distributed-learning/multiple-gpus-gluon.html) External link * [Checkpointing and Model Serialization (a.k.a. saving and loading)](/tutorials/gluon/save_load_params.html) External link ([Alternative](http://gluon.mxnet.io/chapter03_deep-neural-networks/serialization.html)) * [Distributed Training](https://github.com/apache/incubator-mxnet/tree/master/example/distributed_training) diff --git a/tests/tutorials/test_tutorials.py b/tests/tutorials/test_tutorials.py index 503df017ffe0..a2442a4f6a06 100644 --- a/tests/tutorials/test_tutorials.py +++ b/tests/tutorials/test_tutorials.py @@ -142,6 +142,9 @@ def test_python_linear_regression(): def test_python_logistic_regression() : assert _test_tutorial_nb('gluon/logistic_regression_explained') +def test_python_numpy_gotchas() : + assert _test_tutorial_nb('gluon/gotchas_numpy_in_mxnet') + def test_python_mnist(): assert _test_tutorial_nb('python/mnist') From ba8a9d13e1b549d061f1933c463cfad5e7bdd7aa Mon Sep 17 00:00:00 2001 From: Hao Jin Date: Wed, 29 Aug 2018 10:35:44 -0700 Subject: [PATCH 082/160] support softmin operator with unit test (#12306) --- src/operator/contrib/ctc_loss-inl.h | 7 +- src/operator/nn/softmax-inl.h | 88 +++++++++++++++----------- src/operator/nn/softmax.cc | 39 ++++++++++++ src/operator/nn/softmax.cu | 7 ++ tests/python/unittest/test_operator.py | 24 ++++++- 5 files changed, 123 insertions(+), 42 deletions(-) diff --git a/src/operator/contrib/ctc_loss-inl.h b/src/operator/contrib/ctc_loss-inl.h index 72209ae286c6..9380be47451f 100644 --- a/src/operator/contrib/ctc_loss-inl.h +++ b/src/operator/contrib/ctc_loss-inl.h @@ -409,7 +409,8 @@ class CTCLossOp : public Operator { // since the input is activation before softmax and cudnn ctc takes softmax // apply softmax to inputs first. - mxnet_op::Softmax(s, data.dptr_, prob.dptr_, data.shape_, 2, 1.0); + mxnet_op::Softmax( + s, data.dptr_, prob.dptr_, data.shape_, 2, 1.0); CUDNN_CALL(cudnnCTCLoss(s->dnn_handle_, prob_desc_, @@ -426,8 +427,8 @@ class CTCLossOp : public Operator { workspace_bytes)); if (req_grad) { - mxnet_op::SoftmaxGrad(s, - prob.dptr_, grad.dptr_, grad.dptr_, data.shape_, 2, 1.0); + mxnet_op::SoftmaxGrad( + s, prob.dptr_, grad.dptr_, grad.dptr_, data.shape_, 2, 1.0); Assign(grad, mxnet::kWriteInplace, grad * alphabet_size); } } diff --git a/src/operator/nn/softmax-inl.h b/src/operator/nn/softmax-inl.h index 4a19db7c36bc..c063e385f63a 100644 --- a/src/operator/nn/softmax-inl.h +++ b/src/operator/nn/softmax-inl.h @@ -51,7 +51,7 @@ struct log_softmax_fwd { }; -template +template inline void Softmax(Stream *s, DType *in, DType *out, Shape shape, int axis, const DType temperature) { index_t M = shape[axis]; @@ -65,30 +65,37 @@ inline void Softmax(Stream *s, DType *in, DType *out, for (int i = 0; i < static_cast(N); ++i) { index_t base = unravel_dot(i, sshape, stride); - DType mmax = in[base]; + DType mmax = negate ? 
-in[base] : in[base]; + DType val; for (index_t j = 1; j < M; ++j) { - if (mmax < in[base + j*sa]) mmax = in[base + j*sa]; + val = negate ? -in[base + j*sa] : in[base + j*sa]; + if (mmax < val) mmax = val; } DType sum = DType(0); + DType in_val; // By default temperature is 1.0, and only in reinforcement training // users would set it to other values. // Adding a branch here to save the CPU 'divide-by-1' computation at runtime if (temperature == 1.0) { for (index_t j = 0; j < M; ++j) { - sum += std::exp(in[base + j*sa] - mmax); + in_val = negate ? -in[base + j*sa] : in[base + j*sa]; + sum += std::exp(in_val - mmax); } for (index_t j = 0; j < M; ++j) { - out[base + j*sa] = OP::Map(in[base + j*sa] - mmax, sum); + in_val = negate ? -in[base + j*sa] : in[base + j*sa]; + out[base + j*sa] = OP::Map(in_val - mmax, sum); } } else { for (index_t j = 0; j < M; ++j) { - sum += std::exp((in[base + j*sa] - mmax)/temperature); + in_val = negate ? -in[base + j*sa] : in[base + j*sa]; + sum += std::exp((in_val - mmax)/temperature); } for (index_t j = 0; j < M; ++j) { - out[base + j*sa] = OP::Map((in[base + j*sa] - mmax)/temperature, sum); + in_val = negate ? -in[base + j*sa] : in[base + j*sa]; + out[base + j*sa] = OP::Map((in_val - mmax)/temperature, sum); } } } @@ -111,7 +118,7 @@ struct log_softmax_bwd { }; -template +template inline void SoftmaxGrad(Stream *s, DType *out, DType *ograd, DType *igrad, Shape shape, int axis, const DType temperature) { @@ -137,12 +144,16 @@ inline void SoftmaxGrad(Stream *s, DType *out, DType *ograd, DType final_result; if (temperature == 1.0) { for (index_t j = 0; j < M; ++j) { - final_result = OP2::Map(ograd[base + j*sa], out[base + j*sa], sum); + final_result = negate ? + -OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) : + OP2::Map(ograd[base + j*sa], out[base + j*sa], sum); KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result); } } else { for (index_t j = 0; j < M; ++j) { - final_result = OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature; + final_result = negate ? + -OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature : + OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature; KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result); } } @@ -151,7 +162,7 @@ inline void SoftmaxGrad(Stream *s, DType *out, DType *ograd, #ifdef __CUDACC__ -template +template __global__ void softmax_compute_kernel(DType *in, DType *out, index_t M, int axis, Shape sshape, Shape stride, const double temperature) { @@ -163,7 +174,7 @@ __global__ void softmax_compute_kernel(DType *in, DType *out, index_t M, int axi red::maximum::SetInitValue(smem[x]); for (index_t i = x; i < M; i += x_size) { - red::maximum::Reduce(smem[x], in[base + i*sa]); + red::maximum::Reduce(smem[x], negate ? -in[base + i*sa] : in[base + i*sa]); } __syncthreads(); cuda::Reduce1D(smem); @@ -172,9 +183,11 @@ __global__ void softmax_compute_kernel(DType *in, DType *out, index_t M, int axi __syncthreads(); red::sum::SetInitValue(smem[x]); + DType val; for (index_t i = x; i < M; i += x_size) { - red::sum::Reduce(smem[x], static_cast(expf((in[base + i*sa] - smax)/ - static_cast(temperature)))); + val = negate ? 
-in[base + i*sa]:in[base + i*sa]; + red::sum::Reduce( + smem[x], static_cast(expf((val - smax) / static_cast(temperature)))); } __syncthreads(); cuda::Reduce1D(smem); @@ -183,11 +196,12 @@ __global__ void softmax_compute_kernel(DType *in, DType *out, index_t M, int axi __syncthreads(); for (index_t i = x; i < M; i += x_size) { - out[base + i*sa] = OP::Map((in[base + i*sa] - smax)/static_cast(temperature), ssum); + val = negate ? -in[base + i*sa] : in[base + i*sa]; + out[base + i*sa] = OP::Map((val - smax)/static_cast(temperature), ssum); } } -template +template inline void Softmax(Stream *s, DType *in, DType *out, Shape shape, int axis, const double temperature) { const int x_bits = 7; @@ -198,14 +212,14 @@ inline void Softmax(Stream *s, DType *in, DType *out, Shape sshape = shape; sshape[axis] = 1; - softmax_compute_kernel + softmax_compute_kernel <<::GetStream(s)>>>( in, out, M, axis, sshape, stride, temperature); MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_compute_kernel); } -template +template __global__ void softmax_gradient_kernel(DType *out, DType *ograd, DType *igrad, index_t M, int axis, Shape sshape, Shape stride, const double temperature) { @@ -228,13 +242,15 @@ __global__ void softmax_gradient_kernel(DType *out, DType *ograd, DType *igrad, DType final_result; for (index_t i = x; i < M; i += x_size) { final_result = - OP2::Map(ograd[base + i*sa], out[base + i*sa], ssum) / static_cast(temperature); - KERNEL_ASSIGN(igrad[base + i*sa], Req, final_result); + negate ? + -OP2::Map(ograd[base + i*sa], out[base + i*sa], ssum) : + OP2::Map(ograd[base + i*sa], out[base + i*sa], ssum); + KERNEL_ASSIGN(igrad[base + i*sa], Req, final_result / static_cast(temperature)); } } -template +template inline void SoftmaxGrad(Stream *s, DType *out, DType *ograd, DType *igrad, Shape shape, int axis, const double temperature) { @@ -246,7 +262,7 @@ inline void SoftmaxGrad(Stream *s, DType *out, DType *ograd, Shape sshape = shape; sshape[axis] = 1; - softmax_gradient_kernel + softmax_gradient_kernel <<::GetStream(s)>>>( out, ograd, igrad, M, axis, sshape, stride, temperature); MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_gradient_kernel); @@ -267,7 +283,7 @@ struct SoftmaxParam : public dmlc::Parameter { } }; -template +template void SoftmaxCompute(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector& inputs, @@ -283,19 +299,19 @@ void SoftmaxCompute(const nnvm::NodeAttrs& attrs, TShape shape = AxisShapeCompact(inputs[0].shape_, &axis, true); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { if (shape.ndim() == 2) { - Softmax(ctx.get_stream(), inputs[0].dptr(), - outputs[0].dptr(), shape.get<2>(), axis, - static_cast(temperature)); + Softmax(ctx.get_stream(), inputs[0].dptr(), + outputs[0].dptr(), shape.get<2>(), axis, + static_cast(temperature)); } else { - Softmax(ctx.get_stream(), inputs[0].dptr(), - outputs[0].dptr(), shape.get<3>(), axis, - static_cast(temperature)); + Softmax(ctx.get_stream(), inputs[0].dptr(), + outputs[0].dptr(), shape.get<3>(), axis, + static_cast(temperature)); } }); } -template +template void SoftmaxGradCompute(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector& inputs, @@ -311,13 +327,13 @@ void SoftmaxGradCompute(const nnvm::NodeAttrs& attrs, MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { if (shape.ndim() == 2) { - SoftmaxGrad(ctx.get_stream(), inputs[1].dptr(), - inputs[0].dptr(), outputs[0].dptr(), - shape.get<2>(), axis, static_cast(temperature)); + SoftmaxGrad(ctx.get_stream(), 
inputs[1].dptr(), + inputs[0].dptr(), outputs[0].dptr(), + shape.get<2>(), axis, static_cast(temperature)); } else { - SoftmaxGrad(ctx.get_stream(), inputs[1].dptr(), - inputs[0].dptr(), outputs[0].dptr(), - shape.get<3>(), axis, static_cast(temperature)); + SoftmaxGrad(ctx.get_stream(), inputs[1].dptr(), + inputs[0].dptr(), outputs[0].dptr(), + shape.get<3>(), axis, static_cast(temperature)); } }); }); diff --git a/src/operator/nn/softmax.cc b/src/operator/nn/softmax.cc index 0fad3d6e6951..88b7b5fc473e 100644 --- a/src/operator/nn/softmax.cc +++ b/src/operator/nn/softmax.cc @@ -109,6 +109,45 @@ MXNET_OPERATOR_REGISTER_BINARY(_backward_softmax) .set_attr("FCompute", SoftmaxGradCompute); +MXNET_OPERATOR_REGISTER_UNARY(softmin) +.describe(R"code(Applies the softmin function. + +The resulting array contains elements in the range (0,1) and the elements along the given axis sum +up to 1. + +.. math:: + softmin(\mathbf{z/t})_j = \frac{e^{-z_j/t}}{\sum_{k=1}^K e^{-z_k/t}} + +for :math:`j = 1, ..., K` + +t is the temperature parameter in softmax function. By default, t equals 1.0 + +Example:: + + x = [[ 1. 2. 3.] + [ 3. 2. 1.]] + + softmin(x,axis=0) = [[ 0.88079703, 0.5, 0.11920292], + [ 0.11920292, 0.5, 0.88079703]] + + softmin(x,axis=1) = [[ 0.66524094, 0.24472848, 0.09003057], + [ 0.09003057, 0.24472848, 0.66524094]] + +)code" ADD_FILELINE) +.set_attr_parser(ParamParser) +.set_attr("FListOutputNames", + [](const NodeAttrs& attrs) { + return std::vector{"output"}; +}) +.set_attr("FCompute", SoftmaxCompute) +.set_attr("FGradient", ElemwiseGradUseOut{"_backward_softmin"}) +.add_arguments(SoftmaxParam::__FIELDS__()); + +MXNET_OPERATOR_REGISTER_BINARY(_backward_softmin) +.set_attr_parser(ParamParser) +.set_attr("FCompute", SoftmaxGradCompute); + MXNET_OPERATOR_REGISTER_UNARY(log_softmax) .describe(R"code(Computes the log softmax of the input. This is equivalent to computing softmax followed by log. 
diff --git a/src/operator/nn/softmax.cu b/src/operator/nn/softmax.cu index 8274642c81ba..254e726d5e26 100644 --- a/src/operator/nn/softmax.cu +++ b/src/operator/nn/softmax.cu @@ -35,6 +35,13 @@ NNVM_REGISTER_OP(_backward_softmax) .set_attr("FCompute", SoftmaxGradCompute); +NNVM_REGISTER_OP(softmin) +.set_attr("FCompute", SoftmaxCompute); + +NNVM_REGISTER_OP(_backward_softmin) +.set_attr("FCompute", SoftmaxGradCompute); + NNVM_REGISTER_OP(log_softmax) .set_attr("FCompute", SoftmaxCompute); diff --git a/tests/python/unittest/test_operator.py b/tests/python/unittest/test_operator.py index 38c90e6747ef..5bd88dd58695 100644 --- a/tests/python/unittest/test_operator.py +++ b/tests/python/unittest/test_operator.py @@ -267,11 +267,8 @@ def test_rnnrelu_dropout(): out[0].wait_to_read() def np_softmax(x, axis=-1, temperature=1.0): - # fix for old numpy on Travis not supporting keepdims - # x = x - np.max(x, axis=-1, keepdims=True) x = x - np.max(x, axis=axis, keepdims=True) x = np.exp(x/temperature) - # x /= np.sum(x, axis=-1, keepdims=True) x /= np.sum(x, axis=axis, keepdims=True) return x @@ -4535,6 +4532,27 @@ def test_1d_cond(): test_invalid_shape() test_1d_cond() + +@with_seed() +def test_softmin(): + for ndim in range(1, 5): + for dtype in [np.float16, np.float32, np.float64]: + rtol, atol = (1e-2, 5e-3) if dtype is np.float16 else (1e-3, 1e-3) + shape = np.random.randint(1, 5, size=ndim) + axis = np.random.randint(-ndim, ndim) + data = np.random.uniform(-2, 2, size=shape).astype(dtype) + data = data / 10 if dtype is np.float16 else data + sym = mx.sym.softmin(axis=axis) + expected_fwd = np_softmax(-data, axis=axis) + expected_bwd = np.zeros(shape) + check_symbolic_forward(sym, [data], [expected_fwd], atol=atol, dtype=dtype) + for req in ['null', 'add', 'write']: + check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd], + rtol=rtol, atol=atol, grad_req=req, dtype=dtype) + if dtype is not np.float16: + check_numeric_gradient(sym, [data], rtol=rtol, atol=atol, dtype=dtype) + + @with_seed() def test_new_softmax(): for ndim in range(1, 5): From 846086d62805c67e00ac11e3818e4427debfd1e7 Mon Sep 17 00:00:00 2001 From: Anton Chernov Date: Wed, 29 Aug 2018 19:38:23 +0200 Subject: [PATCH 083/160] Revert "Revert "Disable kvstore test (#11798)" (#12279)" (#12379) This reverts commit c1a89488ef551f441dbdf1c5107694680ce1d340. 
--- Jenkinsfile | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index 6a93fd586414..346cb19ce46f 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -900,6 +900,10 @@ core_logic: { } } }, + /* Disabled due to master build failure: + * http://jenkins.mxnet-ci.amazon-ml.com/blue/organizations/jenkins/incubator-mxnet/detail/master/1221/pipeline/ + * https://github.com/apache/incubator-mxnet/issues/11801 + 'dist-kvstore tests CPU': { node(NODE_LINUX_CPU) { ws('workspace/it-dist-kvstore') { @@ -911,7 +915,7 @@ core_logic: { } } } - }, + }, */ 'Scala: GPU': { node(NODE_LINUX_GPU) { ws('workspace/ut-scala-gpu') { From 5af3d392be688b707a7bece84e7da57e2b74235b Mon Sep 17 00:00:00 2001 From: "Yuan (Terry) Tang" Date: Wed, 29 Aug 2018 17:13:26 -0400 Subject: [PATCH 084/160] Edit shape.array doc and some style improvements (#12162) * Edit shape.array doc and some style improvements * Trigger CI * Trigger CI --- R-package/R/initializer.R | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/R-package/R/initializer.R b/R-package/R/initializer.R index 40712432d8b6..bb81a285beaa 100644 --- a/R-package/R/initializer.R +++ b/R-package/R/initializer.R @@ -3,7 +3,7 @@ #' @param name the name of the variable. #' @param shape the shape of the array to be generated. #' -mx.init.internal.default <- function(name, shape, ctx, allow.unknown=FALSE) { +mx.init.internal.default <- function(name, shape, ctx, allow.unknown = FALSE) { if (endsWith(name, "bias")) return (mx.nd.zeros(shape)) if (endsWith(name, "gamma")) return (mx.nd.ones(shape)) if (endsWith(name, "beta")) return (mx.nd.zeros(shape)) @@ -19,7 +19,7 @@ mx.init.internal.default <- function(name, shape, ctx, allow.unknown=FALSE) { #' #' @export mx.init.uniform <- function(scale) { - function(name, shape, ctx, allow.unknown=FALSE) { + function(name, shape, ctx, allow.unknown = FALSE) { if (!endsWith(name, "weight")) { return (mx.init.internal.default(name = name, shape = shape, allow.unknown = allow.unknown)) } @@ -33,7 +33,7 @@ mx.init.uniform <- function(scale) { #' #' @export mx.init.normal <- function(sd) { - function(name, shape, ctx, allow.unknown=FALSE) { + function(name, shape, ctx, allow.unknown = FALSE) { if (!endsWith(name, "weight")) { return (mx.init.internal.default(name = name, shape = shape, allow.unknown = allow.unknown)) } @@ -59,15 +59,15 @@ mx.init.Xavier <- function(rnd_type = "uniform", factor_type = "avg", return (mx.init.internal.default(name = name, shape = shape, allow.unknown = allow.unknown)) } - fan_out = shape[length(shape)] - fan_in = prod(shape[-length(shape)]) + fan_out <- shape[length(shape)] + fan_in <- prod(shape[-length(shape)]) factor_val <- switch(factor_type, "avg" = (fan_in + fan_out) / 2, "in" = fan_in, "out" = fan_out, stop("Not supported factor type. See usage of function mx.init.Xavier")) - scale = sqrt(magnitude / factor_val) + scale <- sqrt(magnitude / factor_val) if (rnd_type == "uniform"){ return(mx.nd.random.uniform(low = -scale, high = scale, shape = shape)) @@ -83,14 +83,16 @@ mx.init.Xavier <- function(rnd_type = "uniform", factor_type = "avg", #' Create initialization of argument like arg.array #' #' @param initializer The initializer. 
-#' @param shape.array named-list The shape of the weights +#' @param shape.array A named list that represents the shape of the weights #' @param ctx mx.context The context of the weights #' @param skip.unknown Whether skip the unknown weight types #' @export -mx.init.create <- function(initializer, shape.array, ctx=NULL, skip.unknown=TRUE) { +mx.init.create <- function(initializer, shape.array, ctx = NULL, skip.unknown = TRUE) { if (length(shape.array) == 0) return(list()) - names = names(shape.array) - ret <- lapply(seq_along(names), function(i) initializer(names[[i]], shape.array[[i]], ctx, allow.unknown=skip.unknown)) + names <- names(shape.array) + ret <- lapply( + seq_along(names), + function(i) initializer(names[[i]], shape.array[[i]], ctx, allow.unknown = skip.unknown)) names(ret) <- names if (skip.unknown) { ret <- mx.util.filter.null(ret) From e456dc45ce781bfb08a71d0d2e2b87fcb98250c7 Mon Sep 17 00:00:00 2001 From: Vandana Kannan Date: Wed, 29 Aug 2018 21:11:18 -0700 Subject: [PATCH 085/160] Fix speech recognition example (#12291) --- example/speech_recognition/README.md | 4 ++-- example/speech_recognition/deepspeech.cfg | 5 +++-- example/speech_recognition/default.cfg | 2 +- example/speech_recognition/singleton.py | 26 ++++++----------------- example/speech_recognition/stt_metric.py | 9 ++++++-- example/speech_recognition/train.py | 21 +++++++++--------- 6 files changed, 29 insertions(+), 38 deletions(-) diff --git a/example/speech_recognition/README.md b/example/speech_recognition/README.md index 00d166602403..f95fddf2103e 100644 --- a/example/speech_recognition/README.md +++ b/example/speech_recognition/README.md @@ -19,9 +19,9 @@ With rich functionalities and convenience explained above, you can build your ow ## **Environments** - MXNet version: 0.9.5+ - GPU memory size: 2.4GB+ -- Install tensorboard for logging +- Install mxboard for logging
-pip install tensorboard
+pip install mxboard
 
- [SoundFile](https://pypi.python.org/pypi/SoundFile/0.8.1) for audio preprocessing (If encounter errors about libsndfile, follow [this tutorial](http://www.linuxfromscratch.org/blfs/view/svn/multimedia/libsndfile.html).) diff --git a/example/speech_recognition/deepspeech.cfg b/example/speech_recognition/deepspeech.cfg index ec3af0459589..69894ae7d640 100644 --- a/example/speech_recognition/deepspeech.cfg +++ b/example/speech_recognition/deepspeech.cfg @@ -26,14 +26,15 @@ prefix = deep_bucket # when mode is load or predict, model will be loaded from the file name with model_file under checkpoints model_file = deep_bucketn_epoch0n_batch-0018 batch_size = 12 -#batch_size=4 +#use batch_size 4 with single GPU +#batch_size = 4 # log will be saved by the log_filename log_filename = deep_bucket.log # checkpoint set n to save checkpoints after n epoch save_checkpoint_every_n_epoch = 1 save_checkpoint_every_n_batch = 3000 is_bi_graphemes = True -tensorboard_log_dir = tblog/deep_bucket +mxboard_log_dir = mxlog/deep_bucket # if random_seed is -1 then it gets random seed from timestamp mx_random_seed = -1 random_seed = -1 diff --git a/example/speech_recognition/default.cfg b/example/speech_recognition/default.cfg index e4beb83d32dd..b0869a9dad2e 100644 --- a/example/speech_recognition/default.cfg +++ b/example/speech_recognition/default.cfg @@ -31,7 +31,7 @@ log_filename = test.log save_checkpoint_every_n_epoch = 20 save_checkpoint_every_n_batch = 1000 is_bi_graphemes = False -tensorboard_log_dir = tblog/libri_sample +mxboard_log_dir = mxlog/libri_sample # if random_seed is -1 then it gets random seed from timestamp mx_random_seed = 1234 random_seed = 1234 diff --git a/example/speech_recognition/singleton.py b/example/speech_recognition/singleton.py index 1d68edfb3ca3..01717e4df068 100644 --- a/example/speech_recognition/singleton.py +++ b/example/speech_recognition/singleton.py @@ -19,9 +19,9 @@ import logging as log class Singleton: - def __init__(self, decrated): - log.debug("Singleton Init %s" % decrated) - self._decorated = decrated + def __init__(self, decorated): + log.debug("Singleton Init %s" % decorated) + self._decorated = decorated def getInstance(self): try: @@ -30,25 +30,11 @@ def getInstance(self): self._instance = self._decorated() return self._instance - def __new__(class_, *args, **kwargs): + def __new__(cls, *args, **kwargs): print("__new__") - class_.instances[class_] = super(Singleton, class_).__new__(class_, *args, **kwargs) - return class_.instances[class_] + cls._instance = super(Singleton, cls).__new__(cls, *args, **kwargs) + return cls._instance def __call__(self): raise TypeError("Singletons must be accessed through 'getInstance()'") - -class SingletonInstane: - __instance = None - - @classmethod - def __getInstance(cls): - return cls.__instance - - @classmethod - def instance(cls, *args, **kargs): - cls.__instance = cls(*args, **kargs) - cls.instance = cls.__getInstance - return cls.__instance - diff --git a/example/speech_recognition/stt_metric.py b/example/speech_recognition/stt_metric.py index fc1916b40c38..ec74fc063dc6 100644 --- a/example/speech_recognition/stt_metric.py +++ b/example/speech_recognition/stt_metric.py @@ -47,6 +47,7 @@ def __init__(self, batch_size, num_gpu, is_epoch_end=False, is_logging=True): self.total_ctc_loss = 0. self.batch_loss = 0. 
self.is_logging = is_logging + def update(self, labels, preds): check_label_shapes(labels, preds) if self.is_logging: @@ -83,10 +84,15 @@ def update(self, labels, preds): if self.is_logging: log.info("loss: %f " % loss) self.total_ctc_loss += self.batch_loss + def get_batch_loss(self): return self.batch_loss + def get_name_value(self): - total_cer = float(self.total_l_dist) / float(self.total_n_label) + try: + total_cer = float(self.total_l_dist) / float(self.total_n_label) + except ZeroDivisionError: + total_cer = float('inf') return total_cer, self.total_n_label, self.total_l_dist, self.total_ctc_loss @@ -244,4 +250,3 @@ def char_match_2way(label, pred): val = val1_max if val1_max > val2_max else val2_max val_matched = val1_max_matched if val1_max > val2_max else val2_max_matched return val, val_matched, n_whole_label - diff --git a/example/speech_recognition/train.py b/example/speech_recognition/train.py index 0d04e4e47a5f..b1ae50b07558 100644 --- a/example/speech_recognition/train.py +++ b/example/speech_recognition/train.py @@ -16,15 +16,14 @@ # under the License. import sys - +import json sys.path.insert(0, "../../python") import os.path +#mxboard setting +from mxboard import SummaryWriter import mxnet as mx from config_util import get_checkpoint_path, parse_contexts from stt_metric import STTMetric -#tensorboard setting -from tensorboard import SummaryWriter -import json from stt_bucketing_module import STTBucketingModule @@ -65,7 +64,7 @@ def do_training(args, module, data_train, data_val, begin_epoch=0): contexts = parse_contexts(args) num_gpu = len(contexts) eval_metric = STTMetric(batch_size=batch_size, num_gpu=num_gpu, is_logging=enable_logging_validation_metric,is_epoch_end=True) - # tensorboard setting + # mxboard setting loss_metric = STTMetric(batch_size=batch_size, num_gpu=num_gpu, is_logging=enable_logging_train_metric,is_epoch_end=False) optimizer = args.config.get('optimizer', 'optimizer') @@ -131,9 +130,9 @@ def reset_optimizer(force_init=False): data_train.reset() data_train.is_first_epoch = True - #tensorboard setting - tblog_dir = args.config.get('common', 'tensorboard_log_dir') - summary_writer = SummaryWriter(tblog_dir) + #mxboard setting + mxlog_dir = args.config.get('common', 'mxboard_log_dir') + summary_writer = SummaryWriter(mxlog_dir) while True: @@ -144,7 +143,7 @@ def reset_optimizer(force_init=False): for nbatch, data_batch in enumerate(data_train): module.forward_backward(data_batch) module.update() - # tensorboard setting + # mxboard setting if (nbatch + 1) % show_every == 0: module.update_metric(loss_metric, data_batch.label) #summary_writer.add_scalar('loss batch', loss_metric.get_batch_loss(), nbatch) @@ -160,7 +159,7 @@ def reset_optimizer(force_init=False): module.forward(data_batch, is_train=True) module.update_metric(eval_metric, data_batch.label) - # tensorboard setting + # mxboard setting val_cer, val_n_label, val_l_dist, _ = eval_metric.get_name_value() log.info("Epoch[%d] val cer=%f (%d / %d)", n_epoch, val_cer, int(val_n_label - val_l_dist), val_n_label) curr_acc = val_cer @@ -170,7 +169,7 @@ def reset_optimizer(force_init=False): data_train.reset() data_train.is_first_epoch = False - # tensorboard setting + # mxboard setting train_cer, train_n_label, train_l_dist, train_ctc_loss = loss_metric.get_name_value() summary_writer.add_scalar('loss epoch', train_ctc_loss, n_epoch) summary_writer.add_scalar('CER train', train_cer, n_epoch) From 6ca909270d5a7f8af280ad83c8aa4862ba819d04 Mon Sep 17 00:00:00 2001 From: solin319 Date: Thu, 30 Aug 2018 
17:25:00 +0800 Subject: [PATCH 086/160] fix bug in 'device' type kvstore (#12350) * fix bug in 'device' type kvstore When we init a key after another key pushed. This key has no merged_buf_ in file 'comm.h', but the inited_ is true. So it can't pull this new key. ``` import mxnet as mx a=mx.nd.array([1,2,3], ctx=mx.gpu(0)) b=mx.nd.array([0,0,0], ctx=mx.gpu(0)) kv=mx.kv.create('device') kv.init('1', a) kv.push('1', [a,a,a,a]) kv.pull('1', b) kv.init('2', a) kv.pull('2', b) ``` * add kv test pull --- src/kvstore/comm.h | 7 +++++-- tests/python/unittest/test_kvstore.py | 17 +++++++++++++++++ 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/src/kvstore/comm.h b/src/kvstore/comm.h index 34cab3037ce9..61370a5bfaf3 100644 --- a/src/kvstore/comm.h +++ b/src/kvstore/comm.h @@ -459,6 +459,7 @@ class CommDevice : public Comm { void Init(int key, const NDArrayStorageType stype, const TShape& shape, int dtype = mshadow::kFloat32) override { sorted_key_attrs_.emplace_back(key, shape, dtype); + inited_ = false; } void InitBuffersAndComm(const std::vector& src) { @@ -701,8 +702,10 @@ class CommDevice : public Comm { } // Delayed allocation - as the dense merged buffer might not be used at all if push() // only sees sparse arrays - bool delay_alloc = true; - buf.merged = NDArray(shape, ctx, delay_alloc, type); + if (buf.merged.is_none()) { + bool delay_alloc = true; + buf.merged = NDArray(shape, ctx, delay_alloc, type); + } ctx_info[ctx.dev_id].second += shape.Size(); } inited_ = true; diff --git a/tests/python/unittest/test_kvstore.py b/tests/python/unittest/test_kvstore.py index 921a5704d54b..28d4ec262c06 100644 --- a/tests/python/unittest/test_kvstore.py +++ b/tests/python/unittest/test_kvstore.py @@ -106,6 +106,23 @@ def check_init(kv, key): check_init(mx.kv.create(), 3) check_init(mx.kv.create(), 'a') +@with_seed() +def test_pull(): + """test pull""" + def check_pull(kv): + a = mx.nd.ones(shape) + b = mx.nd.zeros(shape) + kv.init('1', mx.nd.zeros(shape)) + kv.push('1', [a,a,a,a]) + kv.pull('1', b) + check_diff_to_scalar(b, 4) + kv.init('2', mx.nd.zeros(shape)) + kv.pull('2', b) + check_diff_to_scalar(b, 0) + + check_pull(mx.kv.create('device')) + check_pull(mx.kv.create()) + @with_seed() def test_list_kv_pair(): """list key-value pair push & pull""" From 32c9ca74839ae4d275bcf9a027ea0a711373be81 Mon Sep 17 00:00:00 2001 From: Alexander Zai Date: Thu, 30 Aug 2018 15:07:32 -0700 Subject: [PATCH 087/160] [MXNET-753] Fallback when using non-MKLDNN supported operators (#12019) * add fallback test * wait to read throws error * add TIsMKLDNN attr * invalidate inputs if fcomputeex unsupported * keep ptr to newly created default arrays * add flag to all mkldnn operators * update method name to CreateDefaultInputs * remove dup attrs * create new instance var to store copy * only reorder if mkldnn * add mkldnn flag to batch norm * do not check input / output ptr for mkldnn as copied is made * fix lint * update macro * update custom update name * add todo for fallback * fix lint * rename opexecutor name * add fallback to opexecutor class * fix lint * add todos * create fallback arrays in place * revert in place diff * create copy of arrays for fallback * empty array --- src/executor/attach_op_execs_pass.cc | 10 +++++ src/executor/exec_pass.h | 4 ++ src/operator/nn/activation.cc | 2 + src/operator/nn/batch_norm.cc | 2 + src/operator/nn/concat.cc | 2 + src/operator/nn/convolution.cc | 2 + src/operator/nn/deconvolution.cc | 2 + src/operator/nn/fully_connected.cc | 2 + src/operator/nn/lrn.cc | 2 + 
src/operator/nn/mkldnn/mkldnn_base-inl.h | 12 +++++ src/operator/nn/pooling.cc | 2 + src/operator/nn/softmax.cc | 1 + src/operator/tensor/elemwise_sum.cc | 3 ++ src/operator/tensor/elemwise_unary_op.h | 4 ++ .../tensor/elemwise_unary_op_basic.cc | 2 + tests/python/mkl/test_mkldnn.py | 44 +++++++++++++++++++ 16 files changed, 96 insertions(+) diff --git a/src/executor/attach_op_execs_pass.cc b/src/executor/attach_op_execs_pass.cc index c011c1d9ce03..0e415ef5112a 100644 --- a/src/executor/attach_op_execs_pass.cc +++ b/src/executor/attach_op_execs_pass.cc @@ -159,6 +159,9 @@ class StatefulComputeExExecutor : public OpExecutor { op_ctx.run_ctx = rctx; #if MXNET_USE_MKLDNN == 1 InvalidateOutputs(out_array, req); + CreateDefaultInputs(in_array, &in_array_fallback); + fcompute_(state_, op_ctx, in_array_fallback, req, out_array); + return; #endif fcompute_(state_, op_ctx, in_array, req, out_array); } @@ -226,6 +229,13 @@ class FComputeExExecutor : public OpExecutor { op_ctx.run_ctx = rctx; #if MXNET_USE_MKLDNN == 1 InvalidateOutputs(out_array, req); + // TODO(alex): (MXNET-847) Remove this fallback feature after subgraph implemented + const auto is_mkldnn = Op::GetAttr("TIsMKLDNN"); + if (!is_mkldnn.get(attrs_.op, false)) { + CreateDefaultInputs(in_array, &in_array_fallback); + fcompute_(attrs_, op_ctx, in_array_fallback, req, out_array); + return; + } #endif fcompute_(attrs_, op_ctx, in_array, req, out_array); } diff --git a/src/executor/exec_pass.h b/src/executor/exec_pass.h index cd1db0ac1944..52f7c790c77e 100644 --- a/src/executor/exec_pass.h +++ b/src/executor/exec_pass.h @@ -86,6 +86,10 @@ class OpExecutor { virtual OpStatePtr state() const { return OpStatePtr(); } + + // TODO(alexzai): (MXNET-856) Remove instance member after subgraph feature added + protected: + std::vector in_array_fallback; }; /*! diff --git a/src/operator/nn/activation.cc b/src/operator/nn/activation.cc index b8c2045fba12..ba44ebd4ed4d 100644 --- a/src/operator/nn/activation.cc +++ b/src/operator/nn/activation.cc @@ -155,6 +155,7 @@ The following activation functions are supported: }) .set_attr("FCompute", ActivationCompute) #if MXNET_USE_MKLDNN == 1 +.set_attr("TIsMKLDNN", true) .set_attr("FComputeEx", ActivationComputeExCPU) #endif .set_attr("FGradient", ActivationGrad{"_backward_Activation"}) @@ -184,6 +185,7 @@ NNVM_REGISTER_OP(_backward_Activation) #endif .set_attr_parser(ParamParser) #if MXNET_USE_MKLDNN == 1 +.set_attr("TIsMKLDNN", true) .set_attr("FComputeEx", ActivationGradComputeExCPU) #endif .set_attr("FCompute", ActivationGradCompute); diff --git a/src/operator/nn/batch_norm.cc b/src/operator/nn/batch_norm.cc index b15f84e107e0..4ea494d64e47 100644 --- a/src/operator/nn/batch_norm.cc +++ b/src/operator/nn/batch_norm.cc @@ -601,6 +601,7 @@ the sparse tensors will fallback. 
#endif .set_attr("FGradient", BatchNormGrad) #if MXNET_USE_MKLDNN == 1 +.set_attr("TIsMKLDNN", true) .set_attr("FResourceRequest", [](const NodeAttrs& n) { return std::vector{ResourceRequest::kTempSpace}; }) @@ -633,6 +634,7 @@ NNVM_REGISTER_OP(_backward_BatchNorm) #endif .set_attr_parser(ParamParser) #if MXNET_USE_MKLDNN == 1 +.set_attr("TIsMKLDNN", true) .set_attr("FComputeEx", BatchNormGradComputeExCPU) #endif .set_attr("FCompute", BatchNormGradCompute); diff --git a/src/operator/nn/concat.cc b/src/operator/nn/concat.cc index 9df459e9224d..ac8a814ce70f 100644 --- a/src/operator/nn/concat.cc +++ b/src/operator/nn/concat.cc @@ -367,6 +367,7 @@ Example:: .set_attr("FResourceRequest", [](const NodeAttrs& n) { return std::vector{ResourceRequest::kTempSpace}; }) +.set_attr("TIsMKLDNN", true) #endif CONCAT_FORWARD_ATTRS .set_attr("FInferShape", ConcatShape) @@ -387,6 +388,7 @@ NNVM_REGISTER_OP(_backward_Concat) .set_attr("TIsBackward", true) .set_attr("FInferStorageType", BackwardConcatStorageType) #if MXNET_USE_MKLDNN == 1 +.set_attr("TIsMKLDNN", true) .set_attr("FComputeEx", ConcatGradComputeExCPU) #endif .set_attr("FCompute", ConcatGradCompute); diff --git a/src/operator/nn/convolution.cc b/src/operator/nn/convolution.cc index 8f25cf0dcbb1..d5abe629123b 100644 --- a/src/operator/nn/convolution.cc +++ b/src/operator/nn/convolution.cc @@ -484,6 +484,7 @@ There are other options to tune the performance. #endif .set_attr("FCompute", ConvolutionCompute) #if MXNET_USE_MKLDNN == 1 +.set_attr("TIsMKLDNN", true) .set_attr("FComputeEx", ConvolutionComputeExCPU) #endif .set_attr("FGradient", ConvolutionGrad{"_backward_Convolution"}) @@ -509,6 +510,7 @@ NNVM_REGISTER_OP(_backward_Convolution) }) .set_attr_parser(ConvolutionParamParser) #if MXNET_USE_MKLDNN == 1 +.set_attr("TIsMKLDNN", true) .set_attr("FComputeEx", ConvolutionGradComputeExCPU) #endif .set_attr("FCompute", ConvolutionGradCompute); diff --git a/src/operator/nn/deconvolution.cc b/src/operator/nn/deconvolution.cc index a4be1a0c56a0..1ab391d92b04 100644 --- a/src/operator/nn/deconvolution.cc +++ b/src/operator/nn/deconvolution.cc @@ -413,6 +413,7 @@ NNVM_REGISTER_OP(Deconvolution) }) .set_attr("FCompute", DeconvolutionCompute) #if MXNET_USE_MKLDNN == 1 +.set_attr("TIsMKLDNN", true) .set_attr("FComputeEx", DeconvolutionComputeExCPU) #endif .set_attr("FGradient", DeconvolutionGrad{"_backward_Deconvolution"}) @@ -436,6 +437,7 @@ NNVM_REGISTER_OP(_backward_Deconvolution) }) .set_attr_parser(DeconvolutionParamParser) #if MXNET_USE_MKLDNN == 1 +.set_attr("TIsMKLDNN", true) .set_attr("FComputeEx", DeconvolutionGradComputeExCPU) #endif .set_attr("FCompute", DeconvolutionGradCompute); diff --git a/src/operator/nn/fully_connected.cc b/src/operator/nn/fully_connected.cc index eb881d29abd1..d8a32f0ae963 100644 --- a/src/operator/nn/fully_connected.cc +++ b/src/operator/nn/fully_connected.cc @@ -290,6 +290,7 @@ If ``no_bias`` is set to be true, then the ``bias`` term is ignored. 
return std::vector{"output"}; }) #if MXNET_USE_MKLDNN == 1 +.set_attr("TIsMKLDNN", true) .set_attr("FResourceRequest", [](const NodeAttrs& n) { return std::vector{ResourceRequest::kTempSpace}; }) @@ -322,6 +323,7 @@ NNVM_REGISTER_OP(_backward_FullyConnected) .set_attr("FInferStorageType", BackwardFCStorageType) .set_attr_parser(ParamParser) #if MXNET_USE_MKLDNN == 1 +.set_attr("TIsMKLDNN", true) .set_attr("FComputeEx", FullyConnectedGradComputeExCPU) #endif .set_attr("FCompute", FullyConnectedGradCompute); diff --git a/src/operator/nn/lrn.cc b/src/operator/nn/lrn.cc index 587cf930920e..a428eb1e4faf 100644 --- a/src/operator/nn/lrn.cc +++ b/src/operator/nn/lrn.cc @@ -180,6 +180,7 @@ number of kernels in the layer. }) .set_attr("FCompute", LRNCompute) #if MXNET_USE_MKLDNN == 1 +.set_attr("TIsMKLDNN", true) .set_attr("FComputeEx", LRNComputeExCPU) #endif .set_attr("FGradient", LRNGrad{"_backward_LRN"}) @@ -194,6 +195,7 @@ NNVM_REGISTER_OP(_backward_LRN) #endif .set_attr("TIsBackward", true) #if MXNET_USE_MKLDNN == 1 +.set_attr("TIsMKLDNN", true) .set_attr("FComputeEx", LRNGradComputeExCPU) // Native compute requires norm while MKLDNN does not so cannot be compared in debug mode .set_attr("TExcludeMKLDNNDebug", true) diff --git a/src/operator/nn/mkldnn/mkldnn_base-inl.h b/src/operator/nn/mkldnn/mkldnn_base-inl.h index 273afcd32dc7..6eb90f845d37 100644 --- a/src/operator/nn/mkldnn/mkldnn_base-inl.h +++ b/src/operator/nn/mkldnn/mkldnn_base-inl.h @@ -356,6 +356,18 @@ static inline void InvalidateOutputs(const std::vector &arrs, } } +// TODO(alexzai): (MXNET-856) Remove helper function after subgraph feature added +static inline void CreateDefaultInputs(const std::vector &arrs, + std::vector *out_arrs) { + out_arrs->clear(); + for (size_t i = 0; i < arrs.size(); ++i) { + if (arrs[i].IsMKLDNNData()) + out_arrs->push_back(arrs[i].Reorder2Default()); + else + out_arrs->push_back(arrs[i]); + } +} + const mkldnn::memory *GetWeights(const NDArray &arr, const mkldnn::memory::primitive_desc &target_pd, int num_groups); diff --git a/src/operator/nn/pooling.cc b/src/operator/nn/pooling.cc index 2d118142bc79..c133b63623af 100644 --- a/src/operator/nn/pooling.cc +++ b/src/operator/nn/pooling.cc @@ -395,6 +395,7 @@ For each window ``X``, the mathematical expression for Lp pooling is: .set_attr("FInferShape", PoolingShape) .set_attr("FCompute", PoolingCompute) #if MXNET_USE_MKLDNN == 1 +.set_attr("TIsMKLDNN", true) .set_attr("FComputeEx", PoolingComputeExCPU) #endif .set_attr("FGradient", @@ -424,6 +425,7 @@ NNVM_REGISTER_OP(_backward_Pooling) #endif .set_attr_parser(PoolingParamParser) #if MXNET_USE_MKLDNN == 1 +.set_attr("TIsMKLDNN", true) .set_attr("FComputeEx", PoolingGradComputeExCPU) #endif .set_attr("FCompute", PoolingGradCompute); diff --git a/src/operator/nn/softmax.cc b/src/operator/nn/softmax.cc index 88b7b5fc473e..81e775cac526 100644 --- a/src/operator/nn/softmax.cc +++ b/src/operator/nn/softmax.cc @@ -98,6 +98,7 @@ Example:: }) .set_attr("FCompute", SoftmaxCompute) #if MXNET_USE_MKLDNN == 1 +.set_attr("TIsMKLDNN", true) .set_attr("FComputeEx", SoftmaxComputeExCPU) .set_attr("FInferStorageType", SoftmaxStorageType) #endif diff --git a/src/operator/tensor/elemwise_sum.cc b/src/operator/tensor/elemwise_sum.cc index 9630988165ce..1666537e2860 100644 --- a/src/operator/tensor/elemwise_sum.cc +++ b/src/operator/tensor/elemwise_sum.cc @@ -179,6 +179,9 @@ The storage type of ``add_n`` output depends on storage types of inputs [](const NodeAttrs& attrs) { return std::vector{ResourceRequest::kTempSpace}; }) 
+#if MXNET_USE_MKLDNN == 1 +.set_attr("TIsMKLDNN", true) +#endif .set_attr("FInferShape", ElementWiseSumShape) .set_attr("FInferType", ElementWiseSumType) .set_attr("FInferStorageType", ElementWiseSumForwardInferStorageType) diff --git a/src/operator/tensor/elemwise_unary_op.h b/src/operator/tensor/elemwise_unary_op.h index e09a6cccddbf..eb070a411279 100644 --- a/src/operator/tensor/elemwise_unary_op.h +++ b/src/operator/tensor/elemwise_unary_op.h @@ -299,7 +299,11 @@ class UnaryOp : public OpBase { } break; case kWriteInplace: +// cannot check if ptrs are the same for MKLDNN because we may have +// created copies of input when reordering. WriteInPlace will still write to original array +#if MXNET_USE_MKLDNN == 0 CHECK_EQ(inputs[0].dptr_, outputs[0].dptr_); +#endif break; case kNullOp: break; diff --git a/src/operator/tensor/elemwise_unary_op_basic.cc b/src/operator/tensor/elemwise_unary_op_basic.cc index f7f21f9076a6..c3e9c2dc91d0 100644 --- a/src/operator/tensor/elemwise_unary_op_basic.cc +++ b/src/operator/tensor/elemwise_unary_op_basic.cc @@ -206,6 +206,7 @@ MXNET_OPERATOR_REGISTER_UNARY(_copy) .set_attr("FResourceRequest", [](const NodeAttrs& n) { return std::vector{ResourceRequest::kTempSpace}; }) +.set_attr("TIsMKLDNN", true) #endif .set_attr("FInplaceIdentity", [](const NodeAttrs& attrs){ @@ -225,6 +226,7 @@ NNVM_REGISTER_OP(_backward_copy) .set_attr("FCompute", UnaryOp::IdentityCompute) .set_attr("FComputeEx", CopyEx) #if MXNET_USE_MKLDNN == 1 +.set_attr("TIsMKLDNN", true) .set_attr("FResourceRequest", [](const NodeAttrs& n) { return std::vector{ResourceRequest::kTempSpace}; }) diff --git a/tests/python/mkl/test_mkldnn.py b/tests/python/mkl/test_mkldnn.py index ba4cf3f0116a..e597d0f5fc58 100644 --- a/tests/python/mkl/test_mkldnn.py +++ b/tests/python/mkl/test_mkldnn.py @@ -381,6 +381,50 @@ def check_fullyconnected_training(stype): for stype in stypes: check_fullyconnected_training(stype) +@with_seed() +def test_non_mkldnn_fcomputeex(): + # test special case where MKLDNN formatted NDArray feeds into non-mkldnn fcomputeex operator + # conv is example where MKLDNN NDArray is created from regular NDArrays + # CustomOps is example of non-mkldnn fcomputeex operator + + @mx.operator.register("custom") + class CustomProp(mx.operator.CustomOpProp): + def __int__(self): + super(CustomProp, self).__init__(need_top_grad=False) + + def list_arguments(self): + return ['data'] + + def list_outputs(self): + return ['output'] + + def infer_shape(self, in_shape): + data_shape = in_shape[0] + output_shape = in_shape[0] + return [data_shape], [output_shape], [] + + def infer_type(self, in_type): + dtype = in_type[0] + return [dtype], [dtype], [] + + def create_operator(self, ctx, shapes, dtypes): + return Custom() + + + class Custom(mx.operator.CustomOp): + def forward(self, is_train, req, in_data, out_data, aux): + print(in_data[0]) + self.assign(out_data[0], req[0], in_data[0]) + + def backward(self, req, out_grad, in_data, out_data, in_grad, aux): + self.assign(in_grad[0], req[0], out_grad) + + data = mx.symbol.Variable('data') + conv = mx.sym.Convolution(data=data, kernel=(5, 5), pad=(1, 1), stride=(1,1), num_filter=8, name="conv", no_bias=True) + custom = mx.symbol.Custom(name='custom', data=conv, op_type='custom') + exec1 = custom.bind(mx.cpu(), args={'data': mx.nd.ones([10,3,96,96]), 'conv_weight': mx.nd.ones([8,3,5,5])}) + exec1.forward()[0].wait_to_read() + if __name__ == '__main__': install.test_mkldnn_install() From a64cf7d9c8c1c473e201b5bd68ab9af6bf7365ba Mon Sep 17 00:00:00 2001 From: 
reminisce Date: Thu, 30 Aug 2018 19:13:33 -0700 Subject: [PATCH 088/160] Subgraph API for integrating accelerators with MXNet (#12157) * Graph partitioner and subgraph op (#11251) Graph partitioner and subgraph op Fix duplicate entry bugs (#11767) Make subgraph var node name unique (#11876) [DO NOT REVIEW] Fix bug of eliminating cycles (#11907) * Fix cycle bug * Fix decycle bug * Fix comment [DO NOT REVIEW] Subgraph API (#12104) * Initial commit * Add unit tests * Fix lint * Fix lint * Clean up * Add graph partitiong to Bind * Add property name to graph partitioning c api * Fix unit test gpu context * Address cr * Move subgraph to attrs.subgraphs and fix the example * Fix lint * Add var version unit test * Address cr * Enable unit test that was flaky * Clean up * Clean up * Clean up * Change version return type in NDArray * Clean up * Add register or get for subgraph prop registry * Address cr * Remove unnecessary code * Handle var version issue in naive engine * Delete example * Remove registration of resource request for default subgraph op * Add doc string * Improve doc string --- include/mxnet/c_api_test.h | 66 ++ include/mxnet/engine.h | 22 +- include/mxnet/ndarray.h | 4 + src/c_api/c_api_test.cc | 73 ++ src/engine/engine_impl.h | 14 - src/engine/naive_engine.cc | 31 +- src/engine/threaded_engine.cc | 10 +- src/engine/threaded_engine.h | 1 + src/executor/graph_executor.cc | 151 ++++ src/executor/graph_executor.h | 4 + src/operator/subgraph/common.h | 237 ++++++ src/operator/subgraph/default_subgraph_op.cc | 112 +++ src/operator/subgraph/default_subgraph_op.cu | 44 + .../subgraph/default_subgraph_property.cc | 76 ++ src/operator/subgraph/partition_graph.cc | 774 ++++++++++++++++++ src/operator/subgraph/subgraph_property.h | 166 ++++ tests/cpp/engine/threaded_engine_test.cc | 58 ++ tests/python/gpu/test_operator_gpu.py | 1 + tests/python/unittest/test_subgraph_op.py | 238 ++++++ 19 files changed, 2059 insertions(+), 23 deletions(-) create mode 100644 include/mxnet/c_api_test.h create mode 100644 src/c_api/c_api_test.cc create mode 100644 src/operator/subgraph/common.h create mode 100644 src/operator/subgraph/default_subgraph_op.cc create mode 100644 src/operator/subgraph/default_subgraph_op.cu create mode 100644 src/operator/subgraph/default_subgraph_property.cc create mode 100644 src/operator/subgraph/partition_graph.cc create mode 100644 src/operator/subgraph/subgraph_property.h create mode 100644 tests/python/unittest/test_subgraph_op.py diff --git a/include/mxnet/c_api_test.h b/include/mxnet/c_api_test.h new file mode 100644 index 000000000000..fe6fc7fe9cc4 --- /dev/null +++ b/include/mxnet/c_api_test.h @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/*! 
+ * Copyright (c) 2018 by Contributors + * \file c_api_test.h + * \brief C API of mxnet for ease of testing backend in Python + */ +#ifndef MXNET_C_API_TEST_H_ +#define MXNET_C_API_TEST_H_ + +/*! \brief Inhibit C++ name-mangling for MXNet functions. */ +#ifdef __cplusplus +extern "C" { +#endif // __cplusplus + +#include + +/*! + * \brief This API partitions a graph only by the operator names + * provided by users. This will attach a DefaultSubgraphProperty + * to the input graph for partitioning. This function should be + * used only for the testing purpose. + */ +MXNET_DLL int MXPartitionGraphByOpNames(SymbolHandle sym_handle, + const char* prop_name, + const mx_uint num_ops, + const char** op_names, + SymbolHandle* ret_sym_handle); + +/*! + * \brief Given a subgraph property name, use the provided op names + * as the op_names attribute for that subgraph property, instead of + * the predefined one. This is only for the purpose of testing. + */ +MXNET_DLL int MXSetSubgraphPropertyOpNames(const char* prop_name, + const mx_uint num_ops, + const char** op_names); + +/*! + * \brief Given a subgraph property name, delete the op name set + * in the SubgraphPropertyOpNameSet. + */ +MXNET_DLL int MXRemoveSubgraphPropertyOpNames(const char* prop_name); + +#ifdef __cplusplus +} +#endif // __cplusplus + +#endif // MXNET_C_API_TEST_H_ diff --git a/include/mxnet/engine.h b/include/mxnet/engine.h index dc48bfb83fa3..11e64edfcd54 100644 --- a/include/mxnet/engine.h +++ b/include/mxnet/engine.h @@ -41,8 +41,26 @@ class Engine; /*! \brief namespace of engine internal types. */ namespace engine { -/*! \brief Internal representation of variable. */ -struct Var; +/*! \brief base class of engine variables.*/ +struct Var { + virtual size_t version() { + return version_; + } + virtual ~Var() = default; + /*! + * \brief cast variable to derived type T + * \tparam T the type we want to cast into. + * \return A casted variable. + */ + template + inline T* Cast(); + /*! + * \brief version number of the var. Every time the object it is associated with + * is modified, the version number is incremented by 1. + */ + size_t version_{0}; +}; // struct Var + /*! \brief Internal representation of operator. */ struct Opr; /*! \brief Variable pointer type, usually hold by user used to specify dependencies. */ diff --git a/include/mxnet/ndarray.h b/include/mxnet/ndarray.h index bae3ea90d5e0..6141a4da78ef 100644 --- a/include/mxnet/ndarray.h +++ b/include/mxnet/ndarray.h @@ -340,6 +340,10 @@ class NDArray { inline size_t byte_offset() const { return byte_offset_; } + /*! \brief return var version of the NDArray*/ + inline size_t version() const { + return var()->version(); + } /*! * \brief save the content into binary stream * \param strm the output stream diff --git a/src/c_api/c_api_test.cc b/src/c_api/c_api_test.cc new file mode 100644 index 000000000000..623faa71adc9 --- /dev/null +++ b/src/c_api/c_api_test.cc @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/*! + * Copyright (c) 2018 by Contributors + * \file c_api_test.cc + * \brief C API of mxnet for the ease of testing backend in Python + */ +#include +#include +#include "./c_api_common.h" +#include "../operator/subgraph/subgraph_property.h" + +int MXPartitionGraphByOpNames(SymbolHandle sym_handle, + const char* prop_name, + const mx_uint num_ops, + const char** op_names, + SymbolHandle* ret_sym_handle) { + nnvm::Symbol* s = new nnvm::Symbol(); + API_BEGIN(); + std::unordered_set op_name_set; + for (size_t i = 0; i < num_ops; ++i) { + op_name_set.emplace(op_names[i]); + } + nnvm::Symbol* sym = static_cast(sym_handle); + *s = sym->Copy(); + nnvm::Graph g; + g.outputs = s->outputs; + if (!op_name_set.empty()) { + mxnet::op::SubgraphPropertyPtr property + = mxnet::op::SubgraphPropertyRegistry::Get()->CreateSubgraphProperty(prop_name); + property->SetAttr("op_names", op_name_set); + g.attrs["subgraph_property"] = std::make_shared(std::move(property)); + } + g = nnvm::ApplyPass(std::move(g), "PartitionGraph"); + s->outputs = g.outputs; + *ret_sym_handle = s; + API_END_HANDLE_ERROR(delete s); +} + +int MXSetSubgraphPropertyOpNames(const char* prop_name, + const mx_uint num_ops, + const char** op_names) { + API_BEGIN(); + std::unordered_set op_name_set; + for (size_t i = 0; i < num_ops; ++i) { + op_name_set.emplace(op_names[i]); + } + (*mxnet::op::SubgraphPropertyOpNameSet::Get())[prop_name] = op_name_set; + API_END(); +} + +int MXRemoveSubgraphPropertyOpNames(const char* prop_name) { + API_BEGIN(); + mxnet::op::SubgraphPropertyOpNameSet::Get()->erase(prop_name); + API_END(); +} diff --git a/src/engine/engine_impl.h b/src/engine/engine_impl.h index b3ec34dc857a..f15141f4e7a2 100644 --- a/src/engine/engine_impl.h +++ b/src/engine/engine_impl.h @@ -33,20 +33,6 @@ namespace mxnet { namespace engine { -/*! \brief base class of engine variables, used for type checking */ -struct Var { -#if ENGINE_DEBUG - virtual ~Var() = default; -#endif // ENGINE_DEBUG - /*! - * \brief cast variable to derived type T - * \tparam T the type we want to cast into. - * \return A casted variable. - */ - template - inline T* Cast(); -}; // struct Var - /*! \brief base class of engine operators, used for type checking */ struct Opr { #if ENGINE_DEBUG diff --git a/src/engine/naive_engine.cc b/src/engine/naive_engine.cc index 8196af2de2f9..daff53066949 100644 --- a/src/engine/naive_engine.cc +++ b/src/engine/naive_engine.cc @@ -28,10 +28,24 @@ #include "./engine_impl.h" #include "../profiler/profiler.h" #include "./openmp.h" +#include "../common/object_pool.h" namespace mxnet { namespace engine { +/*! + * \brief var used in Naive Engine for tracking the version + * of the objects it is associated with. 
+ */ +class NaiveVar final + : public Var, public common::ObjectPoolAllocatable { + public: + inline static NaiveVar* CastFromBase(Var* ptr) { + return ptr->Cast(); + } +}; // class NaiveVar + + // implement naive engine class NaiveEngine final : public Engine { public: @@ -71,8 +85,7 @@ class NaiveEngine final : public Engine { // new variables VarHandle NewVariable() override { - size_t v = ++counter_; - return reinterpret_cast(v); + return NaiveVar::New(); } OprHandle NewOperator(AsyncFn fn, @@ -146,6 +159,10 @@ class NaiveEngine final : public Engine { opr->opr_profile.reset(new profiler::ProfileOperator(opr->opr_name, attrs.release())); opr->opr_profile->start(exec_ctx.dev_type, exec_ctx.dev_id); } + // increment mutable var version + for (auto var : mutable_vars) { + ++var->version_; + } if (exec_ctx.dev_mask() == gpu::kDevMask) { #if MXNET_USE_CUDA size_t dev_id = static_cast(exec_ctx.dev_id); @@ -171,8 +188,12 @@ class NaiveEngine final : public Engine { } void DeleteVariable(SyncFn delete_fn, Context exec_ctx, VarHandle var) override { - this->PushSync(delete_fn, exec_ctx, {}, {var}, - FnProperty::kNormal, 0, "DeleteVariable"); + NaiveVar* naive_var = NaiveVar::CastFromBase(var); + this->PushAsync([delete_fn, naive_var](RunContext ctx, CallbackOnComplete on_complete) mutable { + delete_fn(ctx); + NaiveVar::Delete(naive_var); + on_complete(); + }, exec_ctx, {}, {var}, FnProperty::kDeleteVar, 0, "DeleteVariable"); } void WaitForVar(VarHandle var) override { @@ -192,8 +213,6 @@ class NaiveEngine final : public Engine { } // whether action is completed bool req_completed_; - // counter - std::atomic counter_{0}; /*! \brief whether it is during shutdown phase*/ std::atomic shutdown_phase_{false}; // CPU stream diff --git a/src/engine/threaded_engine.cc b/src/engine/threaded_engine.cc index e70cc197c0c3..3a7587fef13f 100644 --- a/src/engine/threaded_engine.cc +++ b/src/engine/threaded_engine.cc @@ -130,6 +130,9 @@ inline bool ThreadedVar::CompleteWriteDependency(Dispatcher dispatcher) { assert(pending_write_ != nullptr); CHECK_EQ(num_pending_reads_, kWriteTriggered); + // increment version number + ++version_; + // really delete if (to_delete_) { VersionedVarBlock *head = pending_write_->next; @@ -164,7 +167,7 @@ inline bool ThreadedVar::CompleteWriteDependency(Dispatcher dispatcher) { } // This is outside of lock scope // Be very carful, pending_write_ and num_pending_reads_ - // can change now, do not reply ont the two variables. + // can change now, do not rely on these two variables. // The linked list \in [old_pending_write, end_of_read_chain) // is already detached from this Var. // So it is safe to modify these @@ -196,6 +199,11 @@ inline bool ThreadedVar::ready_to_read() { return this->is_ready_to_read(); } +inline size_t ThreadedVar::version() { + std::lock_guard lock{mutex_}; + return this->version_; +} + // implementation of threaded engine ThreadedVar* ThreadedEngine::NewVariable() { return ThreadedVar::New(VersionedVarBlock::New()); diff --git a/src/engine/threaded_engine.h b/src/engine/threaded_engine.h index 428f0d8c554f..a2c1a2b943aa 100644 --- a/src/engine/threaded_engine.h +++ b/src/engine/threaded_engine.h @@ -162,6 +162,7 @@ class ThreadedVar final inline void SetToDelete(); /*! \return whether this variable is ready to read. */ inline bool ready_to_read(); + inline size_t version() override; /*! * \brief Cast a Var pointer to ThreadedVar pointer * \param ptr pointer from base. 
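The engine changes above give every variable a monotonically increasing version counter: NaiveEngine bumps it when an operation mutating the var is pushed, ThreadedVar bumps it in CompleteWriteDependency, and NDArray::version() exposes the counter of the array's underlying var. The snippet below is a minimal sketch of the intended behaviour, not part of the patch; it assumes a CPU array and that each of the two writes maps to a single engine operation.

#include <dmlc/logging.h>
#include <mxnet/ndarray.h>

using namespace mxnet;

void VersionCounterSketch() {
  NDArray arr(TShape({2, 2}), Context::CPU());
  arr = 1.0f;                        // first write: one engine op mutating arr's var
  arr.WaitToRead();
  const size_t v0 = arr.version();   // version observed after that write completes
  arr += 1.0f;                       // second write on the same var
  arr.WaitToRead();
  CHECK_EQ(arr.version(), v0 + 1);   // every completed write bumps the counter by one
}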
diff --git a/src/executor/graph_executor.cc b/src/executor/graph_executor.cc index 32b14b8e9637..265554ab3918 100644 --- a/src/executor/graph_executor.cc +++ b/src/executor/graph_executor.cc @@ -33,6 +33,7 @@ #include "../profiler/profiler.h" #include "../common/utils.h" #include "../common/exec_utils.h" +#include "../operator/subgraph/subgraph_property.h" namespace mxnet { namespace exec { @@ -42,6 +43,7 @@ using namespace mxnet::common; GraphExecutor::GraphExecutor() { log_verbose_ = dmlc::GetEnv("MXNET_EXEC_VERBOSE_LOGGING", false); need_grad_ = false; + subgraph_property_ = dmlc::GetEnv("MXNET_SUBGRAPH_BACKEND", std::string()); } GraphExecutor::~GraphExecutor() { @@ -1428,6 +1430,146 @@ GraphExecutor::CachedSegOpr GraphExecutor::CreateCachedSegOpr(size_t topo_start, iter->c_str()); return ret; } + +// Infer shapes, dtypes, stypes, contexts for the forward graph +static nnvm::Graph InferForwardAttrs(nnvm::Graph g, + nnvm::ShapeVector arg_shapes, + nnvm::DTypeVector arg_dtypes, + StorageTypeVector arg_stypes, + const Context& default_ctx, + const std::map& ctx_map, + const std::vector& in_arg_ctxes, + const std::vector& aux_state_ctxes) { + const auto& indexed_graph = g.indexed_graph(); + const auto num_forward_inputs = indexed_graph.input_nodes().size(); + g = AssignContext(g, default_ctx, ctx_map, in_arg_ctxes, {}, + aux_state_ctxes, {}, num_forward_inputs, g.outputs.size()); + g = InferShape(std::move(g), std::move(arg_shapes), "__shape__"); + if (g.GetAttr("shape_num_unknown_nodes") != 0U) { + HandleInferShapeError(num_forward_inputs, indexed_graph, + g.GetAttr("shape")); + } + g = InferType(std::move(g), std::move(arg_dtypes), "__dtype__"); + if (g.GetAttr("dtype_num_unknown_nodes") != 0U) { + HandleInferTypeError(num_forward_inputs, indexed_graph, + g.GetAttr("dtype")); + } + g = InferStorageType(std::move(g), std::move(arg_stypes), "__storage_type__"); + if (g.GetAttr("storage_type_num_unknown_nodes") != 0U) { + HandleInferStorageTypeError(num_forward_inputs, indexed_graph, + g.GetAttr("storage_type")); + } + return g; +} + +// Given input attr arrays, partition the graph using the backend name equal to prop_name. +// This is a common function for bind and simple_bind flows. +static nnvm::Symbol PartitionGraph(const nnvm::Symbol& src, + const std::string& prop_name, + const nnvm::ShapeVector& arg_shapes, + const nnvm::DTypeVector& arg_dtypes, + const StorageTypeVector& arg_stypes, + const Context& default_ctx, + const std::map& ctx_map, + const std::vector& in_arg_ctxes, + const std::vector& aux_state_ctxes) { + auto subgraph_prop = op::SubgraphPropertyRegistry::Get()->CreateSubgraphProperty(prop_name); + nnvm::Symbol ret = src.Copy(); + nnvm::Graph g; + g.outputs = ret.outputs; + g = InferForwardAttrs(g, arg_shapes, arg_dtypes, arg_stypes, default_ctx, + ctx_map, in_arg_ctxes, aux_state_ctxes); + subgraph_prop->SetAttr("graph", g); + auto it = op::SubgraphPropertyOpNameSet::Get()->find(prop_name); + // assign a op name set to the subgraph property if it has been provided by users + if (it != op::SubgraphPropertyOpNameSet::Get()->end()) { + LOG(INFO) << "SubgraphPropertyOpNameSet for subgraph property " << prop_name + << " has been assigned a value. 
Please make sure it is initialized" + " only for the testing purpose."; + subgraph_prop->SetAttr("op_names", it->second); + } + g.attrs["subgraph_property"] = std::make_shared(std::move(subgraph_prop)); + g = ApplyPass(std::move(g), "PartitionGraph"); + ret.outputs = g.outputs; + return ret; +} + +// Given input attr dicts, partition the graph using the backend name equal to prop_name. +// This is for simple_bind flow. +static nnvm::Symbol PartitionGraph(const nnvm::Symbol& src, + const std::string& prop_name, + const std::unordered_map& arg_shape_map, + const std::unordered_map& arg_dtype_map, + const std::unordered_map& arg_stype_map, + const Context& default_ctx, + const std::map& ctx_map, + const std::vector& in_arg_ctxes, + const std::vector& aux_state_ctxes) { + const std::vector input_names = src.ListInputNames(Symbol::kAll); + nnvm::ShapeVector arg_shapes(input_names.size(), TShape()); + nnvm::DTypeVector arg_dtypes(input_names.size(), -1); + StorageTypeVector arg_stypes(input_names.size(), kUndefinedStorage); + for (size_t i = 0; i < input_names.size(); ++i) { + auto it1 = arg_shape_map.find(input_names[i]); + if (arg_shape_map.end() != it1) { + arg_shapes[i] = it1->second; + } + auto it2 = arg_dtype_map.find(input_names[i]); + if (arg_dtype_map.end() != it2) { + arg_dtypes[i] = it2->second; + } + auto it3 = arg_stype_map.find(input_names[i]); + if (arg_stype_map.end() != it3) { + arg_stypes[i] = it3->second; + } + } + return PartitionGraph(src, prop_name, arg_shapes, arg_dtypes, arg_stypes, + default_ctx, ctx_map, in_arg_ctxes, aux_state_ctxes); +} + +// Given input ndarrays, partition the graph using the backend name equal to prop_name. +// This is for bind flow. +static nnvm::Symbol PartitionGraph(const nnvm::Symbol& src, + const std::string& prop_name, + const std::vector &in_args, + const std::vector &aux_states, + const Context& default_ctx, + const std::map& ctx_map) { + const std::vector input_names = src.ListInputNames(Symbol::kAll); + const std::vector arg_names = src.ListInputNames(nnvm::Symbol::kReadOnlyArgs); + const std::vector aux_names = src.ListInputNames(nnvm::Symbol::kAuxiliaryStates); + CHECK_EQ(arg_names.size(), in_args.size()); + CHECK_EQ(aux_names.size(), aux_states.size()); + nnvm::ShapeVector arg_shapes; // all input shapes + arg_shapes.reserve(input_names.size()); + nnvm::DTypeVector arg_dtypes; // all input dtypes + arg_dtypes.reserve(input_names.size()); + StorageTypeVector arg_stypes; // all input stypes + arg_stypes.reserve(input_names.size()); + std::vector in_arg_ctxes(in_args.size()); + std::vector aux_state_ctxes(aux_states.size()); + + size_t i1 = 0, i2 = 0; + for (size_t i = 0; i < input_names.size(); ++i) { + if (i2 < aux_names.size() && aux_names[i2] == input_names[i]) { + arg_shapes.push_back(aux_states[i2].shape()); + arg_dtypes.push_back(aux_states[i2].dtype()); + arg_stypes.push_back(aux_states[i2].storage_type()); + aux_state_ctxes[i2] = aux_states[i2].ctx(); + ++i2; + } else { + CHECK(i1 < arg_names.size()); + CHECK_EQ(arg_names[i1], input_names[i]); + arg_shapes.push_back(in_args[i1].shape()); + arg_dtypes.push_back(in_args[i1].dtype()); + arg_stypes.push_back(in_args[i1].storage_type()); + in_arg_ctxes[i1] = in_args[i1].ctx(); + ++i1; + } + } + return PartitionGraph(src, prop_name, arg_shapes, arg_dtypes, arg_stypes, + default_ctx, ctx_map, in_arg_ctxes, aux_state_ctxes); +} } // namespace exec Executor *Executor::SimpleBind(nnvm::Symbol symbol, @@ -1447,6 +1589,11 @@ Executor *Executor::SimpleBind(nnvm::Symbol symbol, 
std::unordered_map* shared_buffer, Executor* shared_exec) { auto exec = new exec::GraphExecutor(); + if (!exec->subgraph_property().empty()) { + symbol = exec::PartitionGraph(symbol, exec->subgraph_property(), arg_shape_map, arg_dtype_map, + arg_stype_map, default_ctx, group2ctx, in_arg_ctxes, + aux_state_ctxes); + } exec->Init(symbol, default_ctx, group2ctx, in_arg_ctxes, arg_grad_ctxes, aux_state_ctxes, arg_shape_map, arg_dtype_map, arg_stype_map, @@ -1465,6 +1612,10 @@ Executor *Executor::Bind(nnvm::Symbol symbol, const std::vector &aux_states, Executor* shared_exec) { auto exec = new exec::GraphExecutor(); + if (!exec->subgraph_property().empty()) { + symbol = exec::PartitionGraph(symbol, exec->subgraph_property(), in_args, aux_states, + default_ctx, group2ctx); + } exec->Init(symbol, default_ctx, group2ctx, in_args, arg_grad_store, grad_req_type, aux_states, reinterpret_cast(shared_exec)); diff --git a/src/executor/graph_executor.h b/src/executor/graph_executor.h index 7b936c300254..b94bb4377786 100644 --- a/src/executor/graph_executor.h +++ b/src/executor/graph_executor.h @@ -117,6 +117,8 @@ class GraphExecutor : public Executor { std::vector* arg_grads, std::vector* aux_states) override; + const std::string& subgraph_property() const { return subgraph_property_; } + protected: friend class mxnet::Imperative; // Information about operational node @@ -256,6 +258,8 @@ class GraphExecutor : public Executor { std::unordered_set cached_seg_opr_names_; // verbose logging bool log_verbose_ = false; + // subgraph property name + std::string subgraph_property_; }; } // namespace exec diff --git a/src/operator/subgraph/common.h b/src/operator/subgraph/common.h new file mode 100644 index 000000000000..22058d556e07 --- /dev/null +++ b/src/operator/subgraph/common.h @@ -0,0 +1,237 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
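Because GraphExecutor now reads MXNET_SUBGRAPH_BACKEND in its constructor, the partitioning added to Bind/SimpleBind is driven entirely by that environment variable. The following is a hedged sketch rather than code from the patch: the helper name is made up, the argument vectors are assumed to be populated by the caller, and setenv assumes a POSIX environment. "default" refers to the property registered further below by default_subgraph_property.cc.

#include <cstdlib>
#include <map>
#include <string>
#include <vector>
#include <mxnet/executor.h>

using namespace mxnet;

// Hypothetical helper: bind `net` after enabling the "default" subgraph backend.
Executor* BindWithDefaultSubgraphBackend(nnvm::Symbol net,
                                         const std::vector<NDArray>& in_args,
                                         const std::vector<NDArray>& arg_grads,
                                         const std::vector<OpReqType>& grad_req,
                                         const std::vector<NDArray>& aux_states) {
  // Must be set before Executor::Bind constructs the GraphExecutor,
  // since the subgraph property name is read in the constructor.
  setenv("MXNET_SUBGRAPH_BACKEND", "default", 1);
  const std::map<std::string, Context> group2ctx;  // no device group mapping
  return Executor::Bind(net, Context::CPU(), group2ctx,
                        in_args, arg_grads, grad_req, aux_states, nullptr);
}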
+ */ + +#ifndef MXNET_OPERATOR_SUBGRAPH_COMMON_H_ +#define MXNET_OPERATOR_SUBGRAPH_COMMON_H_ + +#include +#include +#include +#include "../elemwise_op_common.h" +#include "../../executor/exec_pass.h" + +namespace mxnet { +namespace op { + +inline uint32_t DefaultSubgraphOpNumInputs(const nnvm::NodeAttrs& attrs) { + const nnvm::Symbol& sym = *attrs.subgraphs[0]; + return sym.ListInputNames(nnvm::Symbol::kAll).size(); +} + +inline uint32_t DefaultSubgraphOpNumOutputs(const nnvm::NodeAttrs& attrs) { + const nnvm::Symbol& sym = *attrs.subgraphs[0]; + return sym.ListOutputNames().size(); +} + +inline std::vector DefaultSubgraphOpListInputs(const nnvm::NodeAttrs& attrs) { + const nnvm::Symbol& sym = *attrs.subgraphs[0]; + return sym.ListInputNames(nnvm::Symbol::kAll); +} + +inline std::vector DefaultSubgraphOpListOutputs(const nnvm::NodeAttrs& attrs) { + const nnvm::Symbol& sym = *attrs.subgraphs[0]; + return sym.ListOutputNames(); +} + +inline bool DefaultSubgraphOpShape(const nnvm::NodeAttrs& attrs, + std::vector *in_shapes, + std::vector *out_shapes) { + using namespace exec; + const nnvm::Symbol& subgraph_sym = *attrs.subgraphs[0]; + nnvm::Graph g; + g.outputs = subgraph_sym.outputs; + const auto& idx_g = g.indexed_graph(); + CHECK_EQ(idx_g.input_nodes().size(), in_shapes->size()); + CHECK_EQ(idx_g.outputs().size(), out_shapes->size()); + + // Put the input and output shapes to the shape vector. + nnvm::ShapeVector shapes(idx_g.num_node_entries()); + const auto &input_nids = idx_g.input_nodes(); + CHECK_EQ(input_nids.size(), in_shapes->size()); + for (size_t i = 0; i < in_shapes->size(); i++) { + auto eid = idx_g.entry_id(input_nids[i], 0); + shapes[eid] = in_shapes->at(i); + } + CHECK_EQ(g.outputs.size(), out_shapes->size()); + for (size_t i = 0; i < out_shapes->size(); i++) { + auto eid = idx_g.entry_id(g.outputs[i]); + shapes[eid] = out_shapes->at(i); + } + + // Infer shape of the graph. + g.attrs["shape"] = std::make_shared(std::move(shapes)); + g = exec::InferShape(std::move(g)); + + // Copy the inferred shape back to the input shapes and the output shapes. + shapes = g.GetAttr("shape"); + // assign to in_shapes + for (size_t i = 0; i < in_shapes->size(); ++i) { + const auto eid = idx_g.entry_id(input_nids[i], 0); + SHAPE_ASSIGN_CHECK(*in_shapes, i, shapes[eid]); + } + // assign to out_shapes + for (size_t i = 0; i < g.outputs.size(); ++i) { + const auto eid = idx_g.entry_id(g.outputs[i]); + SHAPE_ASSIGN_CHECK(*out_shapes, i, shapes[eid]); + } + // Check if we have inferred the shapes correctly. + return g.GetAttr("shape_num_unknown_nodes") == 0; +} + +inline bool DefaultSubgraphOpType(const nnvm::NodeAttrs& attrs, + std::vector *in_types, + std::vector *out_types) { + const nnvm::Symbol& subgraph_sym = *attrs.subgraphs[0]; + nnvm::Graph g; + g.outputs = subgraph_sym.outputs; + const auto& idx_g = g.indexed_graph(); + CHECK_EQ(idx_g.input_nodes().size(), in_types->size()); + CHECK_EQ(idx_g.outputs().size(), out_types->size()); + + // Put the input and output data types to the dtype vector. + nnvm::DTypeVector types(idx_g.num_node_entries(), -1); + const auto &input_nids = idx_g.input_nodes(); + CHECK_EQ(input_nids.size(), in_types->size()); + for (size_t i = 0; i < in_types->size(); i++) { + auto eid = idx_g.entry_id(input_nids[i], 0); + types[eid] = in_types->at(i); + } + CHECK_EQ(g.outputs.size(), out_types->size()); + for (size_t i = 0; i < out_types->size(); i++) { + auto eid = idx_g.entry_id(g.outputs[i]); + types[eid] = out_types->at(i); + } + + // Infer data type of the graph. 
+ g.attrs["dtype"] = std::make_shared(std::move(types)); + g = exec::InferType(std::move(g)); + + types = g.GetAttr("dtype"); + // assign to in_types + for (size_t i = 0; i < in_types->size(); ++i) { + const auto eid = idx_g.entry_id(input_nids[i], 0); + TYPE_ASSIGN_CHECK(*in_types, i, types[eid]); + } + // assign to out_types + for (size_t i = 0; i < g.outputs.size(); ++i) { + const auto eid = idx_g.entry_id(g.outputs[i]); + TYPE_ASSIGN_CHECK(*out_types, i, types[eid]); + } + // Check if we have inferred the dtypes correctly. + return g.GetAttr("dtype_num_unknown_nodes") == 0; +} + +inline bool DefaultSubgraphOpStorageType(const nnvm::NodeAttrs& attrs, + const int dev_mask, + DispatchMode* dispatch_mode, + std::vector* in_stypes, + std::vector* out_stypes) { + const nnvm::Symbol& subgraph_sym = *attrs.subgraphs[0]; + nnvm::Graph g; + g.outputs = subgraph_sym.outputs; + const auto& idx_g = g.indexed_graph(); + CHECK_EQ(idx_g.input_nodes().size(), in_stypes->size()); + CHECK_EQ(idx_g.outputs().size(), out_stypes->size()); + exec::DevMaskVector dev_masks(idx_g.num_node_entries(), dev_mask); + + // Put the input and output storages to the storage vector. + StorageTypeVector stypes(idx_g.num_node_entries(), kUndefinedStorage); + const auto &input_nids = idx_g.input_nodes(); + CHECK_EQ(input_nids.size(), in_stypes->size()); + for (size_t i = 0; i < in_stypes->size(); i++) { + auto eid = idx_g.entry_id(input_nids[i], 0); + stypes[eid] = in_stypes->at(i); + } + CHECK_EQ(g.outputs.size(), out_stypes->size()); + for (size_t i = 0; i < out_stypes->size(); i++) { + auto eid = idx_g.entry_id(g.outputs[i]); + stypes[eid] = out_stypes->at(i); + } + + // Infer storage type of the graph. + bool dev_match = g.attrs.count("dev_mask") && + g.GetAttr("dev_mask") == dev_masks; + if (!dev_match) { + g.attrs["dev_mask"] = std::make_shared(std::move(dev_masks)); + } + g.attrs["storage_type"] = std::make_shared(std::move(stypes)); + g = exec::InferStorageType(std::move(g)); + + stypes = g.GetAttr("storage_type"); + // assign to in_types + for (size_t i = 0; i < in_stypes->size(); ++i) { + const auto eid = idx_g.entry_id(input_nids[i], 0); + STORAGE_TYPE_ASSIGN_CHECK(*in_stypes, i, stypes[eid]); + } + + DISPATCH_MODE_ASSIGN_CHECK(dispatch_mode, 0, DispatchMode::kFComputeEx); + // assign to out_types + for (size_t i = 0; i < g.outputs.size(); ++i) { + const auto eid = idx_g.entry_id(g.outputs[i]); + STORAGE_TYPE_ASSIGN_CHECK(*out_stypes, i, stypes[eid]); + } + // Check if we have inferred the storages correctly. 
+ return g.GetAttr("storage_type_num_unknown_nodes") == 0; +} + +inline ExecType DefaultSubgraphOpExecType(const nnvm::NodeAttrs& attrs) { + return ExecType::kSubgraphExec; +} + +inline std::vector DefaultSubgraphOpMutableInputs(const nnvm::NodeAttrs& attrs) { + const nnvm::Symbol& subgraph_sym = *attrs.subgraphs[0]; + const std::vector input_names = subgraph_sym.ListInputNames(nnvm::Symbol::kAll); + const std::vector immutable_input_names = + subgraph_sym.ListInputNames(nnvm::Symbol::kReadOnlyArgs); + const std::vector mutable_input_names = + subgraph_sym.ListInputNames(nnvm::Symbol::kAuxiliaryStates); + CHECK_EQ(immutable_input_names.size() + mutable_input_names.size(), input_names.size()); + std::vector ret; + size_t i1 = 0, i2 = 0; + for (size_t i = 0; i < input_names.size(); ++i) { + if (i1 < immutable_input_names.size() && input_names[i] == immutable_input_names[i1]) { + ++i1; + } else { + CHECK(i2 < mutable_input_names.size()); + CHECK_EQ(input_names[i], mutable_input_names[i2]); + ++i2; + ret.push_back(i); + } + } + return ret; +} + +inline std::vector DefaultSubgraphOpResourceRequest(const nnvm::NodeAttrs& attrs) { + const nnvm::Symbol& subgraph_sym = *attrs.subgraphs[0]; + static auto& fresource = Op::GetAttr("FResourceRequest"); + std::set resource_types; + DFSVisit(subgraph_sym.outputs, [&](const nnvm::NodePtr& node) { + if (!node->is_variable() && fresource.count(node->op())) { + for (ResourceRequest& r : fresource[node->op()](node->attrs)){ + resource_types.insert(r.type); + } + } + }); + return std::vector(resource_types.begin(), resource_types.end()); +} + +} // namespace op +} // namespace mxnet + +#endif // MXNET_OPERATOR_SUBGRAPH_COMMON_H_ diff --git a/src/operator/subgraph/default_subgraph_op.cc b/src/operator/subgraph/default_subgraph_op.cc new file mode 100644 index 000000000000..d5fb7ee2db61 --- /dev/null +++ b/src/operator/subgraph/default_subgraph_op.cc @@ -0,0 +1,112 @@ +/* +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, +* software distributed under the License is distributed on an +* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +* KIND, either express or implied. See the License for the +* specific language governing permissions and limitations +* under the License. 
+*/ + +#include +#include "./common.h" +#include "../../imperative/imperative_utils.h" +#include "../../imperative/cached_op.h" + +namespace mxnet { +namespace op { + +#define DEBUG_SUBGRAPH 0 + +class DefaultSubgraphOperator { + public: + explicit DefaultSubgraphOperator(const Symbol& sym) : subgraph_sym_(sym) { + subgraph_exec_.reset(new CachedOp(sym, {{"static_alloc", "true"}, + {"static_shape", "true"}})); + } + + void Forward(const OpContext& ctx, + const std::vector& inputs, + const std::vector& req, + const std::vector& outputs); + void Backward(const OpContext& ctx, + const std::vector& inputs, + const std::vector& req, + const std::vector& outputs) { + LOG(FATAL) << "Not implemented"; + } + + private: + nnvm::Symbol subgraph_sym_; + CachedOpPtr subgraph_exec_; +}; + +void DefaultSubgraphOperator::Forward(const OpContext& ctx, + const std::vector& inputs, + const std::vector& req, + const std::vector& outputs) { + std::vector tmp_inputs = inputs; + std::vector input_ptrs; + input_ptrs.reserve(inputs.size()); + for (auto& nd : tmp_inputs) { + input_ptrs.push_back(&nd); + } + std::vector tmp_outputs = outputs; + std::vector output_ptrs; + for (auto& nd : tmp_outputs) { + output_ptrs.push_back(&nd); + } +#if DEBUG_SUBGRAPH + for (size_t i = 0; i < inputs.size(); ++i) { + LOG(INFO) << "inputs[" << i << "].version = " << inputs[i].version(); + } + for (size_t i = 0; i < outputs.size(); ++i) { + LOG(INFO) << "outputs[" << i << "].version = " << outputs[i].version(); + } +#endif + subgraph_exec_->Forward(subgraph_exec_, input_ptrs, output_ptrs); +} + +OpStatePtr CreateDefaultSubgraphOpState(const NodeAttrs& attrs, + Context ctx, + const std::vector& in_shapes, + const std::vector& in_types) { + return OpStatePtr::Create(*attrs.subgraphs[0]); +} + +void DefaultSubgraphOpForward(const OpStatePtr& state_ptr, + const OpContext& ctx, + const std::vector& inputs, + const std::vector& req, + const std::vector& outputs) { + DefaultSubgraphOperator& op = state_ptr.get_state(); + op.Forward(ctx, inputs, req, outputs); +} + +NNVM_REGISTER_OP(_default_subgraph_op) +.describe(R"code(_default_subgraph_op)code" ADD_FILELINE) +.set_num_inputs(DefaultSubgraphOpNumInputs) +.set_num_outputs(DefaultSubgraphOpNumOutputs) +.set_attr("FListInputNames", DefaultSubgraphOpListInputs) +.set_attr("FListOutputNames", DefaultSubgraphOpListOutputs) +.set_attr("FCreateOpState", CreateDefaultSubgraphOpState) +.set_attr("FInferShape", DefaultSubgraphOpShape) +.set_attr("FInferType", DefaultSubgraphOpType) +.set_attr("FInferStorageType", DefaultSubgraphOpStorageType) +.set_attr("FStatefulComputeEx", DefaultSubgraphOpForward) +.set_attr("FMutateInputs", DefaultSubgraphOpMutableInputs) +.set_attr("key_var_num_args", "num_args") +.set_attr("FExecType", DefaultSubgraphOpExecType) +.add_argument("data", "NDArray-or-Symbol[]", "input data list"); + +} // namespace op +} // namespace mxnet diff --git a/src/operator/subgraph/default_subgraph_op.cu b/src/operator/subgraph/default_subgraph_op.cu new file mode 100644 index 000000000000..008826b21d71 --- /dev/null +++ b/src/operator/subgraph/default_subgraph_op.cu @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/*! + * Copyright (c) 2018 by Contributors + * \file default_subgraph_op.cu + * \brief GPU Implementation of subgraph operations + */ + +#include +#include "./common.h" +#include "../../imperative/imperative_utils.h" +#include "../../imperative/cached_op.h" + +namespace mxnet { +namespace op { + +void DefaultSubgraphOpForward(const OpStatePtr& state_ptr, + const OpContext& ctx, + const std::vector& inputs, + const std::vector& req, + const std::vector& outputs); + +NNVM_REGISTER_OP(_default_subgraph_op) +.set_attr("FStatefulComputeEx", DefaultSubgraphOpForward); + +} // namespace op +} // namespace mxnet diff --git a/src/operator/subgraph/default_subgraph_property.cc b/src/operator/subgraph/default_subgraph_property.cc new file mode 100644 index 000000000000..c8d3e9ffd438 --- /dev/null +++ b/src/operator/subgraph/default_subgraph_property.cc @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +#include +#include +#include "./common.h" +#include "./subgraph_property.h" + +namespace mxnet { +namespace op { + +/* + * This selects nodes for a subgraph that only contains operators + * in a given set and it visits nodes via both input and output links. + */ +class ContainOpSelector: public SubgraphSelector { + public: + explicit ContainOpSelector(const std::unordered_set& op_names) + : op_names_(op_names) {} + + virtual bool Select(const nnvm::Node &seed_node) { + return !seed_node.is_variable() && op_names_.count(seed_node.op()->name); + } + + virtual bool SelectInput(const nnvm::Node &cur_node, const nnvm::Node &input_node) { + return !input_node.is_variable() && op_names_.count(input_node.op()->name); + } + + virtual bool SelectOutput(const nnvm::Node &cur_node, const nnvm::Node &output_node) { + return !output_node.is_variable() && op_names_.count(output_node.op()->name); + } + private: + const std::unordered_set& op_names_; +}; + +/* + * This subgraph property finds a subgraph whose nodes have only operators + * within a set. The operators in the subgraph will be executed by _default_subgraph_op. 
+ */ +class DefaultSubgraphProperty: public SubgraphProperty { + public: + static SubgraphPropertyPtr Create() { return std::make_shared(); } + virtual nnvm::NodePtr CreateSubgraphNode(const nnvm::Symbol &sym, + const int subgraph_id = 0) const { + nnvm::NodePtr n = nnvm::Node::Create(); + n->attrs.op = Op::Get("_default_subgraph_op"); + n->attrs.name = "_default_subgraph_op" + std::to_string(subgraph_id); + n->attrs.subgraphs.push_back(std::make_shared(sym)); + return n; + } + virtual SubgraphSelectorPtr CreateSubgraphSelector() const { + return std::make_shared( + this->GetAttr>("op_names")); + } +}; + +MXNET_REGISTER_SUBGRAPH_PROPERTY(default, DefaultSubgraphProperty); + +} // namespace op +} // namespace mxnet diff --git a/src/operator/subgraph/partition_graph.cc b/src/operator/subgraph/partition_graph.cc new file mode 100644 index 000000000000..315f7eec00c6 --- /dev/null +++ b/src/operator/subgraph/partition_graph.cc @@ -0,0 +1,774 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/*! + * Copyright (c) 2018 by Contributors + * \file partition_graph.cc + * \brief + */ +#include +#include +#include +#include +#include +#include + +#include "./subgraph_property.h" + +namespace nnvm { +NodePtr CreateVariableNode(const std::string& name); +} + +namespace mxnet { + +namespace op { + +using nnvm::Symbol; +using nnvm::Node; +using nnvm::NodePtr; +using nnvm::NodeEntry; +using nnvm::Graph; + +#define DEBUG_SUBGRAPH 0 + +namespace sg { // sg stands for subgraph + +struct SimpleNode; +using SimpleNodePtr = std::shared_ptr; + +/*! + * \brief Node of the undirected graph which replicates the network structures + * of the computational graph. It is used to ease the graph traversal for finding + * subgraphs. + */ +struct SimpleNode { + static SimpleNodePtr Create() { + return std::make_shared(); + } + SimpleNode() : label(-1), node(nullptr) {} + /*! subgraph label */ + int label; + /*! the original node in the computational graph it references*/ + nnvm::Node* node; + /*! + * \brief output nodes of the current node + * key is node ptr and value is an array of indices standing for the entry indices + * in key->inputs whose source is the current node. 
+ */ + std::unordered_map> outputs; +}; // struct SimpleNode + +#if DEBUG_SUBGRAPH +void PrintSubgraph(const std::vector& simple_nodes) { + std::string op_names = ""; + for (size_t i = 0; i < simple_nodes.size(); ++i) { + op_names += simple_nodes[i]->node->attrs.name + ' '; + } + LOG(INFO) << "Subgraph node names: " << op_names; +} + +void PrintNodeEntry(const nnvm::NodeEntry& entry) { + std::string ret = "NodeEntry: node_name=" + entry.node->attrs.name + + ", index=" + std::to_string(entry.index) + ", version=" + std::to_string(entry.version); + LOG(INFO) << ret; +} + +void PrintNodeEntries(const std::vector& entries) { + for (size_t i = 0; i < entries.size(); ++i) { + PrintNodeEntry(*entries[i]); + } +} +#endif + +/*! + * \brief Given a MXNet computational graph, create an undirected graph from it. + * \param g the MXNet computational graph + * \param simple_nodes the nodes of undirected graph in top sorted order + */ +void CreateSimpleGraph(const Graph& g, + std::vector* simple_nodes) { + const auto& indexed_graph = g.indexed_graph(); + simple_nodes->reserve(indexed_graph.num_nodes()); + DFSVisit(g.outputs, [&](const NodePtr& node) { + SimpleNodePtr sn = SimpleNode::Create(); + sn->node = node.get(); + for (size_t i = 0; i < sn->node->inputs.size(); ++i) { + const auto& e = sn->node->inputs[i]; + const auto input_nid = indexed_graph.node_id(e.node.get()); + CHECK_LT(input_nid, simple_nodes->size()); + auto& input_node_outputs = (*simple_nodes)[input_nid]->outputs; + auto it = input_node_outputs.find(sn->node); + if (it == input_node_outputs.end()) { + input_node_outputs.emplace(sn->node, std::vector{i}); + } else { + it->second.push_back(i); + } + } + simple_nodes->emplace_back(std::move(sn)); + }); +} + +/*! + * \brief Reset labels of the subgraph nodes to the original state + * and clear the vector of subgraph nodes. + */ +void ResetNodeLabels(const nnvm::Graph& g, + const std::vector& simple_nodes, + std::vector* subgraph_nodes) { + for (auto n : *subgraph_nodes) { + const auto nid = g.indexed_graph().node_id(n); + simple_nodes[nid]->label = -1; + } + subgraph_nodes->clear(); +} + +/*! + * \brief This function traverses the nodes in a computation graph from a starting + * node following the input edges and output edges, and marks all nodes that + * can be accessed from the starting node. Before the function returns, + * it will conduct checking whether there is a loop between the potential subgraph + * and the outside nodes. If so, add the node that should break the loop + * in excluded_nodes and return false. Otherwise, return true. 
+ * \param g the whole graph + * \subgraph_selector determines whether the visited node should be choosen or not + * \label the label of the current subgraph + * \snid node id of the seed simple node + * \simple_nodes all simple nodes in the top sorted order + * \subgraph_nodes all the nodes belonging to the same subgraph of seed node + * \excluded_nodes set of nodes that should be excluded from the current subgraph + */ +bool LabelSubgraph(const Graph& g, + SubgraphSelectorPtr subgraph_selector, + const int label, + const size_t snid, // simple node id, this is a seed + const std::vector& simple_nodes, + std::vector* subgraph_nodes, + std::unordered_set* excluded_nodes = nullptr) { + const auto& indexed_graph = g.indexed_graph(); + std::queue node_queue; + if (!excluded_nodes || !excluded_nodes->count(simple_nodes[snid]->node)) { + CHECK_EQ(simple_nodes[snid]->label, -1); + simple_nodes[snid]->label = label; + node_queue.push(simple_nodes[snid].get()); + } + // key: nodes that serve as input/output nodes to the subgraph + // value: pair of vectors of nodes in the subgraph. The first vector contains the + // output nodes of the key in the subgraph, and the second vector contains the + // input nodes of the key in the subgraph. + // If a non-subgraph node has inputs from the subgraph and the other non-subgraph node + // has outputs to the subgraph, and the first non-subgraph node is an ancestor + // of the second non-subgraph node, there exits a cycle. + // When breaking the cycle, we want to start from removing the node with the largest node id + // in the subgraph. + std::unordered_map, + std::vector>> non_subgraph_node_map; + while (!node_queue.empty()) { + SimpleNode* cur_node = node_queue.front(); + node_queue.pop(); + subgraph_nodes->push_back(cur_node->node); + // get qualified adjacent input nodes + for (auto& e : cur_node->node->inputs) { + const bool select_input = (!excluded_nodes || !excluded_nodes->count(e.node.get())) + && subgraph_selector->SelectInput(*cur_node->node, *e.node); + if (select_input) { + // e.node is a subgraph node + const auto nid = indexed_graph.node_id(e.node.get()); + CHECK_LT(nid, simple_nodes.size()); + // this node has not been visited yet + if (simple_nodes[nid]->label == -1) { + simple_nodes[nid]->label = label; + node_queue.push(simple_nodes[nid].get()); + } + } else { + // e.node is an input node of the subgraph + non_subgraph_node_map[e.node.get()].first.push_back(cur_node->node); + } + } + // get qualified output nodes + for (auto it = cur_node->outputs.begin(); it != cur_node->outputs.end(); ++it) { + const bool select_output = (!excluded_nodes || !excluded_nodes->count(it->first)) + && subgraph_selector->SelectOutput(*cur_node->node, *it->first); + if (select_output) { + // it->first is a subgraph node + const auto nid = indexed_graph.node_id(it->first); + CHECK_LT(nid, simple_nodes.size()); + // this node has not been visited yet + if (simple_nodes[nid]->label == -1) { + simple_nodes[nid]->label = label; + node_queue.push(simple_nodes[nid].get()); + } + } else { + // it->first is an output node of the subgraph + non_subgraph_node_map[it->first].second.push_back(cur_node->node); + } + } + } + // prepare to check if there is a cycle + auto node_cmp = [&] (const nnvm::Node* node1, const nnvm::Node* node2) { + return indexed_graph.node_id(node1) < indexed_graph.node_id(node2); + }; + std::vector non_subgraph_nodes; + non_subgraph_nodes.reserve(non_subgraph_node_map.size()); + for (auto& kv : non_subgraph_node_map) { + auto& output_nodes = 
kv.second.first; + std::sort(output_nodes.begin(), output_nodes.end(), node_cmp); + auto& input_nodes = kv.second.second; + std::sort(input_nodes.begin(), input_nodes.end(), node_cmp); + non_subgraph_nodes.push_back(kv.first); + } + // check whether there is a cycle between the subgraph and its input/output nodes + auto is_ancestor = [&](const nnvm::Node* ancestor, const nnvm::Node* descendant, + const std::vector& snodes) { + if (ancestor == descendant) return true; + std::stack s; + s.push(descendant); + size_t count = 0; + while (!s.empty()) { + CHECK_LT(count, indexed_graph.num_nodes()) << "Finding ancestor failed. There is probably" + " a loop in the graph"; + ++count; + const nnvm::Node* top = s.top(); + s.pop(); + if (top == ancestor) { + return true; + } + for (const auto& entry : top->inputs) { + // when searching for the ancestor, the path cannot cross any subgraph node + auto it = std::find(snodes.begin(), snodes.end(), entry.node.get()); + if (it == snodes.end()) { + s.push(entry.node.get()); + } + } + } + return false; + }; + std::sort(non_subgraph_nodes.begin(), non_subgraph_nodes.end(), node_cmp); + int excluded_node_id = -1; + for (size_t i = 0; i < non_subgraph_nodes.size(); ++i) { + auto it1 = non_subgraph_node_map.find(non_subgraph_nodes[i]); + CHECK(it1 != non_subgraph_node_map.end()); + auto& output_nodes = it1->second.first; // has been top sorted + auto& input_nodes = it1->second.second; // has been top sorted + if (!output_nodes.empty() && !input_nodes.empty()) { + // there is a loop between node i and the subgraph + const auto node_id = std::max(indexed_graph.node_id(output_nodes.back()), + indexed_graph.node_id(input_nodes.back())); + excluded_node_id = std::max(excluded_node_id, static_cast(node_id)); + } else if (!input_nodes.empty()) { + // node i is an input to the subgraph, find out if there is a node j + // which is an output of the subgraph and also a child of node i. + for (size_t j = i + 1; j < non_subgraph_nodes.size(); ++j) { + auto it2 = non_subgraph_node_map.find(non_subgraph_nodes[j]); + CHECK(it2 != non_subgraph_node_map.end()); + // i is topologically before j, j might be a direct/indirect output node of i + CHECK_LT(indexed_graph.node_id(it1->first), indexed_graph.node_id(it2->first)); + if (!it2->second.first.empty() && is_ancestor(it1->first, it2->first, *subgraph_nodes)) { + // found a loop + const auto node_id = std::max(indexed_graph.node_id(input_nodes.back()), + indexed_graph.node_id(it2->second.first.back())); + excluded_node_id = std::max(excluded_node_id, static_cast(node_id)); + } + } + } + } + + if (excluded_node_id != -1) { + CHECK_LT(excluded_node_id, static_cast(simple_nodes.size())); + CHECK_NE(excluded_node_id, static_cast(snid)) + << "A cycle is found in the computational graph between nodes " + << simple_nodes[excluded_node_id]->node->attrs.name << " and " + << simple_nodes[snid]->node->attrs.name; + excluded_nodes->insert(simple_nodes[excluded_node_id]->node); + ResetNodeLabels(g, simple_nodes, subgraph_nodes); + return false; + } + std::sort(subgraph_nodes->begin(), subgraph_nodes->end(), node_cmp); + return true; +} + +/*! + * \brief Finds all the nodes belonging to the same subgraph given a seed node. 
+ * \param g the whole graph + * \subgraph_selector determines whether the visited node should be choosen or not + * \label the label of the current subgraph + * \snid node id of the seed simple node + * \simple_nodes all simple nodes in the top sorted order + * \subgraph_nodes all the nodes belonging to the same subgraph of seed node + * \return Subgraph node candidates sorted in the topological order + */ +void PreSelectSubgraphNodes(const Graph& g, + SubgraphSelectorPtr subgraph_selector, + const int label, + const size_t snid, + const std::vector& simple_nodes, + std::vector* subgraph_nodes) { + std::unordered_set excluded_nodes; + const size_t max_num_retry = simple_nodes.size() * simple_nodes.size(); + size_t count = 0; + bool success = false; + while (!success && count < max_num_retry) { + success = LabelSubgraph(g, subgraph_selector, label, snid, simple_nodes, + subgraph_nodes, &excluded_nodes); + if (!success) { + CHECK(!excluded_nodes.empty()); + std::string excluded_node_names; + for (auto node : excluded_nodes) { + excluded_node_names += node->attrs.name + ", "; + } + LOG(INFO) << "Found a cycle when BFS from node " << simple_nodes[snid]->node->attrs.name + << ". Excluding nodes " << excluded_node_names << "and retrying"; + } + ++count; + } + if (!success) { + LOG(INFO) << "Tried " << count << " times of finding subgraphs starting from node " + << simple_nodes[snid]->node->attrs.name << " without success because a loop " + "is always found between the subgraph and some other nodes. Will treat " + "seed node " << simple_nodes[snid]->node->attrs.name + << "as a subgraph with one node"; + CHECK(subgraph_nodes->empty()); + simple_nodes[snid]->label = label; + subgraph_nodes->push_back(simple_nodes[snid]->node); + } +} + +/*! + * \brief Given a vector of nodes, group them into individual subgraphs + * based upon their connectivity. 
+ */ +void PostProcessNodeCandidates(const nnvm::Graph& g, + const std::vector& nodes, + const std::vector& simple_nodes, + std::vector>* subgraphs, + size_t* subgraph_id) { + const auto& indexed_graph = g.indexed_graph(); + std::unordered_set node_set(nodes.begin(), nodes.end()); + auto simple_node_cmp = [&] (const SimpleNode* node1, const SimpleNode* node2) { + return indexed_graph.node_id(node1->node) < indexed_graph.node_id(node2->node); + }; + for (auto node : nodes) { + if (!node_set.count(node)) { + // The node has been included in a subgraph + continue; + } + std::queue q; + q.push(node); + CHECK_EQ(node_set.erase(node), 1U); + subgraphs->emplace_back(); + const auto nid = indexed_graph.node_id(node); + simple_nodes[nid]->label = *subgraph_id; + subgraphs->back().push_back(simple_nodes[nid].get()); + while (!q.empty()) { + nnvm::Node* cur_node = q.front(); + q.pop(); + for (auto& e : cur_node->inputs) { + auto in_it = node_set.find(e.node.get()); + if (in_it != node_set.end()) { + q.push(*in_it); + const auto in_nid = indexed_graph.node_id(*in_it); + simple_nodes[in_nid]->label = *subgraph_id; + subgraphs->back().push_back(simple_nodes[in_nid].get()); + node_set.erase(in_it); + } + } + const auto cur_nid = indexed_graph.node_id(cur_node); + const SimpleNode* cur_snode = simple_nodes[cur_nid].get(); + for (const auto& kv : cur_snode->outputs) { + const auto out_it = node_set.find(kv.first); + if (out_it != node_set.end()) { + q.push(*out_it); + const auto out_nid = indexed_graph.node_id(*out_it); + simple_nodes[out_nid]->label = *subgraph_id; + subgraphs->back().push_back(simple_nodes[out_nid].get()); + node_set.erase(out_it); + } + } + } + ++(*subgraph_id); + std::sort(subgraphs->back().begin(), subgraphs->back().end(), simple_node_cmp); + } + CHECK(node_set.empty()); +} + +/*! + * \brief Finds subgraphs with all nodes that meet certain criteria. + * All nodes in a subgraph are marked with the same label. + */ +void FindSubgraphs(Graph* g, + const SubgraphProperty &subg_prop, + const std::vector& simple_nodes, + std::vector>* subgraph_nodes) { + const auto& indexed_graph = g->indexed_graph(); + CHECK_EQ(indexed_graph.num_nodes(), simple_nodes.size()); + auto node_cmp = [&] (const nnvm::Node* node1, const nnvm::Node* node2) { + return indexed_graph.node_id(node1) < indexed_graph.node_id(node2); + }; + size_t subgraph_id = 0; + for (size_t i = 0; i < simple_nodes.size(); ++i) { + nnvm::Node* node = simple_nodes[i]->node; + auto subgraph_selector = subg_prop.CreateSubgraphSelector(); + if (subgraph_selector->Select(*node) && simple_nodes[i]->label == -1) { + // pre-select nodes that can be grouped in a subgraph + std::vector preselected_nodes; + PreSelectSubgraphNodes(*g, subgraph_selector, subgraph_id, i, simple_nodes, + &preselected_nodes); + + // filter out unqualified pre-selected nodes + std::vector filtered_nodes = subgraph_selector->Filter(preselected_nodes); + + // make sure filtered_nodes is a subset of preselected_nodes + for (const auto n : filtered_nodes) { + const auto nit = std::find(preselected_nodes.begin(), preselected_nodes.end(), n); + CHECK(nit != preselected_nodes.end()) + << "Node " << n->attrs.name << " is not found in the pre-selected subgraph nodes." 
+            " Please make sure that no new nodes were added in your subgraph"
+            " selector's Filter function";
+      }
+
+      // make sure nodes are sorted
+      std::sort(filtered_nodes.begin(), filtered_nodes.end(), node_cmp);
+
+      // reset node labels that are not in filtered nodes
+      for (const auto n : preselected_nodes) {
+        const auto nit = std::find(filtered_nodes.begin(), filtered_nodes.end(), n);
+        if (nit == filtered_nodes.end()) {
+          simple_nodes[indexed_graph.node_id(n)]->label = -1;
+        }
+      }
+      // find out subgraphs from the filtered nodes
+      std::vector> subgraphs;
+      PostProcessNodeCandidates(*g, filtered_nodes, simple_nodes, &subgraphs, &subgraph_id);
+      if (!subgraphs.empty()) {
+        subgraph_nodes->insert(subgraph_nodes->end(), subgraphs.begin(), subgraphs.end());
+      }
+    }
+  }
+}
+
+/*!
+ * \brief Sorts entries according to their topological order.
+ * Note that entry ids cannot be used to sort entries.
+ * \param entry_top_order_map mapping from entry pointer to its topological position in the graph
+ * \param entries Node entries to be sorted
+ */
+void SortEntries(const std::unordered_map& entry_top_order_map,
+                 std::vector* entries) {
+  auto entry_cmp = [&](const nnvm::NodeEntry* e1, const nnvm::NodeEntry* e2) {
+    const auto it1 = entry_top_order_map.find(e1);
+    CHECK(it1 != entry_top_order_map.end());
+    const auto it2 = entry_top_order_map.find(e2);
+    CHECK(it2 != entry_top_order_map.end());
+    return it1->second < it2->second;
+  };
+  std::sort(entries->begin(), entries->end(), entry_cmp);
+}
+
+/*!
+ * \brief Given a subgraph, find the input entries of the subgraph.
+ * \param g the whole graph
+ * \param simple_nodes vector of simple nodes in top sorted order
+ * \param subgraph_nodes vector of pointers to simple nodes of a subgraph.
+ * \param entry_top_order_map mapping entry pointer to its top sorted position
+ * \param input_entries input entries of the subgraph
+ */
+void FindInputEntries(const Graph& g,
+                      const std::vector& simple_nodes,
+                      const std::vector& subgraph_nodes,
+                      const std::unordered_map& entry_top_order_map,
+                      std::vector* input_entries) {
+  const auto& indexed_graph = g.indexed_graph();
+  int label = -1;
+  for (size_t i = 0; i < subgraph_nodes.size(); ++i) {
+    if (label == -1) {
+      label = subgraph_nodes[i]->label;
+    } else {
+      CHECK_EQ(subgraph_nodes[i]->label, label);
+    }
+    auto& inputs = subgraph_nodes[i]->node->inputs;
+    for (size_t j = 0; j < inputs.size(); ++j) {
+      auto& e = inputs[j];
+      if (indexed_graph.exist(e.node.get())) {
+        // e's source node is not a subgraph node
+        const auto nid = indexed_graph.node_id(e.node.get());
+        // this is a node not belonging to the subgraph
+        if (simple_nodes[nid]->label != label) {
+          input_entries->push_back(&e);
+        }
+      } else {
+        // e's source node is a subgraph node.
+        // In this case, two subgraphs are adjacent.
+        input_entries->push_back(&e);
+      }
+    }
+  }
+  SortEntries(entry_top_order_map, input_entries);
+}
+
+/*!
+ * \brief Given a subgraph, find the output entries of the subgraph.
+ * \param g pointer to the whole graph
+ * \param simple_nodes vector of simple nodes in top sorted order
+ * \param subgraph_nodes vector of pointers to simple nodes of a subgraph.
+ * \param entry_top_order_map mapping entry pointer to its top sorted position
+ * \param output_entries output entries of the subgraph
+ */
+void FindOutputEntries(Graph* g,
+                       const std::vector& simple_nodes,
+                       const std::vector& subgraph_nodes,
+                       const std::unordered_map&
+                         entry_top_order_map,
+                       std::vector* output_entries) {
+  if (subgraph_nodes.empty()) return;
+  const auto& indexed_graph = g->indexed_graph();
+  int label = -1;
+  for (size_t i = 0; i < subgraph_nodes.size(); ++i) {
+    if (label == -1) {
+      label = subgraph_nodes[i]->label;
+    } else {
+      CHECK_EQ(subgraph_nodes[i]->label, label);
+    }
+    for (auto it = subgraph_nodes[i]->outputs.begin();
+         it != subgraph_nodes[i]->outputs.end(); ++it) {
+      if (indexed_graph.exist(it->first)) {
+        // if the output node is a normal graph node (not a subgraph node)
+        const auto nid = indexed_graph.node_id(it->first);
+        // this is a node not belonging to the current subgraph
+        if (simple_nodes[nid]->label != label) {
+          for (auto idx : it->second) {
+            auto& e = simple_nodes[nid]->node->inputs[idx];
+            output_entries->push_back(&e);
+          }
+        }
+      } else {
+        // if the output node is a subgraph node
+        // two graphs are adjacent
+        for (auto idx : it->second) {
+          output_entries->push_back(&(it->first->inputs[idx]));
+        }
+      }
+    }
+  }
+  // Check if current subgraph contains a node which is the last node
+  // of the whole graph. If so, save its corresponding entry as well.
+  for (size_t i = 0; i < g->outputs.size(); ++i) {
+    auto& entry = g->outputs[i];
+    // The entry might have been updated as an output of
+    // a subgraph node. In this case, no need
+    // to check its source for the current subgraph. Otherwise,
+    // do the following.
+    if (indexed_graph.exist(entry.node.get())) {
+      const auto nid = indexed_graph.node_id(entry.node.get());
+      if (simple_nodes[nid]->label == label) {
+        output_entries->push_back(&entry);
+      }
+    }
+  }
+  SortEntries(entry_top_order_map, output_entries);
+}
+
+/*!
+ * \brief Given a computation graph and a set of input node entries, this function cuts
+ * the node entries and creates new variable nodes as the input nodes of the
+ * subgraph. It returns the nodes that connect to the subgraph directly and
+ * the names of the new variable nodes.
+ */
+void CutGraphInputs(const std::vector &input_entries,
+                    std::vector *orig_entries,
+                    const bool skip_var = false) {
+  orig_entries->resize(input_entries.size());
+  // map for creating unique var nodes for deduplicating entries from the same node
+  std::unordered_map name_count_map;
+  for (size_t i = 0; i < input_entries.size(); ++i) {
+    nnvm::NodeEntry *e = input_entries[i];
+    // If the node is a variable itself, we may want to skip the node.
+    if (e->node->is_variable() && skip_var) {
+      continue;
+    }
+
+    orig_entries->at(i) = *e;
+    nnvm::Symbol sym;
+    sym.outputs.push_back(*e);
+    const auto output_names = sym.ListOutputNames();
+    CHECK_EQ(output_names.size(), 1U);
+    const std::string& var_name = output_names[0];
+    auto it = name_count_map.find(var_name);
+    if (name_count_map.end() == it) {
+      name_count_map.emplace(var_name, 0);
+    } else {
+      ++(it->second);
+    }
+    nnvm::NodePtr n = nnvm::CreateVariableNode(var_name + std::to_string(name_count_map[var_name]));
+    *e = nnvm::NodeEntry{n, 0, 0};
+  }
+}
+
+/*!
+ * \brief Replace a set of nodes belonging to the same subgraph with a subgraph node
+ * and keep the subgraph in the subgraph node. The input entries and output entries
+ * of the subgraph node are kept in the same order as the subgraph's.
+ */ +void CreateSubgraphNode(Graph* g, + const std::vector& simple_nodes, + const std::vector& subgraph_nodes, + const size_t subgraph_id, + std::unordered_map* entry_top_order_map) { +#if DEBUG_SUBGRAPH + LOG(INFO) << "Searching for input entries..."; +#endif + std::vector input_entries; + FindInputEntries(*g, simple_nodes, subgraph_nodes, *entry_top_order_map, &input_entries); + std::vector orig_input_entries; + CutGraphInputs(input_entries, &orig_input_entries, false); +#if DEBUG_SUBGRAPH + PrintNodeEntries(input_entries); + LOG(INFO) << "Searching for output entries..."; +#endif + std::vector output_entries; + FindOutputEntries(g, simple_nodes, subgraph_nodes, *entry_top_order_map, &output_entries); + + // Create a subgraph for the subgraph node + nnvm::Symbol sym; + sym.outputs.resize(output_entries.size()); + for (size_t i = 0; i < output_entries.size(); ++i) { + sym.outputs[i] = *output_entries[i]; + } + const SubgraphPropertyPtr& subg_prop = g->GetAttr("subgraph_property"); + nnvm::NodePtr n = subg_prop->CreateSubgraphNode(sym, subgraph_id); + + // Connect the external nodes to the subgraph node. + for (size_t i = 0; i < output_entries.size(); ++i) { + *output_entries[i] = nnvm::NodeEntry{n, static_cast(i), 0}; + } + n->inputs = orig_input_entries; + const auto& indexed_graph = g->indexed_graph(); + for (size_t i = 0; i < n->inputs.size(); ++i) { + auto& e = n->inputs[i]; + // update entry_top_order_map with newly created orig_input_entries + auto it = entry_top_order_map->find(input_entries[i]); + CHECK(it != entry_top_order_map->end()); + entry_top_order_map->emplace(&e, it->second); + // update input entries' source simple nodes' outputs map + nnvm::Node* node = e.node.get(); + if (indexed_graph.exist(node)) { + const auto nid = indexed_graph.node_id(node); + SimpleNode* sn = simple_nodes[nid].get(); + for (SimpleNode* dest_node : subgraph_nodes) { + sn->outputs.erase(dest_node->node); + } + sn->outputs[n.get()].push_back(i); + } + } +#if DEBUG_SUBGRAPH + PrintNodeEntries(output_entries); +#endif +} + +} // namespace sg + +/*! + * \brief Sort entries of all the nodes' inputs vectors in the topological order. + * This is going to be used to sort input/output entries of subgraphs to keep + * the topological order unchanged. + */ +void TopSortEntries(const Graph& g, + std::unordered_map* entry_top_order_map) { + CHECK(entry_top_order_map != nullptr); + std::unordered_set visited; + // tuple: (graph node, index of node's inputs, node entry as the output of the graph node) + std::stack> s; + auto in_degree = [] (const nnvm::Node* node)->size_t { + if (!node) { + return 0; + } + CHECK_EQ(node->control_deps.size(), 0U); + return node->inputs.size(); + }; + for (auto& e : g.outputs) { + nnvm::Node* node = e.node.get(); + if (visited.count(node) == 0U) { + s.emplace(node, 0U, &e); + visited.insert(node); + } else { + // The entry's source node has been visited before. + // Marking the order for it. + entry_top_order_map->emplace(&e, entry_top_order_map->size()); + } + while (!s.empty()) { + auto& top = s.top(); + if (std::get<1>(top) == in_degree(std::get<0>(top))) { + // The node's inputs has been exhausted. + entry_top_order_map->emplace(std::get<2>(top), entry_top_order_map->size()); + s.pop(); + } else { + // The node still has input entries not visited. 
+ CHECK_LT(std::get<1>(top), std::get<0>(top)->inputs.size()); + auto& entry = std::get<0>(top)->inputs[std::get<1>(top)++]; + nnvm::Node* input_node = entry.node.get(); + if (visited.count(input_node) == 0U) { + // The entry's source node has not been visited. + // Push the entry to the stack for marking order later. + s.emplace(input_node, 0U, &entry); + visited.insert(input_node); + } else { + // The entry's source node has been visited before. + // Marking the order for it. + entry_top_order_map->emplace(&entry, entry_top_order_map->size()); + } + } + } + } +} + +Graph PartitionGraph(Graph&& g) { + if (!g.HasAttr("subgraph_property")) { // treat the whole graph as a subgraph + LOG(INFO) << "The graph has no attribute of subgraph_property attached. " + "The original graph is returned."; + return g; + } + using namespace sg; + const SubgraphPropertyPtr& subg_prop = g.GetAttr("subgraph_property"); + // top sort NodeEntry of all the nodes' inputs + std::unordered_map entry_top_order_map; + TopSortEntries(g, &entry_top_order_map); + + // Create undirected graph for ease of finding subgraphs + std::vector simple_nodes; + CreateSimpleGraph(g, &simple_nodes); + std::vector> subgraph_nodes; + FindSubgraphs(&g, *subg_prop, simple_nodes, &subgraph_nodes); + for (size_t i = 0; i < subgraph_nodes.size(); ++i) { +#if DEBUG_SUBGRAPH + std::set simple_node_set(subgraph_nodes[i].begin(), subgraph_nodes[i].end()); + CHECK_EQ(simple_node_set.size(), subgraph_nodes[i].size()); + PrintSubgraph(subgraph_nodes[i]); +#endif + CreateSubgraphNode(&g, simple_nodes, subgraph_nodes[i], i, &entry_top_order_map); + } + return g; +} + +NNVM_REGISTER_PASS(PartitionGraph) +.describe("Partition a graph according to the user defined rules " + "in a derived class of SubgraphProperty") +.set_body(PartitionGraph) +.set_change_graph(true); + +} // namespace op +} // namespace mxnet diff --git a/src/operator/subgraph/subgraph_property.h b/src/operator/subgraph/subgraph_property.h new file mode 100644 index 000000000000..cfbc1f837337 --- /dev/null +++ b/src/operator/subgraph/subgraph_property.h @@ -0,0 +1,166 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +#ifndef MXNET_OPERATOR_SUBGRAPH_SUBGRAPH_PROPERTY_H_ +#define MXNET_OPERATOR_SUBGRAPH_SUBGRAPH_PROPERTY_H_ + +#include +#include +#include +#include +#include +#include + +namespace mxnet { +namespace op { + +/* + * This provides criteria for the graph partitioning algorithm to select + * nodes to subgraphs. + * The algorithm first sorts all the nodes in topological order, and then + * loops through the sorted nodes and tries to find a subgraph starting + * from each node (we call it a seed node) that satisfies the following two conditions: + * 1. The node has not been selected before. + * 2. 
+ *    The function Select is called on the node and returns true.
+ *
+ * Expanding from this seed node, we do BFS to traverse the graph.
+ * During the traversal, we call SelectInput and SelectOutput to determine
+ * if a neighboring node of the current node should be selected as a candidate for the subgraph.
+ * The search continues when a new node is selected as a candidate, and terminates when no more
+ * qualified nodes are found. When the search ends, all of the candidate nodes will
+ * be passed to the function Filter to finalize the subgraph. The filtering gives
+ * developers the last opportunity to drop off some of the candidate nodes.
+ * By default, Filter returns all nodes as the subgraph nodes.
+ * If the pre-selected subgraph becomes disconnected because some
+ * nodes are filtered out in the Filter function, the algorithm will automatically convert
+ * the rest of the nodes to multiple valid subgraphs based upon their connectivity.
+ */
+class SubgraphSelector {
+ public:
+  virtual ~SubgraphSelector() {}
+  /*!
+   * \brief Determines whether to search for other nodes to form a subgraph from the seed_node.
+   */
+  virtual bool Select(const nnvm::Node &seed_node) = 0;
+  /*!
+   * \brief Determines whether to select input_node when traversing to the cur_node.
+   * \param cur_node the node for determining whether its input_node should be selected
+   * \param input_node the input node of the cur_node
+   */
+  virtual bool SelectInput(const nnvm::Node &cur_node, const nnvm::Node &input_node) = 0;
+  /*!
+   * \brief Determines whether to select output_node when traversing to the cur_node.
+   * \param cur_node the node for determining whether its output_node should be selected
+   * \param output_node the output node of the cur_node
+   */
+  virtual bool SelectOutput(const nnvm::Node &cur_node, const nnvm::Node &output_node) = 0;
+  // Post processes pre-selected subgraph nodes. Return a list of nodes that
+  // users want to keep in subgraph(s).
+  virtual std::vector Filter(const std::vector& candidates) {
+    return candidates;
+  }
+};
+
+using SubgraphSelectorPtr = std::shared_ptr;
+
+/*!
+ * \brief This provides a set of properties for partitioning a graph into subgraphs,
+ * reconstructing a new graph from the subgraphs and creating a subgraph
+ * operator to execute the subgraph.
+ */
+class SubgraphProperty {
+ public:
+  // the criteria of selecting the subgraph nodes.
+  virtual SubgraphSelectorPtr CreateSubgraphSelector() const = 0;
+  // create an nnvm node for a given subgraph. Here users can customize how to
+  // execute the operators in the subgraph.
+ virtual nnvm::NodePtr CreateSubgraphNode(const nnvm::Symbol &s, + const int subgraph_id = 0) const = 0; + // set an attr with name in the attr map + template + SubgraphProperty& SetAttr(const std::string& name, const T& value) { + attrs_[name] = std::make_shared(value); + return *this; + } + // get the attr with the name + template + const T& GetAttr(const std::string& name) const { + auto it = attrs_.find(name); + CHECK(it != attrs_.end()) << "Cannot find attribute " << name << " in SubgraphProperty"; + return nnvm::get(*it->second); + } + protected: + std::unordered_map> attrs_; +}; + +using SubgraphPropertyPtr = std::shared_ptr; + +class SubgraphPropertyRegistry { + public: + typedef SubgraphPropertyPtr (*SubgraphPropertyCreateFn)(void); + static SubgraphPropertyRegistry* Get() { + static SubgraphPropertyRegistry inst; + return &inst; + } + + SubgraphPropertyPtr CreateSubgraphProperty(const std::string& name) { + auto it = prop_fn_map_.find(name); + CHECK(it != prop_fn_map_.end()) << "SubgraphProperty " << name + << " is not found in SubgraphPropertyRegistry"; + return it->second(); + } + + SubgraphPropertyCreateFn __REGISTER_OR_GET__(const std::string& name, + SubgraphPropertyCreateFn fn) { + if (prop_fn_map_.count(name) == 0U) { + return __REGISTER__(name, fn); + } else { + return prop_fn_map_.at(name); + } + } + + private: + SubgraphPropertyCreateFn __REGISTER__(const std::string& name, SubgraphPropertyCreateFn fn) { + CHECK_EQ(prop_fn_map_.count(name), 0U) << "Subgraph property " << name + << " has been registered"; + prop_fn_map_[name] = fn; + return prop_fn_map_[name]; + } + + SubgraphPropertyRegistry() = default; + SubgraphPropertyRegistry(const SubgraphPropertyRegistry&) = delete; + SubgraphPropertyRegistry(SubgraphPropertyRegistry&&) = delete; + SubgraphPropertyRegistry& operator=(const SubgraphPropertyRegistry&) = delete; + std::unordered_map prop_fn_map_; +}; + +// This op name set is for setting the names of operators that should be grouped into +// subgraphs. In practice, every backend accelerator should have a predefined name set. +// This set is only used for the testing purpose. 
+// key: property name, value: op name set +typedef dmlc::ThreadLocalStore>> + SubgraphPropertyOpNameSet; + +#define MXNET_REGISTER_SUBGRAPH_PROPERTY(Name, SubgraphPropertyType) \ + static DMLC_ATTRIBUTE_UNUSED auto __make_ ## SubgraphPropertyType ## _ ## Name ## __ = \ + SubgraphPropertyRegistry::Get()->__REGISTER_OR_GET__(#Name, &SubgraphPropertyType::Create) + +} // namespace op +} // namespace mxnet +#endif // MXNET_OPERATOR_SUBGRAPH_SUBGRAPH_PROPERTY_H_ diff --git a/tests/cpp/engine/threaded_engine_test.cc b/tests/cpp/engine/threaded_engine_test.cc index 92d0958c4630..6d669c19bcaa 100644 --- a/tests/cpp/engine/threaded_engine_test.cc +++ b/tests/cpp/engine/threaded_engine_test.cc @@ -275,6 +275,64 @@ TEST(Engine, basics) { LOG(INFO) << "All pass"; } +TEST(Engine, VarVersion) { + const size_t num_engines = 3; + std::vector engines(num_engines); + engines[0] = mxnet::engine::CreateNaiveEngine(); + engines[1] = mxnet::engine::CreateThreadedEnginePooled(); + engines[2] = mxnet::engine::CreateThreadedEnginePerDevice(); + std::string type_names[3] = {"NaiveEngine", "ThreadedEnginePooled", "ThreadedEnginePerDevice"}; + for (size_t k = 0; k < num_engines; ++k) { + auto engine = engines[k]; + std::vector oprs; + + LOG(INFO) << "Testing var as a read dependency in " << type_names[k]; + auto var = engine->NewVariable(); + EXPECT_EQ(var->version(), 0U); + for (int i = 0; i < 10; ++i) { + oprs.push_back(engine->NewOperator( + [i](mxnet::RunContext ctx, mxnet::Engine::CallbackOnComplete cb) { + Foo(ctx, i); + cb(); + }, + {var}, {})); + engine->Push(oprs.at(i), mxnet::Context{}); + } + engine->WaitForAll(); + EXPECT_EQ(var->version(), 0U); + for (auto&& i : oprs) { + engine->DeleteOperator(i); + } + engine->DeleteVariable([](mxnet::RunContext) {}, mxnet::Context{}, var); + engine->WaitForAll(); + + LOG(INFO) << "Testing var as a write dependency in " << type_names[k]; + var = engine->NewVariable(); + EXPECT_EQ(var->version(), 0U); + oprs.clear(); + for (int i = 0; i < 10; ++i) { + oprs.push_back(engine->NewOperator( + [i](mxnet::RunContext ctx, mxnet::Engine::CallbackOnComplete cb) { + Foo(ctx, i); + cb(); + }, + {}, {var})); + engine->Push(oprs.at(i), mxnet::Context{}); + } + engine->WaitForAll(); + EXPECT_EQ(var->version(), 10U); + for (auto&& i : oprs) { + engine->DeleteOperator(i); + } + engine->DeleteVariable([](mxnet::RunContext) {}, mxnet::Context{}, var); + engine->WaitForAll(); + + var = nullptr; + oprs.clear(); + LOG(INFO) << "All pass"; + } +} + #ifdef _OPENMP struct TestSaveAndRestoreOMPState { diff --git a/tests/python/gpu/test_operator_gpu.py b/tests/python/gpu/test_operator_gpu.py index 5612b0a647ed..0ff33e1e4094 100644 --- a/tests/python/gpu/test_operator_gpu.py +++ b/tests/python/gpu/test_operator_gpu.py @@ -41,6 +41,7 @@ from test_sparse_ndarray import * from test_sparse_operator import * from test_ndarray import * +from test_subgraph_op import * set_default_context(mx.gpu(0)) del test_support_vector_machine_l1_svm # noqa diff --git a/tests/python/unittest/test_subgraph_op.py b/tests/python/unittest/test_subgraph_op.py new file mode 100644 index 000000000000..40d609ad3541 --- /dev/null +++ b/tests/python/unittest/test_subgraph_op.py @@ -0,0 +1,238 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import os +import ctypes +import mxnet as mx +from mxnet.base import SymbolHandle, check_call, _LIB, mx_uint, c_str_array, c_str +from mxnet.symbol import Symbol +import numpy as np +from mxnet.test_utils import assert_almost_equal + + +def test_subgraph_exe(): + def _check_subgraph_exe1(sym, op_names): + """Use the partitioned sym to simple_bind an executor and compare the outputs + with those of the original executor""" + out = SymbolHandle() + check_call(_LIB.MXPartitionGraphByOpNames(sym.handle, c_str('default'), mx_uint(len(op_names)), + c_str_array(op_names), ctypes.byref(out))) + + partitioned_sym = Symbol(out) + assert partitioned_sym.list_inputs() == sym.list_inputs() + assert partitioned_sym.list_arguments() == sym.list_arguments() + assert partitioned_sym.list_auxiliary_states() == sym.list_auxiliary_states() + exe = sym.simple_bind(ctx=mx.current_context(), grad_req='null') + partitioned_exe = partitioned_sym.simple_bind(ctx=mx.current_context(), grad_req='null') + input_names = sym.list_inputs() + for name in input_names: + if name in exe.arg_dict: + exe.arg_dict[name][:] = mx.nd.random.uniform(shape=exe.arg_dict[name].shape) + partitioned_exe.arg_dict[name][:] = exe.arg_dict[name] + else: + assert name in exe.aux_dict + exe.aux_dict[name][:] = mx.nd.random.uniform(shape=exe.aux_dict[name].shape) + partitioned_exe.aux_dict[name][:] = exe.aux_dict[name] + exe.forward() + partitioned_exe.forward() + assert len(exe.outputs) == len(partitioned_exe.outputs) + for i in range(len(exe.outputs)): + assert_almost_equal((exe.outputs[i] - partitioned_exe.outputs[i]).abs().sum().asnumpy(), + np.zeros(shape=(1,))) + + def _check_subgraph_exe2(sym, op_names): + """Use env var MXNET_SUBGRAPH_BACKEND=default to trigger graph partitioning in simple_bind + and compare results of the partitioned sym and the original sym.""" + def get_executor(sym, subgraph_backend=None, op_names=None, original_exec=None): + if subgraph_backend is not None: + os.environ['MXNET_SUBGRAPH_BACKEND'] = subgraph_backend + check_call(_LIB.MXSetSubgraphPropertyOpNames(c_str(subgraph_backend), mx_uint(len(op_names)), + c_str_array(op_names))) + exe = sym.simple_bind(ctx=mx.current_context(), grad_req='null') + input_names = sym.list_inputs() + for name in input_names: + if name in exe.arg_dict: + exe.arg_dict[name][:] = mx.nd.random.uniform(shape=exe.arg_dict[name].shape)\ + if original_exec is None else original_exec.arg_dict[name] + else: + assert name in exe.aux_dict + exe.aux_dict[name][:] = mx.nd.random.uniform(shape=exe.aux_dict[name].shape)\ + if original_exec is None else original_exec.aux_dict[name] + exe.forward() + if subgraph_backend is not None: + check_call(_LIB.MXRemoveSubgraphPropertyOpNames(c_str(subgraph_backend))) + del os.environ['MXNET_SUBGRAPH_BACKEND'] + return exe + + original_exec = get_executor(sym) + partitioned_exec = get_executor(sym, 'default', op_names, original_exec) + outputs1 = original_exec.outputs + outputs2 = 
partitioned_exec.outputs + assert len(outputs1) == len(outputs2) + for i in range(len(outputs1)): + assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(), np.zeros(shape=(1,))) + + def _check_subgraph_exe3(sym, op_names): + """Use the partitioned sym to bind an executor and compare the outputs + with those of the original executor""" + out = SymbolHandle() + check_call(_LIB.MXPartitionGraphByOpNames(sym.handle, c_str('default'), mx_uint(len(op_names)), + c_str_array(op_names), ctypes.byref(out))) + + partitioned_sym = Symbol(out) + input_names = sym.list_inputs() + arg_names = sym.list_arguments() + aux_names = sym.list_auxiliary_states() + assert partitioned_sym.list_inputs() == input_names + assert partitioned_sym.list_arguments() == arg_names + assert partitioned_sym.list_auxiliary_states() == aux_names + arg_shapes, _, aux_shapes = sym.infer_shape() + arg_array = [mx.nd.random.uniform(shape=shape) for shape in arg_shapes] + aux_array = [mx.nd.random.uniform(shape=shape) for shape in aux_shapes] + exe = sym.bind(ctx=mx.current_context(), args=arg_array, aux_states=aux_array, grad_req='null') + partitioned_exe = partitioned_sym.bind(ctx=mx.current_context(), args=arg_array, + aux_states=aux_array, grad_req='null') + exe.forward() + partitioned_exe.forward() + assert len(exe.outputs) == len(partitioned_exe.outputs) + for i in range(len(exe.outputs)): + assert_almost_equal((exe.outputs[i] - partitioned_exe.outputs[i]).abs().sum().asnumpy(), + np.zeros(shape=(1,))) + + def _check_subgraph_exe4(sym, op_names): + """Use env var MXNET_SUBGRAPH_BACKEND=default to trigger graph partitioning in bind + and compare results of the partitioned sym and the original sym.""" + def get_executor(sym, subgraph_backend=None, op_names=None, original_exec=None): + if subgraph_backend is not None: + os.environ['MXNET_SUBGRAPH_BACKEND'] = subgraph_backend + check_call(_LIB.MXSetSubgraphPropertyOpNames(c_str(subgraph_backend), mx_uint(len(op_names)), + c_str_array(op_names))) + arg_shapes, _, aux_shapes = sym.infer_shape() + if subgraph_backend is None: + arg_array = [mx.nd.random.uniform(shape=shape) for shape in arg_shapes] + aux_array = [mx.nd.random.uniform(shape=shape) for shape in aux_shapes] + else: + arg_array = None + aux_array = None + exe = sym.bind(ctx=mx.current_context(), + args=arg_array if subgraph_backend is None else original_exec.arg_arrays, + aux_states=aux_array if subgraph_backend is None else original_exec.aux_arrays, + grad_req='null') + exe.forward() + if subgraph_backend is not None: + check_call(_LIB.MXRemoveSubgraphPropertyOpNames(c_str(subgraph_backend))) + del os.environ['MXNET_SUBGRAPH_BACKEND'] + return exe + + original_exec = get_executor(sym) + partitioned_exec = get_executor(sym, 'default', op_names, original_exec) + outputs1 = original_exec.outputs + outputs2 = partitioned_exec.outputs + assert len(outputs1) == len(outputs2) + for i in range(len(outputs1)): + assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(), np.zeros(shape=(1,))) + + def check_subgraph_exe(sym, op_names): + _check_subgraph_exe1(sym, op_names) + _check_subgraph_exe2(sym, op_names) + _check_subgraph_exe3(sym, op_names) + _check_subgraph_exe4(sym, op_names) + + def test_network_structure_1(): + data1 = mx.sym.var('data1', shape=(2, 3, 10, 10)) + data2 = mx.sym.var('data2') + conv1 = mx.sym.Convolution(data=data1, weight=data2, no_bias=True, kernel=(2, 2), num_filter=1) + conv2 = mx.sym.Convolution(data=data2, no_bias=True, kernel=(1, 1), num_filter=1) + out = 
mx.sym.Group([conv1, conv2]) + check_subgraph_exe(out, ['Convolution']) + + def test_network_structure_2(): + # this tests whether the partitioning algorithm can deal with cycles + data = mx.sym.var('data', shape=(2, 3, 10, 10)) + ret = mx.sym.exp(data) + ret1 = mx.sym.cos(ret) + ret2 = mx.sym.sin(ret) + ret = ret1 + ret2 + check_subgraph_exe(ret, ['exp', 'sin', '_Plus', 'elemwise_add', '_plus']) + check_subgraph_exe(ret, ['exp', 'cos', '_Plus', 'elemwise_add', '_plus']) + + def test_network_structure_3(): + # this tests whether the partitioned sym can distinguish in_args and aux_states + data = mx.sym.var('data', shape=(2, 3, 10, 10)) + ret = mx.sym.exp(data) + ret1 = mx.sym.cos(ret) + ret2 = mx.sym.sin(ret) + ret = ret1 + ret2 + ret = mx.sym.BatchNorm(ret) + ret = mx.sym.BatchNorm(ret) + check_subgraph_exe(ret, ['exp', 'sin', '_Plus', 'elemwise_add', '_plus']) + check_subgraph_exe(ret, ['exp', 'cos', '_Plus', 'elemwise_add', '_plus']) + check_subgraph_exe(ret, ['exp', 'sin', '_Plus', 'elemwise_add', '_plus', 'BatchNorm']) + check_subgraph_exe(ret, ['exp', 'cos', '_Plus', 'elemwise_add', '_plus', 'BatchNorm']) + check_subgraph_exe(ret, ['exp', 'BatchNorm']) + check_subgraph_exe(ret, ['BatchNorm']) + + def test_network_structure_4(): + # the last op has multiple duplicate outputs + data = mx.sym.var('data', shape=(2, 3, 10, 10)) + ret = mx.sym.exp(data) + ret = mx.sym.Group([ret, ret, ret]) + check_subgraph_exe(ret, ['exp']) + + def test_network_structure_5(): + # the subgraph has two duplicate input entries + data = mx.sym.var('data', shape=(2, 3, 10, 10)) + ret = data + data + check_subgraph_exe(ret, ['_plus', '_Plus', 'elemwise_add']) + + def test_network_structure_6(): + def get_graph(): + data1 = mx.sym.Variable('data1', shape=(3, 3, 10, 10), dtype=np.float32) + data2 = mx.sym.Variable('data2', shape=(1, 0, 2, 2)) + data3 = mx.sym.sin(data2) + conv = mx.sym.Convolution(data=data1, weight=data3, kernel=(2, 2), num_filter=1) + rets = [(conv, []), + (conv, [mx.sym.sin.__name__]), + (conv, [mx.sym.Convolution.__name__]), + (conv, [mx.sym.sin.__name__, mx.sym.Convolution.__name__])] + return rets + + for sym, op_names in get_graph(): + check_subgraph_exe(sym, op_names) + + def test_network_structure_7(): + # in this graph, the subgraph node and the other two external nodes form a cycle + data = mx.sym.Variable('data', shape=(1,)) + ret1 = mx.sym.sin(data) + ret2 = mx.sym.cos(ret1) + for _ in range(5): + ret2 = mx.sym.cos(ret2) + ret = ret1 + ret2 + check_subgraph_exe(ret, ['sin', 'elemwise_add', '_plus', '_Plus']) + + test_network_structure_1() + test_network_structure_2() + test_network_structure_3() + test_network_structure_4() + test_network_structure_5() + test_network_structure_6() + test_network_structure_7() + + +if __name__ == '__main__': + import nose + nose.runmodule() From 3fb7ab53dd666fad903807c232d226e06987007e Mon Sep 17 00:00:00 2001 From: Aaron Markham Date: Fri, 31 Aug 2018 09:28:58 -0700 Subject: [PATCH 089/160] fix search result 404s (#12414) --- docs/_static/searchtools_custom.js | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/_static/searchtools_custom.js b/docs/_static/searchtools_custom.js index dcc147329b13..5f8c30a24f10 100644 --- a/docs/_static/searchtools_custom.js +++ b/docs/_static/searchtools_custom.js @@ -8,14 +8,14 @@ * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: - * + * * * Redistributions of source code must retain the above 
copyright * notice, this list of conditions and the following disclaimer. - * + * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR @@ -523,7 +523,7 @@ var Search = { displayNextItem(); }); } else if (DOCUMENTATION_OPTIONS.HAS_SOURCE) { - $.ajax({url: DOCUMENTATION_OPTIONS.URL_ROOT + '_sources/' + item[0] + '.txt', + $.ajax({url: DOCUMENTATION_OPTIONS.URL_ROOT + '_sources/' + item[0] + '.md.txt', dataType: "text", complete: function(jqxhr, textstatus) { var data = jqxhr.responseText; From 4033cdd1d8a144286074efe4edfaf8c606205c8b Mon Sep 17 00:00:00 2001 From: Pedro Larroy <928489+larroy@users.noreply.github.com> Date: Fri, 31 Aug 2018 18:41:04 +0200 Subject: [PATCH 090/160] Make the output of ci/docker/install/ubuntu_mklml.sh less verbose (#12422) --- ci/docker/install/ubuntu_mklml.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ci/docker/install/ubuntu_mklml.sh b/ci/docker/install/ubuntu_mklml.sh index 4efa1f77e921..7e17295f4208 100755 --- a/ci/docker/install/ubuntu_mklml.sh +++ b/ci/docker/install/ubuntu_mklml.sh @@ -21,5 +21,5 @@ # the whole docker cache for the image set -ex -wget --no-check-certificate -O /tmp/mklml.tgz https://github.com/intel/mkl-dnn/releases/download/v0.14/mklml_lnx_2018.0.3.20180406.tgz -tar -zxvf /tmp/mklml.tgz && cp -rf mklml_*/* /usr/local/ && rm -rf mklml_* +wget -q --no-check-certificate -O /tmp/mklml.tgz https://github.com/intel/mkl-dnn/releases/download/v0.14/mklml_lnx_2018.0.3.20180406.tgz +tar -zxf /tmp/mklml.tgz && cp -rf mklml_*/* /usr/local/ && rm -rf mklml_* From 5ace01cd4e809428a2177501f91df41d167ae11b Mon Sep 17 00:00:00 2001 From: Aaron Markham Date: Fri, 31 Aug 2018 12:42:11 -0700 Subject: [PATCH 091/160] fixed docs/website build checkout bug (#12413) * fixed checkout bug; fixed echo statement * turning off clojure docs for v1.2.0 --- docs/build_version_doc/build_all_version.sh | 4 ++-- docs/settings.ini | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/build_version_doc/build_all_version.sh b/docs/build_version_doc/build_all_version.sh index d36f1f5edc64..350c75b084b7 100755 --- a/docs/build_version_doc/build_all_version.sh +++ b/docs/build_version_doc/build_all_version.sh @@ -114,7 +114,7 @@ function checkout () { # Overriding configs later will cause a conflict here, so stashing... git stash # Fails to checkout if not available locally, so try upstream - git checkout "$repo_folder" || git branch $repo_folder "upstream/$repo_folder" + git checkout "$repo_folder" || git branch $repo_folder "upstream/$repo_folder" && git checkout "$repo_folder" || exit 1 if [ $tag == 'master' ]; then git pull fi @@ -174,4 +174,4 @@ done echo "Now you may want to run update_all_version.sh to create the production layout with the versions dropdown and other per-version corrections." 
echo "The following pattern is recommended (tags, default tag, url base):" -echo "./update_all_version.sh "$tags_to_display " master http://mxnet.incubator.apache.org/" +echo "./update_all_version.sh \"$2\" master http://mxnet.incubator.apache.org/" diff --git a/docs/settings.ini b/docs/settings.ini index f999b3efde23..b8e486e58e87 100644 --- a/docs/settings.ini +++ b/docs/settings.ini @@ -14,7 +14,7 @@ r_docs = 0 scala_docs = 1 [document_sets_v1.2.0] -clojure_docs = 1 +clojure_docs = 0 doxygen_docs = 1 r_docs = 0 scala_docs = 1 From b8ee84b278cba6f85d3b5fdfe69bb06b9d1db28e Mon Sep 17 00:00:00 2001 From: solin319 Date: Sat, 1 Sep 2018 08:45:26 +0800 Subject: [PATCH 092/160] fix help in imread (#12420) fix help in imread --- python/mxnet/image/image.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/mxnet/image/image.py b/python/mxnet/image/image.py index 24f5309d136b..791de4bebdd2 100644 --- a/python/mxnet/image/image.py +++ b/python/mxnet/image/image.py @@ -72,12 +72,12 @@ def imread(filename, *args, **kwargs): Set `flag` parameter to 0 to get grayscale output - >>> mx.img.imdecode("flower.jpg", flag=0) + >>> mx.img.imread("flower.jpg", flag=0) Set `to_rgb` parameter to 0 to get output in OpenCV format (BGR) - >>> mx.img.imdecode(str_image, to_rgb=0) + >>> mx.img.imread("flower.jpg", to_rgb=0) """ return _internal._cvimread(filename, *args, **kwargs) From 10e94f8804ccde98a4edc12d0ed2e0b2014e41a9 Mon Sep 17 00:00:00 2001 From: Manu Seth <22492939+mseth10@users.noreply.github.com> Date: Fri, 31 Aug 2018 18:09:50 -0700 Subject: [PATCH 093/160] fixed flaky test issue for test_operator_gpu.test_convolution_grouping (#12385) * fixed flaky test issue for test_operator_gpu.test_convolution_grouping * Changed implicit cast to explicit cast --- tests/python/unittest/test_operator.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tests/python/unittest/test_operator.py b/tests/python/unittest/test_operator.py index 5bd88dd58695..78285b64543a 100644 --- a/tests/python/unittest/test_operator.py +++ b/tests/python/unittest/test_operator.py @@ -1583,7 +1583,6 @@ def check_batchnorm_training(stype): check_batchnorm_training('default') -@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/12219") @with_seed() def test_convolution_grouping(): for dim in [1, 2, 3]: @@ -1606,7 +1605,7 @@ def test_convolution_grouping(): exe1 = y1.simple_bind(default_context(), x=shape) exe2 = y2.simple_bind(default_context(), x=shape, w=(num_filter, shape[1]//num_group) + kernel, b=(num_filter,)) for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays): - arr1[:] = np.random.normal(size=arr1.shape) + arr1[:] = np.float32(np.random.normal(size=arr1.shape)) arr2[:] = arr1 exe1.forward(is_train=True) exe1.backward(exe1.outputs[0]) @@ -1614,7 +1613,7 @@ def test_convolution_grouping(): exe2.backward(exe2.outputs[0]) for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays): - np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-3, atol=1e-4) + np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-3, atol=1e-3) @unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/12203") From 58560f6d2e96da605c658742ebb0e26a7d0cbcd3 Mon Sep 17 00:00:00 2001 From: Manu Seth <22492939+mseth10@users.noreply.github.com> Date: Fri, 31 Aug 2018 18:13:09 -0700 Subject: [PATCH 094/160] fixed flaky test issue for test_operator_gpu.test_depthwise_convolution (#12402) * fixed flaky test issue for 
test_operator_gpu.test_depthwise_convolution * Changed implicit cast to explicit cast --- tests/python/unittest/test_operator.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tests/python/unittest/test_operator.py b/tests/python/unittest/test_operator.py index 78285b64543a..f246689e34e6 100644 --- a/tests/python/unittest/test_operator.py +++ b/tests/python/unittest/test_operator.py @@ -1616,7 +1616,6 @@ def test_convolution_grouping(): np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-3, atol=1e-3) -@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/12203") @with_seed() def test_depthwise_convolution(): for dim in [1,2]: @@ -1650,7 +1649,7 @@ def test_depthwise_convolution(): exe2 = y2.simple_bind(mx.cpu(), x=shape, w=(num_filter, shape[1]//num_group)+kernel, b=(num_filter,)) for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays): - arr1[:] = np.random.normal(size=arr1.shape) + arr1[:] = np.float32(np.random.normal(size=arr1.shape)) arr2[:] = arr1 exe1.forward(is_train=True) exe1.backward(exe1.outputs[0]) @@ -1658,7 +1657,7 @@ def test_depthwise_convolution(): exe2.backward(exe2.outputs[0]) for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays): - np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-3, atol=1e-3) + np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-2, atol=1e-3) def gen_broadcast_data(idx): # Manually set test cases From 307e62a60099cde9d504f6fa9f6bc406ffbfb2f8 Mon Sep 17 00:00:00 2001 From: Vandana Kannan Date: Sun, 2 Sep 2018 22:59:58 -0700 Subject: [PATCH 095/160] Add trigonometric operators (#12424) --- .../contrib/onnx/mx2onnx/_op_translations.py | 120 ++++++++++++++++++ .../onnx/export/onnx_backend_test.py | 6 + 2 files changed, 126 insertions(+) diff --git a/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py b/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py index af7fedb33cb9..0960776251c4 100644 --- a/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py +++ b/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py @@ -308,6 +308,126 @@ def convert_tanh(node, **kwargs): ) return [node] +@mx_op.register("cos") +def convert_cos(node, **kwargs): + """Map MXNet's cos operator attributes to onnx's Cos operator + and return the created node. + """ + helper, _, _ = import_onnx_modules() + name = node["name"] + inputs = node["inputs"] + input_node_idx = kwargs["index_lookup"][inputs[0][0]] + proc_nodes = kwargs["proc_nodes"] + input_node = proc_nodes[input_node_idx].name + + node = helper.make_node( + 'Cos', + [input_node], + [name], + name=name + ) + return [node] + +@mx_op.register("sin") +def convert_sin(node, **kwargs): + """Map MXNet's sin operator attributes to onnx's Sin operator + and return the created node. + """ + helper, _, _ = import_onnx_modules() + name = node["name"] + inputs = node["inputs"] + input_node_idx = kwargs["index_lookup"][inputs[0][0]] + proc_nodes = kwargs["proc_nodes"] + input_node = proc_nodes[input_node_idx].name + + node = helper.make_node( + 'Sin', + [input_node], + [name], + name=name + ) + return [node] + +@mx_op.register("tan") +def convert_tan(node, **kwargs): + """Map MXNet's tan operator attributes to onnx's tan operator + and return the created node. 
+ """ + helper, _, _ = import_onnx_modules() + name = node["name"] + inputs = node["inputs"] + input_node_idx = kwargs["index_lookup"][inputs[0][0]] + proc_nodes = kwargs["proc_nodes"] + input_node = proc_nodes[input_node_idx].name + + node = helper.make_node( + 'Tan', + [input_node], + [name], + name=name + ) + return [node] + +@mx_op.register("arccos") +def convert_acos(node, **kwargs): + """Map MXNet's acos operator attributes to onnx's acos operator + and return the created node. + """ + helper, _, _ = import_onnx_modules() + name = node["name"] + inputs = node["inputs"] + input_node_idx = kwargs["index_lookup"][inputs[0][0]] + proc_nodes = kwargs["proc_nodes"] + input_node = proc_nodes[input_node_idx].name + + node = helper.make_node( + 'Acos', + [input_node], + [name], + name=name + ) + return [node] + +@mx_op.register("arcsin") +def convert_asin(node, **kwargs): + """Map MXNet's asin operator attributes to onnx's asin operator + and return the created node. + """ + helper, _, _ = import_onnx_modules() + name = node["name"] + inputs = node["inputs"] + input_node_idx = kwargs["index_lookup"][inputs[0][0]] + proc_nodes = kwargs["proc_nodes"] + input_node = proc_nodes[input_node_idx].name + + node = helper.make_node( + 'Asin', + [input_node], + [name], + name=name + ) + return [node] + +@mx_op.register("arctan") +def convert_atan(node, **kwargs): + """Map MXNet's atan operator attributes to onnx's atan operator + and return the created node. + """ + helper, _, _ = import_onnx_modules() + name = node["name"] + inputs = node["inputs"] + input_node_idx = kwargs["index_lookup"][inputs[0][0]] + proc_nodes = kwargs["proc_nodes"] + input_node = proc_nodes[input_node_idx].name + + node = helper.make_node( + 'Atan', + [input_node], + [name], + name=name + ) + return [node] + #Basic neural network functions @mx_op.register("sigmoid") def convert_sigmoid(node, **kwargs): diff --git a/tests/python-pytest/onnx/export/onnx_backend_test.py b/tests/python-pytest/onnx/export/onnx_backend_test.py index 1fbfde5977eb..19bf6993e7cd 100644 --- a/tests/python-pytest/onnx/export/onnx_backend_test.py +++ b/tests/python-pytest/onnx/export/onnx_backend_test.py @@ -45,6 +45,12 @@ 'test_abs', 'test_sum', 'test_tanh', + 'test_cos', + 'test_sin', + 'test_tan', + 'test_acos', + 'test_asin', + 'test_atan' 'test_ceil', 'test_floor', 'test_concat', From d00cb42458802e4471c9517720924f2d595e02c0 Mon Sep 17 00:00:00 2001 From: Hao Jin Date: Mon, 3 Sep 2018 03:11:50 -0700 Subject: [PATCH 096/160] adjust tolerance levels of test_l2_normalization (#12429) --- tests/python/unittest/test_operator.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/python/unittest/test_operator.py b/tests/python/unittest/test_operator.py index f246689e34e6..2f94eb0aae18 100644 --- a/tests/python/unittest/test_operator.py +++ b/tests/python/unittest/test_operator.py @@ -3115,11 +3115,9 @@ def check_l2_normalization(in_shape, mode, dtype, norm_eps=1e-10): # compare numpy + mxnet assert_almost_equal(exe.outputs[0].asnumpy(), np_out, rtol=1e-2 if dtype is 'float16' else 1e-5, atol=1e-5) # check gradient - check_numeric_gradient(out, [in_data], numeric_eps=1e-3, rtol=1e-2, atol=1e-3) + check_numeric_gradient(out, [in_data], numeric_eps=1e-3, rtol=1e-2, atol=5e-3) -# @haojin2: getting rid of the fixed seed as the flakiness could not be reproduced. 
-# tracked at: https://github.com/apache/incubator-mxnet/issues/11717 @with_seed() def test_l2_normalization(): for dtype in ['float16', 'float32', 'float64']: From e0498ebf7af1dccf245b55ecdde4249a300c30dc Mon Sep 17 00:00:00 2001 From: Anton Chernov Date: Mon, 3 Sep 2018 14:57:21 +0200 Subject: [PATCH 097/160] Revert "fixed flaky test issue for test_operator_gpu.test_depthwise_convolution (#12402)" (#12441) This reverts commit 58560f6d2e96da605c658742ebb0e26a7d0cbcd3. --- tests/python/unittest/test_operator.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/python/unittest/test_operator.py b/tests/python/unittest/test_operator.py index 2f94eb0aae18..ca358ef02b1f 100644 --- a/tests/python/unittest/test_operator.py +++ b/tests/python/unittest/test_operator.py @@ -1616,6 +1616,7 @@ def test_convolution_grouping(): np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-3, atol=1e-3) +@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/12203") @with_seed() def test_depthwise_convolution(): for dim in [1,2]: @@ -1649,7 +1650,7 @@ def test_depthwise_convolution(): exe2 = y2.simple_bind(mx.cpu(), x=shape, w=(num_filter, shape[1]//num_group)+kernel, b=(num_filter,)) for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays): - arr1[:] = np.float32(np.random.normal(size=arr1.shape)) + arr1[:] = np.random.normal(size=arr1.shape) arr2[:] = arr1 exe1.forward(is_train=True) exe1.backward(exe1.outputs[0]) @@ -1657,7 +1658,7 @@ def test_depthwise_convolution(): exe2.backward(exe2.outputs[0]) for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays): - np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-2, atol=1e-3) + np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-3, atol=1e-3) def gen_broadcast_data(idx): # Manually set test cases From 8e4aeee6f0021d0c5a90fac0bda4631eac03e1cc Mon Sep 17 00:00:00 2001 From: Anton Chernov Date: Tue, 4 Sep 2018 00:43:10 +0200 Subject: [PATCH 098/160] Updated tvm submodule head (#12448) --- 3rdparty/tvm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/3rdparty/tvm b/3rdparty/tvm index 290226e1c9ad..5fec9adbcaf8 160000 --- a/3rdparty/tvm +++ b/3rdparty/tvm @@ -1 +1 @@ -Subproject commit 290226e1c9adbb3e598f9ed9184018df1c12be33 +Subproject commit 5fec9adbcaf8debb720b56beffd45bd6941eff63 From e290623f621ff0ca631b73b74c82956a83e77aea Mon Sep 17 00:00:00 2001 From: Aaron Markham Date: Tue, 4 Sep 2018 11:37:46 -0700 Subject: [PATCH 099/160] Sphinx error reduction (#12323) * new toctrees and index pages to reduce sphinx warnings and errors * exclude depricated and working dirs; update config add title, fix render errors, fix inaccurate text * new toctrees and index pages to reduce sphinx warnings and errors * exclude depricated and working dirs; update config add title, fix render errors, fix inaccurate text * removing unused and unnecessary files (that cause sphinx warnings) * add c++ index to whitelist * add more index to whitelist --- docs/api/clojure/index.md | 11 ++ docs/api/index.md | 14 +++ docs/api/python/index.md | 100 +++++++++++------- docs/api/scala/index.md | 14 +++ docs/architecture/index.md | 18 ++-- docs/architecture/release_note_0_9.md | 49 --------- docs/community/index.md | 11 ++ docs/faq/index.md | 8 ++ docs/get_started/index.md | 8 -- docs/gluon/index.md | 14 +-- docs/index.md | 27 ++--- docs/tutorials/basic/index.md | 8 ++ docs/tutorials/c++/index.md | 8 ++ docs/tutorials/embedded/index.md | 8 ++ 
docs/tutorials/gluon/index.md | 8 ++ docs/tutorials/index.md | 19 ++++ docs/tutorials/nlp/index.md | 8 ++ docs/tutorials/onnx/index.md | 8 ++ docs/tutorials/python/index.md | 8 ++ docs/tutorials/r/index.md | 13 +-- docs/tutorials/sparse/index.md | 8 ++ docs/tutorials/speech_recognition/index.md | 8 ++ docs/tutorials/unsupervised_learning/index.md | 8 ++ docs/tutorials/vision/index.md | 8 ++ tests/tutorials/test_sanity_tutorials.py | 15 ++- 25 files changed, 273 insertions(+), 136 deletions(-) create mode 100644 docs/api/index.md delete mode 100644 docs/architecture/release_note_0_9.md create mode 100644 docs/community/index.md delete mode 100644 docs/get_started/index.md create mode 100644 docs/tutorials/basic/index.md create mode 100644 docs/tutorials/c++/index.md create mode 100644 docs/tutorials/embedded/index.md create mode 100644 docs/tutorials/gluon/index.md create mode 100644 docs/tutorials/nlp/index.md create mode 100644 docs/tutorials/onnx/index.md create mode 100644 docs/tutorials/python/index.md create mode 100644 docs/tutorials/sparse/index.md create mode 100644 docs/tutorials/speech_recognition/index.md create mode 100644 docs/tutorials/unsupervised_learning/index.md create mode 100644 docs/tutorials/vision/index.md diff --git a/docs/api/clojure/index.md b/docs/api/clojure/index.md index 3eeffff0a4e6..32abbe06ad79 100644 --- a/docs/api/clojure/index.md +++ b/docs/api/clojure/index.md @@ -1,9 +1,20 @@ # MXNet - Clojure API + MXNet supports the Clojure programming language. The MXNet Clojure package brings flexible and efficient GPU computing and state-of-art deep learning to Clojure. It enables you to write seamless tensor/matrix computation with multiple GPUs in Clojure. It also lets you construct and customize the state-of-art deep learning models in Clojure, and apply them to tasks, such as image classification and data science challenges. See the [MXNet Clojure API Documentation](docs/index.html) for detailed API information. +```eval_rst +.. toctree:: + :maxdepth: 1 + + kvstore.md + module.md + ndarray.md + symbol_in_pictures.md + symbol.md +``` ## Tensor and Matrix Computations You can perform tensor or matrix computation in pure Clojure: diff --git a/docs/api/index.md b/docs/api/index.md new file mode 100644 index 000000000000..eff6807678ea --- /dev/null +++ b/docs/api/index.md @@ -0,0 +1,14 @@ +# MXNet APIs + +```eval_rst +.. toctree:: + :maxdepth: 1 + + c++/index.md + clojure/index.md + julia/index.md + perl/index.md + python/index.md + r/index.md + scala/index.md +``` diff --git a/docs/api/python/index.md b/docs/api/python/index.md index 420f4c9b72f6..42c4af9e46b5 100644 --- a/docs/api/python/index.md +++ b/docs/api/python/index.md @@ -17,58 +17,41 @@ Code examples are placed throughout the API documentation and these can be run a ```eval_rst .. note:: A convenient way to execute code examples is using the ``%doctest_mode`` mode of - Jupyter notebook, which allows for pasting multi-line examples containing - ``>>>`` while preserving indentation. Run ``%doctest_mode?`` in Jupyter notebook - for more details. + Jupyter notebook, which allows for pasting multi-line examples containing + ``>>>`` while preserving indentation. Run ``%doctest_mode?`` in Jupyter notebook + for more details. ``` \* Some old references to Model API may exist, but this API has been deprecated. -## NDArray API - -```eval_rst -.. 
toctree:: - :maxdepth: 1 - - ndarray/ndarray.md - ndarray/random.md - ndarray/linalg.md - ndarray/sparse.md - ndarray/contrib.md -``` - -## Symbol API +## Autograd API ```eval_rst .. toctree:: :maxdepth: 1 - symbol/symbol.md - symbol/random.md - symbol/linalg.md - symbol/sparse.md - symbol/contrib.md - symbol/rnn.md + autograd/autograd.md ``` -## Module API +## Callback API ```eval_rst .. toctree:: :maxdepth: 1 - module/module.md - executor/executor.md + callback/callback.md ``` -## Autograd API +## Contrib Package ```eval_rst .. toctree:: :maxdepth: 1 - autograd/autograd.md + contrib/contrib.md + contrib/text.md + contrib/onnx.md ``` ## Gluon API @@ -86,6 +69,15 @@ Code examples are placed throughout the API documentation and these can be run a gluon/contrib.md ``` +## Image API + +```eval_rst +.. toctree:: + :maxdepth: 1 + + image/image.md +``` + ## IO API ```eval_rst @@ -95,40 +87,54 @@ Code examples are placed throughout the API documentation and these can be run a io/io.md ``` -## Image API +## KV Store API ```eval_rst .. toctree:: :maxdepth: 1 - image/image.md + kvstore/kvstore.md ``` -## Optimization API +## Metric API ```eval_rst .. toctree:: :maxdepth: 1 - optimization/optimization.md + metric/metric.md ``` -## Callback API +## Module API ```eval_rst .. toctree:: :maxdepth: 1 - callback/callback.md + module/module.md + executor/executor.md ``` -## Metric API +## NDArray API ```eval_rst .. toctree:: :maxdepth: 1 - metric/metric.md + ndarray/ndarray.md + ndarray/random.md + ndarray/linalg.md + ndarray/sparse.md + ndarray/contrib.md +``` + +## Optimization API + +```eval_rst +.. toctree:: + :maxdepth: 1 + + optimization/optimization.md ``` ## Profiler API @@ -144,18 +150,30 @@ Code examples are placed throughout the API documentation and these can be run a ```eval_rst .. toctree:: - :maxdepth 1 + :maxdepth: 1 rtc/rtc.md ``` -## Contrib Package +## Symbol API ```eval_rst .. toctree:: :maxdepth: 1 - contrib/contrib.md - contrib/text.md - contrib/onnx.md + symbol/symbol.md + symbol/random.md + symbol/linalg.md + symbol/sparse.md + symbol/contrib.md + symbol/rnn.md +``` + +## Symbol in Pictures API + +```eval_rst +.. toctree:: + :maxdepth: 1 + + symbol_in_pictures/symbol_in_pictures.md ``` diff --git a/docs/api/scala/index.md b/docs/api/scala/index.md index e96892b58003..8b32c9fe9e22 100644 --- a/docs/api/scala/index.md +++ b/docs/api/scala/index.md @@ -1,9 +1,23 @@ # MXNet - Scala API + MXNet supports the Scala programming language. The MXNet Scala package brings flexible and efficient GPU computing and state-of-art deep learning to Scala. It enables you to write seamless tensor/matrix computation with multiple GPUs in Scala. It also lets you construct and customize the state-of-art deep learning models in Scala, and apply them to tasks, such as image classification and data science challenges. See the [MXNet Scala API Documentation](docs/index.html#org.apache.mxnet.package) for detailed API information. +```eval_rst +.. toctree:: + :maxdepth: 1 + + infer.md + io.md + kvstore.md + model.md + module.md + ndarray.md + symbol_in_pictures.md + symbol.md +``` ## Image Classification with the Scala Infer API The Infer API can be used for single and batch image classification. 
More information can be found at the following locations: diff --git a/docs/architecture/index.md b/docs/architecture/index.md index 91fb5f51d7b2..189e76e62fa5 100644 --- a/docs/architecture/index.md +++ b/docs/architecture/index.md @@ -15,9 +15,15 @@ Mainly, they focus on the following 3 areas: abstraction, optimization, and trade-offs between efficiency and flexibility. Additionally, we provide an overview of the complete MXNet system. -* [MXNet System Overview](http://mxnet.io/architecture/overview.html) -* [Deep Learning Programming Style: Symbolic vs Imperative](http://mxnet.io/architecture/program_model.html) -* [Dependency Engine for Deep Learning](http://mxnet.io/architecture/note_engine.html) -* [Optimizing the Memory Consumption in Deep Learning](http://mxnet.io/architecture/note_memory.html) -* [Efficient Data Loading Module for Deep Learning](http://mxnet.io/architecture/note_data_loading.html) -* [Exception Handling in MXNet](http://mxnet.io/architecture/exception_handling.html) +```eval_rst +.. toctree:: + :maxdepth: 1 + + overview.md + program_model.md + note_engine.md + note_memory.md + note_data_loading.md + exception_handling.md + rnn_interface.md +``` diff --git a/docs/architecture/release_note_0_9.md b/docs/architecture/release_note_0_9.md deleted file mode 100644 index afcc091d7ccb..000000000000 --- a/docs/architecture/release_note_0_9.md +++ /dev/null @@ -1,49 +0,0 @@ -# MXNet 0.9 (NNVM) Release Note - -Version 0.9 brings a number of important features and changes, including a back-end refactor to adopt the [NNVM](https://github.com/dmlc/nnvm) framework, a profiler for analyzing performance, a fast image IO and augmentation module that bypasses GIL, and various other changes. - -## NNVM Refactor - -NNVM is a library for neural network graph construction, optimization, and operator registration. It serves as an intermediary layer between the front-end (MXNet user API) and the back-end (computation on the device). After version 0.9, MXNet fully adopts the NNVM framework. Now it's easier to create operators. You can also register "pass"es that process and optimizes the graph when `bind` is called on the symbol. For more discussion on how to create operators with NNVM, please refer to [How to Create New Operators](../faq/new_op.md) - -Other changes brought by NNVM include: -- Backward shape inference is now supported -- All operators can now be used with both symbolic and ndarray API. For example, `mx.nd.Activation(x, act_type='relu')` works now. -- Optional cython API for mx.symbol and mx.ndarray is now available. Use `make cython` to activate it for accelerated communication with the back-end. - -## Profiler - -![MLP Profile](https://cloud.githubusercontent.com/assets/17693755/18035938/0a43484a-6d93-11e6-80d4-241c6ca552ea.png) - -MXNet now provides a native profiler for analyzing the performance of operators. This feature compliments general profiling tools like nvprof and gprof by summarizing at the operator level, instead of function, kernel, or instruction level. - -To use this feature, first set `USE_PROFILER = 1` in `config.mk` and rebuild mxnet. Then add three lines at the beginning and end of the section of your program you want to profile: -```python -mx.profiler.profiler_set_config(mode=scope, filename=fname) -profiler.profiler_set_state('run') - -# do computation ... - -profiler.profiler_set_state('stop') -``` -`scope` can be 'symbolic' (to only include symbolic operations) or 'all' (to include all operations), and `fname` is the path to save profiler output. 
- -After program finishes, navigate to [chrome://tracing](chrome://tracing) in a Chrome browser and load profiler output to see the results. - -## Image IO - -MXNet already has `mx.io.ImageRecordIter` for loading and preprocessing images. However, some tasks need more flexible image processing API. Detection, for example, requires transforming labels together with images. Usually, people write custom data iterators in python to handle this. But due to the infamous Global Interpreter Lock (GIL), python scripts cannot use multithreading to speed up processing. - -`mx.image` provides a set of fast image processing API that leverage MXNet Engine to automatically parallelize processing. You can write -```python -imgs = [mx.image.imdecode(open(f).read()) for f in img_paths] -``` -and decoding will be automatically run in parallel. - -## Miscellaneous - -- sgd and adam optimizer are now implemented with a single imperative call. They should be as fast and memory efficient as cc optimizers. ccsgd is now deprecated and redirects to sgd. -- Layout support is added. Use `mx.io.DataDesc(..., layout='NHWC')` in provide_data to specify data layout. use `mx.sym.YourSymbol(..., __layout__='NHWC')` to specify output layout. `layout` option is now available for Convolution layer. -- element_mask is removed. Please use src*mask.reshape((mask.size, 1, 1, ..., 1)) directly as binary ops now support broadcasting. -- sum_axis, max_axis, and min_axis are deprecated. Please use mx.nd.max(src, axis=n) instead. -- symbol attributes are now limited to ctx_group, lr_mult, wd_mult, force_mirroring. All other custom attributes need to be in __xxx__ format (start and end with double underscore) or an error will be triggered during attribute parsing. diff --git a/docs/community/index.md b/docs/community/index.md new file mode 100644 index 000000000000..7bdb1c213503 --- /dev/null +++ b/docs/community/index.md @@ -0,0 +1,11 @@ +# MXNet Community + +```eval_rst +.. toctree:: + :maxdepth: 1 + + contribute.md + ecosystem.md + powered_by.md + mxnet_channels.md +``` diff --git a/docs/faq/index.md b/docs/faq/index.md index 07dd9b9d7ca3..1b4a95d3f331 100644 --- a/docs/faq/index.md +++ b/docs/faq/index.md @@ -1,5 +1,13 @@ # MXNet FAQ +```eval_rst +.. toctree:: + :hidden: + :glob: + + * +``` + This section addresses common questions about how to use _MXNet_. These include performance issues, e.g., how to train with multiple GPUs. They also include workflow questions, e.g., how to visualize a neural network computation graph. These answers are fairly focused. For more didactic, self-contained introductions to neural networks diff --git a/docs/get_started/index.md b/docs/get_started/index.md deleted file mode 100644 index a743930b33dd..000000000000 --- a/docs/get_started/index.md +++ /dev/null @@ -1,8 +0,0 @@ - - - - - - Page Redirection - - diff --git a/docs/gluon/index.md b/docs/gluon/index.md index 4bea06edcabf..c0d9053cd2c1 100644 --- a/docs/gluon/index.md +++ b/docs/gluon/index.md @@ -1,9 +1,11 @@ -![](https://github.com/dmlc/web-data/blob/master/mxnet/image/image-gluon-logo.png?raw=true) +# About Gluon + +![gluon logo](https://github.com/dmlc/web-data/blob/master/mxnet/image/image-gluon-logo.png?raw=true) Based on the [the Gluon API specification](https://github.com/gluon-api/gluon-api), the new Gluon library in Apache MXNet provides a clear, concise, and simple API for deep learning. It makes it easy to prototype, build, and train deep learning models without sacrificing training speed. 
Install the latest version of MXNet to get access to Gluon by either following these easy steps or using this simple command: -```python - pip install mxnet --pre --user +```bash + pip install mxnet ```
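As a quick sanity check after the install, a minimal Gluon session along these lines should work; this is only a sketch assuming the `mxnet` package imported cleanly, not part of the page being edited here.

```python
import mxnet as mx
from mxnet import gluon, nd

print(mx.__version__)

# a one-layer Gluon block whose input size is inferred on first use
net = gluon.nn.Dense(2)
net.initialize()
out = net(nd.ones((3, 4)))
print(out.shape)  # expect (3, 2)
```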
@@ -39,8 +41,8 @@ Use plug-and-play neural network building blocks, including predefined layers, o ```python net = gluon.nn.Sequential() -# When instantiated, Sequential stores a chain of neural network layers. -# Once presented with data, Sequential executes each layer in turn, using +# When instantiated, Sequential stores a chain of neural network layers. +# Once presented with data, Sequential executes each layer in turn, using # the output of one layer as the input for the next with net.name_scope(): net.add(gluon.nn.Dense(256, activation="relu")) # 1st layer (256 nodes) @@ -81,7 +83,7 @@ def forward(self, F, inputs, tree):
**__High Performance__** -Easily cache the neural network to achieve high performance by defining your neural network with ``HybridSequential`` and calling the ``hybridize`` method: +Easily cache the neural network to achieve high performance by defining your neural network with ``HybridSequential`` and calling the ``hybridize`` method: ```python net = nn.HybridSequential() diff --git a/docs/index.md b/docs/index.md index 7e251131fee3..ab6a95dc0ddd 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,14 +1,15 @@ -Contents --------- -These are used to generate the indexes for search functionality. +# MXNet -- [Python Documents](api/python/index.md) -- [R Documents](api/r/index.md) -- [Julia Documents](api/julia/index.md) -- [C++ Documents](api/c++/index.md) -- [Scala Documents](api/scala/index.md) -- [Perl Documents](api/perl/index.md) -- [HowTo Documents](faq/index.md) -- [System Documents](architecture/index.md) -- [Tutorials](tutorials/index.md) -- [Community](community/contribute.md) +```eval_rst +.. toctree:: + :maxdepth: 1 + + api/index.md + architecture/index.md + community/index.md + faq/index.md + gluon/index.md + install/index.md + model_zoo/index.md + tutorials/index.md +``` diff --git a/docs/tutorials/basic/index.md b/docs/tutorials/basic/index.md new file mode 100644 index 000000000000..87d72894424f --- /dev/null +++ b/docs/tutorials/basic/index.md @@ -0,0 +1,8 @@ +# Tutorials + +```eval_rst +.. toctree:: + :glob: + + * +``` diff --git a/docs/tutorials/c++/index.md b/docs/tutorials/c++/index.md new file mode 100644 index 000000000000..87d72894424f --- /dev/null +++ b/docs/tutorials/c++/index.md @@ -0,0 +1,8 @@ +# Tutorials + +```eval_rst +.. toctree:: + :glob: + + * +``` diff --git a/docs/tutorials/embedded/index.md b/docs/tutorials/embedded/index.md new file mode 100644 index 000000000000..87d72894424f --- /dev/null +++ b/docs/tutorials/embedded/index.md @@ -0,0 +1,8 @@ +# Tutorials + +```eval_rst +.. toctree:: + :glob: + + * +``` diff --git a/docs/tutorials/gluon/index.md b/docs/tutorials/gluon/index.md new file mode 100644 index 000000000000..87d72894424f --- /dev/null +++ b/docs/tutorials/gluon/index.md @@ -0,0 +1,8 @@ +# Tutorials + +```eval_rst +.. toctree:: + :glob: + + * +``` diff --git a/docs/tutorials/index.md b/docs/tutorials/index.md index 530d1302129b..8a6ac4081c04 100644 --- a/docs/tutorials/index.md +++ b/docs/tutorials/index.md @@ -1,5 +1,24 @@ # Tutorials +```eval_rst +.. toctree:: + :hidden: + + basic/index.md + c++/index.md + embedded/index.md + gluon/index.md + nlp/index.md + onnx/index.md + python/index.md + r/index.md + scala/index.md + sparse/index.md + speech_recognition/index.md + unsupervised_learning/index.md + vision/index.md +``` + MXNet tutorials can be found in this section. A variety of language bindings are available for MXNet (including Python, Scala, C++ and R) and we have a different tutorial section for each language. Are you new to MXNet, and don't have a preference on language? We currently recommend starting with Python, and specifically the Gluon APIs (versus Module APIs) as they're more flexible and easier to debug. diff --git a/docs/tutorials/nlp/index.md b/docs/tutorials/nlp/index.md new file mode 100644 index 000000000000..87d72894424f --- /dev/null +++ b/docs/tutorials/nlp/index.md @@ -0,0 +1,8 @@ +# Tutorials + +```eval_rst +.. 
toctree:: + :glob: + + * +``` diff --git a/docs/tutorials/onnx/index.md b/docs/tutorials/onnx/index.md new file mode 100644 index 000000000000..87d72894424f --- /dev/null +++ b/docs/tutorials/onnx/index.md @@ -0,0 +1,8 @@ +# Tutorials + +```eval_rst +.. toctree:: + :glob: + + * +``` diff --git a/docs/tutorials/python/index.md b/docs/tutorials/python/index.md new file mode 100644 index 000000000000..87d72894424f --- /dev/null +++ b/docs/tutorials/python/index.md @@ -0,0 +1,8 @@ +# Tutorials + +```eval_rst +.. toctree:: + :glob: + + * +``` diff --git a/docs/tutorials/r/index.md b/docs/tutorials/r/index.md index 4692e7adce7e..fbc8911f2a6d 100644 --- a/docs/tutorials/r/index.md +++ b/docs/tutorials/r/index.md @@ -4,18 +4,9 @@ These tutorials introduce a few fundamental concepts in deep learning and how to ```eval_rst .. toctree:: - :maxdepth: 1 + :glob: - ndarray - symbol - fiveMinutesNeuralNetwork - classifyRealImageWithPretrainedModel - mnistCompetition - CatsDogsFinetune - CharRnnModel - CallbackFunction - CustomIterator - CustomLossFunction + * ```
diff --git a/docs/tutorials/sparse/index.md b/docs/tutorials/sparse/index.md new file mode 100644 index 000000000000..87d72894424f --- /dev/null +++ b/docs/tutorials/sparse/index.md @@ -0,0 +1,8 @@ +# Tutorials + +```eval_rst +.. toctree:: + :glob: + + * +``` diff --git a/docs/tutorials/speech_recognition/index.md b/docs/tutorials/speech_recognition/index.md new file mode 100644 index 000000000000..87d72894424f --- /dev/null +++ b/docs/tutorials/speech_recognition/index.md @@ -0,0 +1,8 @@ +# Tutorials + +```eval_rst +.. toctree:: + :glob: + + * +``` diff --git a/docs/tutorials/unsupervised_learning/index.md b/docs/tutorials/unsupervised_learning/index.md new file mode 100644 index 000000000000..87d72894424f --- /dev/null +++ b/docs/tutorials/unsupervised_learning/index.md @@ -0,0 +1,8 @@ +# Tutorials + +```eval_rst +.. toctree:: + :glob: + + * +``` diff --git a/docs/tutorials/vision/index.md b/docs/tutorials/vision/index.md new file mode 100644 index 000000000000..87d72894424f --- /dev/null +++ b/docs/tutorials/vision/index.md @@ -0,0 +1,8 @@ +# Tutorials + +```eval_rst +.. toctree:: + :glob: + + * +``` diff --git a/tests/tutorials/test_sanity_tutorials.py b/tests/tutorials/test_sanity_tutorials.py index f87e98e92126..e59521f27044 100644 --- a/tests/tutorials/test_sanity_tutorials.py +++ b/tests/tutorials/test_sanity_tutorials.py @@ -24,8 +24,15 @@ # automated test suite. # Rules to be in the whitelist: # - not a python tutorial -whitelist = ['c++/basics.md', +whitelist = ['basic/index.md', + 'c++/basics.md', + 'c++/index.md', + 'embedded/index.md', 'embedded/wine_detector.md', + 'gluon/index.md', + 'nlp/index.md', + 'onnx/index.md', + 'python/index.md', 'r/CallbackFunction.md', 'r/charRnnModel.md', 'r/classifyRealImageWithPretrainedModel.md', @@ -39,7 +46,11 @@ 'scala/char_lstm.md', 'scala/mnist.md', 'scala/index.md', - 'scala/mxnet_scala_on_intellij.md'] + 'scala/mxnet_scala_on_intellij.md', + 'sparse/index.md', + 'speech_recognition/index.md', + 'unsupervised_learning/index.md', + 'vision/index.md'] whitelist_set = set(whitelist) def test_tutorial_downloadable(): From 4e19a328ae94c893ed11591b798aaebf33f39052 Mon Sep 17 00:00:00 2001 From: Hao Jin Date: Wed, 5 Sep 2018 11:34:54 -0700 Subject: [PATCH 100/160] remove flaky test and add consistency test for stable testing (#12427) --- src/operator/bilinear_sampler-inl.h | 3 + src/operator/bilinear_sampler.cu | 6 +- tests/python/gpu/test_operator_gpu.py | 59 +++++++++ tests/python/unittest/test_operator.py | 160 ------------------------- 4 files changed, 67 insertions(+), 161 deletions(-) diff --git a/src/operator/bilinear_sampler-inl.h b/src/operator/bilinear_sampler-inl.h index 657aebafdb74..e0b4db7b367c 100644 --- a/src/operator/bilinear_sampler-inl.h +++ b/src/operator/bilinear_sampler-inl.h @@ -44,7 +44,10 @@ enum BilinearSamplerOpOutputs {kOut, kTmp}; } struct BilinearSamplerParam : public dmlc::Parameter { + dmlc::optional cudnn_off; DMLC_DECLARE_PARAMETER(BilinearSamplerParam) { + DMLC_DECLARE_FIELD(cudnn_off).set_default(dmlc::optional()) + .describe("whether to turn cudnn off"); } }; diff --git a/src/operator/bilinear_sampler.cu b/src/operator/bilinear_sampler.cu index 0ab628da700b..e1f205258a24 100644 --- a/src/operator/bilinear_sampler.cu +++ b/src/operator/bilinear_sampler.cu @@ -212,7 +212,11 @@ Operator* CreateOp(BilinearSamplerParam param, int dtype) { Operator *op = NULL; #if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 5 MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { - op = new CuDNNBilinearSamplerOp(param); + if 
(param.cudnn_off.has_value() && param.cudnn_off.value()) { + op = new BilinearSamplerOp(param); + } else { + op = new CuDNNBilinearSamplerOp(param); + } }) #else MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { diff --git a/tests/python/gpu/test_operator_gpu.py b/tests/python/gpu/test_operator_gpu.py index 0ff33e1e4094..7b75275e0c98 100644 --- a/tests/python/gpu/test_operator_gpu.py +++ b/tests/python/gpu/test_operator_gpu.py @@ -1918,6 +1918,65 @@ def test_softmax_activation(): assert_almost_equal(cpu_a.grad.asnumpy(), gpu_a.grad.asnumpy(), atol = 1e-3, rtol = 1e-3) + +@with_seed() +def test_bilinear_sampler_versions(): + data = mx.sym.Variable('data') + grid = mx.sym.Variable('grid') + sym1 = mx.sym.BilinearSampler(data=data, grid=grid) + sym2 = mx.sym.BilinearSampler(data=data, grid=grid, cudnn_off=True) + sym3 = mx.sym.BilinearSampler(data=data, grid=grid) + + test_cases = [[(1,3,15,16),(1,2,10,10)], + [(1,6,7,16),(1,2,10,4)], + [(1,7,3,16),(1,2,8,11)], + [(1,9,50,50),(1,2,50,50)]] + + for item in test_cases: + data_shape, grid_shape = item + # kWriteTo + exe_cpu = sym1.simple_bind(data=data_shape, grid=grid_shape, ctx=mx.cpu(), grad_req='write') + exe_gpu = sym2.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='write') + exe_cudnn = sym3.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='write') + exe_list = [exe_cpu, exe_gpu, exe_cudnn] + ref_idx = 0 + test_data = np.random.uniform(low=-0.1, high=0.1,size=data_shape).astype(np.float32) + test_grid = np.random.uniform(low=-2, high=2, size=grid_shape).astype(np.float32) + for exe in exe_list: + exe.arg_dict['data'][:] = test_data + exe.arg_dict['grid'][:] = test_grid + exe.forward(is_train=True) + assert_almost_equal(exe_list[0].outputs[0].asnumpy(), exe.outputs[0].asnumpy(), rtol=1e-3, atol=1e-5) + + out_grad = np.random.uniform(low=-0.01, high=0.01,size=data_shape[:2] + grid_shape[2:]).astype(np.float32) + for exe in exe_list: + exe.backward(mx.nd.array(out_grad)) + assert_almost_equal(exe.grad_dict['data'].asnumpy(), exe_list[ref_idx].grad_dict['data'].asnumpy(), rtol=1e-3, atol=1e-5) + assert_almost_equal(exe.grad_dict['grid'].asnumpy(), exe_list[ref_idx].grad_dict['grid'].asnumpy(), rtol=1e-3, atol=1e-5) + + data_grad = exe_list[ref_idx].grad_dict['data'].asnumpy() + grid_grad = exe_list[ref_idx].grad_dict['grid'].asnumpy() + + # kAddTo + exe_cpu_addto = sym1.simple_bind(data=data_shape, grid=grid_shape, ctx=mx.cpu(), grad_req='add') + exe_gpu_addto = sym2.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='add') + exe_cudnn_addto = sym3.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='add') + exe_list = [exe_cpu_addto, exe_gpu_addto, exe_cudnn_addto] + data_initial_grad = np.random.normal(size=exe_list[ref_idx].grad_dict['data'].shape).astype(np.float32) + grid_initial_grad = np.random.normal(size=exe_list[ref_idx].grad_dict['grid'].shape).astype(np.float32) + for exe in exe_list: + exe.arg_dict['data'][:] = test_data + exe.arg_dict['grid'][:] = test_grid + exe.grad_dict['data'][:] = data_initial_grad + exe.grad_dict['grid'][:] = grid_initial_grad + exe.forward(is_train=True) + exe.backward(mx.nd.array(out_grad)) + assert_almost_equal(exe.grad_dict['data'].asnumpy(), exe_list[ref_idx].grad_dict['data'].asnumpy(), rtol=1e-3, atol=1e-5) + assert_almost_equal(exe.grad_dict['grid'].asnumpy(), exe_list[ref_idx].grad_dict['grid'].asnumpy(), rtol=1e-3, atol=1e-5) + assert_almost_equal(exe_list[ref_idx].grad_dict['data'].asnumpy(), 
data_grad + data_initial_grad, rtol=1e-3, atol=1e-5) + assert_almost_equal(exe_list[ref_idx].grad_dict['grid'].asnumpy(), grid_grad + grid_initial_grad, rtol=1e-3, atol=1e-5) + + def test_context_num_gpus(): # Test that num_gpus reports at least one GPU, as the test is run on a GPU host. assert mx.context.num_gpus() > 0 diff --git a/tests/python/unittest/test_operator.py b/tests/python/unittest/test_operator.py index ca358ef02b1f..9842a69e18d4 100644 --- a/tests/python/unittest/test_operator.py +++ b/tests/python/unittest/test_operator.py @@ -3945,166 +3945,6 @@ def test_grid_generator(): assert_almost_equal(exe_add.grad_dict['flow'].asnumpy(), grad_est + flow_grad_npy, rtol=1e-3, atol=1e-5) -@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/12248") -def test_bilinear_sampler(): - from math import floor - - def between(x, lowerbound, upperbound): - return x>=lowerbound and x<=upperbound - - def bilinear_forward_numpy(data, grid): - - batchsize = data.shape[0] - input_height = data.shape[2] - input_width = data.shape[3] - num_channel = data.shape[1] - - output_height = grid.shape[2] - output_width = grid.shape[3] - out = np.zeros(data.shape[:2] + grid.shape[2:], dtype=np.float32) - - for i in range(batchsize): - for yout in range(output_height): - for xout in range(output_width): - - xcoord = np.float32((grid[i, 0, yout, xout] + 1) * (input_width-1) / 2.0) - ycoord = np.float32((grid[i, 1, yout, xout] + 1) * (input_height-1) / 2.0) - - xInTopLeft = int(floor(xcoord)) - xWeightTopLeft = np.float32(1-(xcoord - xInTopLeft)) - - yInTopLeft = int(floor(ycoord)) - yWeightTopLeft = np.float32(1-(ycoord - yInTopLeft)) - - # interpolation - for channel in range(num_channel): - - inTopLeft = data[i,channel,yInTopLeft, xInTopLeft] \ - if between(xInTopLeft,0,input_width-1) and between(yInTopLeft,0,input_height-1) else 0.0 - inTopRight = data[i,channel,yInTopLeft, xInTopLeft+1] \ - if between(xInTopLeft+1,0,input_width-1) and between(yInTopLeft,0,input_height-1) else 0.0 - inBottomLeft = data[i,channel,yInTopLeft+1, xInTopLeft] \ - if between(xInTopLeft,0,input_width-1) and between(yInTopLeft+1,0,input_height-1) else 0.0 - inBottomRight = data[i,channel,yInTopLeft+1, xInTopLeft+1] \ - if between(xInTopLeft+1,0,input_width-1) and between(yInTopLeft+1,0,input_height-1) else 0.0 - - out[i,channel,yout,xout] = xWeightTopLeft * yWeightTopLeft * inTopLeft\ - + (1-xWeightTopLeft)*yWeightTopLeft * inTopRight\ - + xWeightTopLeft * (1-yWeightTopLeft) * inBottomLeft\ - +(1-xWeightTopLeft) * (1-yWeightTopLeft) * inBottomRight - return out - - def bilinear_backward_numpy(out_grad, data, grid): - - data_grad = np.zeros(data.shape, dtype=np.float32) - grid_grad = np.zeros(grid.shape, dtype=np.float32) - - batchsize = data.shape[0] - input_height = data.shape[2] - input_width = data.shape[3] - num_channel = data.shape[1] - output_height = grid.shape[2] - output_width = grid.shape[3] - - for i in range(batchsize): - for yout in range(output_height): - for xout in range(output_width): - - top_left_y_gw = np.float32(0.0); - top_left_x_gw = np.float32(0.0); - - xcoord = np.float32((grid[i, 0, yout, xout] + 1) * (input_width-1) / 2.0) - ycoord = np.float32((grid[i, 1, yout, xout] + 1) * (input_height-1) / 2.0) - - xInTopLeft = int(floor(xcoord)) - xWeightTopLeft = np.float32(1-(xcoord - xInTopLeft)) - - yInTopLeft = int(floor(ycoord)) - yWeightTopLeft = np.float32(1-(ycoord - yInTopLeft)) - - topLeftDotProduct = np.float32(0) - topRightDotProduct = np.float32(0) - bottomLeftDotProduct = 
np.float32(0) - bottomRightDotProduct = np.float32(0) - - for channel in range(num_channel): - # left top - if between(xInTopLeft,0,input_width-1) and between(yInTopLeft,0,input_height-1): - topLeftDotProduct += data[i,channel,yInTopLeft, xInTopLeft] * \ - out_grad[i,channel,yout,xout] - data_grad[i, channel, yInTopLeft, xInTopLeft] += xWeightTopLeft * \ - yWeightTopLeft * out_grad[i,channel,yout,xout] - # right top - if between(xInTopLeft+1,0,input_width-1) and between(yInTopLeft,0,input_height-1): - topRightDotProduct += data[i, channel, yInTopLeft,xInTopLeft+1] * \ - out_grad[i, channel, yout,xout] - data_grad[i, channel,yInTopLeft, xInTopLeft+1] += (1-xWeightTopLeft) * \ - yWeightTopLeft * out_grad[i,channel,yout,xout] - # left bottom - if between(xInTopLeft,0,input_width-1) and between(yInTopLeft+1,0,input_height-1): - bottomLeftDotProduct += data[i, channel,yInTopLeft+1, xInTopLeft] * \ - out_grad[i,channel,yout,xout] - data_grad[i,channel,yInTopLeft+1,xInTopLeft]+=xWeightTopLeft * \ - (1-yWeightTopLeft)* out_grad[i,channel,yout,xout] - # right bottom - if between(xInTopLeft+1,0,input_width-1) and between(yInTopLeft+1,0,input_height-1): - bottomRightDotProduct += data[i,channel,yInTopLeft+1, xInTopLeft+1] * \ - out_grad[i,channel,yout,xout] - data_grad[i,channel,yInTopLeft+1,xInTopLeft+1]+= (1-xWeightTopLeft) * \ - (1-yWeightTopLeft)*out_grad[i,channel,yout,xout] - - yf = np.float32(-xWeightTopLeft * topLeftDotProduct + xWeightTopLeft*bottomLeftDotProduct - \ - (1-xWeightTopLeft)* topRightDotProduct + (1-xWeightTopLeft)*bottomRightDotProduct) - xf = np.float32(-yWeightTopLeft * topLeftDotProduct + yWeightTopLeft*topRightDotProduct - \ - (1-yWeightTopLeft)*bottomLeftDotProduct + (1-yWeightTopLeft)*bottomRightDotProduct) - - grid_grad[i,0,yout,xout] = xf * (input_width-1) / 2.0 - grid_grad[i,1,yout,xout] = yf * (input_height-1) / 2.0 - - return data_grad, grid_grad - - data = mx.sym.Variable('data') - grid = mx.sym.Variable('grid') - net = mx.sym.BilinearSampler(data=data,grid=grid) - - test_case = [[(1,3,15,16),(1,2,10,10)], - [(1,6,7,16),(1,2,10,4)], - [(1,7,3,16),(1,2,8,11)], - [(1,9,50,50),(1,2,50,50)]] - - for ctx in [default_context()]: - for item in test_case: - data_shape, grid_shape = item - exe = net.simple_bind(data=data_shape,grid=grid_shape,ctx=ctx,grad_req='write') - # check forward - exe.arg_dict['data'][:] = np.random.uniform(low=-0.1, high=0.1,size=data_shape).astype(np.float32) - exe.arg_dict['grid'][:] = np.random.uniform(low=-2, high=2, size=grid_shape).astype(np.float32) - exe.forward(is_train=True) - out = bilinear_forward_numpy(exe.arg_dict['data'].asnumpy(), exe.arg_dict['grid'].asnumpy()) - assert_almost_equal(exe.outputs[0].asnumpy(), out, rtol=1e-3,atol=1e-5) - - # check backward - out_grad = np.random.uniform(low=-0.01, high=0.01,size=data_shape[:2] + grid_shape[2:]).astype(np.float32) - exe.backward(mx.nd.array(out_grad)) - data_grad, grid_grad = bilinear_backward_numpy(out_grad,exe.arg_dict['data'].asnumpy(), - exe.arg_dict['grid'].asnumpy()) - assert_almost_equal(exe.grad_dict['data'].asnumpy(), data_grad, rtol=1e-3, atol=1e-5) - assert_almost_equal(exe.grad_dict['grid'].asnumpy(), grid_grad, rtol=1e-3, atol=1e-5) - - # check kAddTo - exe_addto = net.simple_bind(data=data_shape, grid=grid_shape, ctx=ctx, grad_req='add') - data_initial_grid = np.random.normal(size=exe_addto.grad_dict['data'].shape).astype(np.float32) - grid_initial_grid = np.random.normal(size=exe_addto.grad_dict['grid'].shape).astype(np.float32) - exe_addto.arg_dict['data'][:] = 
exe.arg_dict['data'][:] - exe_addto.arg_dict['grid'][:] = exe.arg_dict['grid'][:] - exe_addto.grad_dict['data'][:] = data_initial_grid - exe_addto.grad_dict['grid'][:] = grid_initial_grid - exe_addto.forward(is_train=True) - exe_addto.backward(mx.nd.array(out_grad)) - assert_almost_equal(exe_addto.grad_dict['data'].asnumpy(), data_grad + data_initial_grid, rtol=1e-3,atol=1e-5) - assert_almost_equal(exe_addto.grad_dict['grid'].asnumpy(), grid_grad + grid_initial_grid, rtol=1e-3,atol=1e-5) - - @with_seed() def test_index2d(): for _ in range(30): From de9a2e88c334cc9a157ceb904ecaebffed2779b2 Mon Sep 17 00:00:00 2001 From: Philip Hyunsu Cho Date: Wed, 5 Sep 2018 12:31:30 -0700 Subject: [PATCH 101/160] Fix flaky test test_operator_gpu.test_batchnorm_with_type (#11873) --- tests/python/gpu/test_operator_gpu.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/tests/python/gpu/test_operator_gpu.py b/tests/python/gpu/test_operator_gpu.py index 7b75275e0c98..1fc2c8e922d9 100644 --- a/tests/python/gpu/test_operator_gpu.py +++ b/tests/python/gpu/test_operator_gpu.py @@ -262,7 +262,6 @@ def test_fft(): @with_seed() -@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/10087") def test_batchnorm_with_type(): ctx_list_v1_2D = [ {'ctx': mx.cpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}}, @@ -288,12 +287,12 @@ def test_batchnorm_with_type(): ] ctx_list_v2_3D = [ - {'ctx': mx.cpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float16}}, - {'ctx': mx.cpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float32}}, - {'ctx': mx.cpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float64}}, - {'ctx': mx.gpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float16}}, - {'ctx': mx.gpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float32}}, - {'ctx': mx.gpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float64}} + {'ctx': mx.cpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float16}}, + {'ctx': mx.cpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float32}}, + {'ctx': mx.cpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float64}}, + {'ctx': mx.gpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float16}}, + {'ctx': mx.gpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float32}}, + {'ctx': mx.gpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float64}} ] # V1, 2D From d7111d357dd682e9e75c6adc5c73e6b74c5541dd Mon Sep 17 00:00:00 2001 From: Aaron Markham Date: Wed, 5 Sep 2018 14:31:42 -0700 Subject: [PATCH 102/160] Installation instructions consolidation (#12388) * initial edits for c++ instructions * consolidation and reorg of install docs * simplify python section * fix url rewrite to allow for testing * further simplify install * fix render issues * adjust formatting * adjust formatting * fix python/gpu/master * install footers * update footers * add mac dev setup link for osx * adjust formatting for footer * separate validation page * add toc * adjust formatting * break out julia and perl * fix pypi link * fix formatting * fix formatting * fix formatting * add build from source link * updating from PR feedback * add c++ and clojure to ubuntu guide; clarify c++ setup * added pip chart to install; updates from PR feedback * correct reference to c infer api --- 
docs/_static/js/options.js | 20 +- docs/install/build_from_source.md | 493 ++---- docs/install/c_plus_plus.md | 29 + docs/install/centos_setup.md | 98 +- docs/install/index.md | 2523 ++++++----------------------- docs/install/osx_setup.md | 11 + docs/install/ubuntu_setup.md | 181 ++- docs/install/validate_mxnet.md | 185 +++ docs/install/windows_setup.md | 14 +- 9 files changed, 1093 insertions(+), 2461 deletions(-) create mode 100644 docs/install/c_plus_plus.md create mode 100644 docs/install/validate_mxnet.md diff --git a/docs/_static/js/options.js b/docs/_static/js/options.js index 87e50b8aa865..b43f3919a313 100644 --- a/docs/_static/js/options.js +++ b/docs/_static/js/options.js @@ -8,7 +8,7 @@ $(document).ready(function () { function label(lbl) { return lbl.replace(/[ .]/g, '-').toLowerCase(); } - + function urlSearchParams(searchString) { let urlDict = new Map(); let searchParams = searchString.substring(1).split("&"); @@ -45,11 +45,11 @@ $(document).ready(function () { showContent(); if (window.location.href.indexOf("/install/index.html") >= 0) { if (versionSelect.indexOf(defaultVersion) >= 0) { - history.pushState(null, null, '/install/index.html?platform=' + platformSelect + '&language=' + languageSelect + '&processor=' + processorSelect); + history.pushState(null, null, 'index.html?platform=' + platformSelect + '&language=' + languageSelect + '&processor=' + processorSelect); } else { - history.pushState(null, null, '/install/index.html?version=' + versionSelect + '&platform=' + platformSelect + '&language=' + languageSelect + '&processor=' + processorSelect); + history.pushState(null, null, 'index.html?version=' + versionSelect + '&platform=' + platformSelect + '&language=' + languageSelect + '&processor=' + processorSelect); } - } + } } function showContent() { @@ -73,22 +73,22 @@ $(document).ready(function () { $('.current-version').html( $(this).text() + ' ' ); if ($(this).text().indexOf(defaultVersion) < 0) { if (window.location.search.indexOf("version") < 0) { - history.pushState(null, null, '/install/index.html' + window.location.search.concat( '&version=' + $(this).text() )); + history.pushState(null, null, 'index.html' + window.location.search.concat( '&version=' + $(this).text() )); } else { - history.pushState(null, null, '/install/index.html' + window.location.search.replace( urlParams.get('version'), $(this).text() )); + history.pushState(null, null, 'index.html' + window.location.search.replace( urlParams.get('version'), $(this).text() )); } } else if (window.location.search.indexOf("version") >= 0) { - history.pushState(null, null, '/install/index.html' + window.location.search.replace( 'version', 'prev' )); + history.pushState(null, null, 'index.html' + window.location.search.replace( 'version', 'prev' )); } } else if ($(this).hasClass("platforms")) { - history.pushState(null, null, '/install/index.html' + window.location.search.replace( urlParams.get('platform'), $(this).text() )); + history.pushState(null, null, 'index.html' + window.location.search.replace( urlParams.get('platform'), $(this).text() )); } else if ($(this).hasClass("languages")) { - history.pushState(null, null, '/install/index.html' + window.location.search.replace( urlParams.get('language'), $(this).text() )); + history.pushState(null, null, 'index.html' + window.location.search.replace( urlParams.get('language'), $(this).text() )); } else if ($(this).hasClass("processors")) { - history.pushState(null, null, '/install/index.html' + window.location.search.replace( urlParams.get('processor'), 
$(this).text() )); + history.pushState(null, null, 'index.html' + window.location.search.replace( urlParams.get('processor'), $(this).text() )); } showContent(); //window.location.search = window.location.search.replace( urlParams.get('version'), $(this).text() ); diff --git a/docs/install/build_from_source.md b/docs/install/build_from_source.md index b22ff8833e9d..6c0a4dab251a 100644 --- a/docs/install/build_from_source.md +++ b/docs/install/build_from_source.md @@ -1,22 +1,23 @@ # Build MXNet from Source -**NOTE:** For MXNet with Python installation, please refer to the [new install guide](http://mxnet.io/install/index.html). - -This document explains how to build MXNet from sources. Building MXNet from sources is a 2 step process. +This document explains how to build MXNet from source code. Building MXNet from source is a two step process. 1. Build the MXNet shared library, `libmxnet.so`, from [C++ source files](#build-the-shared-library) -2. Install the language binding for MXNet. MXNet supports - - [C++](#build-the-c-package), - [Scala](#build-the-scala-package), [R](#build-the-r-package), and - [Julia](#build-the-julia-package). +2. Install the [language bindings](#installing-mxnet-language-bindings) for MXNet. MXNet supports the following languages: + - Python + - C++ + - Clojure + - Julia + - Perl + - R + - Scala -## Build the shared library +## Prerequisites -### Prerequisites +You need C++ build tools and a BLAS library to build the MXNet shared library. If you want to run MXNet with GPUs, you will need to install [NVDIA CUDA and cuDNN](https://developer.nvidia.com/cuda-downloads) first. -You need C++ build tools and BLAS library to build MXNet shared library. If you want to run MXNet on GPUs, you need to install CUDA and CuDNN. -#### C++ build tools +### C++ build tools 1. A C++ compiler that supports C++ 11. [G++ (4.8 or later)](https://gcc.gnu.org/gcc-4.8/) or @@ -24,311 +25,138 @@ You need C++ build tools and BLAS library to build MXNet shared library. If you 2. [Git](https://git-scm.com/downloads) for downloading the sources from Github repository. -3. [GNU Make](https://www.gnu.org/software/make/) ([cmake](https://cmake.org/) - for Windows) to build the library. - - -Select your preferences and follow the instructions to install MXNet from sources. -
- -Then select the Linux distribution: -
- -- **Ubuntu** for systems supporting the `apt-get` - package management program -- **CentOS** for systems supporting the `yum` package - management program -- **Others** for general Linux-like systems building dependencies from scratch. - -
- -Install build tools and git on `Ubuntu >= 13.10` and `Debian >= 8`. - -```bash -sudo apt-get update && sudo apt-get install build-essential git -``` - -
- -Install build tools and git on `CentOS >= 7` and `Fedora >= 19`. - -```bash -sudo yum groupinstall -y "Development Tools" && sudo yum install -y git -``` - -
- -Installing both `git` and `make` by following instructions on the websites is -straightforward. Here we provide the instructions to build `gcc-4.8` from source codes. - -1. Install the 32-bit `libc` with one of the following system-specific commands: - - ```bash - sudo apt-get install libc6-dev-i386 # In Ubuntu - sudo yum install glibc-devel.i686 # In RHEL (Red Hat Linux) - sudo yum install glibc-devel.i386 # In CentOS 5.8 - sudo yum install glibc-devel.i686 # In CentOS 6/7 - ``` - -2. Download and extract the `gcc` source code with the prerequisites: - - ```bash - wget http://mirrors.concertpass.com/gcc/releases/gcc-4.8.5/gcc-4.8.5.tar.gz - tar -zxf gcc-4.8.5.tar.gz - cd gcc-4.8.5 - ./contrib/download_prerequisites - ``` - -3. Build `gcc` by using 10 threads and then install to `/usr/local` - - ```bash - mkdir release && cd release - ../configure --prefix=/usr/local --enable-languages=c,c++ - make -j10 - sudo make install - ``` - -4. Add the lib path to your configure file such as `~/.bashrc`: - - ```bash - export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib64 - ``` +3. [cmake](https://cmake.org/) is recommended. You may also use [GNU Make](https://www.gnu.org/software/make/) to build the library. -
- -1. If [Microsoft Visual Studio 2015](https://www.visualstudio.com/downloads/) is not already installed, download and install it. You can download and install the free community edition. -2. Download and Install [CMake](https://cmake.org/) if it is not already installed. - -
- -Install [Xcode](https://developer.apple.com/xcode/). - -
- -#### BLAS library +### BLAS library MXNet relies on the [BLAS](https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms) (Basic -Linear Algebra Subprograms) library for numerical computations. You can install -any one among [ATLAS](http://math-atlas.sourceforge.net/), -[OpenBLAS](http://www.openblas.net/) and -[MKL](https://software.intel.com/en-us/intel-mkl). - -
+Linear Algebra Subprograms) library for numerical computations. +Those can be extended with [LAPACK (Linear Algebra Package)](https://github.com/Reference-LAPACK/lapack), an additional set of mathematical functions. -```bash -sudo apt-get install libatlas-base-dev -``` +MXNet supports multiple mathematical backends for computations on the CPU: -
+* [Apple Accelerate](https://developer.apple.com/documentation/accelerate) +* [ATLAS](http://math-atlas.sourceforge.net/) +* [MKL](https://software.intel.com/en-us/intel-mkl) (MKL, MKLML) +* [MKLDNN](https://github.com/intel/mkl-dnn) +* [OpenBLAS](http://www.openblas.net/) -
+Usage of these is covered in more detail in the [build configurations](#build-configurations) section.

-
+### Optional -
+These might be optional, but they're typically desirable.

-You can follow this link to build
-[OpenBlas from source](https://github.com/xianyi/OpenBLAS#installation-from-source).
+* [OpenCV](http://opencv.org/) for Image Loading and Augmentation
+* [NVIDIA CUDA and cuDNN](https://developer.nvidia.com/cuda-downloads) for running MXNet with GPUs

-
+## Build Instructions by Operating System -macOS users can skip this step as `xcode` ships with a BLAS library. +Detailed instructions are provided per operating system. +You may jump to those, but it is recommended that you continue reading to understand more general build from source options. -
+| | | | | +|---|---|---|---| +| [macOS](osx_setup.html) | [Ubuntu](ubuntu_setup.html) | [CentOS/*unix](centos_setup.html) | [Windows](windows_setup.html) | +| [raspbian](raspian_setup.html) | [tx2](tx2_setup.html) | | | -
-1. Download pre-built binaries for [OpenBLAS](https://sourceforge.net/projects/openblas/files/) -2. Set the environment variable `OpenBLAS_HOME` to point to the OpenBLAS - directory that contains the `include/` and `lib/` directories. Typically, you - can find the directory in `C:\Program files (x86)\OpenBLAS\`. -
- -#### Optional: [OpenCV](http://opencv.org/) for Image Loading and Augmentation - -
+## Build +1. Clone the MXNet project. ```bash -sudo apt-get install libopencv-dev -``` - -
- -
- -```bash -sudo apt-get install opencv-devel +git clone --recursive https://github.com/apache/incubator-mxnet mxnet +cd mxnet ``` -
- -
- -To build OpenCV from source code, you need the [cmake](https://cmake.org) library. - -1. If you don't have cmake or if your version of cmake is earlier than 3.6.1, run the following commands to install a newer version of cmake: - - ```bash - wget https://cmake.org/files/v3.6/cmake-3.6.1-Linux-x86_64.tar.gz - tar -zxvf cmake-3.6.1-Linux-x86_64.tar.gz - alias cmake="cmake-3.6.1-Linux-x86_64/bin/cmake" - ``` - -2. To download and extract the OpenCV source code, run the following commands: - - ```bash - wget https://codeload.github.com/opencv/opencv/zip/2.4.13 - unzip 2.4.13 - cd opencv-2.4.13 - mkdir release - cd release/ - ``` - -3. Build OpenCV. The following commands build OpenCV with 10 threads. We - disabled GPU support, which might significantly slow down an MXNet program - running on a GPU processor. It also disables 1394 which might generate a - warning. Then install it on `/usr/local`. - - ```bash - cmake -D BUILD_opencv_gpu=OFF -D WITH_CUDA=OFF -D WITH_1394=OFF -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local .. - make -j10 - sudo make install - ``` +There is a configuration file for make, +[`make/config.mk`](https://github.com/apache/incubator-mxnet/blob/master/make/config.mk), that contains all the compilation options. You can edit it and then run `make`. -4. Add the lib path to your configuration such as `~/.bashrc`. - ```bash - export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:/usr/local/lib/pkgconfig/ - ``` +## Build Configurations -
-
+`cmake` is recommended for building MXNet, however you may use `make` instead. -
-First download and install [OpenCV](http://opencv.org/releases.html), then set -the environment variable `OpenCV_DIR` to point to the OpenCV build directory. +### Math Library Selection +It is useful to consider your math library selection first. -
+The default order of choice for the libraries if found follows the path from the most +(recommended) to less performant backends. +The following lists show this order by library and `cmake` switch. -#### Optional: [CUDA](https://developer.nvidia.com/cuda-downloads)/[cuDNN](https://developer.nvidia.com/cudnn) for Nvidia GPUs +For desktop platforms (x86_64): -MXNet is compatible with both CUDA 7.5 and 8.0. It is recommended to use cuDNN 5. +1. MKLDNN (submodule) | `USE_MKLDNN` +2. MKL | `USE_MKL_IF_AVAILABLE` +3. MKLML (downloaded) | `USE_MKLML` +4. Apple Accelerate | `USE_APPLE_ACCELERATE_IF_AVAILABLE` | Mac only +5. OpenBLAS | `BLAS` | Options: Atlas, Open, MKL, Apple -
+Note: If `USE_MKL_IF_AVAILABLE` is set to False then MKLML and MKLDNN will be disabled as well for configuration +backwards compatibility. -Install CUDA 7.5 and cuDNN 5 on Ubuntu 14.04 +For embedded platforms (all other and if cross compiled): -```bash -wget http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1404/x86_64/cuda-repo-ubuntu1404_7.5-18_amd64.deb -sudo dpkg -i cuda-repo-ubuntu1404_7.5-18_amd64.deb -echo "deb http://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1404/x86_64 /" | sudo tee /etc/apt/sources.list.d/nvidia-ml.list -sudo apt-get update -sudo apt-get install -y linux-image-extra-`uname -r` linux-headers-`uname -r` linux-image-`uname -r` -sudo apt-get install -y cuda libcudnn5-dev=5.0.5-1+cuda7.5 -``` - -
- -### Build - -
- -First clone the recent codes - -```bash -git clone --recursive https://github.com/dmlc/mxnet -cd mxnet -``` - -File -[`make/config.mk`](https://github.com/dmlc/mxnet/blob/master/make/config.mk) -contains all the compilation options. You can edit it and then `make`. There are -some example build options - -If you want to build MXNet with C++ language binding, please make sure you read [Build the C++ package](#build-the-c-package) first. - -
+1. OpenBLAS | `BLAS` | Options: Atlas, Open, MKL, Apple -- Build without using OpenCV. `-j` runs multiple jobs against multi-core CPUs. +You can set the BLAS library explicitly by setting the BLAS variable to: - ```bash - make -j USE_OPENCV=0 - ``` +* Atlas +* Open +* MKL +* Apple -- Build with both GPU and OpenCV support +See the [cmake/ChooseBLAS.cmake](https://github.com/apache/incubator-mxnet/blob/master/cmake/ChooseBlas.cmake) file for the options. - ```bash - make -j USE_BLAS=openblas USE_CUDA=1 USE_CUDA_PATH=/usr/local/cuda USE_CUDNN=1 - ``` +Intel's MKL (Math Kernel Library) is one of the most powerful math libraries +https://software.intel.com/en-us/mkl -
+It has the following flavors:

-
+* MKL is a complete math library, containing all the functionality found in ATLAS, OpenBlas and LAPACK. It is free under + community support licensing (https://software.intel.com/en-us/articles/free-mkl), + but needs to be downloaded and installed manually. -- Build with the default BLAS library and clang installed with `xcode` (OPENMP - is disabled because it is not supported in default by clang). +* MKLML is a subset of MKL. It contains a smaller number of functions to reduce the + size of the download and reduce the number of dynamic libraries user needs. - ```bash - make -j USE_BLAS=apple USE_OPENCV=0 USE_OPENMP=0 - ``` + -
+* MKLDNN is a separate open-source library, it can be used separately from MKL or MKLML. It is + shipped as a subrepo with MXNet source code (see 3rdparty/mkldnn or the [mkl-dnn project](https://github.com/intel/mkl-dnn)) -
+Since the full MKL library is almost always faster than any other BLAS library it's turned on by default, +however it needs to be downloaded and installed manually before doing `cmake` configuration. +Register and download on the [Intel performance libraries website](https://software.seek.intel.com/performance-libraries). -Use [CMake](https://cmake.org/) to create a Visual Studio solution in ```./build```. +Note: MKL is supported only for desktop builds and the framework itself supports the following +hardware: -In Visual Studio, open the solution file,```.sln```, and compile it. -These commands produce a library called ```mxnet.dll``` in the ```./build/Release/``` or ```./build/Debug``` folder. +* Intel® Xeon Phi™ processor +* Intel® Xeon® processor +* Intel® Core™ processor family +* Intel Atom® processor -
+If you have a different processor, you can still try to use MKL, but performance results are
+unpredictable.

-
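Whichever backend is selected, a rough way to check from Python that a tuned BLAS or MKL library is actually being used is to time a large matrix multiplication. This is only an informal sketch (it assumes nothing beyond a working `mxnet` install), not an official benchmark.

```python
import time
import mxnet as mx

# time a large GEMM; a well-tuned BLAS or MKL backend should finish this quickly
a = mx.nd.ones((2048, 2048))
b = mx.nd.ones((2048, 2048))
mx.nd.waitall()               # finish setup before starting the timer
start = time.time()
c = mx.nd.dot(a, b)
mx.nd.waitall()               # dot() is asynchronous, so block before stopping the timer
print("2048 x 2048 GEMM took %.3f s" % (time.time() - start))
```

Comparing this number across builds (for example OpenBLAS vs. MKL) gives a quick feel for the effect of the math library choice.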
-## Build MXNet using NCCL +### Build MXNet with NCCL - Download and install the latest NCCL library from NVIDIA. - Note the directory path in which NCCL libraries and header files are installed. - Ensure that the installation directory contains ```lib``` and ```include``` folders. -- Ensure that the prerequisites for using NCCL such as Cuda libraries are met. +- Ensure that the prerequisites for using NCCL such as Cuda libraries are met. - Append the ```config.mk``` file with following, in addition to the CUDA related options. - USE_NCCL=1 - USE_NCCL_PATH=path-to-nccl-installation-folder + ``` bash echo "USE_NCCL=1" >> make/config.mk echo "USE_NCCP_PATH=path-to-nccl-installation-folder" >> make/config.mk @@ -339,7 +167,7 @@ cp make/config.mk . make -j"$(nproc)" ``` -## Validation +#### Validating NCCL - Follow the steps to install MXNet Python binding. - Comment the following line in ```test_nccl.py``` file at ```incubator-mxnet/tests/python/gpu/test_nccl.py``` ``` bash @@ -350,143 +178,56 @@ make -j"$(nproc)" nosetests --verbose tests/python/gpu/test_nccl.py ``` -## Recommendation for best performance +**Recommendation to get the best performance out of NCCL:** It is recommended to set environment variable NCCL_LAUNCH_MODE to PARALLEL when using NCCL version 2.1 or newer. -
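Once MXNet is built with `USE_NCCL=1`, one way to exercise the NCCL path from Python is the `nccl` KVStore type. This is only a hedged sketch: it assumes a machine with at least two GPUs and that the `nccl` KVStore is available in your build, and it is separate from the `test_nccl.py` check above.

```python
import mxnet as mx

# create an NCCL-backed key-value store (raises an error on non-NCCL builds)
kv = mx.kv.create('nccl')
print(kv.type)

shape = (2, 3)
kv.init(3, mx.nd.ones(shape))

# push one gradient per GPU, then pull the aggregated result back to each GPU
gpus = [mx.gpu(i) for i in range(2)]
grads = [mx.nd.ones(shape, ctx=g) for g in gpus]
kv.push(3, grads)
out = [mx.nd.zeros(shape, ctx=g) for g in gpus]
kv.pull(3, out=out)
print(out[0].asnumpy())  # values aggregated across the two GPUs
```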
- -## Build the C++ package -The C++ package has the same prerequisites as the MXNet library, you should also have `python` installed. (Both `python` 2 and 3 are supported) - -To enable C++ package, just add `USE_CPP_PACKAGE=1` in the build options when building the MXNet shared library. - -## Build the R package +### Build MXNet with Language Packages +* To enable C++ package, just add `USE_CPP_PACKAGE=1` when you run `make` or `cmake`. -The R package requires `R` to be installed. -
- -Follow the below instructions to install the latest R on Ubuntu 14.04 (trusty) and also the libraries used -to build other R package dependencies. +### Usage Examples +* `-j` runs multiple jobs against multi-core CPUs. Example using all cores on Linux: ```bash -echo "deb http://cran.rstudio.com/bin/linux/ubuntu trusty/" >> /etc/apt/sources.list -gpg --keyserver keyserver.ubuntu.com --recv-key E084DAB9 -gpg -a --export E084DAB9 | apt-key add - - -apt-get update -apt-get install -y r-base r-base-dev libxml2-dev libxt-dev libssl-dev +make -j$(nproc) ``` -
- -Install the required R package dependencies: +* Build without using OpenCV: ```bash -cd R-package -Rscript -e "install.packages('devtools', repo = 'https://cran.rstudio.com')" -Rscript -e "library(devtools); library(methods); options(repos=c(CRAN='https://cran.rstudio.com')); install_deps(dependencies = TRUE)" +make USE_OPENCV=0 ``` -Next, build and install the MXNet R package: +* Build with both OpenBLAS, GPU, and OpenCV support: ```bash -cd .. -make rpkg +make -j USE_BLAS=openblas USE_CUDA=1 USE_CUDA_PATH=/usr/local/cuda USE_CUDNN=1 ``` -## Build the Scala package - -Both JDK and Maven are required to build the Scala package. - -
+* Build on **macOS** with the default BLAS library (Apple Accelerate) and Clang installed with `xcode` (OPENMP is disabled because it is not supported by the Apple version of Clang): ```bash -apt-get install -y software-properties-common -add-apt-repository -y ppa:webupd8team/java -apt-get update -echo "oracle-java8-installer shared/accepted-oracle-license-v1-1 select true" | debconf-set-selections -apt-get install -y oracle-java8-installer -apt-get install -y oracle-java8-set-default -apt-get install -y maven +make -j USE_BLAS=apple USE_OPENCV=0 USE_OPENMP=0 ``` -
- -The following command builds the `.jar` package: +* To use OpenMP on **macOS** you need to install the Clang compiler, `llvm` (the one provided by Apple does not support OpenMP): ```bash -make scalapkg +brew install llvm +make -j USE_BLAS=apple USE_OPENMP=1 ``` -which can be found by `ls scala-package/assembly/*/target/*SNAPSHOT.jar`. - -Optionally, we can install Scala for the interactive interface. +## Installing MXNet Language Bindings +After building MXNet's shared library, you can install other language bindings. (Except for C++. You need to build this when you build MXNet from source.) -
- -```bash -wget http://downloads.lightbend.com/scala/2.11.8/scala-2.11.8.deb -dpkg -i scala-2.11.8.deb -rm scala-2.11.8.deb -``` - -
- -Then we can start `scala` with `mxnet` imported by - -```bash -scala -cp scala-package/assembly/*/target/*SNAPSHOT.jar -``` - -## Build the Julia package - -We need to first install Julia. - -
- -The following commands install Julia 0.5.1 - -```bash -wget -q https://julialang.s3.amazonaws.com/bin/linux/x64/0.5/julia-0.5.1-linux-x86_64.tar.gz -tar -zxf julia-0.5.1-linux-x86_64.tar.gz -rm julia-0.5.1-linux-x86_64.tar.gz -ln -s $(pwd)/julia-6445c82d00/bin/julia /usr/bin/julia -``` - -
- -Next set the environment variable `MXNET_HOME=/path/to/mxnet` so that Julia -can find the pre-built library. - -Install the Julia package for MXNet with: - -```bash -julia -e 'Pkg.add("MXNet")' -``` - -### Build the Perl package - -Run the following command from the MXNet source root directory to build the MXNet Perl package: - -```bash - sudo apt-get install libmouse-perl pdl cpanminus swig libgraphviz-perl - cpanm -q -L "${HOME}/perl5" Function::Parameters Hash::Ordered PDL::CCS - - MXNET_HOME=${PWD} - export LD_LIBRARY_PATH=${MXNET_HOME}/lib - export PERL5LIB=${HOME}/perl5/lib/perl5 - - cd ${MXNET_HOME}/perl-package/AI-MXNetCAPI/ - perl Makefile.PL INSTALL_BASE=${HOME}/perl5 - make install - - cd ${MXNET_HOME}/perl-package/AI-NNVMCAPI/ - perl Makefile.PL INSTALL_BASE=${HOME}/perl5 - make install - - cd ${MXNET_HOME}/perl-package/AI-MXNet/ - perl Makefile.PL INSTALL_BASE=${HOME}/perl5 - make install -``` +The following table provides links to each language binding by operating system: +| | Linux | macOS | Windows | +|---|---|---|---| +| Python | [Linux](ubuntu_setup.html#install-mxnet-for-python) | [macOS](osx_setup.html) | [Windows](windows_setup.html#install-mxnet-for-python) | +| C++ | [Linux](c_plus_plus.html) | [macOS](c_plus_plus.html) | [Windows](c_plus_plus.html) | +| Clojure | [Linux](https://github.com/apache/incubator-mxnet/tree/master/contrib/clojure-package) | [macOS](https://github.com/apache/incubator-mxnet/tree/master/contrib/clojure-package) | n/a | +| Julia | [Linux](ubuntu_setup.html#install-the-mxnet-package-for-julia) | [macOS](osx_setup.html#install-the-mxnet-package-for-julia) | [Windows](windows_setup.html#install-the-mxnet-package-for-julia) | +| Perl | [Linux](ubuntu_setup.html#install-the-mxnet-package-for-perl) | [macOS](osx_setup.html#install-the-mxnet-package-for-perl) | [Windows](n/a) | +| R | [Linux](ubuntu_setup.html#install-the-mxnet-package-for-r) | [macOS](osx_setup.html#install-the-mxnet-package-for-r) | [Windows](windows_setup.html#install-the-mxnet-package-for-r) | +| Scala | [Linux](scala_setup.html) | [macOS](scala_setup.html) | n/a | diff --git a/docs/install/c_plus_plus.md b/docs/install/c_plus_plus.md new file mode 100644 index 000000000000..6078877c27c8 --- /dev/null +++ b/docs/install/c_plus_plus.md @@ -0,0 +1,29 @@ +## Build the C++ package +The C++ package has the same prerequisites as the MXNet library. + +To enable C++ package, just add `USE_CPP_PACKAGE=1` in the [build from source](build_from_source.html) options when building the MXNet shared library. + +For example to build MXNet with GPU support and the C++ package, OpenCV, and OpenBLAS, from the project root you would run: + +```bash +make -j USE_CPP_PACKAGE=1 USE_OPENCV=1 USE_BLAS=openblas USE_CUDA=1 +``` + +You may also want to add the MXNet shared library to your `LD_LIBRARY_PATH`: + +```bash +export LD_LIBRARY_PATH=~/incubator-mxnet/lib +``` + +Setting the `LD_LIBRARY_PATH` is required to run the examples mentioned in the following section. + +## C++ Example Code +You can find C++ code examples in the `cpp-package/example` folder of the MXNet project. Refer to the [cpp-package's README](https://github.com/apache/incubator-mxnet/tree/master/cpp-package) for instructions on building the examples. 
+ +## Tutorials + +* [MXNet C++ API Basics](https://mxnet.incubator.apache.org/tutorials/c++/basics.html) + +## Related Topics + +* [Image Classification using MXNet's C Predict API](https://github.com/apache/incubator-mxnet/tree/master/example/image-classification/predict-cpp) diff --git a/docs/install/centos_setup.md b/docs/install/centos_setup.md index 42a4fcb0eb89..f63099bcf623 100644 --- a/docs/install/centos_setup.md +++ b/docs/install/centos_setup.md @@ -1,8 +1,90 @@ - - - -

- - This content is moved to a new MXNet install page. Redirecting... -

+# Installing MXNet on CentOS and other non-Ubuntu Linux systems + +1. Install build tools and git on `CentOS >= 7` and `Fedora >= 19`: + +```bash +sudo yum groupinstall -y "Development Tools" && sudo yum install -y git +``` + +2. Install Atlas: + +```bash +sudo yum install atlas-devel +``` + +Installing both `git` and `cmake` or `make` by following instructions on the websites is +straightforward. Here we provide the instructions to build `gcc-4.8` from source codes. + +3. Install the 32-bit `libc` with one of the following system-specific commands: + +```bash +sudo apt-get install libc6-dev-i386 # In Ubuntu +sudo yum install glibc-devel.i686 # In RHEL (Red Hat Linux) +sudo yum install glibc-devel.i386 # In CentOS 5.8 +sudo yum install glibc-devel.i686 # In CentOS 6/7 +``` + +4. Download and extract the `gcc` source code with the prerequisites: + +```bash +wget http://mirrors.concertpass.com/gcc/releases/gcc-4.8.5/gcc-4.8.5.tar.gz +tar -zxf gcc-4.8.5.tar.gz +cd gcc-4.8.5 +./contrib/download_prerequisites +``` + +5. Build `gcc` by using 10 threads and then install to `/usr/local` + +```bash +mkdir release && cd release +../configure --prefix=/usr/local --enable-languages=c,c++ +make -j10 +sudo make install +``` + +6. Add the lib path to your configure file such as `~/.bashrc`: + +```bash +export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib64 +``` + +7. Build [OpenBLAS from source](https://github.com/xianyi/OpenBLAS#installation-from-source). + +8. Build OpenCV + +To build OpenCV from source code, you need the [cmake](https://cmake.org) library. + +* If you don't have cmake or if your version of cmake is earlier than 3.6.1, run the following commands to install a newer version of cmake: + + ```bash + wget https://cmake.org/files/v3.6/cmake-3.6.1-Linux-x86_64.tar.gz + tar -zxvf cmake-3.6.1-Linux-x86_64.tar.gz + alias cmake="cmake-3.6.1-Linux-x86_64/bin/cmake" + ``` + +* To download and extract the OpenCV source code, run the following commands: + + ```bash + wget https://codeload.github.com/opencv/opencv/zip/2.4.13 + unzip 2.4.13 + cd opencv-2.4.13 + mkdir release + cd release/ + ``` + +* Build OpenCV. The following commands build OpenCV with 10 threads. We + disabled GPU support, which might significantly slow down an MXNet program + running on a GPU processor. It also disables 1394 which might generate a + warning. Then install it on `/usr/local`. + + ```bash + cmake -D BUILD_opencv_gpu=OFF -D WITH_CUDA=OFF -D WITH_1394=OFF -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local .. + make -j10 + sudo make install + ``` + +* Add the lib path to your configuration such as `~/.bashrc`. + + ```bash + export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:/usr/local/lib/pkgconfig/ + ``` diff --git a/docs/install/index.md b/docs/install/index.md index 833bedf08afa..4a6af31cee3c 100644 --- a/docs/install/index.md +++ b/docs/install/index.md @@ -1,6 +1,6 @@ # Installing MXNet -Indicate your preferred configuration. Then, follow the customized commands to install *MXNet*. +Indicate your preferred configuration. Then, follow the customized commands to install MXNet.
- +
@@ -78,88 +77,27 @@ Indicate your preferred configuration. Then, follow the customized commands to i
- -The following installation instructions have been tested on Ubuntu 14.04 and 16.04. - -
-
- -**Step 1** Install prerequisites - wget and latest pip. - -Installing *MXNet* with pip requires a latest version of `pip`. Install the latest version of `pip` by issuing the following command in the terminal. - -```bash -$ sudo apt-get update -$ sudo apt-get install -y wget python gcc -$ wget https://bootstrap.pypa.io/get-pip.py && sudo python get-pip.py -``` -
-**Step 2** Install MXNet with OpenBLAS acceleration. - -```bash -$ pip install mxnet -``` - -**Step 3** Install [Graphviz](http://www.graphviz.org/). (Optional, needed for graph visualization using `mxnet.viz` package). -```bash -sudo apt-get install graphviz -pip install graphviz ``` - -**Step 4** Validate the installation by running simple MXNet code described [here](#validate-mxnet-installation). - -**Experimental Choice** If You would like to install mxnet with Intel MKL, try the experimental pip package with MKL: -```bash -$ pip install mxnet-mkl +$ pip install mxnet ```
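Right after `pip install mxnet` finishes, a minimal smoke test along the lines of the validation page is worth running; this sketch assumes only the CPU package and is not tied to any particular MXNet version listed here.

```python
import mxnet as mx

# create a small array and run a computation to confirm the install works
a = mx.nd.ones((2, 3))
b = a * 2 + 1
print(b.asnumpy())  # expect a 2x3 array filled with 3.0
```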
-**Step 2** Install MXNet with OpenBLAS acceleration. - -```bash -$ pip install mxnet==1.1.0 -``` - -**Step 3** Install [Graphviz](http://www.graphviz.org/). (Optional, needed for graph visualization using `mxnet.viz` package). -```bash -sudo apt-get install graphviz -pip install graphviz ``` - -**Step 4** Validate the installation by running simple MXNet code described [here](#validate-mxnet-installation). - -**Experimental Choice** If You would like to install mxnet with Intel MKL, try the experimental pip package with MKL: -```bash -$ pip install mxnet-mkl==1.1.0 +$ pip install mxnet==1.1.0 ```
-**Step 2** Install MXNet with OpenBLAS acceleration. - -```bash -$ pip install mxnet==1.0.0 -``` - -**Step 3** Install [Graphviz](http://www.graphviz.org/). (Optional, needed for graph visualization using `mxnet.viz` package). -```bash -sudo apt-get install graphviz -pip install graphviz ``` - -**Step 4** Validate the installation by running simple MXNet code described [here](#validate-mxnet-installation). - -**Experimental Choice** If You would like to install mxnet with Intel MKL, try the experimental pip package with MKL: -```bash -$ pip install mxnet-mkl==1.0.0 +$ pip install mxnet==1.0.0 ```
@@ -167,210 +105,159 @@ $ pip install mxnet-mkl==1.0.0
- -**Step 2** Install MXNet with OpenBLAS acceleration. - -```bash -$ pip install mxnet==0.12.1 -``` - -For MXNet 0.12.0 - - -```bash -$ pip install mxnet==0.12.0 ``` - -**Step 3** Install [Graphviz](http://www.graphviz.org/). (Optional, needed for graph visualization using `mxnet.viz` package). -```bash -sudo apt-get install graphviz -pip install graphviz +$ pip install mxnet==0.12.1 ``` -**Step 4** Validate the installation by running simple MXNet code described [here](#validate-mxnet-installation). +For MXNet 0.12.0: -**Experimental Choice** If You would like to install mxnet with Intel MKL, try the experimental pip package with MKL: -```bash -$ pip install mxnet-mkl==0.12.1 ``` - -For MXNet 0.12.0 - - -```bash -$ pip install mxnet-mkl==0.12.0 +$ pip install mxnet==0.12.0 ```
- -**Step 2** Install MXNet with OpenBLAS acceleration. - -```bash +``` $ pip install mxnet==0.11.0 ``` -**Step 3** Install [Graphviz](http://www.graphviz.org/). (Optional, needed for graph visualization using `mxnet.viz` package). -```bash -sudo apt-get install graphviz -pip install graphviz -``` +
-**Step 4** Validate the installation by running simple MXNet code described [here](#validate-mxnet-installation). +
-**Experimental Choice** If You would like to install mxnet with Intel MKL, try the experimental pip package with MKL: -```bash -$ pip install mxnet-mkl==0.11.0 +``` +$ pip install mxnet --pre ``` -
+
+
+Most MXNet versions offer an experimental MKL pip package that will be much faster when running on Intel hardware. +Check the chart below for other options, refer to PyPI for other MXNet pip packages, or validate your MXNet installation. -
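As a concrete example of the MKL option mentioned above, the MKL variant can be installed in place of the plain `mxnet` package. This is only a sketch: confirm on PyPI that an MKL build exists for the MXNet version and platform you need.

```
# confirm on PyPI that an MKL build exists for your MXNet version and platform
$ pip install mxnet-mkl
```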
+pip packages +
-**Step 2** Install MXNet with OpenBLAS acceleration. -```bash -$ pip install mxnet --pre -``` +
+
-**Step 3** Install [Graphviz](http://www.graphviz.org/). (Optional, needed for graph visualization using `mxnet.viz` package). -```bash -sudo apt-get install graphviz -pip install graphviz -``` +Docker images with *MXNet* are available at [Docker Hub](https://hub.docker.com/r/mxnet/). -**Step 4** Validate the installation by running simple MXNet code described [here](#validate-mxnet-installation). +**Step 1** Install Docker on your machine by following the [docker installation instructions](https://docs.docker.com/engine/installation/linux/ubuntu/#install-using-the-repository). -**Experimental Choice** If You would like to install mxnet with Intel MKL, try the experimental pip package with MKL: -```bash -$ pip install mxnet-mkl --pre -``` +*Note* - You can install Community Edition (CE) to get started with *MXNet*. -
+**Step 2** [Optional] Post installation steps to manage Docker as a non-root user. -
+Follow the four steps in this [docker documentation](https://docs.docker.com/engine/installation/linux/linux-postinstall/#manage-docker-as-a-non-root-user) to allow managing docker containers without *sudo*. -
-
+If you skip this step, you need to use *sudo* each time you invoke Docker. -**Step 1** Install virtualenv for Ubuntu. +**Step 3** Pull the MXNet docker image. -```bash -$ sudo apt-get update -$ sudo apt-get install -y python-dev python-virtualenv +``` +$ docker pull mxnet/python # Use sudo if you skip Step 2 ``` -**Step 2** Create and activate virtualenv environment for MXNet. +You can list docker images to see if mxnet/python docker image pull was successful. -Following command creates a virtualenv environment at `~/mxnet` directory. However, you can choose any directory by replacing `~/mxnet` with a directory of your choice. +``` +$ docker images # Use sudo if you skip Step 2 -```bash -$ virtualenv --system-site-packages ~/mxnet +REPOSITORY TAG IMAGE ID CREATED SIZE +mxnet/python latest 00d026968b3c 3 weeks ago 1.41 GB ``` -Activate the virtualenv environment created for *MXNet*. +**Step 4** Validate the installation. -```bash -$ source ~/mxnet/bin/activate -``` +
-After activating the environment, you should see the prompt as below. +
+
-```bash -(mxnet)$ -``` +To build from source, refer to the MXNet Ubuntu installation guide. -**Step 3** Install MXNet in the active virtualenv environment. +
-Installing *MXNet* with pip requires a latest version of `pip`. Install the latest version of `pip` by issuing the following command. +
+ -```bash -$ pip install --upgrade pip -``` + +
+
-Install *MXNet* with OpenBLAS acceleration. - -```bash -$ pip install mxnet +``` +$ pip install mxnet-cu92 ```
-Install *MXNet* with OpenBLAS acceleration. - -```bash -$ pip install mxnet==1.1.0 +``` +$ pip install mxnet-cu91==1.1.0 ```
-Install *MXNet* with OpenBLAS acceleration. - -```bash -$ pip install mxnet==1.0.0 +``` +$ pip install mxnet-cu90==1.0.0 ```
-
-Install *MXNet* with OpenBLAS acceleration. - -```bash -$ pip install mxnet==0.12.1 ``` - -For *MXNet* 0.12.0 - - -```bash -$ pip install mxnet==0.12.0 +$ pip install mxnet-cu90==0.12.1 ```
-Install *MXNet* with OpenBLAS acceleration. - -```bash -$ pip install mxnet==0.11.0 +``` +$ pip install mxnet-cu80==0.11.0 ```
-Install *MXNet* with OpenBLAS acceleration. - -```bash -$ pip install mxnet --pre +``` +$ pip install mxnet-cu92 --pre ```
+
+Most MXNet versions offer an experimental MKL pip package that will be much faster when running on Intel hardware. +Check the chart below for other options, refer to PyPI for other MXNet pip packages, or validate your MXNet installation. +pip packages -**Step 4** Install [Graphviz](http://www.graphviz.org/). (Optional, needed for graph visualization using `mxnet.viz` package). -```bash -sudo apt-get install graphviz -pip install graphviz -``` +**NOTES:** -**Step 5** Validate the installation by running simple *MXNet* code described [here](#validate-mxnet-installation). +CUDA should be installed first. Instructions can be found in the CUDA dependencies section of the MXNet Ubuntu installation guide. -**Note** You can read more about virtualenv [here](https://virtualenv.pypa.io/en/stable/userguide/). +**Important:** Make sure your installed CUDA version matches the CUDA version in the pip package. Check your CUDA version with the following command: + +``` +nvcc --version +``` -
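As an example of the MKL option mentioned above, a CUDA 9.2 build with MKL support can be installed in place of `mxnet-cu92`. This is a hedged sketch: confirm on PyPI that a matching `*mkl` wheel exists for your CUDA version and MXNet release.

```
# assumes CUDA 9.2 is installed; check PyPI for the exact mkl variant name
$ pip install mxnet-cu92mkl
```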
+You can either upgrade your CUDA install or install the MXNet package that supports your CUDA version. +
+
Docker images with *MXNet* are available at [Docker Hub](https://hub.docker.com/r/mxnet/). @@ -385,2146 +272,806 @@ Follow the four steps in this [docker documentation](https://docs.docker.com/eng If you skip this step, you need to use *sudo* each time you invoke Docker. -**Step 3** Pull the MXNet docker image. +**Step 3** Install *nvidia-docker-plugin* following the [installation instructions](https://github.com/NVIDIA/nvidia-docker/wiki/Installation). *nvidia-docker-plugin* is required to enable the usage of GPUs from the docker containers. -```bash -$ docker pull mxnet/python # Use sudo if you skip Step 2 +**Step 4** Pull the MXNet docker image. + +``` +$ docker pull mxnet/python:gpu # Use sudo if you skip Step 2 ``` You can list docker images to see if mxnet/python docker image pull was successful. -```bash +``` $ docker images # Use sudo if you skip Step 2 REPOSITORY TAG IMAGE ID CREATED SIZE -mxnet/python latest 00d026968b3c 3 weeks ago 1.41 GB +mxnet/python gpu 493b2683c269 3 weeks ago 4.77 GB ``` -**Step 4** Validate the installation by running simple MXNet code described [here](#validate-mxnet-installation). +**Step 5** Validate the installation.
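A minimal smoke test for the GPU image, assuming *nvidia-docker* is installed as described above (the exact wrapper command may differ depending on your nvidia-docker version):

```
# runs a small NDArray computation on the GPU inside the container
$ nvidia-docker run -it mxnet/python:gpu python -c "import mxnet as mx; print((mx.nd.ones((2,3), ctx=mx.gpu()) * 2 + 1).asnumpy())"
```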

+Refer to the MXNet Ubuntu installation guide. -Building *MXNet* from source is a 2 step process. -1. Build the *MXNet* core shared library, `libmxnet.so`, from the C++ sources. -2. Build the language specific bindings. Example - Python bindings, Scala bindings. -**Minimum Requirements** -1. [GCC 4.8](https://gcc.gnu.org/gcc-4.8/) or later to compile C++ 11. -2. [GNU Make](https://www.gnu.org/software/make/) +
+
+ + -
-**Build the MXNet core shared library** +
+
-**Step 1** Install build tools and git. -```bash -$ sudo apt-get update -$ sudo apt-get install -y build-essential git -``` +The default version of R that is installed with `apt-get` is insufficient. You will need to first [install R v3.4.4+ and build MXNet from source](ubuntu_setup.html#install-the-mxnet-package-for-r). -**Step 2** Install OpenBLAS. +After you have setup R v3.4.4+ and MXNet, you can build and install the MXNet R bindings with the following, assuming that `incubator-mxnet` is the source directory you used to build MXNet as follows: -*MXNet* uses [BLAS](https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms) and [LAPACK](https://en.wikipedia.org/wiki/LAPACK) libraries for accelerated numerical computations on CPU machine. There are several flavors of BLAS/LAPACK libraries - [OpenBLAS](http://www.openblas.net/), [ATLAS](http://math-atlas.sourceforge.net/) and [MKL](https://software.intel.com/en-us/intel-mkl). In this step we install OpenBLAS. You can choose to install ATLAS or MKL. -```bash -$ sudo apt-get install -y libopenblas-dev liblapack-dev +``` +$ cd incubator-mxnet +$ make rpkg ``` -**Step 3** Install OpenCV. +
-*MXNet* uses [OpenCV](http://opencv.org/) for efficient image loading and augmentation operations. -```bash -$ sudo apt-get install -y libopencv-dev -``` -**Step 4** Download MXNet sources and build MXNet core shared library. You can clone the repository as described in the following code block, or you may try the download links for your desired MXNet version. +
+ +The default version of R that is installed with `apt-get` is insufficient. You will need to first [install R v3.4.4+ and build MXNet from source](ubuntu_setup.html#install-the-mxnet-package-for-r). + +After you have setup R v3.4.4+ and MXNet, you can build and install the MXNet R bindings with the following, assuming that `incubator-mxnet` is the source directory you used to build MXNet as follows: -```bash -$ git clone --recursive https://github.com/apache/incubator-mxnet +``` $ cd incubator-mxnet -$ make -j $(nproc) USE_OPENCV=1 USE_BLAS=openblas +$ make rpkg ``` -*Note* - USE_OPENCV and USE_BLAS are make file flags to set compilation options to use OpenCV and BLAS library. You can explore and use more compilation options in `make/config.mk`. +
+
-
-**Build the MXNet Python binding** +
+
+
+You can use the Maven packages defined in the following `dependency` to include MXNet in your Scala project. Please refer to the MXNet-Scala setup guide for a detailed set of instructions to help you with the setup process. -**Step 1** Install prerequisites - python, setup-tools, python-pip and libfortran (required for Numpy). +maven badge -```bash -$ sudo apt-get install -y python-dev python-setuptools python-pip libgfortran3 +```html + + org.apache.mxnet + mxnet-full_2.11-linux-x86_64-gpu + ``` +
+
-**Step 2** Install the MXNet Python binding. - -```bash -$ cd python -$ pip install -e . -``` +
+
+You can use the Maven packages defined in the following `dependency` to include MXNet in your Scala project. Please refer to the MXNet-Scala setup guide for a detailed set of instructions to help you with the setup process. -Note that the `-e` flag is optional. It is equivalent to `--editable` and means that if you edit the source files, these changes will be reflected in the package installed. +maven badge -**Step 3** Install [Graphviz](http://www.graphviz.org/). (Optional, needed for graph visualization using `mxnet.viz` package). -```bash -sudo apt-get install graphviz -pip install graphviz +```html + + org.apache.mxnet + mxnet-full_2.11-linux-x86_64-cpu + ``` +
+
+
-**Step 4** Validate the installation by running simple MXNet code described [here](#validate-mxnet-installation). - - - + -
+
+
+
+Refer to the Perl section of the MXNet Ubuntu installation guide. -The following installation instructions have been tested on Ubuntu 14.04 and 16.04. +
+
-**Prerequisites** -Install the following NVIDIA libraries to setup *MXNet* with GPU support: +
+
+
+

To enable the C++ package, build from source using `make USE_CPP_PACKAGE=1`. +
Refer to the MXNet C++ setup guide for more info.
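A minimal sketch of a CPU-only build with the C++ package enabled, assuming the build tools, OpenBLAS, and OpenCV prerequisites from the Ubuntu setup guide are already installed:

```
$ git clone --recursive https://github.com/apache/incubator-mxnet
$ cd incubator-mxnet
# USE_CPP_PACKAGE=1 generates the C++ API headers alongside libmxnet.so
$ make -j $(nproc) USE_CPP_PACKAGE=1 USE_OPENCV=1 USE_BLAS=openblas
```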

+
+
+
+
+For more installation options, refer to the MXNet Ubuntu installation guide. -1. Install CUDA 9.0 following the NVIDIA's [installation guide](http://docs.nvidia.com/cuda/cuda-installation-guide-linux/). -2. Install cuDNN 7 for CUDA 9.0 following the NVIDIA's [installation guide](https://developer.nvidia.com/cudnn). You may need to register with NVIDIA for downloading the cuDNN library. +
-**Note:** Make sure to add CUDA install path to `LD_LIBRARY_PATH`. -Example - *export LD_LIBRARY_PATH=/usr/local/cuda/lib64/:$LD_LIBRARY_PATH* + +
+
+
-
- -**Step 1** Install prerequisites - wget and latest pip. - -Installing *MXNet* with pip requires a latest version of `pip`. Install the latest version of `pip` by issuing the following command in the terminal. +
-```bash -$ sudo apt-get update -$ sudo apt-get install -y wget python -$ wget https://bootstrap.pypa.io/get-pip.py && sudo python get-pip.py +``` +$ pip install mxnet ``` -
+
-**Step 2** Install *MXNet* with GPU support using CUDA 9.2 -**Important**: Make sure your installed CUDA version matches the CUDA version in the pip package. -Check your CUDA version with the following command: +
-```bash -nvcc --version ``` - -You can either upgrade your CUDA install or install the MXNet package that supports your CUDA version. - -```bash -$ pip install mxnet-cu92 +$ pip install mxnet==1.1.0 ``` -Refer to [pypi for older packages](https://pypi.org/project/mxnet/). +
-**Step 3** Install [Graphviz](http://www.graphviz.org/). (Optional, needed for graph visualization using `mxnet.viz` package). -```bash -sudo apt-get install graphviz -pip install graphviz -``` -**Step 4** Validate the installation by running simple MXNet code described [here](#validate-mxnet-installation). +
-**Experimental Choice** If You would like to install mxnet with Intel MKL, try the experimental pip package with MKL: -```bash -$ pip install mxnet-cu90mkl +``` +$ pip install mxnet==1.0.0 ``` -
- - -
- -**Step 2** Install *MXNet* with GPU support using CUDA 9.1 +
-**Important**: Make sure your installed CUDA version matches the CUDA version in the pip package. -Check your CUDA version with the following command: +
-```bash -nvcc --version +``` +$ pip install mxnet=0.12.1 ``` -You can either upgrade your CUDA install or install the MXNet package that supports your CUDA version. +
-```bash -$ pip install mxnet-cu91==1.1.0 -``` -Refer to [pypi for older packages](https://pypi.org/project/mxnet/). +
-**Step 3** Install [Graphviz](http://www.graphviz.org/). (Optional, needed for graph visualization using `mxnet.viz` package). -```bash -sudo apt-get install graphviz -pip install graphviz ``` +$ pip install mxnet==0.11.0 +``` + +
-**Step 4** Validate the installation by running simple MXNet code described [here](#validate-mxnet-installation). +
-**Experimental Choice** If You would like to install MXNet with Intel MKL, try the experimental pip package with MKL: -```bash -$ pip install mxnet-cu91mkl==1.1.0 +``` +$ pip install mxnet --pre ``` -Refer to [pypi for older packages](https://pypi.org/project/mxnet/). +
+
+Most MXNet versions offer an experimental MKL pip package that will be much faster when running on Intel hardware. +Check the chart below for other options, refer to PyPI for other MXNet pip packages, or validate your MXNet installation. -
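If an MKL build is published for your platform and MXNet version (MKL wheels are not available for every platform, so check PyPI first), it can be installed in place of the plain `mxnet` package:

```
# only if a matching MKL wheel exists on PyPI for your platform
$ pip install mxnet-mkl
```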
+pip packages -
+
-**Step 2** Install *MXNet* with GPU support using CUDA 9.0 - -```bash -$ pip install mxnet-cu90==1.0.0 -``` - -**Step 3** Install [Graphviz](http://www.graphviz.org/). (Optional, needed for graph visualization using `mxnet.viz` package). -```bash -sudo apt-get install graphviz -pip install graphviz -``` - -**Step 4** Validate the installation by running simple MXNet code described [here](#validate-mxnet-installation). - -**Experimental Choice** If You would like to install mxnet with Intel MKL, try the experimental pip package with MKL: -```bash -$ pip install mxnet-cu90mkl==1.0.0 -``` - -
+
+
-
+Docker images with *MXNet* are available at [Docker Hub](https://hub.docker.com/r/mxnet/). -**Step 2** Install *MXNet* with GPU support using CUDA 9.0 +**Step 1** Install Docker on your machine by following the [docker installation instructions](https://docs.docker.com/docker-for-mac/install/#install-and-run-docker-for-mac). -```bash -$ pip install mxnet-cu90==0.12.1 -``` +*Note* - You can install Community Edition (CE) to get started with *MXNet*. -For *MXNet* 0.12.0 - +**Step 2** Pull the MXNet docker image. -```bash -$ pip install mxnet-cu90==0.12.0 ``` - -**Step 3** Install [Graphviz](http://www.graphviz.org/). (Optional, needed for graph visualization using `mxnet.viz` package). -```bash -sudo apt-get install graphviz -pip install graphviz +$ docker pull mxnet/python ``` -**Step 4** Validate the installation by running simple MXNet code described [here](#validate-mxnet-installation). +You can list docker images to see if mxnet/python docker image pull was successful. -**Experimental Choice** If You would like to install mxnet with Intel MKL, try the experimental pip package with MKL: -```bash -$ pip install mxnet-cu90mkl==0.12.1 ``` +$ docker images -For *MXNet* 0.12.0 - - -```bash -$ pip install mxnet-cu90mkl==0.12.0 +REPOSITORY TAG IMAGE ID CREATED SIZE +mxnet/python latest 00d026968b3c 3 weeks ago 1.41 GB ``` -
- - -
+**Step 4** Validate the installation. -**Step 2** Install *MXNet* with GPU support using CUDA 8.0 +
-```bash -$ pip install mxnet-cu80==0.11.0 -``` -**Step 3** Install [Graphviz](http://www.graphviz.org/). (Optional, needed for graph visualization using `mxnet.viz` package). -```bash -sudo apt-get install graphviz -pip install graphviz -``` +
+
-**Step 4** Validate the installation by running simple MXNet code described [here](#validate-mxnet-installation). +To build from source, refer to the MXNet macOS installation guide. -**Experimental Choice** If You would like to install MXNet with Intel MKL, try the experimental pip package with MKL: -```bash -$ pip install mxnet-cu80mkl==0.11.0 -``` +MXNet developers should refer to the MXNet wiki's Developer Setup on Mac. +
+
+
-
+ +
+
+
+This option is only available by building from source. Refer to the MXNet macOS installation guide. +
+
-
+
+
-
+Refer to the MXNet macOS installation guide. +MXNet developers should refer to the MXNet wiki's Developer Setup on Mac.
+
+
+
-**Step 1** Install virtualenv for Ubuntu. -```bash -$ sudo apt-get update -$ sudo apt-get install -y python-dev python-virtualenv -``` + -**Step 2** Create and activate virtualenv environment for MXNet. +
+
+
-Following command creates a virtualenv environment at `~/mxnet` directory. However, you can choose any directory by replacing `~/mxnet` with a directory of your choice. +Install the latest version (3.5.1+) of R from [CRAN](https://cran.r-project.org/bin/macosx/). +You can [build MXNet-R from source](osx_setup.html#install-the-mxnet-package-for-r), or you can use a pre-built binary: -```bash -$ virtualenv --system-site-packages ~/mxnet +```r +cran <- getOption("repos") +cran["dmlc"] <- "https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/R/CRAN/" +options(repos = cran) +install.packages("mxnet") ``` -Activate the virtualenv environment created for *MXNet*. +
-```bash -$ source ~/mxnet/bin/activate -``` -After activating the environment, you should see the prompt as below. +
+
+Will be available soon. -```bash -(mxnet)$ -``` +
+
-**Step 3** Install MXNet in the active virtualenv environment. +
+
+
+You can use the Maven packages defined in the following `dependency` to include MXNet in your Scala project. Please refer to the MXNet-Scala setup guide for a detailed set of instructions to help you with the setup process. -Installing *MXNet* with pip requires a latest version of `pip`. Install the latest version of `pip` by issuing the following command. +maven badge -```bash -(mxnet)$ pip install --upgrade pip +```html + + org.apache.mxnet + mxnet-full_2.11-osx-x86_64-cpu + ``` +
+
+
+Not available at this time.
-
+
+
-**Important**: Make sure your installed CUDA version matches the CUDA version in the pip package. -Check your CUDA version with the following command: -```bash -nvcc --version -``` -You can either upgrade your CUDA install or install the MXNet package that supports your CUDA version. +
+
+
+Refer to the Julia section of the MXNet macOS installation guide. -Install *MXNet* with GPU support using CUDA 9.2: +
+
-```bash -(mxnet)$ pip install mxnet-cu92 -``` +
+
+
+Refer to the Perl section of the MXNet macOS installation guide. -Refer to [pypi for older packages](https://pypi.org/project/mxnet/). +
+
-
-
+
+
+

To enable the C++ package, build from source using `make USE_CPP_PACKAGE=1`. +
Refer to the MXNet C++ setup guide for more info.

+
+
+
+For more installation options, refer to the MXNet macOS installation guide. +
-**Important**: Make sure your installed CUDA version matches the CUDA version in the pip package. -Check your CUDA version with the following command: -```bash -nvcc --version + +
+
+
+
+
+ +``` +$ pip install mxnet ``` -You can either upgrade your CUDA install or install the MXNet package that supports your CUDA version. +
-Install *MXNet* with GPU support using CUDA 9.1: +
-```bash -(mxnet)$ pip install mxnet-cu91==1.1.0 ``` - -Refer to [pypi for older packages](https://pypi.org/project/mxnet/). +$ pip install mxnet==1.1.0 +```
-
-Install *MXNet* with GPU support using CUDA 9.0. - -```bash -(mxnet)$ pip install mxnet-cu90==1.0.0 ``` -Refer to [pypi for older packages](https://pypi.org/project/mxnet/). +$ pip install mxnet==1.0.0 +```
-
-Install *MXNet* with GPU support using CUDA 9.0. - -```bash -(mxnet)$ pip install mxnet-cu90==0.12.1 ``` - -Refer to [pypi for older packages](https://pypi.org/project/mxnet/). +$ pip install mxnet==0.12.1 +```
-
-Install *MXNet* with GPU support using CUDA 8.0. - -```bash -(mxnet)$ pip install mxnet-cu80==0.11.0 +``` +$ pip install mxnet==0.11.0 ```
-**Important**: Make sure your installed CUDA version matches the CUDA version in the pip package. -Check your CUDA version with the following command: - -```bash -nvcc --version ``` - -You can either upgrade your CUDA install or install the MXNet package that supports your CUDA version. - -Install *MXNet* with GPU support using CUDA 9.2. - -```bash -(mxnet)$ pip install mxnet-cu92 --pre +$ pip install mxnet --pre ``` -Refer to [pypi for older packages](https://pypi.org/project/mxnet/). -
+
+Most MXNet versions offer an experimental MKL pip package that will be much faster when running on Intel hardware. +Check the chart below for other options, refer to PyPI for other MXNet pip packages, or validate your MXNet installation. -**Step 4** Install [Graphviz](http://www.graphviz.org/). (Optional, needed for graph visualization using `mxnet.viz` package). -```bash -sudo apt-get install graphviz -pip install graphviz -``` - -**Step 5** Validate the installation by running simple *MXNet* code described [here](#validate-mxnet-installation). +pip packages -**Note** You can read more about virtualenv [here](https://virtualenv.pypa.io/en/stable/userguide/). -
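For example, the MKL variant of whichever release you selected above can be installed instead of the plain package; append `==<version>` to pin a specific release and check PyPI for availability:

```
# confirm on PyPI that an MKL build exists for your platform and version
$ pip install mxnet-mkl
```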
+
-
+

-Docker images with *MXNet* are available at [Docker Hub](https://hub.docker.com/r/mxnet/). +Refer to the MXNet Windows installation guide. -**Step 1** Install Docker on your machine by following the [docker installation instructions](https://docs.docker.com/engine/installation/linux/ubuntu/#install-using-the-repository). -*Note* - You can install Community Edition (CE) to get started with *MXNet*. +
+
-**Step 2** [Optional] Post installation steps to manage Docker as a non-root user. -Follow the four steps in this [docker documentation](https://docs.docker.com/engine/installation/linux/linux-postinstall/#manage-docker-as-a-non-root-user) to allow managing docker containers without *sudo*. +
+
+
-If you skip this step, you need to use *sudo* each time you invoke Docker. +``` +$ pip install mxnet-cu92 +``` -**Step 3** Install *nvidia-docker-plugin* following the [installation instructions](https://github.com/NVIDIA/nvidia-docker/wiki/Installation). *nvidia-docker-plugin* is required to enable the usage of GPUs from the docker containers. +
-**Step 4** Pull the MXNet docker image. +
-```bash -$ docker pull mxnet/python:gpu # Use sudo if you skip Step 2 +``` +$ pip install mxnet-cu91==1.1.0 ``` -You can list docker images to see if mxnet/python docker image pull was successful. +
-```bash -$ docker images # Use sudo if you skip Step 2 +
-REPOSITORY TAG IMAGE ID CREATED SIZE -mxnet/python gpu 493b2683c269 3 weeks ago 4.77 GB +``` +$ pip install mxnet-cu90==1.0.0 ``` -**Step 5** Validate the installation by running simple MXNet code described [here](#validate-mxnet-installation). +
-
+
-
+``` +$ pip install mxnet-cu90==0.12.1 +``` -
+
-Building *MXNet* from source is a 2 step process. -1. Build the *MXNet* core shared library, `libmxnet.so`, from the C++ sources. -2. Build the language specific bindings. Example - Python bindings, Scala bindings. +
-**Minimum Requirements** -1. [GCC 4.8](https://gcc.gnu.org/gcc-4.8/) or later to compile C++ 11. -2. [GNU Make](https://www.gnu.org/software/make/) +``` +$ pip install mxnet-cu80==0.11.0 +``` -
+
-**Build the MXNet core shared library** +
-**Step 1** Install build tools and git. -```bash -$ sudo apt-get update -$ sudo apt-get install -y build-essential git ``` -**Step 2** Install OpenBLAS. - -*MXNet* uses [BLAS](https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms) and [LAPACK](https://en.wikipedia.org/wiki/LAPACK) libraries for accelerated numerical computations on CPU machine. There are several flavors of BLAS/LAPACK libraries - [OpenBLAS](http://www.openblas.net/), [ATLAS](http://math-atlas.sourceforge.net/) and [MKL](https://software.intel.com/en-us/intel-mkl). In this step we install OpenBLAS. You can choose to install ATLAS or MKL. -```bash -$ sudo apt-get install -y libopenblas-dev liblapack-dev +$ pip install mxnet-cu92 --pre ``` -**Step 3** Install OpenCV. +
+
+Most MXNet versions offer an experimental MKL pip package that will be much faster when running on Intel hardware. +Check the chart below for other options, refer to PyPI for other MXNet pip packages, or validate your MXNet installation. -*MXNet* uses [OpenCV](http://opencv.org/) for efficient image loading and augmentation operations. -```bash -$ sudo apt-get install -y libopencv-dev -``` +pip packages -**Step 4** Download MXNet sources and build MXNet core shared library. You can clone the repository as described in the following code block, or you may try the download links for your desired MXNet version. +**NOTES:** -```bash -$ git clone --recursive https://github.com/apache/incubator-mxnet -$ cd incubator-mxnet -$ make -j $(nproc) USE_OPENCV=1 USE_BLAS=openblas USE_CUDA=1 USE_CUDA_PATH=/usr/local/cuda USE_CUDNN=1 -``` +[Anaconda](https://www.anaconda.com/download/) is recommended. -*Note* - USE_OPENCV, USE_BLAS, USE_CUDA, USE_CUDA_PATH AND USE_CUDNN are make file flags to set compilation options to use OpenCV, OpenBLAS, CUDA and cuDNN libraries. You can explore and use more compilation options in `make/config.mk`. Make sure to set USE_CUDA_PATH to right CUDA installation path. In most cases it is - */usr/local/cuda*. +CUDA should be installed first. Instructions can be found in the CUDA dependencies section of the MXNet Ubuntu installation guide. -
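As an example of the MKL option mentioned above, a CUDA-enabled MKL variant can be installed in place of the plain `mxnet-cu92` package, provided a matching wheel exists on PyPI for your CUDA version:

```
# assumes CUDA 9.2; check PyPI for the mkl variant matching your CUDA version
$ pip install mxnet-cu92mkl
```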
+**Important:** Make sure your installed CUDA version matches the CUDA version in the pip package. Check your CUDA version with the following command: -**Install the MXNet Python binding** +``` +nvcc --version +``` -**Step 1** Install prerequisites - python, setup-tools, python-pip and libfortran (required for Numpy).. +Refer to [#8671](https://github.com/apache/incubator-mxnet/issues/8671) for status on CUDA 9.1 support. -```bash -$ sudo apt-get install -y python-dev python-setuptools python-pip libgfortran3 -``` +You can either upgrade your CUDA install or install the MXNet package that supports your CUDA version. -**Step 2** Install the MXNet Python binding. +
-```bash -$ cd python -$ pip install -e . -``` +
+
-Note that the `-e` flag is optional. It is equivalent to `--editable` and means that if you edit the source files, these changes will be reflected in the package installed. +To build from source, refer to the MXNet Windows installation guide. -**Step 3** Install [Graphviz](http://www.graphviz.org/). (Optional, needed for graph visualization using `mxnet.viz` package). -```bash -sudo apt-get install graphviz -pip install graphviz -``` -**Step 4** Validate the installation by running simple MXNet code described [here](#validate-mxnet-installation). +
+
+
-
-
- - +
+
-The default version of R that is installed with `apt-get` is insufficient. You will need to first [install R v3.4.4+ and build MXNet from source](ubuntu_setup.html#install-the-mxnet-package-for-r). - -After you have setup R v3.4.4+ and MXNet, you can build and install the MXNet R bindings with the following, assuming that `incubator-mxnet` is the source directory you used to build MXNet as follows: +Install the latest version (3.5.1+) of R from [CRAN](https://cran.r-project.org/bin/windows/). +You can [build MXNet-R from source](windows_setup.html#install-mxnet-package-for-r), or you can use a pre-built binary: -```bash -$ cd incubator-mxnet -$ make rpkg +```r +cran <- getOption("repos") +cran["dmlc"] <- "https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/R/CRAN/" +options(repos = cran) +install.packages("mxnet") ``` -
- +
+
-The default version of R that is installed with `apt-get` is insufficient. You will need to first [install R v3.4.4+ and build MXNet from source](ubuntu_setup.html#install-the-mxnet-package-for-r). - -After you have setup R v3.4.4+ and MXNet, you can build and install the MXNet R bindings with the following, assuming that `incubator-mxnet` is the source directory you used to build MXNet as follows: +You can [build MXNet-R from source](windows_setup.html#install-mxnet-package-for-r), or you can use a pre-built binary: -```bash -$ cd incubator-mxnet -$ make rpkg +```r + cran <- getOption("repos") + cran["dmlc"] <- "https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/R/CRAN/GPU/cu92" + options(repos = cran) + install.packages("mxnet") ``` +Change cu92 to cu80, cu90 or cu91 based on your CUDA toolkit version. Currently, MXNet supports these versions of CUDA.
- - +
-
- -You can use the Maven packages defined in the following `dependency` to include MXNet in your Scala project. Please refer to the MXNet-Scala setup guide for a detailed set of instructions to help you with the setup process. - -maven badge - -```html - - org.apache.mxnet - mxnet-full_2.11-linux-x86_64-gpu - -``` -
-
- -
- -You can use the Maven packages defined in the following `dependency` to include MXNet in your Scala project. Please refer to the MXNet-Scala setup guide for a detailed set of instructions to help you with the setup process. - -maven badge - -```html - - org.apache.mxnet - mxnet-full_2.11-linux-x86_64-cpu - -``` -
-
+
+
+MXNet-Scala for Windows is not yet available. +
+
- -
+
- -Follow the installation instructions [in this guide](./ubuntu_setup.md) to set up MXNet. +
+Refer to the Julia section of the MXNet Windows installation guide.
-
+
+ +
-

To build the C++ package, please refer to this guide.

+
+

To enable the C++ package, build from source using `make USE_CPP_PACKAGE=1`. +
Refer to the MXNet C++ setup guide for more info.


-
- - - - - -
-
-
- -The following installation instructions have been tested on OSX Sierra and El Capitan. +
+
+For more installation options, refer to the MXNet Windows installation guide. +
-
-
+ -**Step 1** Install prerequisites - Homebrew, python development tools. +
-```bash -# Install Homebrew -$ /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" -$ export PATH=/usr/local/bin:/usr/local/sbin:$PATH +AWS Marketplace distributes Deep Learning AMIs (Amazon Machine Image) with MXNet pre-installed. You can launch one of these Deep Learning AMIs by following instructions in the [AWS Deep Learning AMI Developer Guide](http://docs.aws.amazon.com/dlami/latest/devguide/what-is-dlami.html). -# Install python development tools - python2.7, pip, python-setuptools -$ brew install python -``` +You can also run distributed deep learning with *MXNet* on AWS using [Cloudformation Template](https://github.com/awslabs/deeplearning-cfn/blob/master/README.md). -**Step 2** Install MXNet with OpenBLAS acceleration. +
-Installing *MXNet* with pip requires a latest version of `pip`. Install the latest version of `pip` by issuing the following command. -```bash -$ pip install --upgrade pip -$ pip install --upgrade setuptools -``` + +
+
-
+MXNet supports the Debian based Raspbian ARM based operating system so you can run MXNet on Raspberry Pi Devices. -Then use pip to install MXNet: +These instructions will walk through how to build MXNet for the Raspberry Pi and install the Python bindings for the library. -```bash -$ pip install mxnet -``` -
+You can do a dockerized cross compilation build on your local machine or a native build on-device. +The complete MXNet library and its requirements can take almost 200MB of RAM, and loading large models with the library can take over 1GB of RAM. Because of this, we recommend running MXNet on the Raspberry Pi 3 or an equivalent device that has more than 1 GB of RAM and a Secure Digital (SD) card that has at least 4 GB of free memory. -
+**Cross compilation build (Experimental)** -Then use pip to install MXNet: +## Docker installation +**Step 1** Install Docker on your machine by following the [docker installation instructions](https://docs.docker.com/engine/installation/linux/ubuntu/#install-using-the-repository). -```bash -$ pip install mxnet==1.1.0 -``` +*Note* - You can install Community Edition (CE) -
+**Step 2** [Optional] Post installation steps to manage Docker as a non-root user. +Follow the four steps in this [docker documentation](https://docs.docker.com/engine/installation/linux/linux-postinstall/#manage-docker-as-a-non-root-user) to allow managing docker containers without *sudo*. -
+## Build -Then use pip to install MXNet: +The following command will build a container with dependencies and tools and then compile MXNet for +ARMv7. The resulting artifact will be located in `build/mxnet-x.x.x-py2.py3-none-any.whl`, copy this +file to your Raspberry Pi. -```bash -$ pip install mxnet==1.0.0 +``` +ci/build.py -p armv7 ``` -
- -
+## Install -Then use pip to install MXNet: +Create a virtualenv and install the package we created previously. -```bash -$ pip install mxnet=0.12.1 ``` - -For MXNet 0.12.0 - - -```bash -$ pip install mxnet=0.12.0 +virtualenv -p `which python3` mxnet_py3 +source mxnet_py3/bin/activate +pip install mxnet-x.x.x-py2.py3-none-any.whl ``` -
- - -
- -Then use pip to install MXNet: - -```bash -$ pip install mxnet==0.11.0 -``` +**Native Build** -
+Installing MXNet is a two-step process: -
+1. Build the shared library from the MXNet C++ source code. +2. Install the supported language-specific packages for MXNet. -Then use pip to install MXNet: +**Step 1** Build the Shared Library -```bash -$ pip install mxnet --pre -``` +On Raspbian versions Wheezy and later, you need the following dependencies: -
+- Git (to pull code from GitHub) -**Step 3** Install [Graphviz](http://www.graphviz.org/). (Optional, needed for graph visualization using `mxnet.viz` package). -```bash -$ brew install graphviz -$ pip install graphviz -``` +- libblas (for linear algebraic operations) -**Step 4** Validate the installation by running simple MXNet code described [here](#validate-mxnet-installation). +- libopencv (for computer vision operations. This is optional if you want to save RAM and Disk Space) -
+- A C++ compiler that supports C++ 11. The C++ compiler compiles and builds MXNet source code. Supported compilers include the following: + - [G++ (4.8 or later)](https://gcc.gnu.org/gcc-4.8/). Make sure to use gcc 4 and not 5 or 6 as there are known bugs with these compilers. + - [Clang (3.9 - 6)](https://clang.llvm.org/) -
-
+Install these dependencies using the following commands in any directory: -**Step 1** Install prerequisites - Homebrew, python development tools. +``` + sudo apt-get update + sudo apt-get -y install git cmake ninja-build build-essential g++-4.9 c++-4.9 liblapack* libblas* libopencv* libopenblas* python3-dev virtualenv +``` -```bash -# Install Homebrew -$ /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" -$ export PATH=/usr/local/bin:/usr/local/sbin:$PATH +Clone the MXNet source code repository using the following `git` command in your home directory: +``` + git clone https://github.com/apache/incubator-mxnet.git --recursive + cd incubator-mxnet +``` -# Install python development tools - python2.7, pip, python-setuptools -$ brew install python +Build: +``` + mkdir -p build && cd build + cmake \ + -DUSE_SSE=OFF \ + -DUSE_CUDA=OFF \ + -DUSE_OPENCV=ON \ + -DUSE_OPENMP=ON \ + -DUSE_MKL_IF_AVAILABLE=OFF \ + -DUSE_SIGNAL_HANDLER=ON \ + -DCMAKE_BUILD_TYPE=Release \ + -GNinja .. + ninja -j$(nproc) ``` +Some compilation units require memory close to 1GB, so it's recommended that you enable swap as +explained below and be cautious about increasing the number of jobs when building (-j) -**Step 2** Install virtualenv for macOS. +Executing these commands start the build process, which can take up to a couple hours, and creates a file called `libmxnet.so` in the build directory. -```bash -$ pip install virtualenv +If you are getting build errors in which the compiler is being killed, it is likely that the +compiler is running out of memory (especially if you are on Raspberry Pi 1, 2 or Zero, which have +less than 1GB of RAM), this can often be rectified by increasing the swapfile size on the Pi by +editing the file /etc/dphys-swapfile and changing the line CONF_SWAPSIZE=100 to CONF_SWAPSIZE=1024, +then running: +``` + sudo /etc/init.d/dphys-swapfile stop + sudo /etc/init.d/dphys-swapfile start + free -m # to verify the swapfile size has been increased ``` -**Step 3** Create and activate virtualenv environment for MXNet. +**Step 2** Install MXNet Python Bindings -Following command creates a virtualenv environment at `~/mxnet` directory. However, you can choose any directory by replacing `~/mxnet` with a directory of your choice. +To install Python bindings run the following commands in the MXNet directory: -```bash -$ virtualenv --system-site-packages ~/mxnet ``` - -Activate the virtualenv environment created for *MXNet*. - -```bash -$ source ~/mxnet/bin/activate + cd python + pip install --upgrade pip + pip install -e . ``` -After activating the environment, you should see the prompt as below. +Note that the `-e` flag is optional. It is equivalent to `--editable` and means that if you edit the source files, these changes will be reflected in the package installed. -```bash -(mxnet)$ +Alternatively you can create a whl package installable with pip with the following command: +``` +ci/docker/runtime_functions.sh build_wheel python/ $(realpath build) ``` -**Step 4** Install MXNet in the active virtualenv environment. -Installing *MXNet* with pip requires a latest version of `pip`. Install the latest version of `pip` by issuing the following command. +You are now ready to run MXNet on your Raspberry Pi device. You can get started by following the tutorial on [Real-time Object Detection with MXNet On The Raspberry Pi](http://mxnet.io/tutorials/embedded/wine_detector.html). 
-```bash -(mxnet)$ pip install --upgrade pip -(mxnet)$ pip install --upgrade setuptools -``` +*Note - Because the complete MXNet library takes up a significant amount of the Raspberry Pi's limited RAM, when loading training data or large models into memory, you might have to turn off the GUI and terminate running processes to free RAM.* -
+
-Install *MXNet* with OpenBLAS acceleration. -```bash -(mxnet)$ pip install mxnet -``` +
-
+# Nvidia Jetson TX family -
+MXNet supports the Ubuntu Arch64 based operating system so you can run MXNet on NVIDIA Jetson Devices. -Install *MXNet* with OpenBLAS acceleration. +These instructions will walk through how to build MXNet for the Pascal based [NVIDIA Jetson TX2](http://www.nvidia.com/object/embedded-systems-dev-kits-modules.html) and install the corresponding python language bindings. -```bash -(mxnet)$ pip install mxnet==1.1.0 -``` +For the purposes of this install guide we will assume that CUDA is already installed on your Jetson device. -
+**Install MXNet** -
+Installing MXNet is a two-step process: -Install *MXNet* with OpenBLAS acceleration. +1. Build the shared library from the MXNet C++ source code. +2. Install the supported language-specific packages for MXNet. -```bash -(mxnet)$ pip install mxnet==1.0.0 -``` +**Step 1** Build the Shared Library -
+You need the following additional dependencies: +- Git (to pull code from GitHub) -
+- libatlas (for linear algebraic operations) -Install *MXNet* with OpenBLAS acceleration. +- libopencv (for computer vision operations) -```bash -(mxnet)$ pip install mxnet==0.12.1 -``` +- python pip (to load relevant python packages for our language bindings) -For *MXNet* 0.12.0 - +Install these dependencies using the following commands in any directory: -```bash -(mxnet)$ pip install mxnet==0.12.0 +``` + sudo apt-get update + sudo apt-get -y install git build-essential libatlas-base-dev libopencv-dev graphviz python-pip + sudo pip install pip --upgrade + sudo pip install setuptools numpy --upgrade + sudo pip install graphviz jupyter ``` +Clone the MXNet source code repository using the following `git` command in your home directory: +``` + git clone https://github.com/apache/incubator-mxnet.git --recursive + cd incubator-mxnet +``` -
- -
+Edit the Makefile to install the MXNet with CUDA bindings to leverage the GPU on the Jetson: +``` + cp make/crosscompile.jetson.mk config.mk +``` -Install *MXNet* with OpenBLAS acceleration. +Edit the Mshadow Makefile to ensure MXNet builds with Pascal's hardware level low precision acceleration by editing 3rdparty/mshadow/make/mshadow.mk and adding the following after line 122: +``` +MSHADOW_CFLAGS += -DMSHADOW_USE_PASCAL=1 +``` -```bash -(mxnet)$ pip install mxnet==0.11.0 +Now you can build the complete MXNet library with the following command: +``` + make -j $(nproc) ``` -
+Executing this command creates a file called `libmxnet.so` in the mxnet/lib directory. -
+**Step 2** Install MXNet Python Bindings -Install *MXNet* with OpenBLAS acceleration. +To install Python bindings run the following commands in the MXNet directory: -```bash -(mxnet)$ pip install mxnet --pre ``` - -
- - -**Step 5** Install [Graphviz](http://www.graphviz.org/). (Optional, needed for graph visualization using `mxnet.viz` package). -```bash -$ brew install graphviz -(mxnet)$ pip install graphviz + cd python + pip install --upgrade pip + pip install -e . ``` -**Step 6** Validate the installation by running simple *MXNet* code described [here](#validate-mxnet-installation). - -**Note** You can read more about virtualenv [here](https://virtualenv.pypa.io/en/stable/userguide/). +Note that the `-e` flag is optional. It is equivalent to `--editable` and means that if you edit the source files, these changes will be reflected in the package installed. -
+Add the mxnet folder to the path: +``` + cd .. + export MXNET_HOME=$(pwd) + echo "export PYTHONPATH=$MXNET_HOME/python:$PYTHONPATH" >> ~/.rc + source ~/.rc +``` -
-
+You are now ready to run MXNet on your NVIDIA Jetson TX2 device. -Docker images with *MXNet* are available at [Docker Hub](https://hub.docker.com/r/mxnet/). +
+
-**Step 1** Install Docker on your machine by following the [docker installation instructions](https://docs.docker.com/docker-for-mac/install/#install-and-run-docker-for-mac). -*Note* - You can install Community Edition (CE) to get started with *MXNet*. + -**Step 2** Pull the MXNet docker image. -```bash -$ docker pull mxnet/python -``` - -You can list docker images to see if mxnet/python docker image pull was successful. - -```bash -$ docker images - -REPOSITORY TAG IMAGE ID CREATED SIZE -mxnet/python latest 00d026968b3c 3 weeks ago 1.41 GB -``` - -**Step 4** Validate the installation by running simple MXNet code described [here](#validate-mxnet-installation). - -
- - -
-
- -**Prerequisites** - -If not already installed, [download and install Xcode](https://developer.apple.com/xcode/) (or [insall it from the App Store](https://itunes.apple.com/us/app/xcode/id497799835)) for macOS. [Xcode](https://en.wikipedia.org/wiki/Xcode) is an integrated development environment for macOS containing a suite of software development tools like C/C++ compilers, BLAS library and more. - -
- -Building *MXNet* from source is a 2 step process. -1. Build the *MXNet* core shared library, `libmxnet.so`, from the C++ sources. -2. Build the language specific bindings. Example - Python bindings, Scala bindings. - -Make sure you have installed Xcode before proceeding further. - -
- -All the instructions to build *MXNet* core shared library and *MXNet* Python bindings are compiled as one helper *bash* script. You can use [this bash script](https://raw.githubusercontent.com/dmlc/mxnet/master/setup-utils/install-mxnet-osx-python.sh) to build *MXNet* for Python, from source, on macOS. - -**Step 1** Download the bash script for building MXNet from source. - -```bash -$ curl -O https://raw.githubusercontent.com/dmlc/mxnet/master/setup-utils/install-mxnet-osx-python.sh -``` - -**Step 2** Run the script to get latest MXNet source and build. - -```bash -# Make the script executable -$ chmod 744 install-mxnet-osx-python.sh - -# Run the script. It takes around 5 mins. -$ bash install-mxnet-osx-python.sh -``` - -**Step 3** Validate the installation by running simple MXNet code described [here](#validate-mxnet-installation). - -
-
- - - -
-
-
- -Try the **Build from Source** option for now. - -
- -
- -**Step 1** Install prerequisites - Homebrew, python development tools. - -```bash -# Install Homebrew -$ /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" -$ export PATH=/usr/local/bin:/usr/local/sbin:$PATH - -# Install python development tools - python2.7, pip, python-setuptools -$ brew install python pkg-config graphviz -``` - -**Step 2** Install optional components - OpenCV - -If you want to use OpenCV you should install it first, then build MXNet with the `USE_OPENCV=1` option in the later steps. - -```bash -brew tap homebrew/science -brew install opencv - -``` - -**Step 3** Install CUDA and cuDNN - -The following instructions are for CUDA 9.1 and cuDNN 7 for macOS 10.12+ and a CUDA-capable GPU. They summarize confirmed successful builds in [#9217](https://github.com/apache/incubator-mxnet/issues/9217). -Alternatively, you may follow the [CUDA installation instructions for macOS](https://docs.nvidia.com/cuda/cuda-installation-guide-mac-os-x/index.html). - -1. [Download Xcode 8.3.3 from Apple](https://developer.apple.com/download/more/). This is the version [NVIDIA specifies in its instructions for macOS](https://docs.nvidia.com/cuda/cuda-installation-guide-mac-os-x/index.html). Unzip and rename to `Xcode8.3.3.app`. - -2. Run `sudo xcode-select -s /Applications/Xcode8.3.3.app` or to wherever you have placed Xcode. - -3. Run `xcode-select --install` to install all command line tools, compilers, etc. - -4. Run `sudo xcodebuild -license accept` to accept Xcode's licensing terms. - -5. Install CUDA for macOS. Specific steps are provided in NVIDIA's [CUDA installation instructions](https://docs.nvidia.com/cuda/cuda-installation-guide-mac-os-x/index.html#installation). - -6. [Download](http://docs.nvidia.com/deeplearning/sdk/cudnn-install/index.html#download-mac) and [install](http://docs.nvidia.com/deeplearning/sdk/cudnn-install/index.html#installmac) cuDNN for macOS. You will need to [create a free developer account](https://developer.nvidia.com/accelerated-computing-developer) with NVIDIA prior to getting the download link. - -**Step 4** Build MXNet - -1. Run `git clone --recursive https://github.com/apache/incubator-mxnet.git mxnet` to get the latest version. - -2. Run `cd mxnet`. - -3. Edit the `make/osx.mk` file to set the following parameters: - - ``` - USE_CUDA = 1 - USE_CUDA_PATH = /usr/local/cuda - USE_CUDNN = 1 - USE_OPENCV = 0 # set to 1 if you want to build with OpenCV - ``` - -4. Copy the `make/osx.mk` to `config.mk` - -5. Run `make`. If you previously attempted to compile you might want to do `make clean_all` first. You can also run `make -j` with the number of processors you have to compile with multithreading. There'll be plenty of warnings, but there should be no errors. - -6. Once finished, you should have a file called `libmxnet.so` in `lib/`. - -7. Do `cd python`. - -8. Run `sudo pip install -e .` **Note**: the `.` is part of the command. - -
-
- - - - - -
-
- -Install the latest version (3.5.1+) of R from [CRAN](https://cran.r-project.org/bin/macosx/). -You can [build MXNet-R from source](osx_setup.html#install-the-mxnet-package-for-r), or you can use a pre-built binary: - -```r -cran <- getOption("repos") -cran["dmlc"] <- "https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/R/CRAN/" -options(repos = cran) -install.packages("mxnet") -``` - -
- - -
- -Will be available soon. - -
-
- -
-
- -You can use the Maven packages defined in the following `dependency` to include MXNet in your Scala project. Please refer to the MXNet-Scala setup guide for a detailed set of instructions to help you with the setup process. - -maven badge - -```html - - org.apache.mxnet - mxnet-full_2.11-osx-x86_64-cpu - -``` -
-
-
- -Not available at this time.
- -
-
- - -
-
- -Follow the installation instructions [in this guide](./osx_setup.md) to set up MXNet. - -
-
- - -
-

To build the C++ package, please refer to this guide.

-
-
- - - - - - - - - - -
-
-
-
- -
- -**Step 1** Install Python. - -[Anaconda](https://www.anaconda.com/download/) is recommended. - -
- -**Step 2** Install *MXNet*. - -```bash -$ pip install mxnet -``` - -
- -
- -**Step 2** Install *MXNet*. - -```bash -$ pip install mxnet==1.1.0 -``` - -
- -
- -**Step 2** Install *MXNet*. - -```bash -$ pip install mxnet==1.0.0 -``` - -
- - -
- -**Step 2** Install *MXNet*. - -```bash -$ pip install mxnet==0.12.1 -``` - -For *MXNet* 0.12.0 - - -```bash -$ pip install mxnet==0.12.0 -``` - - -
- -
- -**Step 2** Install *MXNet*. - -```bash -$ pip install mxnet==0.11.0 -``` - - -
- -
- -**Step 2** Install *MXNet*. - -```bash -$ pip install mxnet --pre -``` - -
- - -
- - -
- -Follow the installation instructions [in this guide](./windows_setup.md) to set up MXNet. - -
-
- - -
-
- -
- -**Step 1** Install Python. - -[Anaconda](https://www.anaconda.com/download/) is recommended. - - -
- -**Step 2** Install *MXNet* with GPU support using CUDA 9.2. - -**Important**: Make sure your installed CUDA version matches the CUDA version in the pip package. -Check your CUDA version with the following command: - -```bash -nvcc --version -``` - -You can either upgrade your CUDA install or install the MXNet package that supports your CUDA version. - -```bash -$ pip install mxnet-cu92 -``` - -Refer to [pypi for older packages](https://pypi.org/project/mxnet/). - -
- -
- -**Step 2** Install *MXNet* with GPU support using CUDA 9.1. - -**Important**: Make sure your installed CUDA version matches the CUDA version in the pip package. -Check your CUDA version with the following command: - -```bash -nvcc --version -``` - -You can either upgrade your CUDA install or install the MXNet package that supports your CUDA version. - -```bash -$ pip install mxnet-cu91==1.1.0 -``` - -Refer to [pypi for older packages](https://pypi.org/project/mxnet/). - -
- -
- -**Step 2** Install *MXNet* with GPU support using CUDA 9.0. - -```bash -$ pip install mxnet-cu90==1.0.0 -``` - -
- -
- -**Step 2** Install *MXNet* with GPU support using CUDA 9.0. - -```bash -$ pip install mxnet-cu90==0.12.1 -``` - -Install *MXNet* 0.12.0 with GPU support using CUDA 9.0. - -```bash -$ pip install mxnet-cu90==0.12.0 -``` - -
- -
- -**Step 2** Install *MXNet* with GPU support using CUDA 8.0. - -```bash -$ pip install mxnet-cu80==0.11.0 -``` - -
- -
- -**Step 2** Install *MXNet* with GPU support using CUDA 9.2. - -**Important**: Make sure your installed CUDA version matches the CUDA version in the pip package. -Check your CUDA version with the following command: - -```bash -nvcc --version -``` - -You can either upgrade your CUDA install or install the MXNet package that supports your CUDA version. - -```bash -$ pip install mxnet-cu92 --pre -``` - -Refer to [pypi for older packages](https://pypi.org/project/mxnet/). - -
- -Refer to [#8671](https://github.com/apache/incubator-mxnet/issues/8671) for status on CUDA 9.1 support. - -
-
-
- -We provide both options to build and install MXNet yourself using [Microsoft Visual Studio 2017](https://www.visualstudio.com/downloads/), and [Microsoft Visual Studio 2015](https://www.visualstudio.com/vs/older-downloads/). - -**Option 1** - -To build and install MXNet yourself using [Microsoft Visual Studio 2017](https://www.visualstudio.com/downloads/), you need the following dependencies. Install the required dependencies: - -1. If [Microsoft Visual Studio 2017](https://www.visualstudio.com/downloads/) is not already installed, download and install it. You can download and install the free community edition. -2. Download and install [CMake](https://cmake.org/files/v3.11/cmake-3.11.0-rc4-win64-x64.msi) if it is not already installed. -3. Download and install [OpenCV](https://sourceforge.net/projects/opencvlibrary/files/opencv-win/3.4.1/opencv-3.4.1-vc14_vc15.exe/download). -4. Unzip the OpenCV package. -5. Set the environment variable ```OpenCV_DIR``` to point to the ```OpenCV build directory``` (e.g., ```OpenCV_DIR = C:\utils\opencv\build```). -6. If you don’t have the Intel Math Kernel Library (MKL) installed, download and install [OpenBlas](https://sourceforge.net/projects/openblas/files/v0.2.20/OpenBLAS%200.2.20%20version.zip/download). -7. Set the environment variable ```OpenBLAS_HOME``` to point to the ```OpenBLAS``` directory that contains the ```include``` and ```lib``` directories (e.g., ```OpenBLAS_HOME = C:\utils\OpenBLAS```). -8. Download and install CUDA: Install [CUDA](https://developer.nvidia.com/cuda-downloads?target_os=Windows&target_arch=x86_64&target_version=10&target_type=exelocal), and Download the base installer (e.g., ```cuda_9.1.85_win10.exe```). -9. Download and install cuDNN. To get access to the download link, register as an NVIDIA community user. Then Follow the [link](http://docs.nvidia.com/deeplearning/sdk/cudnn-install/index.html#install-windows) to install the cuDNN. -10. Download and install [git](https://git-for-windows.github.io/). - -After you have installed all of the required dependencies, build the MXNet source code: - -1. Start ```cmd``` in windows. - -2. Download the MXNet source code from GitHub by using following command: - -```r -cd C:\ -git clone https://github.com/apache/incubator-mxnet.git --recursive -``` - -3. Follow [this link](https://docs.microsoft.com/en-us/visualstudio/install/modify-visual-studio) to modify ```Individual components```, and check ```VC++ 2017 version 15.4 v14.11 toolset```, and click ```Modify```. - -4. Change the version of the Visual studio 2017 to v14.11 using the following command (by default the VS2017 is installed in the following path): - -```r -"C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\VC\Auxiliary\Build\vcvars64.bat" -vcvars_ver=14.11 -``` - -5. Create a build dir using the following command and go to the directory, for example: - -```r -mkdir C:\build -cd C:\build -``` - -6. 
CMake the MXNet source code by using following command: - -```r -cmake -G "Visual Studio 15 2017 Win64" -T cuda=9.1,host=x64 -DUSE_CUDA=1 -DUSE_CUDNN=1 -DUSE_NVRTC=1 -DUSE_OPENCV=1 -DUSE_OPENMP=1 -DUSE_BLAS=open -DUSE_LAPACK=1 -DUSE_DIST_KVSTORE=0 -DCUDA_ARCH_LIST=Common -DCUDA_TOOLSET=9.1 -DCUDNN_INCLUDE=C:\cuda\include -DCUDNN_LIBRARY=C:\cuda\lib\x64\cudnn.lib "C:\incubator-mxnet" -``` - -NOTE: make sure the DCUDNN_INCLUDE and DCUDNN_LIBRARY pointing to the “include” and “cudnn.lib” of your CUDA installed location, and the ```C:\incubator-mxnet``` is the location of the source code you just git in the previous step - -7. After the CMake successfully completed, compile the the MXNet source code by using following command: - -```r -msbuild mxnet.sln /p:Configuration=Release;Platform=x64 /maxcpucount -``` - -**Option 2** - -To build and install MXNet yourself using [Microsoft Visual Studio 2015](https://www.visualstudio.com/vs/older-downloads/), you need the following dependencies. Install the required dependencies: - -1. If [Microsoft Visual Studio 2015](https://www.visualstudio.com/vs/older-downloads/) is not already installed, download and install it. You can download and install the free community edition. At least Update 3 of Microsoft Visual Studio 2015 is required to build MXNet from source. Upgrade via it's ```Tools -> Extensions and Updates... | Product Updates``` menu. -2. Download and install [CMake](https://cmake.org/) if it is not already installed. -3. Download and install [OpenCV](http://sourceforge.net/projects/opencvlibrary/files/opencv-win/3.0.0/opencv-3.0.0.exe/download). -4. Unzip the OpenCV package. -5. Set the environment variable ```OpenCV_DIR``` to point to the ```OpenCV build directory``` (```C:\opencv\build\x64\vc14``` for example). Also, you need to add the OpenCV bin directory (```C:\opencv\build\x64\vc14\bin``` for example) to the ``PATH`` variable. -6. If you don't have the Intel Math Kernel Library (MKL) installed, download and install [OpenBlas](http://sourceforge.net/projects/openblas/files/v0.2.14/). -7. Set the environment variable ```OpenBLAS_HOME``` to point to the ```OpenBLAS``` directory that contains the ```include``` and ```lib``` directories. Typically, you can find the directory in ```C:\Program files (x86)\OpenBLAS\```. -8. Download and install [CUDA](https://developer.nvidia.com/cuda-downloads?target_os=Windows&target_arch=x86_64) and [cuDNN](https://developer.nvidia.com/cudnn). To get access to the download link, register as an NVIDIA community user. -9. Set the environment variable ```CUDACXX``` to point to the ```CUDA Compiler```(```C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v9.1\bin\nvcc.exe``` for example). -10. Set the environment variable ```CUDNN_ROOT``` to point to the ```cuDNN``` directory that contains the ```include```, ```lib``` and ```bin``` directories (```C:\Downloads\cudnn-9.1-windows7-x64-v7\cuda``` for example). - -After you have installed all of the required dependencies, build the MXNet source code: - -1. Download the MXNet source code from [GitHub](https://github.com/apache/incubator-mxnet) (make sure you also download third parties submodules e.g. ```git clone --recurse-submodules```). -2. Use [CMake](https://cmake.org/) to create a Visual Studio solution in ```./build```. -3. In Visual Studio, open the solution file,```.sln```, and compile it. -These commands produce a library called ```mxnet.dll``` in the ```./build/Release/``` or ```./build/Debug``` folder. 
- -  -Next, we install the ```graphviz``` library that we use for visualizing network graphs that you build on MXNet. We will also install [Jupyter Notebook](http://jupyter.readthedocs.io/) which is used for running MXNet tutorials and examples. -- Install the ```graphviz``` by downloading the installer from the [Graphviz Download Page](https://graphviz.gitlab.io/_pages/Download/Download_windows.html). -**Note** Make sure to add the `graphviz` executable path to the PATH environment variable. Refer [here for more details](http://stackoverflow.com/questions/35064304/runtimeerror-make-sure-the-graphviz-executables-are-on-your-systems-path-aft) - - -  -
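A quick way to confirm that Graphviz is visible on your `PATH` is to query its version from a fresh terminal; the second command only applies if you also installed the Python `graphviz` package with pip. This is just a sanity check, not part of the required steps:

```bash
dot -V
python -c "import graphviz; print(graphviz.__version__)"
```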
-
-
- - - - -
-
- -Install the latest version (3.5.1+) of R from [CRAN](https://cran.r-project.org/bin/windows/). -You can [build MXNet-R from source](windows_setup.html#install-mxnet-package-for-r), or you can use a pre-built binary: - -```r -cran <- getOption("repos") -cran["dmlc"] <- "https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/R/CRAN/" -options(repos = cran) -install.packages("mxnet") -``` - -
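Once the package is installed, a minimal smoke test (a sketch, assuming `Rscript` is on your `PATH`) is to run the same kind of computation used in the validation examples later on this page:

```bash
Rscript -e "library(mxnet); print(mx.nd.ones(c(2, 3)) * 2 + 1)"
```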
- -
- -You can [build MXNet-R from source](windows_setup.html#install-mxnet-package-for-r), or you can use a pre-built binary: - -```r - cran <- getOption("repos") - cran["dmlc"] <- "https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/R/CRAN/GPU/cu92" - options(repos = cran) - install.packages("mxnet") -``` -Change cu92 to cu80, cu90 or cu91 based on your CUDA toolkit version. Currently, MXNet supports these versions of CUDA. - -
-
- -
-
- -MXNet-Scala for Windows is not yet available. -
-
-
- -
-
- -Follow the installation instructions [in this guide](./windows_setup.md) to set up MXNet. - -
-
- -
-
-

To build the C++ package, please refer to this guide.

-
-
-
-
- - - - -
- -AWS Marketplace distributes Deep Learning AMIs (Amazon Machine Image) with MXNet pre-installed. You can launch one of these Deep Learning AMIs by following instructions in the [AWS Deep Learning AMI Developer Guide](http://docs.aws.amazon.com/dlami/latest/devguide/what-is-dlami.html). - -You can also run distributed deep learning with *MXNet* on AWS using [Cloudformation Template](https://github.com/awslabs/deeplearning-cfn/blob/master/README.md). - -
- - - -
-
- -MXNet supports the Debian based Raspbian ARM based operating system so you can run MXNet on Raspberry Pi Devices. - -These instructions will walk through how to build MXNet for the Raspberry Pi and install the Python bindings for the library. - -You can do a dockerized cross compilation build on your local machine or a native build on-device. - -The complete MXNet library and its requirements can take almost 200MB of RAM, and loading large models with the library can take over 1GB of RAM. Because of this, we recommend running MXNet on the Raspberry Pi 3 or an equivalent device that has more than 1 GB of RAM and a Secure Digital (SD) card that has at least 4 GB of free memory. - -**Cross compilation build (Experimental)** - -## Docker installation -**Step 1** Install Docker on your machine by following the [docker installation instructions](https://docs.docker.com/engine/installation/linux/ubuntu/#install-using-the-repository). - -*Note* - You can install Community Edition (CE) - -**Step 2** [Optional] Post installation steps to manage Docker as a non-root user. - -Follow the four steps in this [docker documentation](https://docs.docker.com/engine/installation/linux/linux-postinstall/#manage-docker-as-a-non-root-user) to allow managing docker containers without *sudo*. - -## Build - -The following command will build a container with dependencies and tools and then compile MXNet for -ARMv7. The resulting artifact will be located in `build/mxnet-x.x.x-py2.py3-none-any.whl`, copy this -file to your Raspberry Pi. - -```bash -ci/build.py -p armv7 -``` - -## Install - -Create a virtualenv and install the package we created previously. - -```bash -virtualenv -p `which python3` mxnet_py3 -source mxnet_py3/bin/activate -pip install mxnet-x.x.x-py2.py3-none-any.whl -``` - - -**Native Build** - -Installing MXNet is a two-step process: - -1. Build the shared library from the MXNet C++ source code. -2. Install the supported language-specific packages for MXNet. - -**Step 1** Build the Shared Library - -On Raspbian versions Wheezy and later, you need the following dependencies: - -- Git (to pull code from GitHub) - -- libblas (for linear algebraic operations) - -- libopencv (for computer vision operations. This is optional if you want to save RAM and Disk Space) - -- A C++ compiler that supports C++ 11. The C++ compiler compiles and builds MXNet source code. Supported compilers include the following: - -- [G++ (4.8 or later)](https://gcc.gnu.org/gcc-4.8/). Make sure to use gcc 4 and not 5 or 6 as there - are known bugs with these compilers. - -Install these dependencies using the following commands in any directory: - -```bash - sudo apt-get update - sudo apt-get -y install git cmake ninja-build build-essential g++-4.9 c++-4.9 liblapack* libblas* libopencv* libopenblas* python3-dev virtualenv -``` - -Clone the MXNet source code repository using the following `git` command in your home directory: -```bash - git clone https://github.com/apache/incubator-mxnet.git --recursive - cd incubator-mxnet -``` - -Build: -```bash - mkdir -p build && cd build - cmake \ - -DUSE_SSE=OFF \ - -DUSE_CUDA=OFF \ - -DUSE_OPENCV=ON \ - -DUSE_OPENMP=ON \ - -DUSE_MKL_IF_AVAILABLE=OFF \ - -DUSE_SIGNAL_HANDLER=ON \ - -DCMAKE_BUILD_TYPE=Release \ - -GNinja .. 
- ninja -j1 -``` -Some compilation units require memory close to 1GB, so it's recommended that you enable swap as -explained below and be cautious about increasing the number of jobs when building (-j) - -Executing these commands start the build process, which can take up to a couple hours, and creates a file called `libmxnet.so` in the build directory. - -If you are getting build errors in which the compiler is being killed, it is likely that the -compiler is running out of memory (especially if you are on Raspberry Pi 1, 2 or Zero, which have -less than 1GB of RAM), this can often be rectified by increasing the swapfile size on the Pi by -editing the file /etc/dphys-swapfile and changing the line CONF_SWAPSIZE=100 to CONF_SWAPSIZE=1024, -then running: -```bash - sudo /etc/init.d/dphys-swapfile stop - sudo /etc/init.d/dphys-swapfile start - free -m # to verify the swapfile size has been increased -``` - -**Step 2** Install MXNet Python Bindings - -To install Python bindings run the following commands in the MXNet directory: - -```bash - cd python - pip install --upgrade pip - pip install -e . -``` - -Note that the `-e` flag is optional. It is equivalent to `--editable` and means that if you edit the source files, these changes will be reflected in the package installed. - -Alternatively you can create a whl package installable with pip with the following command: -```bash -ci/docker/runtime_functions.sh build_wheel python/ $(realpath build) -``` - - -You are now ready to run MXNet on your Raspberry Pi device. You can get started by following the tutorial on [Real-time Object Detection with MXNet On The Raspberry Pi](http://mxnet.io/tutorials/embedded/wine_detector.html). - -*Note - Because the complete MXNet library takes up a significant amount of the Raspberry Pi's limited RAM, when loading training data or large models into memory, you might have to turn off the GUI and terminate running processes to free RAM.* - -
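Once the bindings are installed, a lightweight check that the library loads on the device (a sketch, assuming the `mxnet_py3` virtualenv created earlier) is:

```bash
source mxnet_py3/bin/activate
python -c "import mxnet as mx; print(mx.__version__)"
```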
- - -
- -# Nvidia Jetson TX family - -MXNet supports the Ubuntu Arch64 based operating system so you can run MXNet on NVIDIA Jetson Devices. - -These instructions will walk through how to build MXNet for the Pascal based [NVIDIA Jetson TX2](http://www.nvidia.com/object/embedded-systems-dev-kits-modules.html) and install the corresponding python language bindings. - -For the purposes of this install guide we will assume that CUDA is already installed on your Jetson device. - -**Install MXNet** - -Installing MXNet is a two-step process: - -1. Build the shared library from the MXNet C++ source code. -2. Install the supported language-specific packages for MXNet. - -**Step 1** Build the Shared Library - -You need the following additional dependencies: - -- Git (to pull code from GitHub) - -- libatlas (for linear algebraic operations) - -- libopencv (for computer vision operations) - -- python pip (to load relevant python packages for our language bindings) - -Install these dependencies using the following commands in any directory: - -```bash - sudo apt-get update - sudo apt-get -y install git build-essential libatlas-base-dev libopencv-dev graphviz python-pip - sudo pip install pip --upgrade - sudo pip install setuptools numpy --upgrade - sudo pip install graphviz jupyter -``` - -Clone the MXNet source code repository using the following `git` command in your home directory: -```bash - git clone https://github.com/apache/incubator-mxnet.git --recursive - cd incubator-mxnet -``` - -Edit the Makefile to install the MXNet with CUDA bindings to leverage the GPU on the Jetson: -```bash - cp make/crosscompile.jetson.mk config.mk -``` - -Edit the Mshadow Makefile to ensure MXNet builds with Pascal's hardware level low precision acceleration by editing 3rdparty/mshadow/make/mshadow.mk and adding the following after line 122: -```bash -MSHADOW_CFLAGS += -DMSHADOW_USE_PASCAL=1 -``` - -Now you can build the complete MXNet library with the following command: -```bash - make -j $(nproc) -``` - -Executing this command creates a file called `libmxnet.so` in the mxnet/lib directory. - -**Step 2** Install MXNet Python Bindings - -To install Python bindings run the following commands in the MXNet directory: - -```bash - cd python - pip install --upgrade pip - pip install -e . -``` - -Note that the `-e` flag is optional. It is equivalent to `--editable` and means that if you edit the source files, these changes will be reflected in the package installed. - -Add the mxnet folder to the path: - -```bash - cd .. - export MXNET_HOME=$(pwd) - echo "export PYTHONPATH=$MXNET_HOME/python:$PYTHONPATH" >> ~/.bashrc - source ~/.bashrc -``` - -You are now ready to run MXNet on your NVIDIA Jetson TX2 device. - -
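To confirm that the GPU-enabled build works on the Jetson, you can run a short check modeled on the validation examples later in this guide (a sketch; it assumes the CUDA build and Python bindings from the steps above):

```bash
python -c "import mxnet as mx; print((mx.nd.ones((2, 3), mx.gpu()) * 2 + 1).asnumpy())"
```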
-
- - - - - -# Validate MXNet Installation - -
-
-
- -
- -Start the python terminal. - -```bash -$ python -``` -
- -
- -Launch a Docker container with `mxnet/python` image and run example *MXNet* python program on the terminal. - -```bash -$ docker run -it mxnet/python bash # Use sudo if you skip Step 2 in the installation instruction - -# Start a python terminal -root@4919c4f58cac:/# python -``` -
- -
- -Activate the virtualenv environment created for *MXNet*. - -```bash -$ source ~/mxnet/bin/activate -``` - -After activating the environment, you should see the prompt as below. - -```bash -(mxnet)$ -``` - -Start the python terminal. - -```bash -$ python -``` - -
- -Run a short *MXNet* python program to create a 2X3 matrix of ones, multiply each element in the matrix by 2 followed by adding 1. We expect the output to be a 2X3 matrix with all elements being 3. - -```python ->>> import mxnet as mx ->>> a = mx.nd.ones((2, 3)) ->>> b = a * 2 + 1 ->>> b.asnumpy() -array([[ 3., 3., 3.], - [ 3., 3., 3.]], dtype=float32) -``` -
-
-
- - - -
-
-
-
- -Run a short *MXNet* python program to create a 2X3 matrix of ones, multiply each element in the matrix by 2 followed by adding 1. We expect the output to be a 2X3 matrix with all elements being 3. - -```python ->>> import mxnet as mx ->>> a = mx.nd.ones((2, 3)) ->>> b = a * 2 + 1 ->>> b.asnumpy() -array([[ 3., 3., 3.], - [ 3., 3., 3.]], dtype=float32) -``` - -
-
-
-
- - - -
-
-
- -
-
- -Will be available soon. - -
- -
-
- -From the MXNet root directory run: `python example/image-classification/train_mnist.py --network lenet --gpus 0` to test GPU training. - -
- -
-
-
- - - -
-
-
- -
-
- -Will be available soon. - -
- -
-
- -From the MXNet root directory run: `python example/image-classification/train_mnist.py --network lenet --gpus 0` to test GPU training. - -
- -
-
-
- - - -
-
-
- -
- -Start the python terminal. - -```bash -$ python -``` -
- -
- -Launch a NVIDIA Docker container with `mxnet/python:gpu` image and run example *MXNet* python program on the terminal. - -```bash -$ nvidia-docker run -it mxnet/python:gpu bash # Use sudo if you skip Step 2 in the installation instruction - -# Start a python terminal -root@4919c4f58cac:/# python -``` -
- -
- -Activate the virtualenv environment created for *MXNet*. - -```bash -$ source ~/mxnet/bin/activate -``` - -After activating the environment, you should see the prompt as below. - -```bash -(mxnet)$ -``` - -Start the python terminal. - -```bash -$ python -``` - -
- -Run a short *MXNet* python program to create a 2X3 matrix of ones *a* on a *GPU*, multiply each element in the matrix by 2 followed by adding 1. We expect the output to be a 2X3 matrix with all elements being 3. We use *mx.gpu()*, to set *MXNet* context to be GPUs. - -```python ->>> import mxnet as mx ->>> a = mx.nd.ones((2, 3), mx.gpu()) ->>> b = a * 2 + 1 ->>> b.asnumpy() -array([[ 3., 3., 3.], - [ 3., 3., 3.]], dtype=float32) -``` -
-
-
- - - - - - - -
-
-
- -
- -Exit the Python terminal. - -```python ->>> exit() -$ -``` -
- -
- -Exit the Python terminal and Deactivate the virtualenv *MXNet* environment. -```python ->>> exit() -(mxnet)$ deactivate -$ -``` - -
- -
- -Exit the Python terminal and mxnet/python docker container. -```python ->>> exit() -root@4919c4f58cac:/# exit -``` - -
- -
-
-
- - -
-
-
- -
- -Exit the Python terminal. - -```python ->>> exit() -$ -``` -
- -
- -Exit the Python terminal and Deactivate the virtualenv *MXNet* environment. -```python ->>> exit() -(mxnet)$ deactivate -$ -``` - -
- -
- -Exit the Python terminal and then the docker container. -```python ->>> exit() -root@4919c4f58cac:/# exit -``` - -
- -
-
-
- - - -
- -Login to the cloud instance you launched, with pre-installed *MXNet*, following the guide by corresponding cloud provider. - - -Start the python terminal. - -```bash -$ python -``` - - -
- -Run a short *MXNet* python program to create a 2X3 matrix of ones, multiply each element in the matrix by 2 followed by adding 1. We expect the output to be a 2X3 matrix with all elements being 3. - -```python ->>> import mxnet as mx ->>> a = mx.nd.ones((2, 3)) ->>> b = a * 2 + 1 ->>> b.asnumpy() -array([[ 3., 3., 3.], - [ 3., 3., 3.]], dtype=float32) - ``` - -Exit the Python terminal. - -```python ->>> exit() -$ -``` - -
- - - -
- -Run a short *MXNet* python program to create a 2X3 matrix of ones *a* on a *GPU*, multiply each element in the matrix by 2 followed by adding 1. We expect the output to be a 2X3 matrix with all elements being 3. We use *mx.gpu()*, to set *MXNet* context to be GPUs. - -```python ->>> import mxnet as mx ->>> a = mx.nd.ones((2, 3), mx.gpu()) ->>> b = a * 2 + 1 ->>> b.asnumpy() -array([[ 3., 3., 3.], - [ 3., 3., 3.]], dtype=float32) -``` - -
- -
- - - -
-
-
- -Run a short *MXNet* R program to create a 2X3 matrix of ones, multiply each element in the matrix by 2 followed by adding 1. We expect the output to be a 2X3 matrix with all elements being 3. - -```r -library(mxnet) -a <- mx.nd.ones(c(2,3), ctx = mx.cpu()) -b <- a * 2 + 1 -b -``` - -You should see the following output: - -```r -[,1] [,2] [,3] -[1,] 3 3 3 -[2,] 3 3 3 -``` - -
-
-
- - - -
-
-
- -Run a short *MXNet* R program to create a 2X3 matrix of ones *a* on a *GPU*, multiply each element in the matrix by 2 followed by adding 1. We expect the output to be a 2X3 matrix with all elements being 3. We use *mx.gpu()*, to set *MXNet* context to be GPUs. - -```r -library(mxnet) -a <- mx.nd.ones(c(2,3), ctx = mx.gpu()) -b <- a * 2 + 1 -b -``` - -You should see the following output: - -```r -[,1] [,2] [,3] -[1,] 3 3 3 -[2,] 3 3 3 -``` - -
-
-
- - - -
-
- -
- Run the MXNet-Scala demo project to validate your Maven package installation. -
- -
- -
-
- -Will be available soon. - -
-
-
- -
-
-
- Run the MXNet-Scala demo project to validate your Maven package installation. -
-
-
-
- -Will be available soon. - -
-
-
- - -
-
-
- -
-
-Will be available soon. -
- -
-
- -
-
- -Will be available soon. - -
-
-
- - -
+
# Source Download -Download your required version of MXNet. +Download your required version of MXNet and build from source. diff --git a/docs/install/osx_setup.md b/docs/install/osx_setup.md index b90dfd1e582c..53039252888d 100644 --- a/docs/install/osx_setup.md +++ b/docs/install/osx_setup.md @@ -102,11 +102,22 @@ If building with ```GPU``` support, add the following configuration to config.mk   We have installed MXNet core library. Next, we will install MXNet interface package for the programming language of your choice: +- [Python](#install-mxnet-for-python) - [R](#install-the-mxnet-package-for-r) - [Julia](#install-the-mxnet-package-for-julia) - [Scala](#install-the-mxnet-package-for-scala) - [Perl](#install-the-mxnet-package-for-perl) +## Install MXNet for Python +To install the MXNet Python binding navigate to the root of the MXNet folder then run the following: + +```bash +$ cd python +$ pip install -e . +``` + +Note that the `-e` flag is optional. It is equivalent to `--editable` and means that if you edit the source files, these changes will be reflected in the package installed. + ## Install the MXNet Package for R You have 2 options: 1. Building MXNet with the Prebuilt Binary Package diff --git a/docs/install/ubuntu_setup.md b/docs/install/ubuntu_setup.md index 13280b58573e..432310dd763d 100644 --- a/docs/install/ubuntu_setup.md +++ b/docs/install/ubuntu_setup.md @@ -115,8 +115,8 @@ You can build MXNet from source, and then you have the option of installing lang ### Build the Shared Library -#### Quick MXNet Installation -You can quickly build MXNet with the following script found in the `/docs/install` folder: +#### Quick MXNet Build +You can quickly build MXNet from source with the following script found in the `/docs/install` folder: ```bash cd docs/install @@ -127,6 +127,8 @@ Or you can go through a manual process described next. #### Manual MXNet Installation +It is recommended that you review the general [build from source](build_from_source.html) instructions before continuing. + On Ubuntu versions 16.04 or later, you need the following dependencies: **Step 1:** Install build tools and git. @@ -135,14 +137,18 @@ On Ubuntu versions 16.04 or later, you need the following dependencies: sudo apt-get install -y build-essential git ``` -**Step 2:** Install OpenBLAS. +**Step 2:** Install a Math Library. + +Details on the different math libraries are found in the build from source guide's [Math Library Selection](build_from_source.html#math-library-selection) section. -*MXNet* uses [BLAS](https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms) library for accelerated numerical computations on CPU machine. There are several flavors of BLAS libraries - [OpenBLAS](http://www.openblas.net/), [ATLAS](http://math-atlas.sourceforge.net/) and [MKL](https://software.intel.com/en-us/intel-mkl). In this step we install OpenBLAS. You can choose to install ATLAS or MKL. +For OpenBLAS use: ```bash sudo apt-get install -y libopenblas-dev ``` +For other libraries, visit the [Math Library Selection](build_from_source.html#math-library-selection) section. + **Step 3:** Install OpenCV. *MXNet* uses [OpenCV](http://opencv.org/) for efficient image loading and augmentation operations. @@ -153,7 +159,7 @@ On Ubuntu versions 16.04 or later, you need the following dependencies: **Step 4:** Download MXNet sources and build MXNet core shared library. 
-If building on CPU: +If building on CPU and using OpenBLAS: ```bash git clone --recursive https://github.com/apache/incubator-mxnet.git @@ -161,7 +167,7 @@ If building on CPU: make -j $(nproc) USE_OPENCV=1 USE_BLAS=openblas ``` -If building on GPU (make sure you have installed the [CUDA dependencies first](#cuda-dependencies)): +If building on GPU and you want OpenCV and OpenBLAS (make sure you have installed the [CUDA dependencies first](#cuda-dependencies)): ```bash git clone --recursive https://github.com/apache/incubator-mxnet.git @@ -169,38 +175,119 @@ If building on GPU (make sure you have installed the [CUDA dependencies first](# make -j $(nproc) USE_OPENCV=1 USE_BLAS=openblas USE_CUDA=1 USE_CUDA_PATH=/usr/local/cuda USE_CUDNN=1 ``` -*Note* - USE_OPENCV and USE_BLAS are make file flags to set compilation options to use OpenCV and BLAS library. You can explore and use more compilation options in `make/config.mk`. +*Note* - USE_OPENCV and USE_BLAS are make file flags to set compilation options to use OpenCV and BLAS library. You can explore and use more compilation options in `make/config.mk` and also review common [usage examples](build_from_source.html#usage-examples). -Executing these commands creates a library called ```libmxnet.so```. +Building from source creates a library called ```libmxnet.so``` in the `lib` folder in your MXNet project root. -Next, you may optionally install ```graphviz``` library that is used for visualizing network graphs you build on MXNet. You may also install [Jupyter Notebook](http://jupyter.readthedocs.io/) which is used for running MXNet tutorials and examples. +You may also want to add the MXNet shared library to your `LD_LIBRARY_PATH`: ```bash - sudo apt-get install -y python-pip - sudo pip install graphviz - sudo pip install jupyter +export LD_LIBRARY_PATH=~/incubator-mxnet/lib ``` + +After building the MXNet library, you may install language bindings. +
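Before moving on to the language bindings, you can confirm that the shared library was produced and, optionally, make the `LD_LIBRARY_PATH` setting persistent. This is only a sketch and assumes the repository was cloned into your home directory as in the commands above:

```bash
ls -lh ~/incubator-mxnet/lib/libmxnet.so
echo 'export LD_LIBRARY_PATH=$HOME/incubator-mxnet/lib:$LD_LIBRARY_PATH' >> ~/.bashrc
source ~/.bashrc
```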
 ## Installing Language Packages for MXNet
 
 After you have installed the MXNet core library, you may install MXNet interface packages for the programming language of your choice:
 
-- [Scala](#install-the-mxnet-package-for-scala)
-- [R](#install-the-mxnet-package-for-r)
+- [Python](#install-mxnet-for-python)
+- [C++](#install-the-mxnet-package-for-c++)
+- [Clojure](#install-the-mxnet-package-for-clojure)
 - [Julia](#install-the-mxnet-package-for-julia)
 - [Perl](#install-the-mxnet-package-for-perl)
+- [R](#install-the-mxnet-package-for-r)
+- [Scala](#install-the-mxnet-package-for-scala)
+
-### Install the MXNet Package for Scala +### Install MXNet for Python -To use the MXNet-Scala package, you can acquire the Maven package as a dependency. +To install the MXNet Python binding navigate to the root of the MXNet folder then run the following: -Further information is in the [MXNet-Scala Setup Instructions](scala_setup.html). +```bash +$ cd python +$ pip install -e . +``` -If you use IntelliJ or a similar IDE, you may want to follow the [MXNet-Scala on IntelliJ tutorial](../tutorials/scala/mxnet_scala_on_intellij.html) instead. +Note that the `-e` flag is optional. It is equivalent to `--editable` and means that if you edit the source files, these changes will be reflected in the package installed. + +#### Optional Python Packages + +You may optionally install ```graphviz``` library that is used for visualizing network graphs you build on MXNet. You may also install [Jupyter Notebook](http://jupyter.readthedocs.io/) which is used for running MXNet tutorials and examples. + +```bash +sudo pip install graphviz +sudo pip install jupyter +``` +
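As a quick sanity check that the Python binding works and that the editable install resolves to your source tree (a sketch, assuming the shared library built successfully), you can print the version and module location:

```bash
python -c "import mxnet as mx; print(mx.__version__); print(mx.__file__)"
```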
+ + +### Install the MXNet Package for C++ + +Refer to the [C++ Package setup guide](c_plus_plus.html). +
+ + +### Install the MXNet Package for Clojure + +Refer to the [Clojure setup guide](https://github.com/apache/incubator-mxnet/tree/master/contrib/clojure-package). +
+
+
+### Install the MXNet Package for Julia
+
+The MXNet package for Julia is hosted in a separate repository, MXNet.jl, which is available on [GitHub](https://github.com/dmlc/MXNet.jl). To use the Julia binding with an existing libmxnet installation, set the ```MXNET_HOME``` environment variable by running the following command:
+
+```bash
+ export MXNET_HOME=/path/to/libmxnet
+```
+
+The path to the existing libmxnet installation should be the root directory of libmxnet. In other words, you should be able to find the ```libmxnet.so``` file at ```$MXNET_HOME/lib```. For example, if the root directory of libmxnet is ```~/libmxnet```, you would run the following command:
+
+```bash
+ export MXNET_HOME=~/libmxnet
+```
+
+You might want to add this command to your ```~/.bashrc``` file. If you do, you can install the Julia package in the Julia console using the following command:
+
+```julia
+ Pkg.add("MXNet")
+```
+
+For more details about installing and using MXNet with Julia, see the [MXNet Julia documentation](http://dmlc.ml/MXNet.jl/latest/user-guide/install/).
+
+ + +### Install the MXNet Package for Perl + +Before you build MXNet for Perl from source code, you must complete [building the shared library](#build-the-shared-library). After you build the shared library, run the following command from the MXNet source root directory to build the MXNet Perl package: + +```bash + sudo apt-get install libmouse-perl pdl cpanminus swig libgraphviz-perl + cpanm -q -L "${HOME}/perl5" Function::Parameters Hash::Ordered PDL::CCS + + MXNET_HOME=${PWD} + export LD_LIBRARY_PATH=${MXNET_HOME}/lib + export PERL5LIB=${HOME}/perl5/lib/perl5 + + cd ${MXNET_HOME}/perl-package/AI-MXNetCAPI/ + perl Makefile.PL INSTALL_BASE=${HOME}/perl5 + make install + + cd ${MXNET_HOME}/perl-package/AI-NNVMCAPI/ + perl Makefile.PL INSTALL_BASE=${HOME}/perl5 + make install + + cd ${MXNET_HOME}/perl-package/AI-MXNet/ + perl Makefile.PL INSTALL_BASE=${HOME}/perl5 + make install +```
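A minimal way to confirm the Perl package is usable (a sketch, assuming `PERL5LIB` and `LD_LIBRARY_PATH` are still exported as above) is to load the module from the command line:

```bash
perl -e 'use AI::MXNet; print "AI::MXNet loaded\n";'
```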
+ ### Install the MXNet Package for R Building *MXNet* from source is a 2 step process. @@ -291,69 +378,19 @@ You should see the following output: [2,] 3 3 3 > quit() ``` -
-### Install the MXNet Package for Julia - -The MXNet package for Julia is hosted in a separate repository, MXNet.jl, which is available on [GitHub](https://github.com/dmlc/MXNet.jl). To use Julia binding it with an existing libmxnet installation, set the ```MXNET_HOME``` environment variable by running the following command: - -```bash - export MXNET_HOME=//libmxnet -``` - -The path to the existing libmxnet installation should be the root directory of libmxnet. In other words, you should be able to find the ```libmxnet.so``` file at ```$MXNET_HOME/lib```. For example, if the root directory of libmxnet is ```~```, you would run the following command: - -```bash - export MXNET_HOME=/~/libmxnet -``` - -You might want to add this command to your ```~/.bashrc``` file. If you do, you can install the Julia package in the Julia console using the following command: - -```julia - Pkg.add("MXNet") -``` - -For more details about installing and using MXNet with Julia, see the [MXNet Julia documentation](http://dmlc.ml/MXNet.jl/latest/user-guide/install/). -
- - -## Install the MXNet Package for Scala +### Install the MXNet Package for Scala To use the MXNet-Scala package, you can acquire the Maven package as a dependency. -Further information is in the [MXNet-Scala Setup Instructions](./scala_setup.md). - -If you use IntelliJ or a similar IDE, you may want to follow the [MXNet-Scala on IntelliJ tutorial](../tutorials/scala/mxnet_scala_on_intellij.md) instead. - - -### Install the MXNet Package for Perl - -Before you build MXNet for Perl from source code, you must complete [building the shared library](#build-the-shared-library). After you build the shared library, run the following command from the MXNet source root directory to build the MXNet Perl package: - -```bash - sudo apt-get install libmouse-perl pdl cpanminus swig libgraphviz-perl - cpanm -q -L "${HOME}/perl5" Function::Parameters Hash::Ordered PDL::CCS - - MXNET_HOME=${PWD} - export LD_LIBRARY_PATH=${MXNET_HOME}/lib - export PERL5LIB=${HOME}/perl5/lib/perl5 - - cd ${MXNET_HOME}/perl-package/AI-MXNetCAPI/ - perl Makefile.PL INSTALL_BASE=${HOME}/perl5 - make install - - cd ${MXNET_HOME}/perl-package/AI-NNVMCAPI/ - perl Makefile.PL INSTALL_BASE=${HOME}/perl5 - make install +Further information is in the [MXNet-Scala Setup Instructions](scala_setup.html). - cd ${MXNET_HOME}/perl-package/AI-MXNet/ - perl Makefile.PL INSTALL_BASE=${HOME}/perl5 - make install -``` +If you use IntelliJ or a similar IDE, you may want to follow the [MXNet-Scala on IntelliJ tutorial](../tutorials/scala/mxnet_scala_on_intellij.html) instead.
+ ## Contributions You are more than welcome to contribute easy installation scripts for other operating systems and programming languages. See the [community contributions page](../community/contribute.html) for further information. diff --git a/docs/install/validate_mxnet.md b/docs/install/validate_mxnet.md new file mode 100644 index 000000000000..a4cf5446f606 --- /dev/null +++ b/docs/install/validate_mxnet.md @@ -0,0 +1,185 @@ +# Validate Your MXNet Installation + +- [Python](#python) +- [Python with GPU](#python-with-gpu) +- [Verify GPU training](#verify-gpu-training) +- [Virtualenv](#virtualenv) +- [Docker with CPU](#docker-with-cpu) +- [Docker with GPU](#docker-with-gpu) +- [Cloud](#cloud) +- [C++](#alternative-language-bindings) +- [Clojure](#clojure) +- [Julia](#julia) +- [Perl](#perl) +- [R](#r) +- [Scala](#scala) + + +## Python + +Start the python terminal. + +```bash +$ python +``` + +Run a short *MXNet* python program to create a 2X3 matrix of ones, multiply each element in the matrix by 2 followed by adding 1. We expect the output to be a 2X3 matrix with all elements being 3. + +```python +>>> import mxnet as mx +>>> a = mx.nd.ones((2, 3)) +>>> b = a * 2 + 1 +>>> b.asnumpy() +array([[ 3., 3., 3.], + [ 3., 3., 3.]], dtype=float32) +``` + + +## Python with GPU + +This is similar to the previous example, but this time we use *mx.gpu()*, to set *MXNet* context to be GPUs. + +```python +>>> import mxnet as mx +>>> a = mx.nd.ones((2, 3), mx.gpu()) +>>> b = a * 2 + 1 +>>> b.asnumpy() +array([[ 3., 3., 3.], + [ 3., 3., 3.]], dtype=float32) +``` + + +## Verify GPU Training + +From the MXNet root directory run: `python example/image-classification/train_mnist.py --network lenet --gpus 0` to test GPU training. + + +## Virtualenv + +Activate the virtualenv environment created for *MXNet*. + +```bash +$ source ~/mxnet/bin/activate +``` + +After activating the environment, you should see the prompt as below. + +```bash +(mxnet)$ +``` + +Start the python terminal. + +```bash +$ python +``` + +Run the previous Python example. + + +## Docker with CPU + +Launch a Docker container with `mxnet/python` image and run example *MXNet* python program on the terminal. + +```bash +$ docker run -it mxnet/python bash # Use sudo if you skip Step 2 in the installation instruction + +# Start a python terminal +root@4919c4f58cac:/# python +``` + +Run the previous Python example. + + +## Docker with GPU + +Launch a NVIDIA Docker container with `mxnet/python:gpu` image and run example *MXNet* python program on the terminal. + +```bash +$ nvidia-docker run -it mxnet/python:gpu bash # Use sudo if you skip Step 2 in the installation instruction + +# Start a python terminal +root@4919c4f58cac:/# python +``` + +Run the previous Python example and run the previous GPU examples. + + +## Cloud + +Login to the cloud instance you launched, with pre-installed *MXNet*, following the guide by corresponding cloud provider. + +Start the python terminal. + +```bash +$ python +``` + +Run the previous Python example, and for GPU instances run the previous GPU example. + + +## Alternative Language Bindings + +### C++ + +Please contribute an example! + + +### Clojure + +Please contribute an example! + + +### Julia + +Please contribute an example! + + +### Perl + +Please contribute an example! + + +### R + +Run a short *MXNet* R program to create a 2X3 matrix of ones, multiply each element in the matrix by 2 followed by adding 1. We expect the output to be a 2X3 matrix with all elements being 3. 
+ +```r +library(mxnet) +a <- mx.nd.ones(c(2,3), ctx = mx.cpu()) +b <- a * 2 + 1 +b +``` + +You should see the following output: + +```r +[,1] [,2] [,3] +[1,] 3 3 3 +[2,] 3 3 3 +``` + + +#### R with GPU + +This is similar to the previous example, but this time we use *mx.gpu()*, to set *MXNet* context to be GPUs. + +```r +library(mxnet) +a <- mx.nd.ones(c(2,3), ctx = mx.gpu()) +b <- a * 2 + 1 +b +``` + +You should see the following output: + +```r +[,1] [,2] [,3] +[1,] 3 3 3 +[2,] 3 3 3 +``` + + +### Scala + +Run the MXNet-Scala demo project to validate your Maven package installation. diff --git a/docs/install/windows_setup.md b/docs/install/windows_setup.md index 40ddeb8182d8..99ce7f63e850 100755 --- a/docs/install/windows_setup.md +++ b/docs/install/windows_setup.md @@ -91,7 +91,7 @@ Done! We have installed MXNet with Python interface. Run below commands to verif ``` We actually did a small tensor computation using MXNet! You are all set with MXNet on your Windows machine. -## Install MXNet Package for R +## Install the MXNet Package for R MXNet for R is available for both CPUs and GPUs. ### Installing MXNet on a Computer with a CPU Processor @@ -151,8 +151,8 @@ These dlls can be found in `prebuildbase_win10_x64_vc14/3rdparty`, `mxnet_x64_vc ├── dmlc ├── mxnet ├── mshadow - └── nnvm - + └── nnvm + ``` 6. Make sure that R executable is added to your ```PATH``` in the environment variables. Running the ```where R``` command at the command prompt should return the location. 7. Also make sure that Rtools is installed and the executable is added to your ```PATH``` in the environment variables. @@ -200,7 +200,7 @@ To install MXNet on a computer with a GPU processor, choose from two options: * Build the library from source code However, a few dependencies remain for both options. You will need the following: -* Install [Nvidia-drivers](http://www.nvidia.com/Download/index.aspx?lang=en-us) if not installed. Latest driver based on your system configuration is recommended. +* Install [Nvidia-drivers](http://www.nvidia.com/Download/index.aspx?lang=en-us) if not installed. Latest driver based on your system configuration is recommended. * Install [Microsoft Visual Studio](https://visualstudio.microsoft.com/downloads/) (VS2015 or VS2017 is required by CUDA) @@ -224,7 +224,7 @@ For GPU package: ``` Change cu92 to cu80, cu90 or cu91 based on your CUDA toolkit version. Currently, MXNet supports these versions of CUDA. #### Building MXNet from Source Code(GPU) -After you have installed above software, continue with the following steps to build MXNet-R: +After you have installed above software, continue with the following steps to build MXNet-R: 1. Clone the MXNet github repo. ```sh @@ -261,8 +261,8 @@ These dlls can be found in `prebuildbase_win10_x64_vc14/3rdparty`, `mxnet_x64_vc ├── dmlc ├── mxnet ├── mshadow - └── nnvm - + └── nnvm + ``` 6. Make sure that R executable is added to your ```PATH``` in the environment variables. Running the ```where R``` command at the command prompt should return the location. 7. Also make sure that Rtools is installed and the executable is added to your ```PATH``` in the environment variables. 
From 28cb75af38c9b6ef4d34b283535eea04714525d3 Mon Sep 17 00:00:00 2001 From: Kellen Sunderland Date: Thu, 6 Sep 2018 00:46:31 +0200 Subject: [PATCH 103/160] [MXNET-908] Enable minimal OSX Travis build (#12462) --- .travis.yml | 107 ++------- .../travis/install.sh | 16 +- tests/travis/is_core_changed.sh | 60 ----- tests/travis/r_vignettes.R | 21 -- tests/travis/run_test.sh | 210 ------------------ tests/travis/setup.sh | 59 ----- 6 files changed, 25 insertions(+), 448 deletions(-) rename tests/travis/travis_after_failure.sh => ci/travis/install.sh (70%) mode change 100755 => 100644 delete mode 100755 tests/travis/is_core_changed.sh delete mode 100644 tests/travis/r_vignettes.R delete mode 100755 tests/travis/run_test.sh delete mode 100755 tests/travis/setup.sh diff --git a/.travis.yml b/.travis.yml index ca5d03b5008d..f61bd86673dc 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,108 +1,31 @@ -sudo: false +sudo: true language: cpp +cache: ccache + os: - # - linux - osx -osx_image: xcode8 - -env: - # code analysis - # - TASK=lint - # build mxnet.so with CUDA - # - TASK=build - # run tests/cpp - - TASK=cpp_test - # run tests/python - - TASK=python_test - - TASK=r_test - # - TASK=julia JULIA_VER=0.4 - # - TASK=scala_test - - # TODO, R test, distributed test, clang, more g++ versions +osx_image: xcode9.4 matrix: include: - - # os: linux - # dist: trusty - # env: TASK=perl_test - os: osx - ## sudo is required because - ## prexexisting packages conflict - ## with new ones. - ## would be nice to have macports - ## on travis osx, it has all needed perl packages - sudo: required - env: TASK=perl_test -# env: TASK=julia JULIA_VER=0.4 -# - os: linux -# env: TASK=build -# - os: linux -# env: TASK=cpp_test -# - os: linux -# env: TASK=python_test -# - os: linux -# env: TASK=r_test -# - os: linux -# env: TASK=scala_test - -# dependent apt packages -addons: - apt: - sources: - - ubuntu-toolchain-r-test - packages: - - doxygen - - wget - - git - - libcurl4-openssl-dev - - unzip - - libatlas-dev - - libopencv-dev - - gcc-4.8 - - g++-4.8 - - python-numpy - - python-nose - - python3-numpy - - python3-dev - - python3-nose - - python-h5py - - python3-h5py - - graphviz - - libmouse-perl - - pdl - - cpanminus - - swig - - libgraphviz-perl before_install: - - export NVCC_PREFIX=${HOME} - - source dmlc-core/scripts/travis/travis_setup_env.sh - export PYTHONPATH=${PYTHONPATH}:${PWD}/python - - export MAVEN_SKIP_RC=true - - export MAVEN_OPTS="-Xmx512m -XX:MaxPermSize=256m -XX:-UseGCOverheadLimit -XX:+CMSClassUnloadingEnabled -XX:+UseConcMarkSweepGC" install: - - source tests/travis/setup.sh - + - brew install ccache + - export PATH="/usr/local/opt/ccache/libexec:$PATH" + - source ci/travis/install.sh + +# We build with 2 concurrent jobs to match the number of cores present on MacOS virutal machines. +# nproc does not report the correct number of cores reliably in Travis, so using nproc is not +# recommended. 
+# https://docs.travis-ci.com/user/reference/overview/ script: - - tests/travis/run_test.sh - -cache: - directories: - - ${HOME}/.cache/usr - -before_cache: - - dmlc-core/scripts/travis/travis_before_cache.sh - -after_failure: - - tests/travis/travis_after_failure.sh - -notifications: -# Emails are sent to the committer's git-configured email address by default, - email: - on_success: change - on_failure: always - #slack: dmlc:NmroCzntCiWOuxUZpii40USd + - export MXNET_STORAGE_FALLBACK_LOG_VERBOSE=0 + - mv make/osx.mk config.mk + - make -j 2 diff --git a/tests/travis/travis_after_failure.sh b/ci/travis/install.sh old mode 100755 new mode 100644 similarity index 70% rename from tests/travis/travis_after_failure.sh rename to ci/travis/install.sh index 50754c9546cd..d04dda7e87f3 --- a/tests/travis/travis_after_failure.sh +++ b/ci/travis/install.sh @@ -17,10 +17,14 @@ # specific language governing permissions and limitations # under the License. - -if [ ${TASK} == "r_test" ]; then - echo "Print the install log..." - cat mxnet.Rcheck/*.out - echo "Print the check log..." - cat mxnet.Rcheck/*.log +if [ ${TRAVIS_OS_NAME} == "osx" ]; then + brew update + brew install opencv + brew install python3 + brew install fftw + brew install libpng + brew install ImageMagick + brew install swig + python -m pip install --user nose numpy cython scipy requests + python3 -m pip install --user nose numpy cython scipy requests fi diff --git a/tests/travis/is_core_changed.sh b/tests/travis/is_core_changed.sh deleted file mode 100755 index 7b9eb6123847..000000000000 --- a/tests/travis/is_core_changed.sh +++ /dev/null @@ -1,60 +0,0 @@ -#!/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - - -# this is a util script to test whether the "core" of -# mxnet has changed. 
Please modify the regex patterns here -# to ensure the components are covered if you add new "core" -# components to mxnet - -# temporarily disable this b/c the OS X tests are failing mysteriously -exit 0 - -# DEBUG -echo "Files changed in this PR includes:" -echo "**********************************" -git diff --name-only HEAD^ -echo "**********************************" - -# we ignore examples, and docs -core_patterns=( - '^dmlc-core' - '^matlab' - '^plugin' - '^python' - '^src' - '^tools' - '^R-package' - '^amalgamation' - '^include' - '^mshadow' - '^ps-lite' - '^scala-package' - '^tests' -) - -for pat in ${core_patterns[@]}; do - if git diff --name-only HEAD^ | grep "$pat" - then - exit - fi -done - -echo "I think we are good to skip this travis ci run now" -exit 1 # means nothing has changed diff --git a/tests/travis/r_vignettes.R b/tests/travis/r_vignettes.R deleted file mode 100644 index 1b03b8bba4ec..000000000000 --- a/tests/travis/r_vignettes.R +++ /dev/null @@ -1,21 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -fnames <- list.files("R-package/vignettes/", pattern="*.Rmd") -sapply(fnames, function(x){ - knitr::purl(paste0("R-package/vignettes/", x)) - }) \ No newline at end of file diff --git a/tests/travis/run_test.sh b/tests/travis/run_test.sh deleted file mode 100755 index fd23f0e82b24..000000000000 --- a/tests/travis/run_test.sh +++ /dev/null @@ -1,210 +0,0 @@ -#!/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - - -if ! tests/travis/is_core_changed.sh -then - exit 0 -fi - -if [ ${TASK} == "lint" ]; then - make lint || exit -1 - echo "Check documentations of c++ code..." 
- make doc 2>log.txt - (cat log.txt| grep -v ENABLE_PREPROCESSING |grep -v "unsupported tag") > logclean.txt - echo "---------Error Log----------" - cat logclean.txt - echo "----------------------------" - (cat logclean.txt|grep warning) && exit -1 - (cat logclean.txt|grep error) && exit -1 - exit 0 -fi - -cp make/config.mk config.mk - -if [[ ${TRAVIS_OS_NAME} == "osx" ]]; then - echo "USE_BLAS=apple" >> config.mk - echo "USE_OPENMP=0" >> config.mk -else - # use g++-4.8 for linux - if [[ ${CXX} == "g++" ]]; then - export CXX=g++-4.8 - fi - echo "USE_BLAS=blas" >> config.mk -fi -echo "CXX=${CXX}" >>config.mk -echo "USE_PROFILER=1" >> config.mk - -if [ ${TASK} == "build" ]; then - if [ ${TRAVIS_OS_NAME} == "linux" ]; then - echo "USE_CUDA=1" >> config.mk - ./dmlc-core/scripts/setup_nvcc.sh $NVCC_PREFIX - fi - make all - exit $? -fi - -if [ ${TASK} == "cpp_test" ]; then - make -f dmlc-core/scripts/packages.mk gtest - echo "GTEST_PATH="${CACHE_PREFIX} >> config.mk - make test || exit -1 - export MXNET_ENGINE_INFO=true - ./build/tests/cpp/mxnet_test - exit 0 -fi - -if [ ${TASK} == "r_test" ]; then - make all || exit -1 - # use cached dir for storing data - rm -rf ${PWD}/data - mkdir -p ${CACHE_PREFIX}/data - ln -s ${CACHE_PREFIX}/data ${PWD}/data - - set -e - export _R_CHECK_TIMINGS_=0 - - if [[ ${TRAVIS_OS_NAME} == "osx" ]]; then - wget https://cran.rstudio.com/bin/macosx/R-latest.pkg -O /tmp/R-latest.pkg - sudo installer -pkg "/tmp/R-latest.pkg" -target / - Rscript -e "install.packages('devtools', repo = 'https://cran.rstudio.com')" - fi - - cd R-package - Rscript -e "library(devtools); library(methods); options(repos=c(CRAN='https://cran.rstudio.com')); install_deps(dependencies = TRUE)" - cd .. - - make rpkg -# R CMD check --no-examples --no-manual --no-vignettes --no-build-vignettes mxnet_*.tar.gz - R CMD INSTALL mxnet_*.tar.gz - - Rscript tests/travis/r_vignettes.R - - wget http://data.mxnet.io/mxnet/data/Inception.zip - unzip Inception.zip && rm -rf Inception.zip - wget http://data.mxnet.io/mxnet/data/mnist.zip - unzip mnist.zip && rm -rf mnist.zip - - cat CallbackFunctionTutorial.R \ - fiveMinutesNeuralNetwork.R \ - mnistCompetition.R \ - ndarrayAndSymbolTutorial.R > r_test.R - - Rscript r_test.R || exit -1 - - exit 0 -fi - -if [ ${TASK} == "python_test" ]; then - make all || exit -1 - # use cached dir for storing data - rm -rf ${PWD}/data - mkdir -p ${PWD}/data - - if [ ${TRAVIS_OS_NAME} == "osx" ]; then - python -m nose -v tests/python/unittest || exit -1 - python3 -m nose -v tests/python/unittest || exit -1 - # make cython3 - # cython tests - # export MXNET_ENFORCE_CYTHON=1 - # python3 -m nose tests/python/unittest || exit -1 - python3 -m nose -v tests/python/train || exit -1 - python -m nose -v tests/python/doctest || exit -1 - python3 -m nose -v tests/python/doctest || exit -1 - else - nosetests -v tests/python/unittest || exit -1 - nosetests3 -v tests/python/unittest || exit -1 - nosetests3 -v tests/python/train || exit -1 - nosetests -v tests/python/doctest || exit -1 - nosetests3 -v tests/python/doctest || exit -1 - fi - exit 0 -fi - -if [ ${TASK} == "julia" ]; then - make all || exit -1 - # use cached dir for storing data - rm -rf ${PWD}/data - mkdir -p ${PWD}/data - - export MXNET_HOME="${PWD}" - julia -e 'Pkg.clone("MXNet"); Pkg.checkout("MXNet"); Pkg.build("MXNet"); Pkg.test("MXNet")' || exit -1 - exit 0 -fi - -if [ ${TASK} == "scala_test" ]; then - if [ ${TRAVIS_OS_NAME} == "osx" ]; then - LIB_GOMP_PATH=`find /usr/local/lib -name libgomp.dylib | grep -v i386 | head -n1` - 
ln -sf $LIB_GOMP_PATH /usr/local/lib/libgomp.dylib - fi - make all || exit -1 - # use cached dir for storing data - rm -rf ${PWD}/data - mkdir -p ${PWD}/data - - export JAVA_HOME=$(/usr/libexec/java_home) - - make scalapkg || exit -1 - make scalatest || exit -1 - - exit 0 -fi - -if [ ${TASK} == "perl_test" ]; then - make all || exit -1 - - # use cached dir for storing data - MXNET_HOME=${PWD} - rm -rf ${MXNET_HOME}/perl-package/AI-MXNet/data - mkdir -p ${CACHE_PREFIX}/data - ln -s ${CACHE_PREFIX}/data ${MXNET_HOME}/perl-package/AI-MXNet/data - - export LD_LIBRARY_PATH=${MXNET_HOME}/lib - export PERL5LIB=${HOME}/perl5/lib/perl5 - - cd ${MXNET_HOME}/perl-package/AI-MXNetCAPI/ - perl Makefile.PL INSTALL_BASE=${HOME}/perl5 - make || exit -1 - if [ ${TRAVIS_OS_NAME} == "osx" ]; then - install_name_tool -change lib/libmxnet.so \ - ${MXNET_HOME}/lib/libmxnet.so \ - blib/arch/auto/AI/MXNetCAPI/MXNetCAPI.bundle - fi - make install || exit -1 - - cd ${MXNET_HOME}/perl-package/AI-NNVMCAPI/ - perl Makefile.PL INSTALL_BASE=${HOME}/perl5 - make || exit -1 - if [ ${TRAVIS_OS_NAME} == "osx" ]; then - install_name_tool -change lib/libmxnet.so \ - ${MXNET_HOME}/lib/libmxnet.so \ - blib/arch/auto/AI/NNVMCAPI/NNVMCAPI.bundle - fi - make install || exit -1 - - cd ${MXNET_HOME}/perl-package/AI-MXNet/ - perl Makefile.PL - make test || exit -1 - exit 0 -fi - -if [ ${TASK} == "cpp_package_test" ]; then - MXNET_HOME=${PWD} - make travis -C ${MXNET_HOME}/cpp-package/example - exit 0 -fi diff --git a/tests/travis/setup.sh b/tests/travis/setup.sh deleted file mode 100755 index eec6c23d7158..000000000000 --- a/tests/travis/setup.sh +++ /dev/null @@ -1,59 +0,0 @@ -#!/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - - -if ! 
tests/travis/is_core_changed.sh -then - exit 0 -fi - -if [ ${TRAVIS_OS_NAME} == "osx" ]; then - brew update - brew tap homebrew/science - brew install opencv - brew install python3 - brew install fftw - brew install libpng - brew install ImageMagick - brew install swig - if [ ${TASK} == "python_test" ]; then - python -m pip install --user nose numpy cython scipy - python3 -m pip install --user nose numpy cython scipy - fi -fi - -if [ ${TASK} == "lint" ]; then - pip install --user cpplint 'pylint==1.4.4' 'astroid==1.3.6' -fi - -if [ ${TASK} == "julia" ]; then - mkdir -p ~/julia - curl -s -L --retry 7 "https://s3.amazonaws.com/julialang/bin/linux/x64/${JULIA_VER}/julia-${JULIA_VER}-latest-linux-x86_64.tar.gz" | tar -C ~/julia -x -z --strip-components=1 -f - - export PATH="${PATH}:${HOME}/julia/bin" - julia -e 'versioninfo()' -fi - -if [ ${TASK} == "perl_test" ]; then - if [ ${TRAVIS_OS_NAME} == "linux" ]; then - cpanm -q -L "${HOME}/perl5" Function::Parameters Hash::Ordered PDL::CCS - else - sudo sh -c 'curl -L https://cpanmin.us | perl - App::cpanminus' - sudo cpanm -q -n PDL Mouse Function::Parameters Hash::Ordered PDL::CCS - fi -fi From 285adda7c7e1edb2ffd663b2107c4897cdaa4b48 Mon Sep 17 00:00:00 2001 From: Kellen Sunderland Date: Fri, 7 Sep 2018 18:30:37 +0200 Subject: [PATCH 104/160] [MXNET-909] Disable tvm_bridge test (#12476) --- tests/python/gpu/test_tvm_bridge.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/python/gpu/test_tvm_bridge.py b/tests/python/gpu/test_tvm_bridge.py index 4b1105a0585c..c3cf2ddba299 100644 --- a/tests/python/gpu/test_tvm_bridge.py +++ b/tests/python/gpu/test_tvm_bridge.py @@ -19,7 +19,9 @@ import logging import mxnet as mx import numpy as np +import unittest +@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/12473") def test_tvm_bridge(): # only enable test if TVM is available try: From 2bcfc08019d82d725d9b3aa0dc7794c0d63f8694 Mon Sep 17 00:00:00 2001 From: Tianqi Chen Date: Fri, 7 Sep 2018 13:17:34 -0800 Subject: [PATCH 105/160] Fix tvm dependency for docker (#12479) --- ci/docker/install/ubuntu_tvm.sh | 14 +++++++------- tests/python/gpu/test_tvm_bridge.py | 1 - 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/ci/docker/install/ubuntu_tvm.sh b/ci/docker/install/ubuntu_tvm.sh index 4f5cb4251ad1..2ee4e534ed93 100755 --- a/ci/docker/install/ubuntu_tvm.sh +++ b/ci/docker/install/ubuntu_tvm.sh @@ -25,14 +25,14 @@ cd tvm # This is a stable tag that support MXNet TVM bridge. # We use this since support for mxnet bridge just checked # into master and there is yet a version tag -git checkout 30eaf463e34d7c301357c31a010945d11df16537 +git checkout v0.4 + +cp cmake/config.cmake . 
+echo set\(USE_CUDA /usr/local/cuda\) >> config.cmake +echo set\(USE_LLVM llvm-config-5.0\) >> config.cmake +echo set\(USE_RPC ON\) >> config.cmake +echo set\(USE_GRAPH_RUNTIME ON\) >> config.cmake -cp make/config.mk -echo USE_CUDA=1 >> config.mk -echo LLVM_CONFIG=llvm-config-5.0 >> config.mk -echo USE_RPC=1 >> config.mk -echo USE_GRAPH_RUNTIME=1 >> config.mk -echo CUDA_PATH=/usr/local/cuda >> config.mk make -j$(nproc) cd python diff --git a/tests/python/gpu/test_tvm_bridge.py b/tests/python/gpu/test_tvm_bridge.py index c3cf2ddba299..5c87536bdbae 100644 --- a/tests/python/gpu/test_tvm_bridge.py +++ b/tests/python/gpu/test_tvm_bridge.py @@ -21,7 +21,6 @@ import numpy as np import unittest -@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/12473") def test_tvm_bridge(): # only enable test if TVM is available try: From 445967e6c316a91876efb60b6a5ef52ec1837d73 Mon Sep 17 00:00:00 2001 From: Luobao Date: Sat, 8 Sep 2018 09:02:20 +0800 Subject: [PATCH 106/160] Fix flaky test: test_mkldnn.test_activation #12377 (#12418) * test_activation_rec_eps * enable case --- tests/python/mkl/test_mkldnn.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/python/mkl/test_mkldnn.py b/tests/python/mkl/test_mkldnn.py index e597d0f5fc58..17fc29c81114 100644 --- a/tests/python/mkl/test_mkldnn.py +++ b/tests/python/mkl/test_mkldnn.py @@ -281,7 +281,6 @@ def check_pooling_training(stype): check_pooling_training(stype) -@unittest.skip("Flaky test: https://github.com/apache/incubator-mxnet/issues/12377") @with_seed() def test_activation(): def check_activation_training(stype): @@ -292,7 +291,7 @@ def check_activation_training(stype): in_location = [mx.nd.array(data_tmp).tostype(stype)] test = mx.symbol.Activation(data, act_type="relu") - check_numeric_gradient(test, in_location, numeric_eps=1e-2, rtol=0.16, atol=1e-4) + check_numeric_gradient(test, in_location, numeric_eps=1e-6, rtol=0.16, atol=1e-4) stypes = ['row_sparse', 'default'] for stype in stypes: From 4eb7626fcc8c13edbc3fed3ce62dc1ec08a2a20a Mon Sep 17 00:00:00 2001 From: Lai Wei Date: Sat, 8 Sep 2018 12:29:58 -0700 Subject: [PATCH 107/160] allow foreach on input with 0 length (#12471) * allow foreach on input with 0 length * add test foreach with unknown dim --- src/operator/control_flow.cc | 1 - tests/python/unittest/test_contrib_control_flow.py | 9 +++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/src/operator/control_flow.cc b/src/operator/control_flow.cc index d6b6703ddd58..ba7f5c0ad8b2 100644 --- a/src/operator/control_flow.cc +++ b/src/operator/control_flow.cc @@ -314,7 +314,6 @@ static bool ForeachShape(const nnvm::NodeAttrs& attrs, // For the shape of output data. size_t len = in_shape->at(0)[0]; - CHECK_GT(len, 0); for (int i = 0; i < params.num_out_data; i++) { // If the output shape isn't inferred, we don't need to propogate the info. 
const auto& g_out_shape = subg_out_shape[i]; diff --git a/tests/python/unittest/test_contrib_control_flow.py b/tests/python/unittest/test_contrib_control_flow.py index 1c23c9161977..dd5a4d6d3152 100644 --- a/tests/python/unittest/test_contrib_control_flow.py +++ b/tests/python/unittest/test_contrib_control_flow.py @@ -2146,6 +2146,15 @@ def func3(data): for i in range(len(out1)): assert_almost_equal(out1[i].asnumpy(), out2[i].asnumpy(), rtol=0.001, atol=0.0001) +def test_foreach_with_unkown_dim(): + # MXNet supports using 0 as placeholder for unknown dimensions in shape + step = lambda data, states: (data + states[0], [states[0] * 2]) + # input shape with NCHW format and N is unknown + data = mx.sym.var('data', shape=(0, 3, 32, 32)) + states = [mx.sym.var('state')] + outs, states = mx.sym.contrib.foreach(step, data, states) + _, output_shape, _ = outs.infer_shape_partial() + assert_allclose((0, 3, 32, 32), output_shape[0]) if __name__ == '__main__': import nose From 6043ef0b921898aa64b44314b0b3a2353060d8ac Mon Sep 17 00:00:00 2001 From: cclauss Date: Sat, 8 Sep 2018 21:56:43 +0200 Subject: [PATCH 108/160] [MXNET-696][PYTHON][UNDEFINED NAME] import logging in ci/util.py (#12488) Fixes #12406 @larroy @marcoabreu @szha [flake8](http://flake8.pycqa.org) testing of https://github.com/apache/incubator-mxnet on Python 3.7.0 $ __flake8 . --count --select=E901,E999,F821,F822,F823 --show-source --statistics__ ``` ./ci/util.py:73:21: F821 undefined name 'logging' logging.warning("Exception: %s, Retrying in %d seconds...", str(e), mdelay) ^ 1 F821 undefined name 'logging' 1 ``` --- ci/util.py | 1 + 1 file changed, 1 insertion(+) diff --git a/ci/util.py b/ci/util.py index 98605bedf765..4d68b57a3af4 100644 --- a/ci/util.py +++ b/ci/util.py @@ -17,6 +17,7 @@ import os import contextlib +import logging import requests def get_mxnet_root() -> str: From d410de1d28d719902409a77a841d0872c74423ef Mon Sep 17 00:00:00 2001 From: Kellen Sunderland Date: Mon, 10 Sep 2018 14:59:36 +0200 Subject: [PATCH 109/160] [MXNET-703] Static linking for libprotobuf with TensorRT (#12475) --- ci/docker/install/tensorrt.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ci/docker/install/tensorrt.sh b/ci/docker/install/tensorrt.sh index a6258d94f62f..61e73ef9a62f 100755 --- a/ci/docker/install/tensorrt.sh +++ b/ci/docker/install/tensorrt.sh @@ -30,9 +30,12 @@ apt-get install -y automake libtool git clone --recursive -b 3.5.1.1 https://github.com/google/protobuf.git cd protobuf ./autogen.sh -./configure +./configure --disable-shared CXXFLAGS=-fPIC make -j$(nproc) make install +rm -rf /usr/local/lib/libprotobuf-lite.so* +rm -rf /usr/local/lib/libprotobuf.so* +rm -rf /usr/local/lib/libprotoc.so* ldconfig popd From 690cf7ee5d5df465621531d1045874b9b6d5f00c Mon Sep 17 00:00:00 2001 From: Aaron Markham Date: Mon, 10 Sep 2018 11:37:30 -0700 Subject: [PATCH 110/160] fix render issue on < and > (#12482) --- docs/build_version_doc/AddVersion.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/build_version_doc/AddVersion.py b/docs/build_version_doc/AddVersion.py index b625a2e1b6a3..9c5ca9d70406 100755 --- a/docs/build_version_doc/AddVersion.py +++ b/docs/build_version_doc/AddVersion.py @@ -74,7 +74,8 @@ version_tag_mobile.extract() navbar.append(version_str) navbar_mobile.append(version_str_mobile) - outstr = str(content).replace('<', '<').replace('>', '>') + # The following causes rendering errors in code blocks; refer to #12168 + #outstr = str(content).replace('<', '<').replace('>', '>') # 
Fix link if args.current_version == tag_list[0]: print("Fixing " + os.path.join(path, name)) From 3406845534451f71e33e9a5775a42fc049a9ea2b Mon Sep 17 00:00:00 2001 From: StephanieYuan Date: Mon, 10 Sep 2018 14:44:15 -0400 Subject: [PATCH 111/160] Adding python SVRGModule for performing SVRG Optimization Logic (#12376) Implemented a python SVRGModule for performing SVRG Optimization Logic. This version supports single machine SVRG with single cpu, gpu and multi-gpus. (#12376) --- docs/api/python/contrib/svrg_optimization.md | 86 +++ docs/api/python/index.md | 3 +- docs/api/python/module/module.md | 2 +- example/svrg_module/README.md | 33 + .../api_usage_example/example_api_train.py | 124 ++++ .../api_usage_example/example_inference.py | 106 ++++ .../benchmarks/svrg_benchmark.ipynb | 379 ++++++++++++ .../svrg_module/linear_regression/common.py | 117 ++++ .../linear_regression/data_reader.py | 45 ++ .../svrg_module/linear_regression/train.py | 45 ++ .../contrib/svrg_optimization/__init__.py | 22 + .../contrib/svrg_optimization/svrg_module.py | 578 ++++++++++++++++++ .../svrg_optimization/svrg_optimizer.py | 171 ++++++ .../unittest/test_contrib_svrg_module.py | 307 ++++++++++ .../unittest/test_contrib_svrg_optimizer.py | 101 +++ 15 files changed, 2117 insertions(+), 2 deletions(-) create mode 100644 docs/api/python/contrib/svrg_optimization.md create mode 100644 example/svrg_module/README.md create mode 100644 example/svrg_module/api_usage_example/example_api_train.py create mode 100644 example/svrg_module/api_usage_example/example_inference.py create mode 100644 example/svrg_module/benchmarks/svrg_benchmark.ipynb create mode 100644 example/svrg_module/linear_regression/common.py create mode 100644 example/svrg_module/linear_regression/data_reader.py create mode 100644 example/svrg_module/linear_regression/train.py create mode 100644 python/mxnet/contrib/svrg_optimization/__init__.py create mode 100644 python/mxnet/contrib/svrg_optimization/svrg_module.py create mode 100644 python/mxnet/contrib/svrg_optimization/svrg_optimizer.py create mode 100644 tests/python/unittest/test_contrib_svrg_module.py create mode 100644 tests/python/unittest/test_contrib_svrg_optimizer.py diff --git a/docs/api/python/contrib/svrg_optimization.md b/docs/api/python/contrib/svrg_optimization.md new file mode 100644 index 000000000000..e6e1c3e23ee3 --- /dev/null +++ b/docs/api/python/contrib/svrg_optimization.md @@ -0,0 +1,86 @@ +# SVRG Optimization in Python Module API + +## Overview +SVRG which stands for Stochastic Variance Reduced Gradients, is an optimization technique that was first introduced in +paper _Accelerating Stochastic Gradient Descent using Predictive Variance Reduction_ in 2013. It is complement to SGD +(Stochastic Gradient Descent), which is known for large scale optimization but suffers from slow convergence +asymptotically due to its inherent variance. SGD approximates the full gradients using a small batch of data or +a single data sample, which will introduce variance and thus requires to start with a small learning rate in order to +ensure convergence. SVRG remedies the problem by keeping track of a version of estimated weights that close to the +optimal parameter values and maintaining an average of full gradients over a full pass of data. The average of full +gradients is calculated with respect to the weights from the last m-th epochs in the training. 
SVRG uses a different +update rule: gradients w.r.t current parameter values minus gradients w.r.t to parameters from the last m-th epochs +plus the average of full gradients over all data. + +Key Characteristics of SVRG: +* Employs explicit variance reduction by using a different update rule compared to SGD. +* Ability to use relatively large learning rate, which leads to faster convergence compared to SGD. +* Guarantees for fast convergence for smooth and strongly convex functions. + +SVRG optimization is implemented as a SVRGModule in `mxnet.contrib.svrg_optimization`, which is an extension of the +existing `mxnet.module.Module` APIs and encapsulates SVRG optimization logic within several new functions. SVRGModule +API changes compared to Module API to end users are minimal. + +In distributed training, each worker gets the same special weights from the last m-th epoch and calculates the full +gradients with respect to its own shard of data. The standard SVRG optimization requires building a global full +gradients, which is calculated by aggregating the full gradients from each worker and averaging over the number of +workers. The workaround is to keep an additional set of keys in the KVStore that maps to full gradients. +The `_SVRGOptimizer` is designed to wrap two optimizers, an `_AssignmentOptimizer` which is used for full gradients +accumulation in the KVStore and a regular optimizer that performs actual update rule to the parameters. +The `_SVRGOptimizer` and `_AssignmentOptimizer` are designed to be used in `SVRGModule` only. + +```eval_rst +.. warning:: This package contains experimental APIs and may change in the near future. +``` + +This document lists the SVRGModule APIs in MXNet/Contrib package: + +```eval_rst +.. autosummary:: + :nosignatures: + + mxnet.contrib.svrg_optimization.svrg_module +``` + +### Intermediate Level API for SVRGModule + +The only extra step to use a SVRGModule compared to use a Module is to check if the current epoch should update the +full gradients over all data. Code snippets below demonstrate the suggested usage of SVRGModule using intermediate +level APIs. + +```python +>>> mod = SVRGModule(symbol=model, update_freq=2, data_names=['data'], label_names=['lin_reg_label']) +>>> mod.bind(data_shapes=di.provide_data, label_shapes=di.provide_label) +>>> mod.init_params() +>>> mod.init_optimizer(optimizer='sgd', optimizer_params=(('learning_rate', 0.01), ), kvstore='local') +>>> for epoch in range(num_epochs): +... if epoch % mod.update_freq == 0: +... mod.update_full_grads(di) +... di.reset() +... for batch in di: +... mod.forward_backward(data_batch=batch) +... mod.update() +``` + +### High Level API for SVRGModule + +The high level API usage of SVRGModule remains exactly the same as Module API. Code snippets below gives an example of +suggested usage of high level API. + +```python +>>> mod = SVRGModule(symbol=model, update_freq=2, data_names=['data'], label_names=['lin_reg_label']) +>>> mod.fit(di, num_epochs=100, optimizer='sgd', optimizer_params=(('learning_rate', 0.01), )) +``` + +## API reference + + + +```eval_rst + +.. automodule:: mxnet.contrib.svrg_optimization.svrg_module +.. 
autoclass:: mxnet.contrib.svrg_optimization.svrg_module.SVRGModule + :members: init_optimizer, bind, forward, backward, reshape, update, update_full_grads, fit, prepare + +``` + \ No newline at end of file diff --git a/docs/api/python/index.md b/docs/api/python/index.md index 42c4af9e46b5..15d1045a93e4 100644 --- a/docs/api/python/index.md +++ b/docs/api/python/index.md @@ -52,6 +52,7 @@ Code examples are placed throughout the API documentation and these can be run a contrib/contrib.md contrib/text.md contrib/onnx.md + contrib/svrg_optimization.md ``` ## Gluon API @@ -176,4 +177,4 @@ Code examples are placed throughout the API documentation and these can be run a :maxdepth: 1 symbol_in_pictures/symbol_in_pictures.md -``` +``` \ No newline at end of file diff --git a/docs/api/python/module/module.md b/docs/api/python/module/module.md index 86ed74db6c19..5a874ac6df02 100644 --- a/docs/api/python/module/module.md +++ b/docs/api/python/module/module.md @@ -207,4 +207,4 @@ additional functionality. We summarize them in this section. :members: ``` - + \ No newline at end of file diff --git a/example/svrg_module/README.md b/example/svrg_module/README.md new file mode 100644 index 000000000000..63e7ba2f2bfa --- /dev/null +++ b/example/svrg_module/README.md @@ -0,0 +1,33 @@ +## SVRGModule Example +SVRGModule is an extension to the Module API that implements SVRG optimization, which stands for Stochastic +Variance Reduced Gradient. SVRG is an optimization technique that complements SGD and has several key +properties: +* Employs explicit variance reduction by using a different update rule compared to SGD. +* Ability to use relatively large learning rate, which leads to faster convergence compared to SGD. +* Guarantees for fast convergence for smooth and strongly convex functions. + +#### API Usage Example +SVRGModule provides both high-level and intermediate-level APIs while minimizing the changes with Module API. +example_api_train.py: provides suggested usage of SVRGModule high-level and intermediate-level API. +example_inference.py: provides example usage of SVRGModule inference. + +#### Linear Regression +This example trains a linear regression model using SVRGModule on a real dataset, YearPredictionMSD. +Logs of the training results can be found in experiments.log which will automatically generated when running the +training script. + +##### Dataset +YearPredictionMSD: contains predictions of the release year of a song from audio features. It has over +400,000 samples with 90 features. Please uncomment data downloading script from data_reader.py to download the data. + +#### Benchmarks: +An initial set of benchmarks has been performed on YearPredictionDatasetMSD with linear regression model. A jupyter +notebook under `/benchmarks` demonstrates the training process and plots two graphs for benchmarking. + +* benchmark1: A lr_scheduler returns a new learning rate based on the number of updates that have been performed. + +* benchmark2: One drawback for SGD is that in order to converge faster, the learning rate has to decay to zero, +thus SGD needs to start with a small learning rate. The learning rate does not need to decay to zero for SVRG, +therefore we can use a relatively larger learning rate. SGD with learning rate of (0.001, 0.0025) and SVRG with +learning rate of (0.025) are benchmarked. Even though SVRG starts with a relatively large learning rate, it converges +much faster than SGD in both cases. 
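
For reference, the update rule that the SVRG docs and this README describe in words can be written out explicitly. This is the standard SVRG inner-loop step from the paper cited above, included here only as a sketch; the symbols (the snapshot weights from the last m-th epoch written as `\tilde{w}`, their full-gradient average as `\tilde{\mu}`, the step size as `\eta`) are illustrative notation, not names used in the implementation:

```latex
w_{t+1} = w_t - \eta \left( \nabla f_{i_t}(w_t) - \nabla f_{i_t}(\tilde{w}) + \tilde{\mu} \right),
\qquad
\tilde{\mu} = \frac{1}{n} \sum_{j=1}^{n} \nabla f_j(\tilde{w})
```

The bracketed term is an unbiased estimate of the full gradient whose variance shrinks as `w_t` and `\tilde{w}` approach the optimum, which is why SVRG can keep the relatively large, non-decaying learning rate used in the benchmarks above.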
diff --git a/example/svrg_module/api_usage_example/example_api_train.py b/example/svrg_module/api_usage_example/example_api_train.py new file mode 100644 index 000000000000..f6cd1b2e592c --- /dev/null +++ b/example/svrg_module/api_usage_example/example_api_train.py @@ -0,0 +1,124 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + + +import mxnet as mx +import numpy as np +from mxnet.contrib.svrg_optimization.svrg_module import SVRGModule + + +def test_svrg_intermediate_level_api(args): + """Demonstrates intermediate level SVRGModule API where the training process + need to be explicitly defined. KVstore is not explicitly created. + + Parameters + ---------- + args: args + Command line arguments + """ + num_epoch = args.epochs + batch_size = args.batch_size + update_freq = args.update_freq + + di, mod = create_network(batch_size, update_freq) + + mod.bind(data_shapes=di.provide_data, label_shapes=di.provide_label) + mod.init_params(initializer=mx.init.Uniform(0.01), allow_missing=False, force_init=False, allow_extra=False) + kv = mx.kv.create("local") + mod.init_optimizer(kvstore=kv, optimizer='sgd', optimizer_params=(('learning_rate', 0.025),)) + metrics = mx.metric.create("mse") + for e in range(num_epoch): + metrics.reset() + if e % mod.update_freq == 0: + mod.update_full_grads(di) + di.reset() + for batch in di: + mod.forward_backward(data_batch=batch) + mod.update() + mod.update_metric(metrics, batch.label) + mod.logger.info('Epoch[%d] Train cost=%f', e, metrics.get()[1]) + + +def test_svrg_high_level_api(args): + """Demonstrates suggested usage of high level SVRGModule API. KVStore is explicitly created. + + Parameters + ---------- + args: args + Command line arguments + """ + num_epoch = args.epochs + batch_size = args.batch_size + update_freq = args.update_freq + + di, mod = create_network(batch_size, update_freq) + mod.fit(di, eval_metric='mse', optimizer='sgd', optimizer_params=(('learning_rate', 0.025),), num_epoch=num_epoch, + kvstore='local') + + +def create_network(batch_size, update_freq): + """Create a linear regression network for performing SVRG optimization. 
+ Parameters + ---------- + batch_size: int + Size of data split + update_freq: int + Update Frequency for calculating full gradients + + Returns + ---------- + di: mx.io.NDArrayIter + Data iterator + update_freq: SVRGModule + An instance of SVRGModule for performing SVRG optimization + """ + import logging + head = '%(asctime)-15s %(message)s' + logging.basicConfig(level=logging.INFO, format=head) + + train_data = np.random.randint(1, 5, [1000, 2]) + weights = np.array([1.0, 2.0]) + train_label = train_data.dot(weights) + + di = mx.io.NDArrayIter(train_data, train_label, batch_size=batch_size, shuffle=True, label_name='lin_reg_label') + X = mx.sym.Variable('data') + Y = mx.symbol.Variable('lin_reg_label') + fully_connected_layer = mx.sym.FullyConnected(data=X, name='fc1', num_hidden=1) + lro = mx.sym.LinearRegressionOutput(data=fully_connected_layer, label=Y, name="lro") + + mod = SVRGModule( + symbol=lro, + data_names=['data'], + label_names=['lin_reg_label'], update_freq=update_freq, logger=logging + ) + + return di, mod + +# run as a script +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument('-e', dest='epochs', default=100, type=int) + parser.add_argument('-bs', dest='batch_size', default=32, type=int) + parser.add_argument('-f', dest="update_freq", default=2, type=int) + args = parser.parse_args() + + print("========================== Intermediate Level API ==========================") + test_svrg_intermediate_level_api(args) + print("========================== High Level API ==========================") + test_svrg_high_level_api(args) diff --git a/example/svrg_module/api_usage_example/example_inference.py b/example/svrg_module/api_usage_example/example_inference.py new file mode 100644 index 000000000000..312f9796074d --- /dev/null +++ b/example/svrg_module/api_usage_example/example_inference.py @@ -0,0 +1,106 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ + +import mxnet as mx +import numpy as np +import logging +from mxnet.contrib.svrg_optimization.svrg_module import SVRGModule + + +def test_svrg_inference(args): + epoch = args.epochs + batch_size = args.batch_size + update_freq = args.update_freq + + train_iter, val_iter, mod = create_network(batch_size, update_freq) + mod.fit(train_iter, eval_data=val_iter, eval_metric='mse', optimizer='sgd', + optimizer_params=(('learning_rate', 0.025),), + num_epoch=epoch) + + +def get_validation_score(args): + epoch = args.epochs + batch_size = args.batch_size + update_freq = args.update_freq + + train_iter, val_iter, mod = create_network(batch_size, update_freq) + mod.bind(data_shapes=train_iter.provide_data, label_shapes=train_iter.provide_label) + mod.init_params(initializer=mx.init.Uniform(0.01), allow_missing=False, force_init=False, allow_extra=False) + mod.init_optimizer(kvstore='local', optimizer='sgd', optimizer_params=(('learning_rate', 0.025),)) + metrics = mx.metric.create("mse") + for e in range(epoch): + metrics.reset() + if e % mod.update_freq == 0: + mod.update_full_grads(train_iter) + train_iter.reset() + for batch in train_iter: + mod.forward_backward(data_batch=batch) + mod.update() + mod.update_metric(metrics, batch.label) + + y = mod.predict(val_iter) + + # test-train data split, 20% test data out of 1000 data samples + assert y.shape == (200, 1) + score = mod.score(val_iter, ['mse']) + print("Training Loss on Validation Set is {}".format(score[0][1])) + + +def create_network(batch_size, update_freq): + """Create a linear regression network for performing SVRG optimization. + :return: an instance of mx.io.NDArrayIter + :return: an instance of mx.mod.svrgmodule for performing SVRG optimization + """ + head = '%(asctime)-15s %(message)s' + logging.basicConfig(level=logging.INFO, format=head) + data = np.random.randint(1, 5, [1000, 2]) + + #Test_Train data split + n_train = int(data.shape[0] * 0.8) + weights = np.array([1.0, 2.0]) + label = data.dot(weights) + + di = mx.io.NDArrayIter(data[:n_train, :], label[:n_train], batch_size=batch_size, shuffle=True, label_name='lin_reg_label') + val_iter = mx.io.NDArrayIter(data[n_train:, :], label[n_train:], batch_size=batch_size) + + X = mx.sym.Variable('data') + Y = mx.symbol.Variable('lin_reg_label') + fully_connected_layer = mx.sym.FullyConnected(data=X, name='fc1', num_hidden=1) + lro = mx.sym.LinearRegressionOutput(data=fully_connected_layer, label=Y, name="lro") + + mod = SVRGModule( + symbol=lro, + data_names=['data'], + label_names=['lin_reg_label'], update_freq=update_freq, logger=logging) + + return di, val_iter, mod + + +# run as a script +if __name__ == "__main__": + import argparse + parser = argparse.ArgumentParser() + parser.add_argument('-e', dest='epochs', default=100, type=int) + parser.add_argument('-bs', dest='batch_size', default=32, type=int) + parser.add_argument('-f', dest="update_freq", default=2, type=int) + args = parser.parse_args() + + print("========================== SVRG Module Inference ==========================") + test_svrg_inference(args) + print("========================SVRG Module Score ============================") + get_validation_score(args) diff --git a/example/svrg_module/benchmarks/svrg_benchmark.ipynb b/example/svrg_module/benchmarks/svrg_benchmark.ipynb new file mode 100644 index 000000000000..db02938af466 --- /dev/null +++ b/example/svrg_module/benchmarks/svrg_benchmark.ipynb @@ -0,0 +1,379 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Linear 
Regression Using SVRGModule on YearPredictionMSD Dataset" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this notebook, a linear regression model will be fit on YearPredictionMSD dataset, which contains predictions of the release year of a song based on its audio features. The dataset has 90 features and over 400,000 samples. The dataset is downsampled to 5,000 in this experiment." + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "import mxnet as mx\n", + "from sklearn.datasets import load_svmlight_file\n", + "import numpy as np\n", + "import json\n", + "import tempfile\n", + "import os\n", + "from mxnet.contrib.svrg_optimization.svrg_module import SVRGModule\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Read Data\n", + "The first step is to get the training features and labels and normalize the data. In this example, we will use 5000 data samples. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Download data file\n", + "# from subprocess import call\n", + "# YearPredictionMSD dataset: https://archive.ics.uci.edu/ml/datasets/yearpredictionmsd\n", + "# call(['wget', 'https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/regression/YearPredictionMSD.bz2'])\n", + "# call(['bzip2', '-d', 'YearPredictionMSD.bz2'])" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Reading data from disk...\n" + ] + } + ], + "source": [ + "feature_dim = 90\n", + "print(\"Reading data from disk...\")\n", + "train_features, train_labels = load_svmlight_file('YearPredictionMSD', n_features=feature_dim, dtype=np.float32)\n", + "train_features = train_features.todense()\n", + "\n", + "# normalize the data: subtract means and divide by standard deviations\n", + "label_mean = train_labels.mean()\n", + "label_std = np.sqrt(np.square(train_labels - label_mean).mean())\n", + "feature_means = train_features.mean(axis=0)\n", + "feature_stds = np.sqrt(np.square(train_features - feature_means).mean(axis=0))\n", + "\n", + "train_features = (train_features - feature_means) / feature_stds\n", + "train_labels = (train_labels - label_mean) / label_std\n", + "\n", + "train_features = train_features[-5000:]\n", + "train_labels = train_labels[-5000:]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Create Linear Regression Network" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [], + "source": [ + "def create_lin_reg_network(batch_size=100):\n", + " train_iter = mx.io.NDArrayIter(train_features, train_labels, batch_size=batch_size, shuffle=True,\n", + " data_name='data', label_name='label')\n", + " data = mx.sym.Variable(\"data\")\n", + " label = mx.sym.Variable(\"label\")\n", + " weight = mx.sym.Variable(\"fc_weight\", shape=(1, 90))\n", + " net = mx.sym.dot(data, weight.transpose())\n", + " bias = mx.sym.Variable(\"fc_bias\", shape=(1,), wd_mult=0.0, lr_mult=10.0)\n", + " net = mx.sym.broadcast_plus(net, bias)\n", + " net = mx.sym.LinearRegressionOutput(data=net, label=label)\n", + " \n", + " return train_iter, net" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### SVRGModule with SVRG Optimization\n", + "In this example, we will use intermediate level API for SVRGModule and the dump mse per epoch to JSON file for 
plotting graphs." + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [], + "source": [ + "def train_svrg_lin_reg(num_epoch=100, batch_size=100, update_freq=2, output='svrg_lr.json', \n", + " optimizer_params=None):\n", + "\n", + " di, net = create_lin_reg_network(batch_size=batch_size)\n", + " \n", + " #Create a SVRGModule\n", + " mod = SVRGModule(symbol=net, context=mx.cpu(0), data_names=['data'], label_names=['label'], update_freq=update_freq)\n", + " mod.bind(data_shapes=di.provide_data, label_shapes=di.provide_label)\n", + " mod.init_params(initializer=mx.init.Zero(), allow_missing=False, force_init=False, allow_extra=False)\n", + " mod.init_optimizer(kvstore='local', optimizer='sgd', optimizer_params=optimizer_params)\n", + " metrics = mx.metric.create(\"mse\")\n", + " \n", + " results = {}\n", + " for e in range(num_epoch):\n", + " results[e] = {}\n", + " metrics.reset()\n", + " if e % mod.update_freq == 0:\n", + " mod.update_full_grads(di)\n", + " di.reset()\n", + " for batch in di:\n", + " mod.forward_backward(data_batch=batch)\n", + " mod.update()\n", + " mod.update_metric(metrics, batch.label)\n", + " results[e][\"mse\"] = metrics.get()[1]\n", + " \n", + " f = open(output, 'w+')\n", + " f.write(json.dumps(results, indent=4, sort_keys=True))\n", + " f.close()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Module with SGD Optimization " + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [], + "source": [ + "def train_sgd_lin_reg(num_epoch=100, batch_size=100, update_freq=2, output='sgd_lr.json', \n", + " optimizer_params=None):\n", + " \n", + " di, net = create_lin_reg_network(batch_size=batch_size)\n", + " \n", + " #Create a standard module\n", + " mod = mx.mod.Module(symbol=net, context=mx.cpu(0), data_names=['data'], label_names=['label'])\n", + " mod.bind(data_shapes=di.provide_data, label_shapes=di.provide_label)\n", + " mod.init_params(initializer=mx.init.Zero(), allow_missing=False, force_init=False, allow_extra=False)\n", + " mod.init_optimizer(kvstore='local', optimizer='sgd', optimizer_params=optimizer_params)\n", + " metrics = mx.metric.create(\"mse\")\n", + " \n", + " results = {}\n", + " for e in range(num_epoch):\n", + " results[e] = {}\n", + " metrics.reset()\n", + " di.reset()\n", + " for batch in di:\n", + " mod.forward_backward(data_batch=batch)\n", + " mod.update()\n", + " mod.update_metric(metrics, batch.label)\n", + " results[e][\"mse\"] = metrics.get()[1]\n", + " f = open(output, 'w+')\n", + " f.write(json.dumps(results, indent=4, sort_keys=True))\n", + " f.close()\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "import json\n", + "import seaborn as sns\n", + "import matplotlib.pyplot as plt\n", + "import matplotlib.patches as mpatches\n", + "import pandas as pd" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Training Loss over 100 Epochs Using lr_scheduler\n", + "When a large learning rate is used with SGD, training loss will drop fast but will oscillates above the minimum and never converges. With a small learning rate, it will eventually reach the minimum after many iterations. A common practice is to use learning rate scheduling by starting with a large learning rate and gradually decreasing it. 
" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "metadata": {}, + "outputs": [], + "source": [ + "train_svrg_lin_reg(optimizer_params={'lr_scheduler': mx.lr_scheduler.FactorScheduler(step=10, factor=0.99)})\n", + "train_sgd_lin_reg(optimizer_params={'lr_scheduler': mx.lr_scheduler.FactorScheduler(step=10, factor=0.99)})" + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Text(0.5,0,'Epochs')" + ] + }, + "execution_count": 32, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAABKIAAALMCAYAAADXShqaAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAIABJREFUeJzs3X2cnXV9J/zP75xJZjITQElglcSUsMXyFAQLWAvS3nXFCBTs2qoUbPXWLbur1VK16G59qO3d7t1214eV2pat4l1F8bZ6rwqoi88oW+RJRcDyIEIIahLkKTCTzMzv/uOcTE4mM8lk5sxMJnm/X6/z4no61/WdE/76vL7f31VqrQEAAACA2daY7wIAAAAA2D8IogAAAACYE4IoAAAAAOaEIAoAAACAOSGIAgAAAGBOCKIAAAAAmBOCKABgwSilNEspj5dSVnXzWvZupZTXlFK+Ot91AAAzJ4gCAGZNOwja9hktpTzZsX/+nt6v1jpSa11aa72vm9fuqVLKn5VSLuv2faf47EYp5eJSyl3t3/NHpZT/q5SyeI6e/2/a/5aPj/ucPBfPBwAWtp75LgAA2HfVWpdu2y6l3JvkNbXWaya7vpTSU2sdnovaFrBLkvxakvOT3JjkqCSXJTk6yb/t5oN28e9xX6318G4+CwDYP+iIAgDmTbuz6IpSysdKKY8luaCU8txSyv8upTxcSnmwlPK+Usqi9vU9pZRaSjm8vf+R9vmrSymPlVKuK6Ws3tNr2+dfVEr5l1LKI6WU/15K+WYp5ZXT+JuOLaV8rV3/90opZ3WcO7uUcnv7+etKKRe1jx9aSrmq/Z2HSilfn+TeRyW5MMl5tdZ/rrUO11pvTfKbSc4upZxeSjmtlPJAKaXR8b3fKqXc1N5ulFL+Uynl7lLKxlLKx0spT22f+/n2b/aqUsp9Sb44jb//2naH1g3t3/LT2+7fPv/iUsr323/rl0spv9Bx7udKKf9fKWVDu7b37njr8u729+4ppZzRceLVpZR727/rPaWUl+9p3QDA3BBEAQDz7TeSXJ7koCRXJBlO8oYky5OcmmRtWuHLZH47yduSHJzkviR/uqfXllIOTfKJJG9uP/eHSU7Z0z+kPR73uSRXJjkkyUVJriil/Hz7kg8leXWt9YAkxyf5Wvv4m5Pc0/7O09o1TuTfJPlhrfWmzoO11nuTfDvJC5J8M8nWJL8y7u++vL39h0nOSnJ6kpVJNid537jnnJ5Wp9VZmZ7faX8OS1KSvDtJSilHJ/lIkt9P62+9JslnSymLSik9af1udyU5PMkz0vo32eaXk3wvybL2/f6hfc8Dk/y3JC9o/66nJvnuNOsGAGaZIAoAmG/X1lo/W2sdrbU+WWv9dke3zz1J/j47hirjfbLWekOtdWuSjyY5YRrXnp3kllrr/2yfe3eSjdP4W05NsjjJX9Vat7bHEK9Osq1DZ2uSY0opB9RaH+oIlLamFdqsqrVuqbV+bac7tyxP8uAk5x5MsrzWWpN8PMl5SVJKeUqSF7aPJa1Q7z/VWh+otQ4meWeSl3Z2UCV5R631iVrrk5M8a1W7M6nz09tx/sO11ttqrZuTvD3Jy0sppf07fKbW+uX27/xfkhyY5DlJntv++y6utW5u/7/wzY573l1r/WCtdSTJh5OsLKUsb5+rSY4rpfTVWh+std42Sd0AwDwTRAEA8+3+zp1SylGllCtLKT8upTya5F1pBRST+XHH9hNJlk524S6uPayzjnaYs24KtY93WFrrJ9WOYz9KsqK9/RtJzklyXynlq6WU57SP/5f2dV9qj8y9eZL7b0zy9EnOPT3bw7PLk7ykPdL4kiT/XGvd9vesSqsL6eFSysNpdRnVJId23GuHf5MJ3Fdrfcq4z9Ak3/9Rkt60utAOa+8nSWqto2n9zivS6oC6tx00TWT8v12SLK21PppW6PbaJD8upXyulPLM3dQPAMwTQRQAMN/quP2/S3Jrkp+vtR6YVkdNmeUaHkxrTC1JazGibA+P9sT6JM9of3+bVUkeSJJ2p9c5aYU+n0u7S6nW+mit9aL2AuAvTnJxKWWiLrAvJVldSnl258H2Olgnt8+n1vrd9t/0wuw4lpe0gp8XjAuR+mqtY0HPuCBtOp7Rsb0qyVCSh9L6fX6uo+5GWr/7A2mFVz9XSmnu6cNqrVfXWv9NWmHcXWn9PwQA7IUEUQDA3uaAJI8k2dxeU2hX60N1y+eSPLuU8uvttYrekNYaRrvSLKX0dXx6k3wrrTWu3the9+jXkpyZ5BOllCWllN8upRzYHkt7LMlIkrSf+6/bAdYj7eM7dQbVWm9P8j+SfKyUckoppVlKOS7JJ5NcXWv9asflH0trjarnts9v87dJ/ryUsqr97ENLKefswW81Fb/T7mwbSPInST7RDrc+keScUsqvtru13pzW7/DPSa5LsqldW3/79zp1dw8qpTy9/fv1J9mS1ppXk3VVAQDzTBAFAOxt3pjkd9MKKP4urQXMZ1Wt9SdJXpbWotebkvzrJDen1ckzmQuSPNnx+UF7PO3Xk5yb1pjc+5L8dq31X9rf+d0kP2qPHL46ySvax38hyZeTPJ7WYuPvrbVeO8lz/0NaayR9LK3Q5eok/yvJS8ddd3mSX0vyv2qtP+s4/t+SfD6tMcDH0grPTt7F3zmRVaWUx8d9Xtxx/h/TWpT8wSTNJH+QJLXW77d/gw8k2ZDWQvTntNfTGk5rra6j0+qOui+ttwHuTjOtQOvBtP7tfjnJ6/bw7wEA5kiZeec1AMC+pT0etj7Jb9ZavzHf9SwkpZRrk/yPWutl810LALD30REFAJCklLK2lHJQe8TubWmN2F0/z2UBAOxTBFEAAC2nJbknrZG6tUlePO5NcAAAzJDRPAAAAADmhI4oAAAAAOZEz3wXMNeWL19eDz/88PkuAwAAAGC
fceONN26stR6yu+v2uyDq8MMPzw033DDfZQAAAADsM0opP5rKdUbzAAAAAJgTgigAAAAA5oQgCgAAAIA5sd+tEQUAAADsX7Zu3Zp169ZlcHBwvktZ8Pr6+rJy5cosWrRoWt8XRAEAAAD7tHXr1uWAAw7I4YcfnlLKfJezYNVas2nTpqxbty6rV6+e1j2M5gEAAAD7tMHBwSxbtkwINUOllCxbtmxGnWWCKAAAAGCfJ4Tqjpn+joIoAAAAAOaENaIAAACA/cqaa2/Nhq3DXbvfIYt68r3Tjuva/fZlOqIAAACA/Uo3Q6jZuF+nkZGRWbv3fBBEAQAAAMyyzZs356yzzsqznvWsHHfccfnwhz+cl770pWPnv/rVr+bXf/3XkyRLly7N29/+9jznOc/Jddddl6uuuipHHXVUTjvttLz+9a/P2WefPelz3vnOd+Z3f/d3c8YZZ+Twww/Ppz71qfzRH/1R1qxZk7Vr12br1q1Jkre85S055phjcvzxx+dNb3pTkmTDhg15yUtekpNPPjknn3xyvvnNb3b9dzCaBwAAADDLPv/5z+ewww7LlVdemSR55JFH8ra3vS2bN2/OwMBArrjiirzsZS9L0gqtjjvuuLzrXe/K4OBgjjzyyHz961/P6tWrc9555+32WXfffXe+8pWv5Lbbbstzn/vc/NM//VP+8i//Mr/xG7+RK6+8Mqeffno+/elP54477kgpJQ8//HCS5A1veEMuuuiinHbaabnvvvvywhe+MLfffntXfwcdUQAAAACzbM2aNbnmmmty8cUX5xvf+EYOOuigrF27Np/97GczPDycK6+8Mueee26SpNls5iUveUmS5I477sgRRxyR1atXJ8mUgqgXvehFWbRoUdasWZORkZGsXbt2rIZ77703Bx54YPr6+vKa17wmn/rUp9Lf358kueaaa/K6170uJ5xwQs4555w8+uijeeyxx7r6O+iIAgAAAJhlz3zmM3PjjTfmqquuylvf+tacccYZednLXpZLLrkkBx98cE4++eQccMABSZK+vr40m80kSa11j5/V29ubJGk0Glm0aFFKKWP7w8PD6enpyfXXX58vfelL+fjHP573v//9+fKXv5zR0dFcd911WbJkSZf+6p3piAIAAACYZevXr09/f38uuOCCvOlNb8pNN92UX/3VX81NN92USy+9dGwsb7yjjjoq99xzT+69994kyRVXXDHjWh5//PE88sgjOfPMM/Oe97wnt9xyS5LkjDPOyPvf//6x67Yd7yYdUQAAAMB+5ZBFPV19090hi3Yfr3zve9/Lm9/85rEupQ984ANpNps5++yzc9lll+XDH/7whN9bsmRJ/uZv/iZr167N8uXLc8opp8y43sceeyznnntuBgcHU2vNu9/97iTJ+973vrz2ta/N8ccfn+Hh4Zx++un527/92xk/r1OZTovXQnbSSSfVG264Yb7LAAAAAObI7bffnqOPPnq+y5i2xx9/PEuXLk2tNa997Wtz5JFH5qKLLpq3eib6PUspN9ZaT9rdd43mAQAAAOzFLr300pxwwgk59thj88gjj+TCCy+c75KmzWgeAAAAwF7soosu2qkD6kMf+lDe+9737nDs1FNPzSWXXDKXpe0xQRQAAADAAvOqV70qr3rVq+a7jD1mNA8AAACAOSGIAgAAAGBOCKIAAAAAmBOCKAAAAADmhMXKAQAAgP3KN659TrZs2di1+y1evDzPO+2fu3a/fZmOKAAAAGC/0s0QajbuN97hhx+ejRtn9xlzRRAFAAAAwJzYb4OokZGh3H//Zbn+2+fm2m+emptuOj8/+cnnUmud79IAAACAfczmzZtz1lln5VnPelaOO+64XHHFFbnqqqty1FFH5bTTTsvrX//6nH322UmSTZs25YwzzsiJJ56YCy+8cJdZxb333pujjjoqr3nNa3Lcccfl/PPPzzXXXJNTTz01Rx55ZK6//vokyde+9rWccMIJOeGEE3LiiSfmscceS5L81V/9VU4++eQcf/zxecc73jHrv8N+GUSNjAzmlu+8Mv9y55/mscduzdDQj/Ozh/93bv3+G/KDH7xNGAUAAAB01ec///kcdthh+c53vpNbb701a9euzYUXXpirr7461157bTZs2DB27Z/8yZ/ktNNOy80335xzzjkn99133y7vfdddd+UNb3hDvvvd7+aOO+7I5ZdfnmuvvTZ//dd/nT//8z9Pkvz1X/91Lrnkktxyyy35xje+kSVLluSLX/xi7rzzzlx//fW55ZZbcuONN+brX//6rP4O+2UQdd/9/5CHH75+wnMPrP9YNm366twWBAAAAOzT1qxZk2uuuSYXX3xxvvGNb+SHP/xhjjjiiKxevTpJct55541d+/Wvfz0XXHBBkuSss87KU5/61F3ee/Xq1VmzZk0ajUaOPfbYPP/5z08pJWvWrMm9996bJDn11FPzh3/4h3nf+96Xhx9+OD09PfniF7+YL37xiznxxBPz7Gc/O3fccUfuvPPO2fkB2vbLt+Y9uP6Tuzy//sFPZvny/2OOqgEAAAD2dc985jNz44035qqrrspb3/rWvOAFL9jl9aWUKd+7t7d3bLvRaIztNxqNDA8PJ0ne8pa35KyzzspVV12VX/qlX8o111yTWmve+ta35sILL5zGXzQ9+2VH1ODQg7s8PzS4fo4qAQAAAOba4sXL5/x+69evT39/fy644IK86U1vyre+9a3cc889Yx1LV1xxxdi1p59+ej760Y8mSa6++ur87Gc/m3GNd999d9asWZOLL744J510Uu6444688IUvzAc/+ME8/vjjSZIHHnggP/3pT2f8rF3ZLzui+nqfnicHJ5+v7O07bA6rAQAAAObS80775zl/5ve+9728+c1vTqPRyKJFi/KBD3wgDz74YNauXZvly5fnlFNOGbv2He94R84777w8+9nPzq/8yq9k1apVM37+e97znnzlK19Js9nMMccckxe96EXp7e3N7bffnuc+97lJkqVLl+YjH/lIDj300Bk/bzJlf1uY+6STTqqf/OT/mbvv+a+TXvOsZ/1Dli/71bkrCgAAAJg1t99+e44++uj5LmMnjz/+eJYuXZpaa1772tfmyCOPzEUXXTTfZe3WRL9nKeXGWutJu/vufjma94xnvDpPecpzJjy3YsX5WXbwr8xxRQAAAMD+5tJLL80JJ5yQY489No888sicrtU0X/bL0bxmszcnnvChPLD+E7n77r/MyMgTKaWZY499bw49ZO0eLQgGAAAAMB0XXXTRlDugNm3alOc///k7Hf/Sl76UZcuWdbu0WbNfBlFJ0mj05hkrX5GNG/5XHvrZN5M08q8OfdF8lwUAAADMglrrgm48WbZsWW655Zb5LiMzXeJpvxzN69Rs9idJat2a0dEt81wNAAAA0G19fX3ZtGnTjEOU/V2tNZs2bUpfX9+077HfdkRtsy2ISpKRkSfTaCyex2oAAACAblu5cmXWrVuXDRs2zHcpC15fX19Wrlw57e/v90FUo7lkbHtk5IksWnTQPFYDAAAAdNuiRYuyevXq+S6DGM1LT3NgbH
tk5Il5rAQAAABg37bfB1HjO6IAAAAAmB37fRA1fo0oAAAAAGaHIGqHIGrzPFYCAAAAsG/br4OoWmvWDTXH9kdGdUQBAAAAzJb9Noj63w8/nuddf0f+4kcPjx17990/zI+eHJrHqgAAAAD2XftlEHXb40/m5d+5O3c9MZSh9I0df+DJR/Jvb74rP9s6PI/VAQAAAOyb9ssg6r0/+kkGR2uSZCi9Y8d7M5QHhrbmI+s3zVdpAAAAAPus/TKIumbTo2Pbgx0dUb0ZTJJ8qeM8AAAAAN2xXwZRI7WObXeO5vW1g6jhjvMAAAAAdMd+GUQ99ylLx7aHduiIGtrpPAAAAADdsV8GUa9ddWhKe3tw3BpRBzQbeeWK5fNTGAAAAMA+bL8Mok576gF579GrMtBs7LBY+VMaQ/no8UdkRd/ieawOAAAAYN+0XwZRSfLSpx2cW3752BzevzTD6UmSnHZQM6cYywMAAACYFfttEJUkB/Q0c/iS3rGuqNGRJ+e5IgAAAIB9134dRCVJf7ORwfaC5cMjT8xzNQAAAAD7rjkLokopa0spPyil3FVKecsE51eVUr5SSrm5lPLdUsqZ7ePnl1Ju6fiMllJOaJ/7avue284duqd1da4TJYgCAAAAmD09c/GQUkozySVJXpBkXZJvl1I+U2u9reOyP07yiVrrB0opxyS5KsnhtdaPJvlo+z5rkvzPWustHd87v9Z6w3Rr6282MtTuiBoRRAEAAADMmrnqiDolyV211ntqrVuSfDzJueOuqUkObG8flGT9BPc5L8nHullY52jeqCAKAAAAYNbMVRC1Isn9Hfvr2sc6vTPJBaWUdWl1Q/3+BPd5WXYOoj7UHst7WymlTPTwUsrvlVJuKKXcsGHDhh3OdY7m1dHB1Fqn+CcBAAAAsCfmKoiaKCAan/icl+SyWuvKJGcm+cdSylh9pZTnJHmi1nprx3fOr7WuSfK89ucVEz281vr3tdaTaq0nHXLIITuc6xzNS2pGRwf35O8CAAAAYIrmKohal+QZHfsrs/Po3auTfCJJaq3XJelLsrzj/Mszrhuq1vpA+7+PJbk8rRHAPTLQbI6N5iXJyMjmPb0FAAAAAFMwV0HUt5McWUpZXUpZnFao9Jlx19yX5PlJUko5Oq0gakN7v5Hkt9JaWyrtYz2llOXt7UVJzk5ya/ZQf8doXpKMjDy5p7cAAAAAYArm5K15tdbhUsrrknwhSTPJB2ut3y+lvCvJDbXWzyR5Y5JLSykXpTW298q6fcGm05Osq7Xe03Hb3iRfaIdQzSTXJLl0T2vrb3SO5nlzHgAAAMBsmZMgKklqrVeltQh557G3d2zfluTUSb771SS/NO7Y5iS/ONO6BjrempcIogAAAABmy1yN5u21dh7NE0QBAAAAzAZBVHP8aJ41ogAAAABmgyDKaB4AAADAnBBEGc0DAAAAmBP7fRA10GzuOJo3ajQPAAAAYDbs90HUkkbZMYga3jyP1QAAAADsu/b7IKqUktJcMravIwoAAABgduz3QVSSNDqDKGtEAQAAAMwKQVSSZrN/bFsQBQAAADA7BFFJehqCKAAAAIDZJohKsmiH0TxrRAEAAADMBkFUkoGeRRlMbxIdUQAAAACzRRCVpL/ZyFA7iBoWRAEAAADMCkFUkoFmI0PpS5IMDwuiAAAAAGaDICrbOqLaQZSOKAAAAIBZIYhKsqRjNG90VBAFAAAAMBsEUWmN5g22O6JGvTUPAAAAYFYIopL0N7aP5qVuyejo8PwWBAAAALAPEkQlGehpjo3mJcnoqK4oAAAAgG4TRGVcR1SSEQuWAwAAAHSdICqtt+YNCqIAAAAAZpUgKq3FyjtH8wRRAAAAAN0niEqrI2rH0TxrRAEAAAB0myAqrY4oo3kAAAAAs0sQlW0dUUbzAAAAAGaTICoTjeYJogAAAAC6TRCVZKDZHDeaZ40oAAAAgG4TRCXpb4wfzds8j9UAAAAA7JsEUUl6GiWjDR1RAAAAALNJENVWGv1j2yOj1ogCAAAA6DZBVFuz2RFEWawcAAAAoOsEUW3N5pKxbUEUAAAAQPcJotp6mgNj29aIAgAAAOg+QVRbX7M3I+2fQ0cUAAAAQPcJotr6e5oZSuvNeYIoAAAAgO4TRLUNNBsZSm+SZOuwIAoAAACg2wRRbf3NRgbbHVHDOqIAAAAAuk4Q1dbfaBjNAwAAAJhFgqi2ztG80VFvzQMAAADoNkFUW+doXh15IrXWea4IAAAAYN8iiGobaDbHOqKS0YyObpnXegAAAAD2NYKotv7m9jWikmR01DpRAAAAAN0kiGrrHM1LkuFhQRQAAABANwmi2joXK0+SER1RAAAAAF0liGrbaTRvxJvzAAAAALpJENW202jeyOZ5rAYAAABg3yOIausfN5qnIwoAAACguwRRbf2NHUfzRkasEQUAAADQTYKotoFmc4fRPEEUAAAAQHcJotrGj+YJogAAAAC6SxDV1tco2bpDR5Q1ogAAAAC6SRDVVkpJmv1j+zqiAAAAALpLENWh2Vgyti2IAgAAAOguQVSHZlMQBQAAADBbBFEdmj0DY9sjo9aIAgAAAOgmQVSHxdaIAgAAAJg1gqgOS5qLsiWLkiQjw4IoAAAAgG4SRHUY6GlkKH1JkmEdUQAAAABdJYjqsKTRyFB6kyRbBVEAAAAAXSWI6tDf3N4RZY0oAAAAgO4SRHUYaDYyOBZEeWseAAAAQDcJojq0OqJao3mjozqiAAAAALpJENVhoNkcG83L6GBqHZ3fggAAAAD2IYKoDv0do3mJ8TwAAACAbhJEdegczUssWA4AAADQTYKoDgMdb81LBFEAAAAA3SSI6tDfGDeaN2o0DwAAAKBb5iyIKqWsLaX8oJRyVynlLROcX1VK+Uop5eZSyndLKWe2jx9eSnmylHJL+/O3Hd/5xVLK99r3fF8ppcykxoGdRvM2z+R2AAAAAHSYkyCqlNJMckmSFyU5Jsl5pZRjxl32x0k+UWs9McnLk/xNx7m7a60ntD//vuP4B5L8XpIj25+1M6mzf6fRPB1RAAAAAN0yVx1RpyS5q9Z6T611S5KPJzl33DU1yYHt7YOSrN/VDUspT09yYK31ulprTfL/JHnxTIrsbzZ3GM0btUYUAAAAQNfMVRC1Isn9Hfvr2sc6vTPJBaWUdUmuSvL7HedWt0f2vlZKeV7HPdft5p5JklLK75VSbiil3LBhw4ZJixw/mjcsiAIAAADomrkKoiZau6mO2z8vyWW11pVJzkzyj6WURpIHk6xqj+z9YZLLSykHTvGerYO1/n2t9aRa60mHHHLIpEXuPJoniAIAAADolp45es66JM/o2F+ZnUfvXp32Gk+11utKKX1Jltdaf5pkqH38xlLK3Ume2b7nyt3cc4/0NxvjRvOsEQUAAADQLXPVEfXtJEeWUlaXUhantRj5Z8Zdc1+S5ydJKeXoJH1JNpRSDmkvdp5SyhFpLUp+T631wSSPlVJ+qf22vN9J8j9nUmSzlNTG9iDKaB4AAABA98xJR1StdbiU8rokX0jSTPLBWuv3SynvSnJDrfUzSd6Y5
NJSykVpjdi9stZaSymnJ3lXKWU4yUiSf19rfah96/+Q5LIkS5Jc3f7MSKOxJBltbVusHAAAAKB75mo0L7XWq9JahLzz2Ns7tm9LcuoE3/unJP80yT1vSHJcN+tsNPuT4db2iNE8AAAAgK6Zq9G8BaPZHBjbHhnZPI+VAAAAAOxbBFHjLGouGdvWEQUAAADQPYKocRY3l2Q0JUkyMmqNKAAAAIBuEUSNM9DTzFB6kyTDw4IoAAAAgG4RRI3T32xkKH1Jkq3emgcAAADQNYKocfqbjQy2g6hhQRQAAABA1wiixhloNsZG80YEUQAAAABdI4gap7/ZHBvNGxVEAQAAAHSNIGqcgY41ourok/NcDQAAAMC+QxA1TmuNqNZoXupwRke3zG9BAAAAAPsIQdQ4/Y3tHVFJMjKiKwoAAACgGwRR4/Q3xwdR1okCAAAA6AZB1Dj9HW/NSwRRAAAAAN0iiBpnoHONqAiiAAAAALpFEDVOf7OZoSwZ27dGFAAAAEB3CKLG2Xk0b/M8VgMAAACw7xBEjTMwPoga1REFAAAA0A2CqHH6m40M7jCaZ40oAAAAgG4QRI2zU0eUNaIAAAAAukIQNc7iUrK1eGseAAAAQLcJosYppSSN/rF9QRQAAABAdwiiJtDsCKJGjeYBAAAAdIUgagLN5vYganhk8zxWAgAAALDvEERNoKdHRxQAAABAtwmiJrCoaY0oAAAAgG4TRE2gr2dxhtOTJBkWRAEAAAB0hSBqAv3NRgbTl0QQBQAAANAtgqgJ9DcbGUpvkmTrsCAKAAAAoBsEURMYaDbHgihrRAEAAAB0hyBqAv2NRgazJEkyMiqIAgAAAOgGQdQEBjpG80ZHnpznagAAAAD2DYKoCXSuEZXRJ1Nrnd+CAAAAAPYBgqgJdL41L6kZHR2c13oAAAAA9gWCqAm0RvP6xvYtWA4AAAAwc4KoCewwmhdBFAAAAEA3CKImMNBs6ogCAAAA6DJB1AR2XCMqGfHmPAAAAIAZE0RNYOfRvM3zWA0AAADAvkEQNYGdFyvXEQUAAAAwU4KoCfQ3xo/mWSMKAAAAYKZe304XAAAgAElEQVQEURMY8NY8AAAAgK4TRE1gyfjRvFGjeQAAAAAzJYiaQKOU1MaSsX0dUQAAAAAzJ4iaRKPRP7YtiAIAAACYOUHUJBpNHVEAAAAA3SSImkRPs7MjyhpRAAAAADMliJpET09nELV5HisBAAAA2DcIoiaxeIfRPB1RAAAAADMliJpEf09PBtObJBm2RhQAAADAjAmiJtHfaGSoHURtHTaaBwAAADBTgqhJDDSbGUpfEqN5AAAAAN0giJpEf7MxFkQZzQMAAACYOUHUJAaa20fzRgVRAAAAADMmiJpEf7ORwXZHVB01mgcAAAAwU4KoSSzpGM1L3ZLR0eH5LQgAAABggRNETaJzNC9JRnVFAQAAAMyIIGoSnYuVJ8mIdaIAAAAAZkQQNYmBZnNsjahEEAUAAAAwU4KoSfQ3dhzNGxkxmgcAAAAwE4KoSRjNAwAAAOguQdQkBpoNo3kAAAAAXSSImkR/02geAAAAQDcJoiYxsNNo3uZ5rAYAAABg4RNETaJ/p9E8HVEAAAAAMyGImsTiRiPDpWM0b9QaUQAAAAAzIYjahUajf2x7ZFgQBQAAADATgqhdKM2OIEpHFAAAAMCMzFkQVUpZW0r5QSnlrlLKWyY4v6qU8pVSys2llO+WUs5sH39BKeXGUsr32v/9tY7vfLV9z1van0O7WXNPc8nYtjWiAAAAAGamZy4eUkppJrkkyQuSrEvy7VLKZ2qtt3Vc9sdJPlFr/UAp5ZgkVyU5PMnGJL9ea11fSjkuyReSrOj43vm11htmo+5mZ0eUt+YBAAAAzMhcdUSdkuSuWus9tdYtST6e5Nxx19QkB7a3D0qyPklqrTfXWte3j38/SV8pHauIz6KeHYIoHVEAAAAAMzFXQdSKJPd37K/Ljl1NSfLOJBeUUtal1Q31+xPc5yVJbq61DnUc+1B7LO9tpZQy0cNLKb9XSrmhlHLDhg0bplx0X7M3I+2faGTEGlEAAAAAMzFXQdREAVEdt39ekstqrSuTnJnkH0spY/WVUo5N8n8nubDjO+fXWtckeV7784qJHl5r/fta60m11pMOOeSQKRfd39PMUPqSJFu9NQ8AAABgRuYqiFqX5Bkd+yvTHr3r8Ookn0iSWut1SfqSLE+SUsrKJJ9O8ju11ru3faHW+kD7v48luTytEcCuGWg2M7gtiNIRBQAAADAjcxVEfTvJkaWU1aWUxUlenuQz4665L8nzk6SUcnRaQdSGUspTklyZ5K211m9uu7iU0lNK2RZULUpydpJbu1l0f7ORobSWozKaBwAAADAzcxJE1VqHk7wurTfe3Z7W2/G+X0p5VynlnPZlb0zy70op30nysSSvrLXW9vd+Psnb2mtB3VJKOTRJb5IvlFK+m+SWJA8kubSbdQ80G2OjeYIoAAAAgJnpmasH1VqvSmsR8s5jb+/Yvi3JqRN878+S/Nkkt/3FbtY4Xn+zMTaaN+qteQAAAAAzMlejeQtSf2P7aF5Gn0yrQQsAAACA6RBE7cJAT0cQlZGMjm6Z13oAAAAAFjJB1C70NxoZzJKx/dFR60QBAAAATJcgahcGms2OjqhkxDpRAAAAANMmiNqF/mZjhyBqeGTzPFYDAAAAsLAJonahFUR1jObpiAIAAACYNkHULgw0GxncYTTPGlEAAAAA0yWI2oXxo3mCKAAAAIDpE0TtQiuI6hvbF0QBAAAATJ8gahf6m40M7hBEWSMKAAAAYLoEUbuwpDF+NM9b8wAAAACmSxC1C41Skmb/2L6OKAAAAIDpE0TtRqMsGdu2RhQAAADA9AmidqO5Q0eUIAoAAABgugRRuyGIAgAAAOgOQdRu9PR0jOaNWiMKAAAAYLoEUbuxWEcUAAAAQFcIonZjSc+ibMniJMnwsCAKAAAAYLoEUbsx0GxmKL1Jkq06ogAAAACmTRC1G/3NxlgQNTyyeZ6rAQAAAFi4BFG70d9sZDB9SZLhEYuVAwAAAEyXIGo3+huNDLWDqFFBFAAAAMC0CaJ2o3M0r1ojCgAAAGDaBFG7MdAxmpc6mFpH57cgAAAAgAVKELUbrY6ovrH9EeN5AAAAANMiiNqNgWZzbDQvSUZGBVEAAAAA0yGI2o3Ot+Ylycjw5nmsBgAAAGDhEkTtxk6jeTqiAAAAAKZFELUbAx1vzUuSUW/OAwAAAJgWQdRujB/NGxZEAQAAAEyLIGo3BsaN5umIAgAAAJgeQdRu9Dd2HM0bGbFGFAAAAMB0CKJ2o7/Z3KEjanjEW/MAAAAApkMQtRuLGiUjZcnY/qiOKAAAAIBpEURNQWlsD6JGrBEFAAAAMC2CqCloNAVRAAAAADMliJqCZrN/bNti5QAAAADTI4iagsYOQZSOKAAAAIDpEERNweIeo3kAAAAAMyWImoLFjf6MpiRJhgVRAAAAANMiiJqCgUXNDKU3SbJVEAUAAAAwLYKoKehvNDKUviTJ8PDmea4GAAAAYGESRE3BQLORwW1BlLfm
AQAAAEyLIGoK+puNsdG8kVGjeQAAAADTIYiaglYQ1eqIGrVGFAAAAMC0CKKmYKAjiMqo0TwAAACA6RBETUF/s5nB9mhe6nBGR7fMb0EAAAAAC5Agagp26IhKMmLBcgAAAIA9Joiagv6dgijrRAEAAADsKUHUFPQ3tr81L9ERBQAAADAdgqgpGGg2tq8RlWRkZPM8VgMAAACwMAmipmBJs5GhLBnb1xEFAAAAsOcEUVPQWqy8oyNq1BpRAAAAAHtKEDUF/c3muDWiBFEAAAAAe0oQNQX9zUYGdxjNE0QBAAAA7ClB1BQsaRRvzQMAAACYIUHUFJRSUpo6ogAAAABmQhA1RY1G/9i2IAoAAABgzwmipqjR0RE1ajQPAAAAYI8Joqao2dERNTyyeR4rAQAAAFiYBFFTtKhnYGxbRxQAAADAnhNETVFP0xpRAAAAADMhiJqixWVLhtOTJHn40e/lkUe/M88VAQAAACwsgqgpuO/+D2XzQ1/OYPqSJINbfpobbvi3ufPOP0+tdZ6rAwAAAFgYBFG78cgjN+XOO/8sfRnMUHqTJDUlSXLf/f+Qn/zks/NZHgAAAMCCIYjajXXrPpIk6e0IonY4/8A/znVJAAAAAAuSIGo3Ht/8gyRJb4YymCVJkpLt43iPP/4v81IXAAAAwEIzZ0FUKWVtKeUHpZS7SilvmeD8qlLKV0opN5dSvltKObPj3Fvb3/tBKeWFU71nN/T0HJQk6ctgHslTkiTNjI6dX9Rz4Gw8FgAAAGCfMydBVCmlmeSSJC9KckyS80opx4y77I+TfKLWemKSlyf5m/Z3j2nvH5tkbZK/KaU0p3jPGftX/+rsJK3RvI1ZPul5AAAAAHZtWkFUKeV5pZRf3oOvnJLkrlrrPbXWLUk+nuTccdfUJNvaiw5Ksr69fW6Sj9dah2qtP0xyV/t+U7nnjD39aS/JgQeemL4MZtO4IGrJklVZterfdfuRAAAAAPukKQVRpZSvllKe195+U5JPJfmnUsrFU3zOiiT3d+yvax/r9M4kF5RS1iW5Ksnv7+a7U7nntvp/r5RyQynlhg0bNkyx5JZmszcnnnBZViw7LZtyyNjxpxx0Sn7xF//fLF588B7dDwAAAGB/NdWOqDVJrmtvX5jkV5M8J8l/nOL3ywTH6rj985JcVmtdmeTMJP9YSmns4rtTuWfrYK1/X2s9qdZ60iGHHDLRJbvU07M0/3rlb+4wmnfwstPSu3jnUT0AAAAAJtYzxesaSUZLKUck6am1fj9JSilTbQdal+QZHfsrs330bptXp7UGVGqt15VS+pIs3813d3fPrhloNvJQlo3tDw7O2qMAAAAA9klT7Yj6VpL3JPnLJJ9OknYotWmK3/92kiNLKatLKYvTWnz8M+OuuS/J89v3PjpJX5IN7eteXkrpLaWsTnJkkuuneM+u6W828rMcnNH2TzYkiAIAAADYI1MNol6ZZDDJD5K8vX3smCT/fSpfrrUOJ3ldki8kuT2tt+N9v5TyrlLKOe3L3pjk35VSvpPkY0leWVu+n+QTSW5L8vkkr621jkx2zyn+PXtsoNnMSOnJz/LUJMng0IOz9SgAAACAfVKpdcJllfZZJ510Ur3hhhv2+Hs/HtqaE771/byj/qc8Mz9IszmQXzn9OylloqWqAAAAAPYfpZQba60n7e66qb417w2llBPa26eUUu4ppfxLKeWUmRa6UPQ3Wz/VtgXLR0Y2Z3j40fksCQAAAGBBmepo3huT3Nve/i9JLknyX5O8bxZq2iv1N1o/1aZsf+ue8TwAAACAqZtqEPWUWuvDpZSlSU5I8p5a698lOWr2Stt71FrzuQ0PpyTZ1O6ISpLNTzwwf0UBAAAALDBTDaLWlVKek+SlSb5Rax0ppRyQZGT2Sts71Frzhz+4P//+th+lZvtoXpL8/Q+/my2jo/NXHAAAAMAC0jPF6/4oyWeTbEnyG+1jZyf59mwUtTf5/MZH8rEHHxrbf6gjiNq0eV0+9MDGXPiMQ+ejNAAAAIAFZUodUbXWz9VaD621rqy1bgufPp3kxbNX2t7h8o4QKtmxI2p5Nu50HgAAAICJTbUjKqWUI5K8PMmKJA8kuaLWevdsFba3WDe4ZYf9zVmawfSlL4NZlo07nQcAAABgYlPqiCqlnJnkO2ktVP5EkmclubmUctYs1rZXOKx38Y4HShlbsHxZNuaw3kXzUBUAAADAwjPVjqi/SPLiWuuXth0opfxakvckuXI2Cttb/PZhB+dLDz26w7GNWZ4VWZeD81B++2kHzVNlAAAAAAvLVN+atyrJV8cd+3r7+D7tRcsPym897ak7HNvWEdXIaM4/pM5HWQAAAAALzlSDqO8k+YNxx16f5LvdLWfv0ygl7z1qVd5/9Koc2d+bJNmUQ8bOj255cL5KAwAAAFhQpjqa9x+TfK6U8gdJ7kvyjCTDSc6ZrcL2Jo1S8ptPOzirl/TmrJvuzKYsGzs3OLh+HisDAAAAWDimFETVWm8rpfxCklOTHJZkfZJv1Vr3q1fGrehrLVy+bTQvSQaHdEQBAAAATMVUO6JSa92ajnWiSimLSyn31FqPmI3C9kaHLu7JolKysW4fzdMRBQAAADA1U10jaiIlyeFdqmNBaJSSp/UuykMdo3lDgigAAACAKZlJEJUk+90r41b0LspwWZSH85QkyeCQIAoAAABgKmYaRO13VrbXidrYfnOe0TwAAACAqdnlGlGllA9O97v7qsN6FyVpLVj+87kzw8OPZnj48fT0LJ3nygAAAAD2brsLkx7Yzfk/71YhC8Vkb85b2nPkfJUEAAAAsCDsMoiqtb5trgpZKCYKooYG12fpgCAKAAAAYFesEbWHVrRH8zZ2dkRZJwoAAABgtwRRe2jC0TxBFAAAAMBuCaL20IE9zRzQbGRT+615STI4JIgCAAAA2B1B1DSs6FucR3NgtqY1pjc4+OA8VwQAAACw99vdW/OSJKWU35nk1FCSdUmur7Vu7VpVe7nDehfljs2D2ViX5+l50GgeAAAAwBRMKYhK8ntJTk6yKa3gaUWS5UluTnJ4kq2llHNrrTfNRpF7m5Ud60Q9PQ9maOjHqXU0pWgwAwAAAJjMVJOTm5K8pdZ6WK31lFrriiQXJ/nnJIcl+Yck75+lGvc6K3pbQdTG9jpRtW7Nli0b57MkAAAAgL3eVIOoVyR537hj/z3J79RaR5P8RZJju1nY3uywvtbaUA9l2dgx43kAAAAAuzbVIOqnSV407tjaJBva271J9ps1orZ3RC0fO+bNeQAAAAC7NtU1ov4gyRWllJuT3J/kGUlOTPKy9vnnJvlA98vbO61od0Rtao/mJTqiAAAAAHZnSkFUrfXqUsrPJzkrrTWhvpzkt2qtP22f/0KSL8xalXuZp/cuSklrsfJtBFEAAAAAuzbVjqi0Q6cPzWItC8biRiOHLu7JpqHtQdSQIAoAAABgl6YURJVSfi7JnyY5IcnSznO11iNmoa693oq+xfnJluE8Vg/MAXnUGlEAAAAAuzHVjqjL01ob6j8neWL2ylk4VvQuzk15IhuzvBVEDT443yU
BAAAA7NWmGkStSXJ6rXVkNotZSA4bW7B8WVbnnmzd+lBGRgbTbPbNc2UAAAAAe6fGFK+7Nsnxs1nIQrOyd3GSHd+cNzSkKwoAAABgMlPtiLozyRdKKZ9M8uPOE7XWd3W9qgVgRbsjauO4N+f196+er5IAAAAA9mpTDaIOTvKFJAe0P9vUrle0QBw21hG1YxAFAAAAwMSmFETVWl8x24UsNCvG1ojaPponiAIAAACY3KRBVCllZa11XXt71WTX1Vrvm43C9nbLF/Wkt1GycaSjI2pIEAUAAAAwmV11RN2e7WN496Y1hlfGXVOTNLtf1t6vlJLDehfl3ieekuH0pCfDOqIAAAAAdmFXb807qGN7UZLF7f92fhbPXml7vxW9i1NLIw9lWRJvzQMAAADYlUmDqFrraMf2yGSfuSlz77Sib9uC5a0ganBwfWrdb9dvBwAAANilKS1WXkr5uSR/muSEJEs7z9Vaj5iFuhaE7QuWt9aJGh0dytatD2Xx4mXzWRYAAADAXmlKQVSSy5Pcn+Q/J3li9spZWFb0tjqiNqZjwfLB9YIoAAAAgAlMNYhak+T0/X0Ub7ztHVGHjB0bHFqfA7NmvkoCAAAA2GvtarHyTtcmOX42C1mItnVEbRrXEQUAAADAzqbaEXVnki+UUj6Z5MedJ2qt7+p6VQvEYe2OqM7RvKFBb84DAAAAmMhUg6iDk3whyQHtzzb79SviBprNPLWnmYe2dnREDQmiAAAAACYypSCq1vqK2S5koVrRtzi3DvfniTqQ/mw2mgcAAAAwiUmDqFLKylrruvb2qsmuq7XeNxuFLRSH9S7KrY8/mY1ZllWCKAAAAIBJ7aoj6vZsH8O7N60xvDLumpqk2f2yFo4VfdsWLD8kq3Jftmz5aUZHh9Jo9M5zZQAAAAB7l129Ne+gju1FSRa3/9v5WTx7pS0MK3onWLB86CfzVQ4AAADAXmvSIKrWOtqxPfL/s3fn0XFf9f3/n3dG+25Zkhd5jRMnsZPYSZywBVpK+BLCEuiXQsLvS6G0hPbLVuBLC7QUSkvLUgjQUgq0hLUsBVpCCftaQkjiJHbiLd4X2bIt2dr3mbm/PyTLkq0tIM1I1vNxjk9m7r1z9R4vnMPr3Pv+jPcrO2XOXmdPRI1oWO71PEmSJEmSpPNMqVl5CCEJvBr4LaCGEVf0Yoy/MzOlzQ1nTkSdonZ4zCBKkiRJkiTpfBNdzRvpQ8DrgfuBJwDfBpYBv5ihuuaMMyeimj0RJUmSJEmSNKGpBlEvAm6KMX4QSA/99xbgaTNW2RyxqCCfZDjnal5fYw4rkiRJkiRJmp2mGkSVAIeGXneHEIpjjDuBa2amrLkjLxFYXJBPC9Vkhn47+zwRJUmSJEmSdJ6pBlG7gE1Drx8E/iqE8FbAxIXB63mZkKSVBYAnoiRJkiRJksYy1SDqjcCZp+i9GXgS8HvAH89EUXPNmYblZ/pE9fYeI8aYy5IkSZIkSZJmnUmDqKEn5q0FtgHEGB+LMf52jPHaGONPZ7i+OWHpOQ3L0+kuUqn2XJYkSZIkSZI060waRMUY08A/xhj7slDPnHTmRNQpaofHfHKeJEmSJEnSaFO9mvftEMLNM1rJHLZs6ETU6CfnGURJkiRJkiSNlDfFdQngGyGEXwBHgOEGSDHGV85EYXPJ0nN6RIEnoiRJkiRJks411SBqD/CBmSxkLqsfPhF19mpeX69PzpMkSZIkSRppwiAqhHBbjPFLMcZ3ZKuguagqL0lJMsGp1MLhMa/mSZIkSZIkjTZZj6hPZKWKOS6EQH1hPl2U0UcR4NU8SZIkSZKkc00WRIXp+kEhhJtCCI+FEPaGEN46xvwdIYQtQ792hxBah8afPmJ8SwihN4TwgqG5z4QQDoyY2zhd9T5e9YUFEMJwnyiDKEmSJEmSpNEm6xGVDCE8nQkCqRjjjyf7ISGEJPAx4JlAA/BACOGuGOOOEfu8ccT61wFXD43/BNg4NF4N7AW+P2L7t8QYvzZZDTOtvuhsw/J6GujrO0EmkyKRmGobLkmSJEmSpAvbZClJIfBvjB9EReCiKfyc64G9Mcb9ACGELwO3ADvGWX8b8M4xxl8EfCfG2D2Fn5lVZxuWn3lyXob+/pMUFS3NXVGSJEmSJEmzyGRX87pijBfFGFeP82sqIRRAPXBkxPuGobHzhBBWAquBsU5a3Qp86Zyx94QQHhm62lc4zp63hxA2hxA2NzU1TbHkx2dp4eCJqJFPzvN6niRJkiRJ0lmTBVHTZawTVXGctbcCX4sxpkdtEMIS4ErgeyOG3wZcBlwHVAN/PtaGMcZPxhg3xRg31dbWjrXkN7Zs6ERU8/CJKIMoSZIkSZKkkbLVrLwBWD7i/TJgvJRmrFNPAC8G/jPGOHBmIMbYGAf1AXcyeAUwJ+oLz72aB719jbkqR5IkSZIkadaZMIiKMZZP0895ALgkhLA6hFDAYNh017mLQgiXAguAe8fY4zbOCaiGTkkRQgjAC4Bt01Tv47Zk+GqeJ6IkSZIkSZLGkpVHusUYUyGE1zJ4rS4JfDrGuD2E8G5gc4zxTCh1G/DlGOOoa3shhFUMnqj62TlbfzGEUMvgya0twB/P3LeYWFEyQU1+Hqf7Fw6P9RlESZIkSZIkDctKEAUQY7wbuPucsb865/27xvnsQcZobh5j/J3pq/A3V1+UT/NAira4gEpa6O0ziJIkSZIkSTojW83K54UzfaKahq7neTVPkiRJkiTpLIOoaVRfNLpPVCrVTirVkcuSJEmSJEmSZg2DqGk05pPzen1yniRJkiRJEhhETav6osEgqnlkEGWfKEmSJEmSJMAgalrVF46+mgfQ54koSZIkSZIkwCBqWp05ETUyiOrpPZqrciRJkiRJkmYVg6hpVFuQR34InKJ2eOzQoU+wffub6ek5nMPKJEmSJEmScs8gahoFoCbRQTsVDJA3NJrh+In/4oHNv0t398EcVidJkiRJkpRbBlHTqK39IapSByGEUdfzAAYGWti77/25KUySJEmSJGkWMIiaRseP38VCmgFoHnE974zmph+SSnVluyxJkiRJkqRZwSBqGg0MtLCQUwA0UXfefCRNKtWe7bIkSZIkSZJmBYOoaVRaejELaQLgEKvPm8/Lq6KgoOa8cUmSJEmSpPnAIGoaLV3yImpCKwD7WXPefP3Sl5BI5Ge7LEmSJEmSpFnBIGoaFRUt5fo1rwTgMKvIEIbnqqufyurVb8hVaZIkSZIkSTlnEDXNNix9BgADoYBTYQkAiUQRG676FMlkYS5LkyRJkiRJyimDqGlWnpekIm/wt7Ux7yoAMpleenoO57IsSZIkSZKknDOImgH1hQUA7Ipn+0S1dzyaq3IkSZIkSZJmBYOoGbB0KIjamlo1PNbe/kiOqpEkSZIkSZodDKJmQH3R4JPxjrCCEAZDqQ5PREmSJEmSpHnOIGoGLCsaDJ/SIQ+KLwWgo2MHmUwql2VJkiRJkiTllEHUDFhamD/8uqfwMmCwYXlX995clSRJkiRJkpRzBlEzoH7oRBRAc97a4dcd7V7PkyRJkiRJ85dB1AwoCm
H49Tc7lw6/bu+wYbkkSZIkSZq/DKKm2Y9OtfPCh/cMv7+3p5ZeigBo80SUJEmSJEmaxwyiptHJvgH+aNsBeuPZsRiSHGQ1AO0dO8lk+nJUnSRJkiRJUm4ZRE2jLzWepicTzxvfzxoAEqRoa38s22VJkiRJkiTNCgZR02hHV8+Y4we4ePj1sdYt2SpHkiRJkiRpVjGImkaVeckxx/cNnYgCSHVtz1Y5kiRJkiRJs4pB1DR6fl3VmOMnWUwXJQD0dm3LZkmSJEmSJEmzhkHUNHpKVdmYYVQMCY6EwVNRXV17SKfHvsInSZIkSZJ0ITOImkYhBD52+Ur+bPViagvyhseLEoFNi68HIMY0HZ07clWiJEmSJElSzhhETbP8ROBNqxaz5cnreXp1OQC9mUhN5VXDazraH81VeZIkSZIkSTljEDVDkiHwxMqy4feHwtkn57UbREmSJEmSpHnIIGoGbagoHn79SG8l+fnVALR3GERJkiRJkqT5xyBqBl1VXjL8emtnLxXlVwDQ3b2fVKojV2VJkiRJkiTlhEHUDKrOz2NFUQEAWzu6Ka840ycq0tGxPXeFSZIkSZIk5YBB1AzbMHQq6ljfALFo3fC41/MkSZIkSdJ8YxA1wzaUn+0TtT+sGX5tw3JJkiRJkjTfGETNsI0VI/pE9ZZSWLAIgA6DKEmSJEmSNM8YRM2wUQ3LO3oor7gSgJ7ewwwMtOaqLEmSJEmSpKwziJphFXlJ1hQXAkMNy8uvHJ7zep4kSZIkSZpPDKKyYMPQ9bym/hSposuHxztsWC5JkiRJkuYRg6gsGNmwfB8XDb/2yXmSJEmSJGk+MYjKgg0j+0T1FFFUtAyA9vZHclWSJEmSJElS1hlEZcGVZcWEoddb2rupGOoT1dd3nL6+ptwVJkmSJEmSlEUGUVlQmpfkkpIi4PyG5faJkiRJkiRJ84VBVJZsqBjsE9WSStNTdNnwuE/OkyRJkiRJ84VBVJZsHNEnak+0YbkkSZIkSZp/DKKyZGQQtbU7SUnJamCwYXmMMVdlSZIkSZIkZY1BVJasKysmOdSxfGv72T5RAwOn6OtrzGFlkiRJkiRJ2WEQlSXFyQSXlQ42LH+ks5vy8iuG57yeJ0mSJEmS5gODqCzaMHQ9rz2VobPAhuWSJEmSJGl+MYjKog0j+kQ9Fldz5re/wyBKkiRJkiTNAwZRWTQyiNraGSktvRgYvJpnw3JJkiRJknShM4jKosvLisgPgx3Lt3Z0UzHUsDyVaqOn53AuS5MkSZIkSZpxBlFZVJhIsK7sTMPyHspGNCzvsGG5JEmSJEm6wBlEZdmZ63nd6QxtoxqWP5KrkiRJkiRJkmgAgfMAACAASURBVLLCICrLNo7oE7UzvZwQ8gBo79iWq5IkSZIkSZKywiAqyzZUnA2itnSmKSu9FICOjm3EmM5VWZIkSZIkSTPOICrL1pYUUZQ427C8vGKwYXk63UV394FcliZJkiRJkjSjDKKyLD8RWF9WDMD2zh5Kys42LN9/4KN0de3NVWmSJEmSJEkzyiAqB840LO/NRDYf/u/h8ZMnv82v7nsW23f8PzKZ/lyVJ0mSJEmSNCMMonJgw4iG5Vt7i8+bP378P9m77/3ZLEmSJEmSJGnGGUTlwMYRDcv3ccmYa44e/RIDA+3ZKkmSJEmSJGnGGUTlwMUlhRSHwSfkHWDNmGsymV46OrdnsyxJkiRJkqQZZRCVA8kQuLSwC4BDrCQ1zh9DIuRnsyxJkiRJkqQZZRCVI1dXVgOQDvkcYcV58/n5C6mouCrbZUmSJEmSJM2YrAVRIYSbQgiPhRD2hhDeOsb8HSGELUO/docQWkfMpUfM3TVifHUI4b4Qwp4QwldCCAXZ+j6/qesW1g+/3sfa8+YvuuhPSSTmzNeRJEmSJEmaVFaCqBBCEvgY8GxgHXBbCGHdyDUxxjfGGDfGGDcC/wh8Y8R0z5m5GOPzR4y/D7gjxngJ0AL84Yx+kWm0ofzs0/IOhtFBVF3dc1lW/9JslyRJkiRJkjSjsnUi6npgb4xxf4yxH/gycMsE628DvjTRhiGEAPwO8LWhoc8CL5iGWrNidXEh5cnB3/4Tpc9i/bo7hudSqbZclSVJkiRJkjRjshVE1QNHRrxvGBo7TwhhJbAa+PGI4aIQwuYQwq9CCGfCpoVAa4wxNYU9bx/6/Oampqbf5HtMm0QIbCgvAeCx7n4qa59Lefl6AFpafkUq1ZHL8iRJkiRJkqZdtoKoMMZYHGftrcDXYozpEWMrYoybgJcCHw4hrHk8e8YYPxlj3BRj3FRbW/t46p5RGyoGg6h0hB2dPdTUPBOAGAc4dernuSxNkiRJkiRp2mUriGoAlo94vww4Ns7aWznnWl6M8djQf/cDPwWuBpqBqhBC3hT2nJXOnIgC2NLRTW3NjcPvm5p/mIuSJEmSJEmSZky2gqgHgEuGnnJXwGDYdNe5i0IIlwILgHtHjC0IIRQOva4BngLsiDFG4CfAi4aWvhz45ox+i2k2smH5lo5uysouo6ho8HbhqVM/JZMZyFVpkiRJkiRJ0y4rQdRQH6fXAt8DdgJfjTFuDyG8O4Qw8il4twFfHgqZzrgc2BxC2Mpg8PTeGOOOobk/B94UQtjLYM+of5vp7zKdVhQVsCAvCcDW9h5CCNQMnYpKpdppbb0/l+VJkiRJkiRNq7zJl0yPGOPdwN3njP3VOe/fNcbnfglcOc6e+xl8It+cFIYalv+0pYM93b10pdLU1txIQ8NngcHredXVT8lxlZIkSZIkSdMjW1fzNI4zDcsj8GhnD1VV15GXVwFAc9MPGX04TJIkSZIkae4yiMqxhfnJ4de3bz/I+w82U7bgtwDo7TtGZ+fOXJUmSZIkSZI0rQyicuh7zW28e+/ZB/2d7E/x4UMn+EjL5cNjPj1PkiRJkiRdKAyicqQrleZ1Ow6RGmPu56mrSJMPDF7PkyRJkiRJuhAYROXIt5paaU9nxpzrDcVsYz0AHZ3b6e09NuY6SZIkSZKkucQgKkeO9PZPOP8g1w2/9nqeJEmSJEm6EBhE5cjSwoIJ5x8aEUR5PU+SJEmSJF0IDKJy5Hl1VZQmx//tX125nPLyKwFoab2PgYH2bJUmSZIkSZI0IwyicqQiL8k/XLp8zD+AsmSCD166nNqaGwGIMcWpUz/Nan2SJEmSJEnTzSAqh164aAHfuuYSnltbSWXe2T+K59dVcXlZMbW1zxwes0+UJEmSJEma6wyicuzaylL+9YrVPPKUKygbuqp3T0snMUZKS9dSVLQcgFOnfkYmM3GDc0mSJEmSpNnMIGqWKEwkeMbCCgAO9fazs6uXEAK1tYPX89LpTlpa7stliZIkSZIkSb8Rg6hZ5Nk1lcOvv9PUBjDcJwq8nidJkiRJkuY2g6hZ5BkLKygIAYDvNA8GUZWVm8jLqwKgufmHxBhzVp8kSZIkSdJvwiBqFinPS3LDgjIAtnX2cLinj0Qij5qa3wagr+84HR3bclihJEmSJEnSr88gapa5ubZq+PV3m89cz/PpeZIkSZIkae4ziJplnlVTQRh6feZ6XnX1U
0kkCoDB63mSJEmSJElzkUHULFNbkM91laUA3NfaRXN/iry8UhYseDIAnZ276Ok5kssSJUmSJEmSfi0GUbPQTUNPz8sAPzjl0/MkSZIkSdKFwSBqFnr2UBAF8J2mwSCqpuYZw2PNzT/Kek2SJEmSJEm/KYOoWWh1SSGXlRYB8LOWDrpSaQoL66io2AhAS8t9NB6/i/7+07ksU5IkSZIk6XExiJqlzpyK6stEfnK6gxgzJBPFQ7MZdux4I/fc8xQe2/1uMpmB3BUqSZIkSZI0RQZRs9Sza0dcz2tuY/+Bj9DSeu+oNZnYT0PDZ3ls97uyXJ0kSZIkSdLjZxA1S11ZVkx9YT4AP2huY/+hO8dde+zYV+npOZqt0iRJkiRJkn4tBlGzVAhh+FRUezrD9rhmgtUZTp/+eXYKkyRJkiRJ+jUZRM1iI5+et5nrJ1wbY2amy5EkSZIkSfqNGETNYk+oLKM6PwnAg+EJZAjjrq1aMHFQJUmSJEmSlGsGUbNYXiLwzIWDp6JaqOYAY1/Pq6m5kbLSS7JZmiRJkiRJ0uNmEDXLjbyet7P0ZefNh5Dk0rXvzmZJkiRJkiRJvxaDqFnut6rLKU4M/jFt5lqe9MQfsnbtO6lecAMAMaZpav5eLkuUJEmSJEmaEoOoWa44meDp1eUA7Onu42hcwvJlv8/l695HCIP9oxoaPmezckmSJEmSNOsZRM0Bz649ez3vu81tABQVLqa29iYAursPcOr0z3NSmyRJkiRJ0lQZRM0BNy6sIDn0wLy7m9qGx5cvf/nw64Yjn812WZIkSZIkSY+LQdQcsCA/jydXlQHwcEc3jX39AFRWXEN5+ZUAnDr9c7q69uesRkmSJEmSpMkYRM0RN9WMvJ7XDkAIgeXLRpyKavhc1uuSJEmSJEmaKoOoOeLZI4OoEdfzFi26mYKCGgAaj3+dVKoj67VJkiRJkiRNhUHUHLG0qICN5SUA/E9LB58/2szR3n4SiULql74UgHS6m2ONX8tlmZIkSZIkSeMyiJojUplI/lDD8gzwlt0NXHfvDt6w8zA1i19CCPkANBz5HDGmc1eoJEmSJEnSOAyi5oi/3X+MB9q7R41lgK8cP83bD/axqO5mAHp6D9N86qfZL1CSJEmSJGkSBlFzwKn+FHc2NI87/7UTLSRqbxt+33Dks9koS5IkSZIk6XExiJoD7mvrpC/GCdc8MLCaioqrATjdcg+dXXuyUZokSZIkSdKUGURdICKwfNnvD79vaPhc7oqRJEmSJEkag0HUHHB9ZRkFIUy45qkLyqmru4mCgjoAGhv/k4GBtmyUJ0mSJEmSNCUGUXNATUEeL69fOO78C+uquKikkESigGX1LwUgk+nhWONXs1WiJEmSJEnSpAyi5oi/WlPPH9TXkDxnPAG8/aKlw+/r628jhAIAGho+T4zp7BUpSZIkSZI0AYOoOSI/Efj7tcvY/OR1fPTyFTyvthKADPDV46eH1xUU1LB40XMB6O09SlPzD3NRriRJkiRJ0nkMouaYJYUFvHhxNR+4dDnFicE/vs8ca6Yvkxles2xE0/J9+z5Iw9F/p71jW9ZrlSRJkiRJGskgao6qys/jJUuqAWjqT/FfJ1qH58rLr6CocPC6Xnf3Ph577B088MAtPPjQbfT1ncxJvZIkSZIkSQZRc9irltUMv/5kw0lijAAcPfrv9PYdO299a+v9bNn6h/aNkiRJkiRJOWEQNYetKSnixoUVAGzv7OWXrZ1kMikOHvrncT/T2bmD5uYfZ6tESZIkSZKkYQZRc9yrl9UOv/5kQxM9PQfp6zs+4WdOt9w702VJkiRJkiSdxyBqjrthQRmXlxYB8P3mdg72Tv6ZQJjhqiRJkiRJks5nEDXHhRB41fLBU1ER+PdThRQVLZ/wM9ULn5qFyiRJkiRJkkYziLoA/G7dAhbm5wHwpeMt1Kx4/bhrS0rWsrD6adkqTZIkSZIkaZhB1AWgKJng5fULAehOZ/hx5gbWXvJXJJNl561NJguyXZ4kSZIkSRJgEHXBeMXSGgrCYO+nfzvaxJL63+epN9zLVVf+C+su/xDl5VcC0NGxjeMn7splqZIkSZIkaZ4yiLpA1BXm84JFVQA09A7wneY2kskSamufyZIlt3D5Ze+BoSbl+/Z9gHS6O4fVSpIkSZKk+cgg6gJy+7La4defamgaNVdevp6lS34PgL6+4xw89Ims1iZJkiRJkmQQdQG5oryEJ1cN9oW6v62Lh9tHn3q6aM2bh/tGHT78KXp6jma9RkmSJEmSNH8ZRF1gXr18/FNRhQU1rF71fwHIZPrYu+99Wa1NkiRJkiTNbwZRF5gbF1awqnjwyXh3nWyhsa9/1Pzy5a+guHgFACdPfpvW1s1Zr1GSJEmSJM1PBlEXmGQI/NFQr6hUhDsbmkfNJxKFXHLx24bf797zN8SYyWqNkiRJkiRpfjKIugDduria0uTgH+0/Hj5J/U+28NwHd/Otk60A1NQ8kwULngRAR8c2Ghu/kbNaJUmSJEnS/GEQdQE61jdAKhMBiEAa2Nzezau2H+RDB48TQmDtJe/gzB//vv3/QCrVmbN6JUmSJEnS/GAQdQF6596j9MU45twHDhznYE8fZWWXUl9/KwD9/U0cPPTxbJYoSZIkSZLmIYOoC8zJvgF+crpj3PkIfO14CwAXrf5T8vLKATh8+NP09BzORomSJEmSJGmeMoi6wDQPpCZd09Q/AEBBwUJWr3o9ADH286v7buInP72c++5/Dg1H/90m5pIkSZIkaVplLYgKIdwUQngshLA3hPDWMebvCCFsGfq1O4TQOjS+MYRwbwhhewjhkRDCS0Z85jMhhAMjPrcxW99ntqovzKcghAnXrC4uHH69aNFzCSEfgEymj0ymn87OXTz22DvYsfMtxHGu+EmSJEmSJD1eWQmiQghJ4GPAs4F1wG0hhHUj18QY3xhj3Bhj3Aj8I3DmUW7dwO/HGNcDNwEfDiFUjfjoW858Lsa4Zca/zCxXmZ/HLYuqxp0vDPB7i6uH3x86/EliHBhz7fHj/8Wp0z+b9holSZIkSdL8lK0TUdcDe2OM+2OM/cCXgVsmWH8b8CWAGOPuGOOeodfHgJNA7QzXO6e9++J61pcVjTm3qbKMhQV5AMQYaWz8zwn3amz8xoTzkiRJkiRJU5WtIKoeODLifcPQ2HlCCCuB1cCPx5i7HigA9o0Yfs/Qlb07QgiF535m6HO3hxA2hxA2NzU1/brfYc5YkJ/Ht65Zy/vWLuOpC8rYWF5MaXLwj/qe1k42t3UBEGOKVKp1wr36+5tnvF5JkiRJkjQ/ZCuIGqtp0XjNh24FvhZjTI/aIIQlwOeBP4hnu2i/DbgMuA6oBv58rA1jjJ+MMW6KMW6qrZ0fh6lKkgleXl/Df2y8mO9uupQPX7ZieO5tuxtIx0gikU9R4dKJ9yleOdOlSpIkSZKkeSJbQVQDsHzE+2XAsXHW3srQtbwzQggVwLeBv4wx/urMeIyxMQ7qA+5k8AqgxvDc2kqeuqAMgEc7e/j8sVMA1NffNuHnJpuXJEmSJEmaqmwFUQ8Al4QQ
<remainder of base64-encoded PNG elided: "Training Loss Over Epochs" plot comparing svrg_mse and sgd_mse>\n",
+     "text/plain": [
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# plot graph\n", + "#Plot training loss over Epochs:\n", + "color = sns.color_palette()\n", + "#Draw Weight Variance Ratio\n", + "dataplot3 = {\"svrg_mse\": [], \"sgd_mse\": []}\n", + "with open('sgd_lr.json') as sgd_data, open('svrg_lr.json') as svrg_data:\n", + " sgd = json.load(sgd_data)\n", + " svrg = json.load(svrg_data)\n", + " for epoch in range(100):\n", + " dataplot3[\"svrg_mse\"].append(svrg[str(epoch)][\"mse\"])\n", + " dataplot3[\"sgd_mse\"].append(sgd[str(epoch)][\"mse\"])\n", + "\n", + "x3 = list(range(100))\n", + "plt.figure(figsize=(20, 12))\n", + "plt.title(\"Training Loss Over Epochs\")\n", + "sns.pointplot(x3, dataplot3['svrg_mse'], color=color[9])\n", + "sns.pointplot(x3, dataplot3['sgd_mse'], color=color[8])\n", + "color_patch1 = mpatches.Patch(color=color[9], label=\"svrg_mse\")\n", + "color_patch2 = mpatches.Patch(color=color[8], label=\"sgd_mse\")\n", + "plt.legend(handles=[color_patch1, color_patch2])\n", + "plt.ylabel('Training Loss', fontsize=12)\n", + "plt.xlabel('Epochs', fontsize=12)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Training Loss Comparison with SGD with fixed learning rates\n", + "Choosing learning rate (0.0025, 0.001, 0.005) for SGD and a relatively large learning rate 0.025 for SVRG, we can see SVRG smoothly goes down faster than SGD. Learning rate for SVRG does not need to decay to zero, which means we can start with a larger learning rate." + ] + }, + { + "cell_type": "code", + "execution_count": 33, + "metadata": {}, + "outputs": [], + "source": [ + "train_svrg_lin_reg(output=\"svrg_0.025.json\", optimizer_params=(('learning_rate', 0.025),))\n", + "train_sgd_lin_reg(output=\"sgd_0.001.json\", optimizer_params=((\"learning_rate\", 0.001),))\n", + "train_sgd_lin_reg(output=\"sgd_0.0025.json\", optimizer_params=((\"learning_rate\", 0.0025),))\n", + "train_sgd_lin_reg(output=\"sgd_0.005.json\", optimizer_params=((\"learning_rate\", 0.005),))" + ] + }, + { + "cell_type": "code", + "execution_count": 34, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Text(0.5,0,'Epochs')" + ] + }, + "execution_count": 34, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAABJwAAALMCAYAAAChcKgRAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAIABJREFUeJzs3Xt4ldWd//33SnYISTgICagUKWBQBILBAmqhWusU8dD0YMcjrR11hjr20R8dKfKbGW3n96j9XXWm1kfGVltbO7WFuZx26LRAKVQraC2KxhPQcqxyaIUAARICOaznj+xsAiQQSMhpv1/Xlct73/faa617y1+fa63vCjFGJEmSJEmSpLaS0dETkCRJkiRJUvdi4CRJkiRJkqQ2ZeAkSZIkSZKkNmXgJEmSJEmSpDZl4CRJkiRJkqQ2ZeAkSZIkSZKkNmXgJEmSOp0QQmYIYV8IYUhbtlXnFkK4PYTwfEfPQ5IktZ6BkyRJarVk4NPwVxdC2N/o880n2l+MsTbG2CvG+G5btj1RIYT/N4Twg7but4VjZ4QQZoUQ1iV/zz+FEB4IIfRop/H/Kvn/ct8RfxPaY3xJktS1JTp6ApIkqeuLMfZquA4hbAJujzEuaa59CCERY6xpj7l1YXOAjwE3AyuBkcAPgPOAz7TlQMf4//FujHFoW44lSZLSgyucJEnSKZdcKTQvhPCTEMJeYFoI4eIQwsshhN0hhG0hhEdDCFnJ9okQQgwhDE1+/lHy+cIQwt4Qwu9CCMNOtG3y+ZUhhD+GEMpDCP9fCOHFEMIXTuKdRocQfpuc/1shhKsbPbsmhLA6Of7mEMKM5P2BIYQFye/sDCG80EzfI4HpwI0xxt/HGGtijG8DnwWuCSFcEkKYHELYEkLIaPS9vw4hvJa8zggh/O8QwvoQwo4QwtwQQr/ks8Lkb/Y3IYR3gcUn8f7LkyuuXk3+lj9r6D/5/FMhhHeS7/qbEMK5jZ59MITw3yGE7cm5fevwrsM3k9/bEEKY0ujBbSGETcnfdUMI4YYTnbckSWofBk6SJKm9fBr4MdAXmAfUAHcDBcAkYCr1IUtzbgL+GegPvAv8nxNtG0IYCPwnMDM57kZg4om+SHJb2y+AXwIDgBnAvBBCYbLJ94HbYoy9gbHAb5P3ZwIbkt85IznHpvwVsDHG+FrjmzHGTcArwMeBF4Fq4NIj3vvHyesvA1cDlwCDgQrg0SPGuYT6lVNXc3I+n/wbBATgmwAhhPOAHwH/D/XvugT4nxBCVgghQf3vtg4YCpxF/f+TBh8G3gLyk/19L9lnH+DfgI8nf9dJwJsnOW9JknSKGThJkqT2sjzG+D8xxroY4/4Y4yuNVu9sAJ7g8PDkSM/GGF+NMVYDzwDFJ9H2GqA0xjg/+eybwI6TeJdJQA/gGzHG6uT2wYVAw4qbamBUCKF3jHFno+ComvpwZkiM8WCM8bdH9VyvANjWzLNtQEGMMQJzgRsBQginAVck70F9ePe/Y4xbYoxVwFeB6xqviALujzFWxhj3NzPWkORKo8Z/2Y2ePx1jXBVjrADuA24IIYTk7/DzGONvkr/z14E+wIXAxcn3mxVjrEj+W3ixUZ/rY4xPxRhrgaeBwSGEguSzCIwJIfSMMW6LMa5qZt6SJKmDGThJkqT28l7jDyGEkSGEX4YQ/hxC2AP8C/VBRHP+3Oi6EujVXMNjtB3UeB7J0GZzC+Z+pEHU1zeKje79CfhA8vrTQAnwbgjh+RDChcn7X0+2W5rc6jazmf53AGc28+xMDoVkPwauTW5FvBb4fYyx4X2GUL+qaHcIYTf1q4YiMLBRX4f9P2nCuzHG0474O9DM9/8EZFO/qmxQ8jMAMcY66n/nD1C/omlTMlBqypH/7wB6xRj3UB+u3Qn8OYTwixDCOceZvyRJ6iAGTpIkqb3EIz5/B3gbKIwx9qF+hUw4xXPYRv32MqC+WBCHQqITsRU4K/n9BkOALQDJlVsl1Ic7vyC56ijGuCfGOCNZiPtTwKwQQlOrupYCw0IIFzS+maxTNSH5nBjjm8l3uoLDt9NBfcDz8SPCop4xxlSgc0RgdjLOanQ9BDgA7KT+9/lgo3lnUP+7b6E+pPpgCCHzRAeLMS6MMf4V9aHbOur/DUmSpE7IwEmSJHWU3kA5UJGs+XOs+k1t5RfABSGETyRrCd1NfY2hY8kMIfRs9JcNvER9Dap/SNYl+hhwFfCfIYScEMJNIYQ+ye1ke4FagOS4ZyeDqvLk/aNW+sQYVwPfBX4SQpgYQsgMIYwBngUWxhifb9T8J9TXkLo4+bzBt4EHQwhDkmMPDCGUnMBv1RKfT65UywO+BvxnMsT6T6AkhPDR5OqrmdT/Dr8HfgeUJeeWm/y9Jh1voBDCmcnfLxc4SH1NquZWSUmSpA5m4CRJkjrKPwC3UB9EfIf6QuKnVIzxL8D11BefLgPOBl6nfmVOc6YB+xv9/SG5rewTwCep3972KHBTjPGPye/cAvwpuVXwNuBzyfvnAr8B9lFf9PtbMcblzYx7B/U1jH5CfbiyEPg1cN0R7X4MfAz4dYxxV6P7/wYson773l7qQ7IJx3jPpgwJIew74u9TjZ7/B/XFwbcBmcD/AogxvpP8DR4HtlNfEL4kWe+qhvpaWudRv9rpXepP3zueTOqDq23U/7/7MPClE3wfSZLUTkLrV1JLkiR1TcltXVuBz8YYl3X0fLqSEMJy4Lsxxh909FwkSVLn4wonSZKUVkIIU0MIfZNb4/6Z+q1xKzp4WpIkSd2KgZMkSUo3k4EN1G+Fmwp86oiT1yRJktRKbqmTJEmSJElSm3KFkyRJkiRJktpUoqMncCoUFBTEoUOHdvQ0JEmSJEmSuo2VK1fuiDEOaEnbbhk4DR06lFdffbWjpyFJkiRJktRthBD+1NK2bqmTJEmSJElSmzJwkiRJkiRJUpsycJIkSZIkSVKb6pY1nCRJkiRJ0omrrq5m8+bNVFVVdfRU1IF69uzJ4MGDycrKOuk+DJwkSZIkSRIAmzdvpnfv3gwdOpQQQkdPRx0gxkhZWRmbN29m2LBhJ92PW+okSZIkSRIAVVVV5OfnGzalsRAC+fn5rV7lZuAkSZIkSZJSDJvUFv8GDJwkSZIkSZLUpqzhJEmSJEmSmlS0/G22V9e0WX8DshK8NXlMm/WnzssVTpIkSZIkqUltGTadiv46uxgjd911F4WFhYwdO5bXXnutyXYrV66kqKiIwsJC7rrrLmKMAMycOZORI0cyduxYPv3pT7N7924ANm3aRE5ODsXFxRQXF/PFL36x3d6ppQycJEmSJElSl1ZbW9vRU2jSwoULWbt2LWvXruWJJ57gjjvuaLLdHXfcwRNPPJFqu2jRIgA+/vGP8/bbb/Pmm29yzjnn8NBDD6W+c/bZZ1NaWkppaSnf/va32+V9ToSBkyRJkiRJ6hQqKiq4+uqrOf/88xkzZgxPP/001113Xer5888/zyc+8QkAevXqxX333ceFF17I7373OxYsWMDIkSOZPHkyd911F9dcc02z43z1q1/llltuYcqUKQ
wdOpSf/vSnfOUrX6GoqIipU6dSXV0NwL333suoUaMYO3Ys99xzDwDbt2/n2muvZcKECUyYMIEXX3yx2XHmz5/P5z//eUIIXHTRRezevZtt27Yd1mbbtm3s2bOHiy++mBACn//85/nv//5vAKZMmUIiUV8N6aKLLmLz5s0n8at2DAMnSZIkSZLUKSxatIhBgwbxxhtv8Pbbb/OpT32Kl19+mYqKCgDmzZvH9ddfD9SHU2PGjOH3v/8948ePZ/r06SxcuJDly5ezffv24461fv16fvnLXzJ//nymTZvGZZddxltvvUVOTg6//OUv2blzJz/72c945513ePPNN/mnf/onAO6++25mzJjBK6+8wn/9139x++23NzvGli1bOOuss1KfBw8ezJYtW45qM3jw4GO2AXjqqae48sorU583btzIuHHjuPTSS1m2bNlx37e9GThJkiRJkqROoaioiCVLljBr1iyWLVtG3759mTp1Kv/zP/9DTU0Nv/zlL/nkJz8JQGZmJtdeey0Aa9asYfjw4QwbNgyAG2+88bhjXXnllWRlZVFUVERtbS1Tp05NzWHTpk306dOHnj17cvvtt/PTn/6U3NxcAJYsWcKXvvQliouLKSkpYc+ePezdu7fJMRpqMTUWQjjhNg888ACJRIKbb74ZgDPPPJN3332X119/nX/7t3/jpptuYs+ePcd95/bkKXWSJEmSJKlTOOecc1i5ciULFixg9uzZTJkyheuvv545c+bQv39/JkyYQO/evQHo2bMnmZmZQNOhzfFkZ2cDkJGRQVZWVirkycjIoKamhkQiwYoVK1i6dClz587lscce4ze/+Q11dXX87ne/Iycn57hjDB48mPfeey/1efPmzQwaNOioNo23yh3Z5umnn+YXv/gFS5cuTc0xOzs7Nf8PfehDnH322fzxj39k/PjxJ/w7nCqucJIkSZIkSU0akNW261SO19/WrVvJzc1l2rRp3HPPPbz22mt89KMf5bXXXuPJJ59Mbac70siRI9mwYQObNm0C6rfetda+ffsoLy/nqquu4pFHHqG0tBSor6v02GOPpdo13G9KSUkJP/zhD4kx8vLLL9O3b1/OPPPMw9qceeaZ9O7dm5dffpkYIz/84Q9Tq7gWLVrE//2//5ef//znqRVWUF9HqqFQ+oYNG1i7di3Dhw9v9Tu3JVc4SZIkSZKkJr01eUz7jvfWW8ycOTO16ujxxx8nMzOTa665hh/84Ac8/fTTTX4vJyeHf//3f2fq1KkUFBQwceLEVs9l7969fPKTn6SqqooYI9/85jcBePTRR7nzzjsZO3YsNTU1XHLJJc2eEnfVVVexYMECCgsLyc3N5fvf/37qWXFxcSqsevzxx/nCF77A/v37ufLKK1O1mr70pS9x4MABPv7xjwP1hcO//e1v88ILL3DfffeRSCTIzMzk29/+Nv3792/1O7elcDLLzjq78ePHx1dffbWjpyFJkiRJUpeyevVqzjvvvI6exknZt28fvXr1IsbInXfeyYgRI5gxY0ZHT6vLaurfQghhZYyxRfv23FInSZIkSZK6vCeffJLi4mJGjx5NeXk506dP7+gppTW31EmSJEmSpC5vxowZR61o+v73v8+3vvWtw+5NmjSJOXPmtOnY7TVOV+KWOkmSJEmSBHTtLXVqW26pkyRJkiRJUqdi4CRJkiRJkqQ2ZeAkSZIkSZKkNmXRcEmSJEmS1KRlyy/k4MEdbdZfjx4FfGTy79usP3VernCSJEmSJElNasuw6VT019jQoUPZsePU9d9SvXr1OqH2ixYt4txzz6WwsJCvf/3rTbY5cOAA119/PYWFhVx44YVs2rQp9eyhhx6isLCQc889l1/96lep+7feeisDBw5kzJgxJ/UerWXgJEmSJEmSdArV1tY2e//OO+9k4cKFrFq1ip/85CesWrXqqHbf+9736NevH+vWrWPGjBnMmjULgFWrVjF37lzeeecdFi1axN///d+nxvrCF77AokWLTt1LHUe3D5wqKzex5g/38dJLl/HiS5eyevVs9lWs7ehpSZIkSZKkI1RUVHD11Vdz/vnnM2bMGObNm8eCBQsYOXIkkydP5q677uKaa64BoKysjClTpjBu3DimT59OjLHZfjdt2sTIkSO5/fbbGTNmDDfffDNLlixh0qRJjBgxghUrVgDw29/+luLiYoqLixk3bhx79+4F4Bvf+AYTJkxg7Nix3H///S16l+eff57LLruMm266iaKioibbrFixgsLCQoYPH06PHj244YYbmD9//lHt5s+fzy233ALAZz/7WZYuXUqMkfnz53PDDTeQnZ3NsGHDKCwsTL3LJZdcQv/+/Vs011OhWwdO5eWvs+KVErZseYb9Ve9SVbWZrdv+k1de+SQ7d77Y0dOTJEmSJEmNLFq0iEGDBvHGG2/w9ttvM3XqVKZPn87ChQtZvnw527dvT7X92te+xuTJk3n99dcpKSnh3XffPWbf69at4+677+bNN99kzZo1/PjHP2b58uU8/PDDPPjggwA8/PDDzJkzh9LSUpYtW0ZOTg6LFy9m7dq1rFixgtLSUlauXMkLL7zQovdZsWIFDzzwQJOrlgC2bNnCWWedlfo8ePBgtmzZcsx2iUSCvn37UlZW1uLvd4RuGzjFWMeq1TOpra046lld3QFWrZpJXV11B8xMkiRJkiQ1paioiCVLljBr1iyWLVvGxo0bGT58OMOGDQPgxhtvTLV94YUXmDZtGgBXX301/fr1O2bfw4YNo6ioiIyMDEaPHs3ll19OCIGioqJUTaRJkybx5S9/mUcffZTdu3eTSCRYvHgxixcvZty4cVxwwQWsWbOGtWtbtnNq4sSJqbk3palVWSGEFrdr6fc7QrcNnHaXr6SycmOzzw8c/As7dy5rxxlJkiRJkqRjOeecc1i5ciVFRUXMnj27ye1ljZ1IuJKdnZ26zsjISH3OyMigpqYGgHvvvZfvfve77N+/n4suuog1a9YQY2T27NmUlpZSWlrKunXruO2221o0Zl5e3jGfDx48mPfeey/1efPmzQwaNOiY7WpqaigvL6d///4t/n5H6LaB04GqbcdtU1W1tR1mIkmSJElS19SjR0G79rd161Zyc3OZNm0a99xzDy+99BIbNmxIrUCaN29equ0ll1zCM888A8DChQvZtWtXq+e3fv16ioqKmDVrFuPHj2fNmjVcccUVPPXUU+zbtw+o3972/vvvt3osgAkTJrB27Vo2btzIwYMHmTt3LiUlJUe1Kykp4emnnwbg2Wef5WMf+xghBEpKSpg7dy4HDhxg48aNrF27lokTJ7bJ3For0dETOFWye5553DY9e3aO1E+SJEmSpM7oI5N/367jvfXWW8ycOZOMjAyysrJ4/PHH2bZtG1OnTqWgoOCwMOX+++/nxhtv5IILLuDSSy9lyJAhrR7/kUce4bnnniMzM5NRo0Zx5ZVXkp2dzerVq7n44osB6NWrFz/60Y8YOHBgq8dLJBI89thjXHHFFdTW1nLrrbcyevRoAO677z7Gjx9PSUkJt912G5/73OcoLCykf//+zJ07F
4DRo0dz3XXXMWrUKBKJBHPmzCEzMxOo3374/PPPs2PHDgYPHszXvva1Fq/MagvhWFXcu6rx48fHV15Zwcu/v4LKyg1NtsnOPoMPX/w8GRlZ7Tw7SZIkSZI6p9WrV3Peeed19DQOs2/fPnr16kWMkTvvvJMRI0YwY8aMjp5Wt9fUv4UQwsoY4/iWfL/bbqkLIYNR532DzMxeRz3LyOjJqFEPGzZJkiRJktTJPfnkkxQXFzN69GjKy8uZPn16R09JLdBtt9QB9O1bzIUT/4d333uKrVufpa5uPwAfumAeffqM6eDZSZIkSZKk45kxY0aLVzSVlZVx+eWXH3V/6dKl5Ofnt9mcTnSc9ppXZ9KtAyeAnJwhnHvOV8nMyOFP7z6RvNv9thFKkiRJkpTu8vPzKS0t7XTjtNe8OpNuu6XuSLl5Z6euKyrXd+BMJEmSJEmSure0CZzycgtT15UV6zpwJpIkSZIkSd1b+gROrnCSJEmSJElqF92+hlODRKI32T1O58DBv1BRYeAkSZIkSdLx/HHyR6jdsaPN+sssKOCc5cvarD91XmmzwgkO1XHav/9P1NVVd/BsJEmSJEnq3NoybDoV/TU2dOhQdpzC/luqV69eJ9R+0aJFnHvuuRQWFvL1r3+9yTYHDhzg+uuvp7CwkAsvvJBNmzalnj300EMUFhZy7rnn8qtf/QqA9957j8suu4zzzjuP0aNH861vfSvV/qtf/Sof+MAHKC4upri4mAULFpz4S7ZAWgVODdvqYqxh//4/dfBsJEmSJElSOqitrW32/p133snChQtZtWoVP/nJT1i1atVR7b73ve/Rr18/1q1bx4wZM5g1axYAq1atYu7cubzzzjssWrSIv//7v6e2tpZEIsG//uu/snr1al5++WXmzJlzWL8zZsygtLSU0tJSrrrqqlPyzukVODUqHO62OkmSJEmSOpeKigquvvpqzj//fMaMGcO8efNYsGABI0eOZPLkydx1111cc801AJSVlTFlyhTGjRvH9OnTiTE22++mTZsYOXIkt99+O2PGjOHmm29myZIlTJo0iREjRrBixQoAfvvb36ZW/owbN469e/cC8I1vfIMJEyYwduxY7r///ha9y/PPP89ll13GTTfdRFFRUZNtVqxYQWFhIcOHD6dHjx7ccMMNzJ8//6h28+fP55ZbbgHgs5/9LEuXLiXGyPz587nhhhvIzs5m2LBhFBYWsmLFCs4880wuuOACAHr37s15553Hli1bWjTvtpJWgVPuYYXDPalOkiRJkqTOZNGiRQwaNIg33niDt99+m6lTpzJ9+nQWLlzI8uXL2b59e6rt1772NSZPnszrr79OSUkJ77777jH7XrduHXfffTdvvvkma9as4cc//jHLly/n4Ycf5sEHHwTg4YcfZs6cOZSWlrJs2TJycnJYvHgxa9euZcWKFZSWlrJy5UpeeOGFFr3PihUreOCBB5pctQSwZcsWzjrrrNTnwYMHNxkMNW6XSCTo27cvZWVlLfr+pk2beP3117nwwgtT9x577DHGjh3Lrbfeyq5du1r0LicqrQKnw1c4GThJkiRJktSZFBUVsWTJEmbNmsWyZcvYuHEjw4cPZ9iwYQDceOONqbYvvPAC06ZNA+Dqq6+mX79+x+x72LBhFBUVkZGRwejRo7n88ssJIVBUVJSqiTRp0iS+/OUv8+ijj7J7924SiQSLFy9m8eLFjBs3jgsuuIA1a9awdu3aFr3PxIkTU3NvSlOrskIILW53vO/v27ePa6+9lkceeYQ+ffoAcMcdd7B+/XpKS0s588wz+Yd/+IcWvcuJSqvAqUePAhKJ+h+40i11kiRJkiR1Kueccw4rV66kqKiI2bNnN7m9rLGmwpnmZGdnp64zMjJSnzMyMqipqQHg3nvv5bvf/S779+/noosuYs2aNcQYmT17dqrm0bp167jttttaNGZeXt4xnw8ePJj33nsv9Xnz5s0MGjTomO1qamooLy+nf//+x/x+dXU11157LTfffDOf+cxnUm1OP/10MjMzycjI4G//9m9T2wnbWloFTiEE8nLrt9VVVK4nxroOnpEkSZIkSZ1XZkFBu/a3detWcnNzmTZtGvfccw8vvfQSGzZsSK1AmjdvXqrtJZdcwjPPPAPAwoUL22Rr2Pr16ykqKmLWrFmMHz+eNWvWcMUVV/DUU0+xb98+oH572/vvv9/qsQAmTJjA2rVr2bhxIwcPHmTu3LmUlJQc1a6kpISnn34agGeffZaPfexjhBAoKSlh7ty5HDhwgI0bN7J27VomTpxIjJHbbruN8847jy9/+cuH9bVt27bU9c9+9jPGjBnTJu9ypMQp6bUTy80rpHzP69TVVVFVtZWcnMEdPSVJkiRJkjqlc5Yva9fx3nrrLWbOnElGRgZZWVk8/vjjbNu2jalTp1JQUMDEiRNTbe+//35uvPFGLrjgAi699FKGDBnS6vEfeeQRnnvuOTIzMxk1ahRXXnkl2dnZrF69mosvvhiAXr168aMf/YiBAwe2erxEIsFjjz3GFVdcQW1tLbfeeiujR48G4L777mP8+PGUlJRw22238bnPfY7CwkL69+/P3LlzARg9ejTXXXcdo0aNIpFIMGfOHDIzM1m+fDn/8R//QVFREcXFxQA8+OCDXHXVVXzlK1+htLSUEAJDhw7lO9/5TqvfoynhWFXcu6rx48fHV199tclnf3r3Sdat+zoA55//PQryP9qOM5MkSZIkqfNavXo15513XkdP4zD79u2jV69exBi58847GTFiBDNmzOjoaXV7Tf1bCCGsjDGOb8n302pLHRxeONw6TpIkSZIkdW5PPvkkxcXFjB49mvLycqZPn97RU1ILpN2Wurw8T6qTJEmSJKmrmDFjRotXNJWVlXH55ZcfdX/p0qXk5+e32ZxOdJz2mldnknaBU8+eHyAjoyd1dVVUVLrCSZIkSZKk7iI/P5/S0tJON057zaszSbstdSFkkJs7HICKivV0xxpWkiRJkiRJHSntAieAvLyzAaip2U11dVkHz0aSJEmSJKl7Sc/AKffs1HWFhcMlSZIkSZLaVNrVcALIbVw4vHI9/fpd2IGzkSRJkiSpc/rGN75BRUVFm/WXl5fHzJkz26w/dV6ucPKkOkmSJEmSmtSWYdOp6K+xoUOHsmPHjlPWf0v16tXrhNovWrSIc889l8LCQr7+9a832ebAgQNcf/31FBYWcuGFF7Jp06bUs4ceeojCwkLOPfdcfvWrX6XuDx06lKKiIoqLixk/fvxJvUtrpOcKp9yhhJBJjLVUuqVOkiRJkiSdQrW1tWRmZjZ5/8477+TXv/41gwcPZsKECZSUlDBq1KjD2n3ve9+jX79+rFu3jrlz5zJr1izmzZvHqlWrmDt3Lu+88w5bt27lr/7qr/jjH/+YGuu5556joKCgXd7xSGm5wikjowc5OUMAqKh0hZMkSZIkSZ1BRUUFV199Neeffz5jxoxh3rx5LFiwgJEjRzJ58mTuuusurrnmGgDKysqYMmUK48aNY/r06cc8hX7Tpk2MHDmS22+/nTFjxnDzzTezZMkS
Jk2axIgRI1ixYgUAv/3tbykuLqa4uJhx48axd+9eoH5r4YQJExg7diz3339/i97l+eef57LLLuOmm26iqKioyTYrVqygsLCQ4cOH06NHD2644Qbmz59/VLv58+dzyy23APDZz36WpUuXEmNk/vz53HDDDWRnZzNs2DAKCwtT79LR2i1wCiFMDSH8IYSwLoRwbxPPPxhCWBpCeDOE8HwIYXCjZ7UhhNLk38/bYj65yW11Bw78mZqavW3RpSRJkiRJaoVFixYxaNAg3njjDd5++22mTp3K9OnTWbhwIcuXL2f79u2ptl/72teYPHkyr7/+OiUlJbz77rvH7HvdunXcfffdvPnmm6xZs4Yf//jHLF++nIcffpgHH3wQgIcffpg5c+ZQWlrKsmXLyMnJYfHixaxdu5YVK1ZQWlrKypUreeGFF1r0PitWrOCBBx5g1apVTT7fsmV2NP+DAAAgAElEQVQLZ511Vurz4MGD2bJlyzHbJRIJ+vbtS1lZ2TG/H0JgypQpfOhDH+KJJ55o0XzbUrsETiGETGAOcCUwCrgxhDDqiGYPAz+MMY4F/gV4qNGz/THG4uRfSVvMKe+wwuEb2qJLSZIkSZLUCkVFRSxZsoRZs2axbNkyNm7cyPDhwxk2bBgAN954Y6rtCy+8wLRp0wC4+uqr6dev3zH7HjZsGEVFRWRkZDB69Gguv/xyQggUFRWlaiJNmjSJL3/5yzz66KPs3r2bRCLB4sWLWbx4MePGjeOCCy5gzZo1rF27tkXvM3HixNTcm9LUqqwQQovbHev7L774Iq+99hoLFy5kzpw5LQ7J2kp7rXCaCKyLMW6IMR4E5gKfPKLNKGBp8vq5Jp63qcaFwystHC5JkiRJUoc755xzWLlyJUVFRcyePbvJ7WWNNRXONCc7Ozt1nZGRkfqckZFBTU0NAPfeey/f/e532b9/PxdddBFr1qwhxsjs2bMpLS2ltLSUdevWcdttt7VozLy8vGM+Hzx4MO+9917q8+bNmxk0aNAx29XU1FBeXk7//v2P+f2G/w4cOJBPf/rT7b7Vrr0Cpw8A7zX6vDl5r7E3gGuT158GeocQ8pOfe4YQXg0hvBxC+FRTA4QQ/i7Z5tXGS+yac9gKJwuHS5IkSZJ0lOMFJm3d39atW8nNzWXatGncc889vPTSS2zYsCG1AmnevHmptpdccgnPPPMMAAsXLmTXrl2tnt/69espKipi1qxZjB8/njVr1nDFFVfw1FNPsW/fPqB+e9v777/f6rEAJkyYwNq1a9m4cSMHDx5k7ty5lJQcvbGrpKSEp59+GoBnn32Wj33sY4QQKCkpYe7cuRw4cICNGzeydu1aJk6cSEVFRar+VEVFBYsXL2bMmDFtMueWaq9T6pqKHI9c93UP8FgI4QvAC8AWoCb5bEiMcWsIYTjwmxDCWzHGw1KiGOMTwBMA48ePb75SWFJu7vDUdUWlgZMkSZIkSUeaOXNmu4731ltvMXPmTDIyMsjKyuLxxx9n27ZtTJ06lYKCAiZOnJhqe//993PjjTdywQUXcOmllzJkyJBWj//II4/w3HPPkZmZyahRo7jyyivJzs5m9erVXHzxxQD06tWLH/3oRwwcOLDV4yUSCR577DGuuOIKamtrufXWWxk9ejQA9913H+PHj6ekpITbbruNz33ucxQWFtK/f3/mzp0LwOjRo7nuuusYNWoUiUSCOXPmkJmZyV/+8hc+/elPA/Urom666SamTp3a6vmeiHCsKu5tNkgIFwNfjTFekfw8GyDG+FAz7XsBa2KMg5t49gPgFzHGZ5sbb/z48fHVV1897ryWvziJAwf+TE7OUD588dLjtpckSZIkqTtbvXo15513XkdP4zD79u2jV69exBi58847GTFiBDNmzOjoaXV7Tf1bCCGsjDGOb8n322tL3SvAiBDCsBBCD+AG4LDT5kIIBSGEhvnMBp5K3u8XQshuaANMApou736C8nLrt9Xt3/8udXUH2qJLSZIkSZLUhp588kmKi4sZPXo05eXlTJ8+vaOnpBZoly11McaaEMKXgF8BmcBTMcZ3Qgj/ArwaY/w58FHgoRBCpH5L3Z3Jr58HfCeEUEd9QPb1GGPbBE55hezctRyoo7JyE716ndsW3UqSJEmSpDYyY8aMFq9oKisr4/LLLz/q/tKlS8nPz2/iGyfnRMdpr3l1Ju1Vw4kY4wJgwRH37mt0/Sxw1Da5GONLQNGpmFNu3qGT6ioq1xs4SZIkSZLSXozxhE5/60zy8/MpLS3tdOO017zaSluUX2qvLXWdUsOWOvCkOkmSJEmSevbsSVlZWZsEDuqaYoyUlZXRs2fPVvXTbiucOqO8RiucKivWdeBMJEmSJEnqeIMHD2bz5s1s3769o6eiDtSzZ08GDz7qHLcTktaBU48e+WRl9aO6ehcVla5wkiRJkiSlt6ysLIYNG9bR01A3kNZb6gByc+tXOVVWbiDG2g6ejSRJkiRJUteX9oFTw7a6uroDVFVt6eDZSJIkSZIkdX3dfktdjJH9paVUvPgS1NWRe9GF5E6YkKq4f2Th8JycIR01VUmSJEmSpG6hWwdOtfsq2PK//hcVy5cfuvnv/07O+A9x1mOPkXnaaYcVDq+oXEcBl3XATCVJkiRJkrqPbr2lbts//9PhYVPS/ldXsuWemQDkHrHCSZIkSZIkSa3TbQOng5s3s3fRr5p9XrF8OVV/+CM9e55JRkZO/b2Kde01PUmSJEmSpG6r2wZO+994A2I8dpvXXyeEDPLyhgNQWbmOeJzvSJIkSZIk6di6beAUevRocZuGwuE1NXs5eHD7KZ2XJEmSJElSd9dtA6e8iy4i5OQ03yCRoNclHwEgt3HhcLfVSZIkSZIktUq3DZwye/em4ItfbPZ5/t98gURBAXBohRNARaWFwyVJkiRJkloj0dETOJXy/+5vyeiZzfZvf4e6XbvqbyYyGfClL5H/d3+XapeXdyhwqvSkOkmSJEmSpFbptiucAEII9L/lFkb89nky+50GQPbwsyn44hcJGYdePSdnCCHUZ28VlW6pkyRJkiRJao1uHTg1yOjRg8SZZwJQs3Pn0c8zssjJGQpAhSucJEmSJEmSWiUtAieARH59vabanTuJtbVHPc9LFg4/ePB9amr2tuvcJEmSJEmSupP0CZySBcKpq6O2oZ5TI3m5nlQnSZIkSZLUFtIvcAJqduw46nnjwuFuq5MkSZIkSTp5aRQ45aeua7YfHTjl5jVa4WThcEmSJEmSpJOWNoFTZuMVTmVNrHDKHZ66rnSFkyRJkiRJ0klLm8ApUTAgdV3bxJa6zMxcevb8AOAKJ0mSJEmSpNZIo8Dp2Fvq4FDh8P37N1Nbe6Bd5iVJkiRJktTdpFHg1HhLXVmTbXJThcPrqNy/sR1mJUmSJEmS1P2kTeCU0acPISsLaPqUOji0wgmgomJtu8xLkiRJkiSpu0mbwCmEkCocXrNje5Nt8lIrnCwcLkmSJEmSdLLSJnCCQ9vqanc0vaUuL6/RCqdKAydJkiRJkqSTkZ6B0+7dxIMHj3qeldWPrKz+AOwpL6Wqamu
7zk+SJEmSJKk7SLPAqdFJdTt3HvV8y5afUFOzB4CqA1t58aVLeOvtu6ip2dtuc5QkSZIkSerq0ipwymx8Ut0R2+q2bfspa/7wT8RY0+hu5P33f8mbb36RGGM7zVKSJEmSJKlrS6vAKXFY4HSocHiMtWzY+K1mv7dr98vs2v3yKZ2bJEmSJElSd5FmgdOA1HXtjh2p64rKDVRVbT7md8vKnj9V05IkSZIkSepW0ixwalTDqdGWuhhrj/vdlrSRJEmSJElS2gVOjbfUHVrhlJc7nKys/Ka+ktLvtAtP2bwkSZIkSZK6EwMnICOjBx/84N82+71eeeeSn3/ZKZ2bJEmSJElSd5FWgVNGXh4hJwc4vIYTwJCzbueDH/wiISQOu5+TM4zzi58iI+Pw+5IkSZIkSWpaWgVOcGiVU80RgVMIgcKzZzLpw8sYctZtqfsDB06lZ/YZ7TpHSZIkSZKkrszA6QjZ2QMZNuxuGn6avXvebK+pSZIkSZIkdQtpGDjVFwev27ePuqqqptsk8ujV6xwAyve8QYx17TY/SZIkSZKkri7tAqfMwwqHlzXbrk+f8wGord1HZeWGUz4vSZIkSZKk7iLtAqfGJ9XV7tjebLu+fYpT1+V7Sk/pnCRJkiRJkrqT9Auc8huvcGq6jhMcWuEEsGfPG6d0TpIkSZIkSd1J+gVOA1q2pS4vr5DMzDzAwEmSJEmSJOlEpF/gVNCyFU4hZNK79xgA9u37A7W1TRcYlyRJkiRJ0uHSLnDKPGxLXfM1nOBQHacYa9i79+1TOi9JkiRJkqTuIu0Cp0RBfuq6tqz5LXUAffpax0mSJEmSJOlEpV3glNGzJxm9ewNQs735LXVweOFwT6qTJEmSJElqmbQLnAAS+fWrnI5VwwmgZ/YZZGefAcCePW+e8nlJkiRJkiR1B+kZOCULh9eUlRFjPGbbhlVOVVWbOXjw2AGVJEmSJEmS0jRwyhxQHzjF/fupq6g8Ztu+h22rs46TJEmSJEnS8aRl4JRodFJd7XFOquuTPKkOYE+5dZwkSZIkSZKOJz0Dp4JDgVPNcU6q6917DA0/kyfVSZIkSZIkHV96Bk4DGgVOxzmpLpHIo1feCAD27H2TGOtO6dwkSZIkSZK6urQMnDKTp9TB8U+qg0OFw2tq9lJZufGUzUuSJEmSJKk7SMvAKVEwIHVdU9aCwKlvozpOe6zjJEmSJEmSdCzpGTgNaFw0/PiBU99GhcM9qU6SJEmSJOnY0jNw6t8/dX28Gk4AeXmFZGbmAq5wkiRJkiRJOp60DJxCVhaZp50GHP+UOoAQMunduwiAffv+QG1t1SmdnyRJkiRJUleWloETHNpW15Ki4QB9k4XDY6xh7753Ttm8JEmSJEmSurq0DZwy8+sDp9odO4gxHrd9n0Z1nPaUW8dJkiRJkiSpOWkbOCUK6gOnWF1N3Z49x23fp8/Y1HW5dZwkSZIkSZKalejoCZxqf/7zn3nxxRdZv349MUaGDRvGpEmTUoET1G+ry+zb95j99Ox5Jtk9TufAwb+wx5PqJEmSJEmSmtWtA6eNGzfyzDPPUFNTk7q3atUq1qxZw3VZWal7Ndt3kH322cftr0/f89m+fTFVVZs5eHAHPXoUHPc7kiRJkiRJ6abbbqmrq6tj/vz5h4VNjZ+9tmFj6nNNWcsKhx9Wx2nPm62fpCRJkiRJUjfUbQOnP/3pT+zevbvZ5+UZh1699gRPqgPrOEmSJEmSJDWn2wZOe/fuPebzAz2zU9c1LQycevceAwTAk+okSZIkSZKa020Dp379+h3z+f6cnNR1zY6yFvWZSPQiL28EAHv2vkGMdSc/QUmSJEmSpG6q2wZOgwcPZuDAgc0+zxk4EJLb6lq6wgmgb7KOU03NXiorN7VqjpIkSZIkSd1Rtw2cQgh85jOfITc396hn2dnZXPvXf01m//7AiQVOfRrVcdpjHSdJkiRJkqSjdNvACeCMM87gi1/8Ih/+8IdT9/r3788dd9zBWWedRaKgAGh50XCAPn0PnVRXvsc6TpIkSZIkSUfq1oETQJ8+fZgyZQqJRCL1+bTTTgNIBU41O3cSa2tb1F9ebiEZGfX1n1zhJEmSJEmSdLRuHzg1yEkWCd+/f3/qXiI/v/6itpba3btb1E9GRoI+fYoA2LdvDbW1VW07UUmSJEmSpC4u7QKnysrK1L3EgILUdUtPqoNDdZxirGHfvlVtNENJkiRJkqTuIe0Cp8YrnDILGgdO21vcV8NJdWAdJ0mSJEmSpCOlTeDUcFpdTU0N1dXVACTyDwVOJ1Q4vPFJdeXWcZIkSZIkSWosbQKnhhVOcGiV08luqcvOPoMePQYCrnCSJEmSJEk6UnoHTodtqWv5CqcQAn2Tq5yqqt7j4MGWh1WSJEmSJEndnYFT0okETgB9GtVx2rPnzVbOTpIkSZIkqftIy8Cp4aS6jL59ISsLgNqyEwyc+h6q41S+xzpOkiRJkiRJDdIycGpY4RRCIJGfD0DN9hMMnHqPAQIAu3a+RE3NvraZqCRJkiRJUheXNoFTwyl1cChwgkPb6k50S93+qi1kZPQEoHzPa7ywbCKr1/wjNTV722C2kiRJkiRJXVfaBE5NrXACUiucanfvJlZXt6ivior1vLbyBurqDvUT4wG2bp1LaenfUFd3sI1mLUmSJEmS1PWkfeCUOSBZODxGanbualFfGzZ+i5raplcyle95nfffX3TyE5UkSZIkSeri0jJwaigaDkeeVLf9uP3EWMv27b8+Zpv3319wEjOUJEmSJEnqHtImcMrKyiKRSABHbqk7FDjVtqCOU4w1xHjsLXO1tZXHfC5JkiRJktSdpU3gBIcKhx8WOA1ovMKp7Lh9ZGRkk5c34phtevcZe5IzlCRJkiRJ6vrSKnBq2FbX1Cl10PKT6oacdVuzzzIysvnAoBtPcoaSJEmSJEldX9oHTpnJU+qg5YHTmWd+liFD/vao+yFkUjRmDjk5H2jlTCVJkiRJkrqutAycampqqK6uBiAxYEDqeW1ZywKnEAIjCu/logt/xQeH3EFmZv1WvUSiD/n5l7bxrCVJkiRJkrqWtAyc4NBJdRl5eYSePQGo2d6ywKlBXl4hhYX3cPrAawCort7Fvn2r22i2kiRJkiRJXVPaBk4N2+pCCCSS2+pauqXuSP3zP5K6Ltu5vBUzlCRJkiRJ6vrSKnBqOKUOmi4cXlN2/FPqmtK/34eBAMDOnctOfoKSJEmSJEndQFoFTk2tcALIHFAfONXt2UPdgQMn3G9W1mn06V0EwO7dK6mtrWzlTCVJkiRJkrouAycgkV+Quq492W11/ScDEONBdu1ecZIzlCRJkiRJ6voMnDi0pQ5asa2u/6E6Tjut4yRJkiRJktJY2gZODafUASQGNAqcTnKFU9++xWRm5gEGTpIkSZIkKb2lbeB0WA2n5Cl1ADXbTy5wysjoQb9+FwFQUbGWqgN/PslZSpIkSZIkdW3tFjiFEKaGEP4QQlgXQri3iecfDCEsDSG8GUJ4PoQwuNGzW0IIa5N/t5zsHFq2pe7kAic4VMcJXOUkSZIkSZLSV7
1+tg+YqTcTpDr87IzX2Q9LRLAXC62li27Liw9azU6jiG5j2E1+vA7qimqip0mYNdVKoYZNmF1xv+gfrBJQHhcz6SpCY1dTYqVRRej43autfDxmo0Zo6b9CNKpUYknAZSwmkXp89Hl9tLtErJ6m4bF24sxyv3/RN4Y2QW0+OjD/UQBaFfrFq1is8//zzkuZiYGE466SQaGxtpaGig6mduUW8ymdDr9TQ0NISNOfnkkzlujy3lvV4vRUVFrFq1ip6eHhQKBUOHDuXEE08MqFElCP3J09aGfd06JKWSiMJClNHB7+3uhgaqL70Md319QLuk15P+4otETpzgb6uZMwdb0fKw14ucNAnZ48Hd2Ii7trbf7kPSaPoSRAkJIEHL34MLlu5iPP10Uv/xlP/YumwZddffgNwbOLtXZTaT8cYbaAcH1pLydHbS9e67WH9Yiuz1EnFMIbGzZ6NODV1sVRAEQRCEI4vDUYvT1YJelxaQaNqTLMu0tn5NfcM72O1VaDXxJCWfS0ry+f7i6bvYbOUUb7x6r2LgCjIHXcPgwbcHrDCqrHqBiopQO/wpGDXyBczm6f6W9Rv+QEfHjyHHp9dncuzEr5EkJT6fh1WrTsdmLwt7z8lJ56PVmvF4bTQ2LhpASarwCsctIiZmrEg4DcSE097erG/jztK+daiRSgWfjM1hmEG/n1cJwsAnyzI//PADS5cuxefbPX0zKSmJWbNmYdqjwPFzzz1HRz/VtYmOjuaqq67CYOhbh7xo0SK2bNkSFKfX67nyyitDJp2ampqora1FpVKRnZ1NlNh2XjhAvBYLXYsWY/32W2S3G33hOEwXzUaTFphkcWzaRPWllwUlbwAMJ5xA2ksv+r801Vx7Hbbvvw97TUVkJD6n0z8Dq99IEuZbb0GTloYiOpr6W2/DZ7GEDNWPHUvm22/5j11VVVRf/gc8zYFbESsiI0l/ZS4RY8cGtPtsNroWL8by5Vf4bDZ0w4djung2+uFix1dBEARBEHbrq2f1Az09W1CqDCSYT0GnSwkRJ9PYuJjqmpew2ysBiDaOIWvwLcTFTg6IdTqbWbf+Mux7JZI0GjNjCt4IqJ3U2rqEjZuuDTm2mJjxjB3zlr+Qe1XVvymv+EfYexk9+j9EG0fg8dgor3ia5uYPw8bq9Zk7lwza6LYU43SGf2D/S40d8w6xsRNEwulwSDgB/HVHHa/U9dXbSNWq+bIwF7NGLPkRjgxWq5Xt27fjdDpJSUlh0KBBQfXKVqxYwZdffhm2j4KCAmw2G83NzVjC/IDdm06nIyoqitbW1rAx+fn5zJq1uyCi3W7n/fffp6xs94eHQqFg4sSJTJs2TRQuFw4pR3Exzf/3fzjW9NVbUhgMxMycifmWmwN2lbOvXUv1pZeBL3idvqTXM/iTj1EnJ+Npb6fxgQewfff9wbqFAPE33YR++DCUcfE03X8/vSUlIeNUiYlkf7MEaedSWE9nJzWXXY5zx47AQIWC5L/9jZhzzg5o9vX20rlgAd0ff4y3rR1NZiamC2cRddpp+6ydKHs8oFSK+oqCIAiCcBSRZRmXux2FpPLvNBeK19tLc8untLd9jyx7iDFNIDnpPH8x9D01N3/GjrLH91jepyQp8Qzy8h7y16fq69NJ8cYr6ewMntWekX4lOTn3+o8djjpWrpqB12sLipUkNYWFizBGjQDAYtnI6jXnhL2XBPMMhgy5DY/XRlfnSnaUPRY2VqmM2LkLn3HgJZwkSToVeBZQAq/KsvzEXuefBn638zACSJBlOWbnucuBv+w896gsy2/s61qHU8LJK8tcurGCbzv6dvcqNEawqCAbnVL8uBWODl6vl8WLF1MS4gfntGnTmDx591OFN998k4qKiqC4X+uss84iISGBuLg43nnnHaqrq0PGTZ06lalTpwa0tbS0UFRURHl5ObIsk5WVxaRJk0hOFrtOCgeOu7kFn82KOiUFhU4XMqb7s89oeuBBfFarv01lNpPyj6eIHD/e3+bYvIWqCy4Iu7Ne4v33Ezn+GNzNzXS+9TbWb7/t35v5mUyzZxM5ZTKquDjaX5tHT7hNCVQqsv/3DerEvmn4PrudmiuvwrE+eBvnmIsuJOn++wMSSr7eXtpfe42u9xbiaW5GGRtLzLnnEHfNNSjDzHT0Wm24a2tQGo1i+Z8gCIIgCCH5fB4sPcV4PTYMhqFotaHrN/t8LhobF9PY9AEuVzsREZmkpV5MXNzvgh6CdXevY/OWWwN27tNo4skf+gTx8b8LiN1R9jg1Na8GXU+nS6dw3Hv+8ciyj9VrzqWnZ1PI8Q3KuJrs7LsGXtFwSZKUQClwMlAHrAYukmU55CNNSZJuBMbIsjxHkqRYYA1QSF+1q7XAOFmWO8Nd73BKOAH07NzJbvvOnezOSYjh6jQzpXYnJrWS401RIgElHNF8Ph+lpaUUFxdjs9mIjY2lsLCQtLS0gLht27axYEH4Asp5eXloNBpaW1tpbm6mv97ftFott99+O5qdM0mqq6uZP38+brc7IE6pVHLhhReSk5MT0C7LMvX19WzduhW3201qairDhg0TBcyFA8Zns9HzzTd42tpQp6UT9bupSBpNUFznggU0PfxI0Iwo0yWXkHjfvf4vN666OspPORW83pDXM0ybhmnWTDwtrVh/+IGer7/u/5v6GSKPP57oM89AaYqlZ8kSut59N2xs+iuvYJjSl9CWXS5q/ng19pUrg+K0Q4cyaP5/URr22DLYbqflqafo+uADZEffZ7d+9GgS77kbfUFBUB/Qt9uhff16FBoNkZMni539BEEQBEH4TWTZS0fHTzgcdWi1icTFHR9Uy6ovTqa5+RPq6t7EatuOSmUkMfEMBmVcjUYT+H3E6Wxh46ZrsViKA9pTkmeSl/cwCoV6QCacjgUelGX5lJ3H9wDIsvx4mPgi4AFZlpdIknQRMFWW5Wt2nnsZ+F6W5XfCXe9wSzgBVDucnLa2lA538Jf5WLWSx3LSODvRFOKVgnD0kGWZzz77jFD/f+fn53P++eejVCoB+Pbbb1m6dGm/XdtsNpOUlERMTAxr167Fbg9d1C8yMpJbb70VlapvS1mPx8MHH3wQVEvKaDRy8cUXk5gYXBTR4/FQUVGBw+EgPj6elJQUsbRHOGCcFZV0LV6Eq7oaVXw80WedRcSYMUFxnQvepemhh4JmRGlzc8l44z+odtZm81ptlJ1wAj5b8DRvAHVGBuZbb8Hb3kHvls10fxC+BsGBpM7KJObcc1GZTDhKSuh6O+zXCuJvvAHz9dcDIHu91Fx5FfYVK4LiJK2WQfPnox85wt/m7e6m4a67se5RW0tSq4m94grMt9wcsHshgM/louerr7H99BOy7CNy4rEYZ5wWdkabIAiCIAhCf5Jlma6uVXR3r0Wh0BIffyIREbs3fBmICafzgVNlWb5q5/GlwARZlm8IETsIWAGkybLslSTpz4BOluVHd57/K+CQZfmpvV53NXA1QEZGxrhwS2MGsh/aLczaGHq5kAQsGD2EE2JFAWPh6CbLMtu3b2ft2rV0dXURFRVFQUEBI0aMCKiz1N3dzXP
PPYc3zIyMMWPGkJ6eTnt7O9u3b6etra3fxjhhwgTy8/OJiYlh1apVFBUVhYwzGo3ceOONATOdSkpK+Oyzz7Dt8WM9JSWF8847j7gwMyJaW1vp7u4mOjoas9ncb/chCHtzbNhAx/y36N26FYUhEuNpp2G64AIUkZEBcZbPP6f+jjuDZkQpDAYyXp+HfuRIoO//54oZp+OqrAx5PUmvJ/nBB/DabLjrG+iYNy/sEsADSdLrMZ56KsqYGDzt7Vg+/jhsbOTkyWS8+grQd381V8wJmZwCMN9yM/HX7i4m6m5pofbKq4JqVKkzMsiYNy+ooLy7sZH2V16lZ8kSfL296EeOIPYPf8Bw/PEhryfLMr0lJXg7u9BmZYplgIIgCIIg/GIDMeF0AXDKXgmn8bIs3xgi9i76kk037jy+A9DulXCyy7IctoT74TjDCWBeXSv37qgPe35idCQfjs0Je14QhEBbt25l0aJFQUmnoUOHcsEFF/hnQ9XV1fHqq8HrmneRJAm1Wo3L5er3Mc6YMYPCwkIUCgWVlZW8+eabIZcCRkdHc91116HbY5ZDa2srH3/8MbW1tf62tLQ0fv/735OQEHpteGdnJ21tbej1elJSUkQxdOGAsa9fT8e817GvWYOkUmGYOpW4K+egycwMjFuzhpqr/hi8C59CQcqTTxJ9xun+probb6RnyTdhr5n82N+Q1Gq8HR20/uv5gDpWB5MmOxuVyYSMjGN1+O8jksHAkM8/QxUfj6RQUH3FFdiXh05O6UaMIHPhe/7Zjs7KSqovuRRve3tQbOI9dxN7+eUBbbYVK2l65BFc5eU7Ly5hOOEEkh5+CHWI9wtXVRVdH3yIu7EBdVIy0eecjTYrKyhuF1mWkV0uJLU6aNaWIAiCIAhHjoGYcPrZS+okSVoPXC/LctHO46NiSR3A5Zsq+Kpt37twVR4/Cr2o5yQIP1t3dzfr1q2jubkZnU7HiBEjGDJkSNAStbfeeuv/2Tvv+KjKtP1/z5mWSTJJJj0hpBB6R7oCroBIUUDp0hSxLaK46zZdd9XVfdd1V8Uu9gI2UFdBwYpSRKS3QEIS0nsyvc85vz8mOWQyM3F33333t65zfT4hnJNrnvM8zzlz5jzX3Pd1U9K18lU7pk2bxvjx43E4HBQVFbF169Z/aR9VKhUGgwGn04nb7Y7Iu+yyyxg/fjwAFouFZ599NigSqgOxsbHceOONJCYmKvusVisffvghxcXFyr7k5GRmzZpFYWFh2OM1NjZSXV2NRqOhd+/e6PX6f3aIUUTRLVxniml57jlsX3+N7PMRN3o0KauvI3b06CCep7qGiquvxtfYGNJG2rp1pN50o7Ld9NRTND/2eMRjZv3hD2jz8/C1tdH89NO4i07/6wb0j0IUEWNjv1cgS117C/qhw1AlGGh48M84Dx0KT1Sp6P3pJ2iyA6WfncdPULF0KXIY0VxbWEjB5ncQO72/W199lYY/PRjs7SUIpN9xBynXrQp6vezz0frqa7S98QbeqiqE2FgSZ80kdc0aNJmZIceTZRnnoUM4jx5DiNFhmDw5LC+KKKKIIoooovjPxH+i4KQmYBo+BaghYBp+tSzLJ7vw+gE7gAK5vWPtpuEHgQvaaYcImIa3RjpeVHCKIooo/lG43W4+/PBDTpw4oezTaDRMmjSJCRMmKAKVLMs89dRTNDU1hW0nLi6OqVOnYrVaaWhoCPFu+t9AFEWSk5OJj4/HZrN1mwY4btw4pk+fDoDX62XDhg1h+yyKItdccw25ubnKPofDwZYtWyjtiIQA1Go1EydOZNKkSSFinSRJlJSUUFZWhiAIFBYWUlhYGI2eiuL/BN6GRlpffBHL9u1IdjsxgwaRvHIFhsmTg3iSy0XV6utxhHkeMC5fTsadv1GuZfvevVSuui7iMbP++ADxF1+M32Si9bXXML0Z2YwctRp1aip+sxnZ6fznBvkvgK5vX/TDhyMa4rF98WXEtEWAjHt+T/LixQDYv91PZZfoqM7o+cLzxF90ERC4H9b+/A4sH30UwlNnZpL/5htBYpK3oZHqW9fiOnrsPFEUSV6xgvRf/iIkMspVXEzriy9h/+YbEATiLryQlFXXouvdO2zffG1t2HfvQfa40Q8bFpEXRRRRRBFFFFH88/iPE5wABEGYCTwKqIAXZVl+QBCE+4ADsix/0M65h4Bf06+7vHYVcGf75gOyLL/U3bF+qILT89VN/LablDq1AM8OymdWWtK/sVdRRPHjgslkorq6GpVKRUFBQVAKWwfq6up45ZVXcHVJAdJqtSxbtkwRb2RZ5umnn6YxTDRGB/r06YPP58NsNtPaGlFH/4chCAIZGRnExcXhdruprq6OyC0sLGT58uVAQDx66aWXgvTJG6MAACAASURBVNL0OmP69OmMGzdO2bbZbGzcuJG6urogXo8ePbj66quJ6+LtY7fb+e677zh9+jQ+n4+cnBzGjRtHZjcRDmazGafTidFoRKfTfe/Yo4iiA5LHg+mddzB/8AH+5ha0+fkkLV6EYerUEOG06bHHaX7qqZA2khYuJPPeexS+r62N0mmXIVmtYY+ZcedvSF6xAgD7/v1Urogs3qDVYpgyGclmx1tdhaf83D830H8BhNhYVPHxSHZ7RMN3CAhZqTffhBgfj/tsKY0PPhiRmzjvKrIfeAAImK2Xz1+Au6goLLdrhJr9m2+ouulm5C5Rn0JMDD03PEvcmDHKPlmWaX7iSVqeey4oiivu4kn0ePBBVEnBz01+m42211/HvHUrktmCrk8fjMuWhoiWHZDcbmw7v8LXUI8mJ4f4iRMRolVGo4giiiii+JHiP1Jw+nfihyo4WX1+frL/NDVub7e8+RlGHujTg0SN+t/UsyiiiKIrzGYz3377LWfPngWgoKCAsWPHkpycHMQrKSlh06ZNYX2ZBg8ezPz585XtF198kcrKyojH7EiRs9lsEc3Q/1kkJiYq4lBtbW1Enl6v57bbblOEuFdeeYXyCJETvXv3ZtmyZcp2W1sbL7/8MmazOYgniiLz589n4MCBQftramrYsWOHMicajYbhw4czderUsMKTxWLh2LFjWCwWkpKSGDp0KPGdStmHg9/vRxTFaBXAKICA75Tpnc14q6tRp6eTeOVc4i68MOT6cBw8SPWaW/CbTEH7jUuXknHXnUqkjizLVF1/A/bdu8MeL/0Xd5ByXSCySnK7OXvxT0LaVKDVknnnb5A9XryNjbS+8ML/FwP1fwiCgH7kBaji4vHb7Ti7eTYT4+Lo+fxzqJKSELRazi1egj9CJKmmRw8KP9mB0O7D1/LCCzQ+9Jew3NhRo8h97VXlHPpNJiqWrwgxZgdIuelG0tetC9pn3bmTujvvwt/pCwF1ejrZDz1E3NgxQVzZ76ftzTcxvfkm7rJyVMlGEmfPJvX660NELzhfkdB14gRCrJ6Eyy4jpn//iHMk+/14q6tBpULTo0f0vhVFFFFEEcX/F0QFpx+o4ARQ6nBx48kKTtjOh+EnqkSGGGLZbTrv7ZCt0/BI/1z6xul4taaFQxYHepXAjNQk5mYkoYumskQRxX8MSk
pK+PTTT5VIJ51Ox+jRo7nkkksU43KA0tJSXnvttbBtGAwGfvrTn6LX65Flmc2bN3ebrhcTE4NKpcLhcIQVu/630Gg06HQ6bN/jOTNnzhwyMzOJiYnhgw8+iChOaTQafvaznyk+UXV1dbz44ot4vaECfF5eHitXrgxK2fvuu+/4+OOPkTp5zqhUKmbPns2wYcOCXu/3+9m3bx8HDhygra2NmJgYhg4dyqRJk8IKVH6/n5KSEmpra9FqtQwYMCBixcAOeDwexWw+iv9O+G12LNu24T5zBjHBQOKsWej6hBb28Nts1N31W6w7dij7hJgYUm64ntSbbw4SDUzvvU/db34T9ngZd91F8vLzAm71utuxbt8evnNaLYXbtiJoNEhWK9W3rcNTFr4KLoC2IB91Sip+mw13aSmEed/9p0E/ciTanj0RdDrM778fEgnVGZl/fIC4MWMQY2NpenQ9prffjsjN37wZ/eBBALhOneLcosXIYeZDiImh1/vvKSb4sixTe8cvsGzbFsLV9upF3sbXURuNyj7XmWKqbroJX5fo0MQrryTrD/chqM9/qSjLMqa33qJ5wwZ8tQG+tqCAtLW3kDBzZsjx/DYb5i1bsH65E9nnJXbEBRivXoImKyvsmP02G7avvkKy2ogZ0J+YoUO7FbNknw/J6USMi4saxEcRRRRR/AgRFZx+wIITBB4sDlgcnLG7MGpUTE5OQK8S+bLFwu2nq6j3nH/wUQO+Lq8fGq/n7eGFJEUjoKKI4j8GsizT2tqK1+slJSUlohBx7NgxPvroo6B0vfT0dBYsWEBaWpqyr76+ng0bNgQJLB0QRZHVq1eTnZ2NJEns3LmTr7/+OmLfNBoNiYmJ2O12nP8fPWcKCgrIzc0lJiaGI0eO0NDQEJG7aNEiBgwYAEBZWRmvvvpqWJ4gCKxevZoe7eXfJUni7bff5vTpUINoo9HIddddFyQ6NTc3s2nTppB0x9GjRzNjxowQn6rTp0+za9cuamoC6dH5+flcfPHFFISp7tXhfXXmzBkkSaJnz54MGTIErVYbdiyyLFNfX4/L5SI9PT0kXTGK/2x4KitxHj2KoNESd+F4VAkJYXnWzz6j6cmnlNQzbe9CUm+6OahaH4CvqYmK5SvwnDsX3IBKRY+/PETCjBnKLssnn1Bz621hjycmJCiV8gCan3mWpkcfjTgOw8yZxE+aiNQuuDkPH448aFFEjIlBcjgic/7DoOnRA/2wYQixepwHDobObyfET51C6k03I+pjcBw5Qv1dv43ITV65kozfBBwjJJeL0ukz8NXXh+WmrllD2tpblO3mZ56h6dH1YblZD9xP0rx5yra3vp6KFSvxdomWFWJjyd3wLLGjgtcHrRs30vTXh4POUczQoeQ88jCa9vtmB3ytrTQ9/jiWDz5EsttRJSeTtGABqTfdGGQ63wHX6dOY3n4bT0Ul6tRUEufOIXbcuLBilizLuI4fx1V0GjEujviLJ6EyGMKOuYPvb2lBUKvDRo9FEUUUUUTxf4eo4PQDF5y6g8nr466SGrY0tHXLm59h5ImBef+mXkURRRT/Sni9XkpKSnA4HKSlpZGbmxv2Ab2oqIj3338/qLKdVqtlzpw5DBo0SNnncDh44okncERY9C1YsEDhnz17ltdffz1i39RqNYWFhTidTkwmExZL94UO/q/QUdlPp9NhsVi6Fcpyc3O56KKL0Ol01NTU8Omnn0bkjhkzhpntEQNer5cnn3wSU4QUpylTpjBx4kRl+8CBA2ErGAqCwMKFCxWBDMDlcrFx48YQr6yEhASWL18eJC5CIEpu+/bttLS0AAFRcfDgwcycOTPEZ8zn83HgwAGOHj2K1WolOTmZkSNHMmTIkLBG7g6Hg2PHjtHa2kp8fDxDhgzB2CkSoys6DPF1Oh09evT4XnN4SZIQBCGa/vMPQJblQAqXJKFKTY04d36bDdPb72D95BMkpxP90KEYly0jpl/fEG7rK6/Q+Je/BkXrqNPTyXn8MfSdogD9VivnFi0OGxGlzcsj/603lQW+u6ycsssvD65m1wkdvkyyJGHfu5eq1ddHHLOYYMC4dCmyw4m7vBx7NyL5DxIqFfqhQxH1MfjaTBG9rAAEvZ6Mu3+LKi4e2eel9pe/gghp1GJSEn2+2onYnmZcufr6iCmcqtQUen/+ucI1b91G7R13hOVq8/Io+OBvCtdvMnFu8ZKwAlzsqFHkvvgCQiexvOXll2n8U6i/V+L8eWTdd19QZJS3ro6adbfjPHo0aA7Sb1+n+KF1QJZlzO++R8vzzytG+Prhw0lde4tiZt8ZvpYW2t54E9vXX4PPR+yYMRiXLUWbkxN23O6yMqw7digFCQxTpgSNq2tf3KdP42tuQZufh7Znz7C8znzZ60XQaKL3wyiiiOIHjajg9F8sOHXgl2eqeLW2JeLfNYLA0YsGkRyNcooiiv9quN1uTp06hdlsJiEhgUGDBoX1N6qtreWtt94K8k9SqVRMmTKFCy+8UNknyzIvvfRSRC+pGTNmMHbsWACcTid//etf8fm6xlmeb3/SpEmKKfqxY8fC8v6TIAgCOTk5aDQaXC5Xt35WWq2WK6+8UhF8Nm7cGHEuDAYD69atU1Iot2zZwvHjx8Nyk5OTueWWWxQhp7y8nFdffTVsamTPnj259tprFa7X62Xjxo2cC7MoHDZsGHPnzg1a6BQVFfHuu+8GpS4KgsCUKVOYMGFC0OtdLhdbt27l5MmTSl8SExOZPn16kJjWgZKSEnbv3k1lZSWiKNK3b18mTZpEVpi0HpfLxZEjRygpKUGSJPLz8xk5cmRED66WlhZOnTqFx+MhKyuLfv36BaWndoYkSdTU1GC320lJSQkR87rC5/Nht9vR6/URo81+qPC1tGDZsQN/Wxu6wkIMkyeHXUz7Wlpo/MtfsWzbhuzxIGg0JMycSfodP0fdZf5MW7ZQd/fvQkSn+MmTyVn/qGKuLcsyVTfciH3XrrB9y/rjH0m66soA1++ndNpleGvCF1LR5OZS8Lf3kV2ugBCyaDFSN+J3wuWzUCUkIjnsmD/cGlG8+cFCrUbU6xE0miCvqXCIHTcOXZ8+CDodpi1bkNoif4GZfMMNJEydgqDT0fb6RkzvvBORm3X/H0hq9yR0Hj3KuUWLI3Iz/3AfxgULgICPVfncKyOmfGb96X9ImjtX2Y4YgSeK5Dy2HsPUqcoud1k5lStX4uviBRYu2kuWJBruv5+2TW8EcTXZ2eQ88zQxfYNFXMehw9Tfcw/u4mJlX9yECWT94b6Q1EXJ5aLluecxvfMOvsZGVElJJF51Fak33Rg20tFVVETbpk24zhSjSkggYeZMEi+fFfa9Kvt8WL/8EuehwwjtRQj0Q4eGzk87/CYTjoMHkSWJ2AsuQP096eG+1lZkpxN1RkZQmmcUUUTx40ZUcPoRCE6/P1vDs1XhzTQ7sGV4IRcZg8ORyx1udrVZEQSYZDSQp49WfIoiih8L/H4/xcXFNDU1odfrGThwYNi0LIfDwfvvv09xpwdpjUbDpEmTmDBhQpBgsXv3bj777LOwx5s2bVqQmPXSS
y9RUVERliuKoiKcuFwu3n33XezdVMpKSEhAq9XidruxRqgW9p8Gg8FAbGwsoiiGVPXriuHDh5OVlYVGo2H37t3dVjCcO3cu/fv3R61Ws2/fvojnA4JTEZuamnj66afDpmV25X5f9cKlS5fSp5N/UaRoL7VazbJly8hv972BgJn8K6+8EhJJptfrWbZsmZIO2dGPHTt28O233wZxjUYjS5YsIT09PWh/eXk5W7duVSLDIOABNnfu3JAoLqfTyRdffMGRI0fwer2oVCoGDx7M1KlTMXRJ7ZFlmaKiIvbv309DQwMxMTEMHjyY8ePHExsbGzJus9nMgQMHqKqqQqVS0a9fP4YPHx5W0OpItSwtLUWWZQoKCroV1CwWC2fOnMHr9dKjR4+IEZEdbVdWVirRk98nvlmamjDV1JCUlUVCRkZEnvvsWVrffAtLeRkxCQkkX3458ZdcEuLvI9nt1N93H+at2xTRR2U0knbbrRgXBwsUjkOHqbr++pCKeaLBQO4LzwctqlteejlitbzYsWPJffklZU7qH/gjbRG88gDyt2xGm5eP7HRQ/8c/Yv04gk8WoB81irgxY5BcLmw7d3brk4UoIsbHIzudYT2hfvDQaNCkpyNotfhaWroVAEWjkaTZsxF0Ojzl5Vi7iTpVZ2SQ/ec/I2g1SHY7VTfeFFEw1GRnU/jpJ4qZ/LlFi4OipoLaTUuj9+efKSJO84bnaHr44Yh9KNz+sZI26DpzJuDt1aVSLbSLoe9uQdUulsseD5Wrr8exf38IV9evH3kbX1e4AG3vvEP9734fUhBAP2okuRs2IHa6v3iqq6m64caQ685w6VSy//IXJToNAiJu06OP0vrqa+f9ztRqjIsWkfHrX4VUXXQcPEjjI4/gPHBQmS/jiuWkXHddyPvab7PR+uqrWD74EH9bG9rCQoxLlpBw+ayw9yLHoUOYNm/BW1uLJiuLpHlXhaR6dkCy27F8/DHus6WokhJJmDWr20gyd1kZzqPHEPUxxF14YcTUZQicG3dpKQC63r2/t/Kk7PEg+/1h00ejiOLHhqjg9CMQnNafa+B/yrtfsMSKInMzkrg6K4WBcTHcUVzNu51S8QRgXoaRv/TrSYwqavoYRRRRBKO5uZnq6mo0Gg2FhYUhqVvQ7jl34AC7du1S0usSExOZNGkSI0eODOI2NTXx0ksvhU3tmz17NhdccIGyvX//fj766KOw/dJqtaxdu1YRAXbs2ME333wTcRwDBgygV69euN1uTp482a3YI4oiWq0Wj8cTUYj5IUOv15OdnY1araaxsZG2bqIbjEYj48ePV7j79u2LyE1PT2fJkiWoVCq8Xi9PPfVUxEqKqamprFmzRlmIdCdEJiYmcuuttypiy549eyKmRCYkJLB27VrFH62uro7nn38+bD8SExO56aabFJN6j8fDiy++SH0YTx2j0cjq1auDxNnPP/+cXWEidZKTk1m1alVQZNa5c+fYtGkTHo8nZB5WrlwZJGbZ7XY2bdqkeIB1ID09nWXLlpHQafEkyzKfffYZe/fuDYp+y87OZtGiRUpVyw6UlJSwdevWoCjH/Px85s6dS1IXDxyTycTHH39McXGx0nbfvn2ZMWNGiFDn8/nYvXs3Bw4cwGazIYoiAwcOZMqUKWFTM4uLi9mz8ytq6uvQqFT0HzSIiZMmhVT4BKg/eZJdW7ZQ1S465cXHM2HefDIGBkfUybJM3VNPcfCzz6nOSMevUpHa3MzQ9Az6PXB/0KLTb7VybuVKqlpaqMjLw6PRkmg206usjPzrV5N6880K111eTvm8+dgEOJefj0MfS5zdTv65c8RrtfR6/z0lmsV5/DjnFizEp1JR3TMHiyEBndtNbmUlepeL9Dt+Tsrq1QDYDxygctlyZKDVaMSSkIDa5yO9qQmdx4M6J4f0deuQ3S7cJSW0vvwKAB61GpdejyDLxDocqNrvUfoRwwEBv9mEpyyQZiYDsiAgyDI/puQtQadDiIlBEEX8ne5vHe+QznMRM2xYILVOpcK6Y0e3xvMJs2cTN3YMgkZD66ZNuI6EF7IAUm68gaQFCxDUaswffUTTnx+KyE1dewtpa9YA4KmooHTmrIiCWvKqVWT88heB8fj9lM2Zg+dsaViu8eqryfzd3cp2w0MP0frCi2G5SQsXknXfvcq2ff9+KlddB2Eidrty/RYLFcuWB0V6de5Dxt2/Ve71sizT9PDDtDz3fOjYrrmG9F/9Mkigcnz3HdVrbw2u3CkIpNx4A2m33RbE9ZtM1P7q19i++uo8Va8n9aabSLnh+iCuLMu0vfYazc89h7+pGQgIaik33Yjx6qtDRDLnyZM0P/FkoG1JQtevHymrriVh9uwQrt9mo+31jVi2bcNvNqPr0wfj0qUYJl8SMmZZlnF8+y2md9/FV9+ApkcPkhbMJ7bTs1Bn+FpaMP/tAzzlZaiMySTOvgJd795hubIk4di/H+fx44ixsRimTEGTmRmWCwFPQMehwwgqkdjRo1F1+fwIatvvx332LLLHi65Pb8Qwz4adIdntSA4HquRkRQyO4oePqOD0IxCcKpxuxu0r4u89ewaViNUffvG0JCuZR/rn/us6F0UUUfzo4Pf7aWlpQRAEUlJSIvr6mM1m9u7dy+nTp/H5fOTk5DB+/PigiBcIPIht3749JJIlJiaGRYsWBZlwW61WNmzYEDbSyWg0cv311ytRJ3V1dTz77LMRx9E5Kuv7/KwMBgMTJ07E6/XS2NjI0QjfpHcgKSkJSZJwu91Bvls/RhgMBrRarWKm3x3y8/MxGo2IosixY8fCVi7swNChQ8nLy0OlUrF///5uUyLHjh3L8OHDUalUHD9+PKyA1IGLLrqIqVOnIggCtbW1bNiwISJ3xIgRzJkzBwgIWY8++mhE/7R+/fqxZMkSZXvjxo2UlJSE5fbs2ZNVq1YpC5zuxLf09HRuvPFGRairrKzk5ZdfDiuiGo1GbrrpJiUV1263s2HDhiBhqgMJCQnccMMNiqAmSRJvvfUWZ86cCeHGxcWxevXqINHpm2++YUenan0diImJ4dprryWjUxRVRUUFGzduDBHqtFoty5cvp2enKAebzcbLL79Mc3NzEFetVrNkyRIKCwuVfX6/ny1vv82pLn0WBYH5CxYwcODAoP1fb97Ml8ePI3daWIqSxGVjxzJ2VrCR+7Enn2RbVRXuTgswQZIYbbMz48E/BaUXnlm3jh2STFtKchB3QNFpZqxdi2HSRIV7dtFi9uhjOJefD+39ECSJPiUlXGRMJu+pJxVu2Zy5HBMETg0aiLc9ekf0+elVVsqIU0UUvvUWoj4Gyemi+tZbqbNaKSssxB2jQ+X3o/L5SGtqJreiAuP06egKeyG7PZjefRen2UxLSgoenRaV34/o9xPrcJBgsaJOSECTnobk9uCrr0fyeJBEEUGWEf8L1xr/G0gd51CWEUQRdXo6glqN32pFMpvxqlS49Hq87deLzu1G73AgqtUYpk5F0GrwN7dg37sXj0ZDW1ISjrjAZ1y8zYaxtQ21IJC2di2iIR7Z46HxL3/FCzRkZNCWbEQSRZJM
ZrLq6tD6fPR4bH1AkFCpqL3jFzjLy6nNzqYuOwufWkOiyUT+uXPEOZ3kb9lCzMABCIJAw58epOXll2nIyKAiPw+3VkeC1ULh2VIMNhu5r7xC3NgxAFi//JLqm3+KKTGR8l4FOGJj0TucFJSXYzSZ6PHYehKmTQMCAkjp9BnY/X7KexVgTkxE6/aQV1FBaktLcAquLFOxbDmWo0epzMulJSUFlc9PTk016Q2NZP72tyQvW6rMf/PTT9Pw2OPUZmfT0H7PyayvJ6uujsyf/0wRhiEQaVlx7bU0x8dT06NHQMxuaaZHdQ0Zt6wh7ac/Vbh+k4mKFStprq2lKrcnbq2OJLOJnpVVZN54A2m33qpwZVmm4Q/3U795M5V5uYqYnVtZSdaqVaTfvi7omrF+/jkVv/gl1WmpQWJ2jxXLSfvZz4KEL091DVW33EKFyURbsjEwF3V15C5eFMKVPR7q//hHyj77nMaUFARZJqu1lYJ580i77daQaDbzhx9S/vQzVIoikiiS4XDQe84cUn96c4iY5Coupnr9YxSXleFVq0kB+s2cSer1q0PSM/02Ow3PP8fp3XuweTwYY2PpP20aqcuXhUSeybKMZccnFP3tfdosFuJi9AyYMpnU+fPDpp26y8oo2bSJupoadFot/SdNIu2KK8Jy/TY71e+9y7kTJxFVIn3Hjyd9+vSw0W+yLNOyZw8lu3cj+SUKLriArGmXRhTVnDW1FH20DY/TSc7AgfSYPDlilU/J7aZ4+3asLa2k9yog9+KLu/V9q/zuO5rPncOYlUXehRd2663ZUlVFbVERsUlJFIwa1S3XbrVSXVSEWqcjb9Ag1J3OW1Rw+hEITgAPldfx13OhVZxiBIG5GUa+arNS5/7+sG0R+G78QHrEnH/j+WWZT5stbG8245IkRiXGsSDDSGLUEyqKKKL4N6KxsZHjx4/jdDpJT09n6NChYSOtWltb+fjjj5XFuiAI9O/fnxkzZgRFhQDs27eP7WHKyQ8cOJB58+Ypi3RZltm4cSNnz54N27fFixfTv39/ILDwfuqpp0IWvB3Iz8/nmmuuUbiPPfZYRDNygPnz56PT6fB6vWzdujWiYAGQk5NDUlISPp+PsrKykAV6Z3T+pjmKfxyiKAaMf79n/tLS0lCpVLhcrm7PMwQih7RaLV6vN6xw0xkjRoxQrue9e/d2K76NHTuW7OxsRFFk9+7d3VZ9HDduHAMHDkQQBA4dOsThbirPjRo1iokTJyIIAmVlZbz//vsRuUOGDOGqq65CEATMZjOPPvpoxLnr+h5Zv359WNELQn3O3nzzzbCVJyEgZt1+++2KoLZr1y4+//zzsFyVSsXatWuViK/S0lJe6yb9bvXq1eS0m0+bzWaefPLJiO+/hQsXKmKW3+/nmaefpqm5OZA61WUhMX36dMaNG6dsv/P665w8ezYsd9wFFzB99mxl+6t3NvPlyRPBB29/XR+DgaU//7my+8Q777Dl+HHkzguOdm6a1cqNDzyAuv1+W/n222w8dCggpnX0o/23wWJh1aJFGNv9/Vree4/Xv/yStvaoNUGSEP1+RFkm1m7ningDPa+7DtnrwbpzJ19v30FjRjqiJAWELElC9EtovB4GNTSQc/XV4PXiLjnLuW++oTktVeGoJD+iX0KU/KQ2t5A0cACBaC8z9spKXHp9gNvlR/VfGMH6b4dKBX4/flHEp1YjCwKSKLb/FtC53cTE6NHk5CCoVHgqKrD6fNgMBoUniwKSIJJgsZCs1RI/YQKCRo3r9Gmay8qpzc5CUqmQBFHhJrW1ke9ykbJ6daDdqkqqN2+huF8/PDptMNdkYnBNDTl/+h9EnQ7J6aT0jl9weMhgLImJgT609zvBbGb0seP037QJVWICiCLl161mtzGJmpwchSsLAnF2Oxd/vYsR77+HJjsbgLp77+Prkyco6iJa61wuLtq9h9HPPoO+vUCLees29jzxBAdGjUJSnxcoVF4vY7/dz4W/u5v4SZOAQOTbvpXXsGfc2BAxe/iRI1y8apXicyZ7PBxduIgv8vOxJHWKUpJl+p05w6XTLiN11bXK7vI772JHSzP1XXzHcisqmD5kKNk/u13Z1/ree2zbvIXS3oVB96GM+nqmZ2ZScM89yj5XcTE77votR/v1CxpfUlsbl2m09H/oz8rziN9mZ/cta9ibmRk0vjibjcluDxc8+ogi4siyzIl77+UTiwVrp2c7ncvFRRYLEx55JCiVtPb993nv009p6pQ+rvZ6GdXayqUP/QVV/PnIZevp02x5dD3nMtKV8Yl+P4NaWph9//1oOkXh+qw2Pvjd3ZyIi1fGJ0gShW1tzPv1r9F3sgKQZZmvHnqIvW1teDr1LcdkYv5Pf0pSF3+4E1u2sP3bb7F1Sp9Ns9mYd/XVZHaJfqs5fJj33nyLZv35eUtyurhi5gwKL744iGttamLzE09Q0encxXk8TLvoIoZ1+fLE6/XyweOPc9JkQmr/fIjx+ZgwZAgTFi0KjDcqOP04BCdZltnS0MYzVU2csDlRC3BZaiI/z89kYLwevyzzZauVP5fVcczWfanz63ukcntBJskaNVafn+XHythnDvZNSNGo2TSsF8MMof4UAJIs45VldN9TsSiKKKKI4v8KVqsVm82GwWCIaDgNEtCRRwAAIABJREFUgUingwcP0tzcTGxsLMOGDaNPnz4h3/R4PB527NjBkSNHlNSspKQkpk2bFhIJ0djYyKuvvorNZgvan5yczMqVK4NSnIqKinjrrbfC9m3ChAlM7WR82116YXx8PLfccosiwu3du5dPPvkk4riXLFlCv3798Pv97Nu3r9uKfb1792bMmDH4/X7Onj3LwYMHI3K1Wi3Dhg1DkiRaWlrCmpZ3Rkf6lNvt7tarK4r/Dvy9QmdcXBwqlQq/3/+910VKSgo6nS6QTvc9nmjp6ekkJSUhCAKlpaURzf0BMjIyyM7ORhAEzp49220lztTUVPr164cgCFRUVET0OINAGueYMWMCUSENDd1GROp0Oi699FJUKhU2my2iQAYBEXT27NnodDr8fj8ffPBBt6LzFVdcQXJyMoIgsG3bNpqawviBtgtJ06ZNIz8/H0EQ2LVrF6dOnYrY7oQJExgxYgSCIHD86FG+7JTa1BXDBgzg0lkBf5/aqio2vvlmWDENoGdCAstvuQVBEHDYbDz6yCPBAlknJPj9rLv3XkRRRJIkHrnzTqwR0n3UXi8/u/VWdHHxSB43b9z1WxriYsOKU6IkMX3AQFIGDgCvlwMvvURVjL6TeOVHkGREyY8oyfRRiaQPH4Hs9VK/Zw/1KlX4dmUJg9tDQo8eyF4v7oYGHF04giy3c//71ms/ZEjtwpNKFAMCh0qF127Ho9EEiVgd/wdIEkV0OTmgErGVl9OqizkvYontrxEC13aWz4dx5EhQq7CdPEWp34ckqhSecgxBJNdsJnfObARRhbP0LIfKz+GKiQniyQLIgkjPpiaGXbcKQaPFbzLx1Qd/oy05GZnzfZABWRTJbGhkytpbEA0GkGW+/PNDlGVnKX/vzDW2trLwxhvR9sxFEAX
23Hcf+zoJep1/Yu12rps3j8Tx40EUOfnoera0tYZ9X2s8HlaOHEXOvKsAqP/0U1784osg4aYDgiQxPyODQe0pqq7aWp5+6CHMiYlh7y9TBJGJv/8dEEgTfHHd7VSlJIfljnA4mPPnPyvbf7vzTg53jZBqf12B2cyKhx9WPvf2Pf0027t+4dPOTbFaubmTsF/2xZe8/uUXSJ2jpNq5sU4XN992K4Z2gdNUUcHTzzyLW6cN6bPa5+O6BQvIaq9A6/N4eOr3v6dVpwsdnyyzaMIEBlx6qbLrtT/8gdIIqb2X9u3LRYHU06jg9GMQnDrDLUmoBQFVmA/rt+tbubUofMWprhgQF4NXljnrCJ/ukaXTsG/cgCBRqcbl4a/n6nmvwYRTkijQa1nVI41VOalh+xNFFFFE8UODw+GgsbERrVZLZmZmxBBkp9PJkSNHKC8vRxRFevfuzdChQ8OaQ5eUlPDFF18oi+XExEQuuugiRo8eHeI38dlnn7Fnz56g1ycmJrJkyRIyO/ky+Hw+3njjDUpLQ309LrjgAq644gqlbbfbzfPPPx92wanX67nhhhuUdCiPx8MTTzwRcfE9c+ZMxowZo/Rh/fr1Ec3cCwoKWLlyJRCIZHn88ccjekmp1WrWrFmDWq3G7/fz+uuvR4wiA5g8eTJpaWlIksTOnTvDL6bbkZubS15eHpIkcfr06SBj8a7Q6/Xk5OQgSRJNTU3dihAQSD2TZRm3293t4j+KKKL41+PvFTjVarVyL/d4PBFFLwgUzlDSTm22bi0tNGo18e3ebFazGV830VQqQSA5NRVBELCaTDgj3S/avbgyM7MQBHCYzZhttoAnlQxCe48EWVa2U1NSUIkiXpcLk9l8nqf8bvf3kmUMsbFodTrk9qqy59uSEeTAQTr8wHSCiFYfiHRzWANfsCjttbfd0Q+VLKNSq0GWkLze4D7IstJv2sehtPWP7usyD13/1HHMoO0uvG5f+0/0pbv9/+hr/i7u9/T77/nb/7q9CH/v/vVy4K8CIb9lBFSyjCZWD4KI1+nE1y50IQCCEPDXbxfVVJKEISUVRAG32YxdkoLaEuB8u5JEeo8eCGoVbrOZJrs90JOO9pW+gCBDz545qGNj8btclFdVBUQ3hMA9Qzg/LFkQ6JWRQVxGBjJQdOQI3vYIQIROM9X+//zERNL690cQBE7t3Yulg9vp+B3bOTothe22D2f27KHW51eOHejL+XbTgREzZgS+EDl0iJONjUpbstA+F+3bBp+PKQsXgijSWlnJzgMH2udYUPrtVatBFNF7PPz83nvRaLVRwenHJjh1hzq3h1F7T/GvKgL8+IBcFmQGvp2udnm4/GAx9Z7QbwvnZxh5fEBwtRyvJPNmfQtv1rVS5/bSM0bL1VkpzM80RhSnbD4/tW4vRo2KNG33FSSiiCKKKH5osFqt+P1+EhISus2lb2tr49SpU7jdbjIzMyNWLvP7/Rw+fJgjR45gs9kwGo2MGjVKSZnqDLvdzvbt2zl58qTi79OrVy9mzJgRUsGsqamJN954I8hzSRAEJkyYwOTJk4Parqqq4vXXXw/xqjIajaxcuTLIpLq0tJRNmzaFNffuaiZfUlLCpk2bwi4ke/XqxfLly5V+dJcOpdPpWLNmjZKeVlVVxQsvvBCWC6HV/Z588smI3DFjxjBz5kwgcG4feeSRiAb0GRkZXHfddYq3V3cpWSqVimXLlqHVavH7/bz99tsh0XSdcfHFF5Oamoosy+zcubNbr6zevXsr4tuJEye6FeqMRiOFhYVIkkR1dTWN7Q+x4aBWqykoKECWZUwmU7diIQQikQRBwOVyRUyn60BMTIwSyeIKUy0siiiiiCKKKKL41yDBZMLS/uy2fMoUek+aFBWcooJTMH5dXM3LNeEf9GanJTLEEMtek41vTDZcUvfXhADk6bUU6HVUOT2cdUY2v908vJAJxsC3PF5J5toT5XzWEvrN8Jz0JJ4emIfYacFi8fn5Q2kt79S3Kn26JNnAPb170C8uNETa4vOzub6Vo1YnsSqRy9MSuTApPqLJms3n56jVgSgIDDfEoo9W6osiiih+pHA4HJjNZuLi4kI8rzpDkiRKSkqor69Hp9MxYMCAkGpoHbBYLBw8eJBz586hUqno27cvw4cPD+vBVV1dza5duygpKUGSJPLy8pgwYQJ9+vQJ4Z45c4YdO3YoAopKpWLYsGFMnz49JJLs8OHDfPTRR0FeRwkJCSxYsCDIdBoCqYsff/xxiJg1adIkJk+eHLQvkvl1VlYWK1euDBpjJK5Go2HFihVB/fjuu+/Ytm1bCBdgypQpTJw4Udk+evQo7733XlhuQUEBK1asUD7/zpw5wxtvvBGWGxsby5o1a5QqfN8nvl177bXk5eUBAe+0J554IqKgdumll3LRRRcBgei/Rx55JKKgNnDgQBYuXAgERNP169dHjCRLSkri1ltvVQTa5557LqSyXwdEUWTdunUYDAZkWWbz5s3dpobNnz+f/Pz8gO/GV1/R3fPk2LFjFUH0xIkT3RrP5+XlcckllyDLMjU1NXz22WcRufHx8YpoabFYwnrOdUAQBC677DKluuann34asUIkBATRjgIGu3fv7las6927N9nt6RtHjx7tVgRMT09XijmUlZV1K1rGxsbSt923pKGh4Xsrh3akLVosFqqrqyNyITDPGo0Gt9vdbYojBLzW9Hq9Ip52h440bVmWaWpq6naOdTqdktJtsVi69VoTRVG5Nl0u1/dGRHYUwPB6vd22C4F7jCAISJLUbRopBK4jQRDOe9R1E+0VRRRR/PjQWXBaPHYcA2bOiApOUcEpGF5J5t7SGl6tacHTfs61gsC1PVK5uzAbtRj4UDlhdTD1QGhp038WfWJ1LMhMJkOr4ZjVwQsRRC+ApwbmcVVGIH3DLUlcefgshyyhRrmJahXbR/alIPZ8/u4hs51lx8to9QY/AExPTeDZQflBKYB+Weah8nqeq27C3l65z6hWcWteBjf1TAsRqIpsTtZXNPBJiwWfJDM6MY5bctO5JCV0UeaWJLbUt/G3RhMWn59B8XquzUllULw+7JiL7S7eqW+lweMlX69jUWZykHl7Zzj8EtuaTJQ53KTrNMxOSyJFG97EXZZljlidnLA5MKhUTE5JIEEduRRpk8fLAbMdtSAwPime+G64bknimNWJT5YZEq/vlivLMmVONza/RG+9jrhuuAAtHh+tXh/ZOs33cp1+iRavD6NGRdz3lFmVZJlWr584lfh3CYt2vx+1IPxdfmQ+SUYGNOL3P5jJcoArRh/ioogiLDoWO91FekFA+GpoaMDtdpOenq4swsLB5XJx+vRpbDYbKSkp9O3bN2xkGEBLSwuHDx+mra0Ng8HA8OHDg1IWO6OiooL9+/fT0NBATEwMgwcP5oILLgibPllUVMTevXupqqpCpVLRr18/Jk2aFLbto0eP8tVXXymCWmJiIhMnTmTkyJEhn08HDx7k888/V0zlBUFg0KBBXH755SHC3sGDB9m+fXvQItVoNLJw4UKyuhjGHj58mK1btwYtqkVRZNasWYwcOT
KIe/z4cd57770Q0Wnw4MFceeWVQXN9+vRp3nnnnZDFekpKCtdccw2G9jQkgPLycjZu3BiyUNZoNCxdupT8TpUta2trefnll8Mu1jtXnoTAOX7++edxOkO9Lfv378+iRYuUebbZbGzYsCGs8JWcnMz111+PXh/4fPd4PDz33HNhhRaNRsPq1auVKnySJPHyyy9TWRne7mDevHkMGTJE2d68eTMnTpwIyx03bhzTp09Xtj/99NOQ9NsO5OXlcc011yjj+0f84YqLi9m0aVNYriiK3Hjjjcr4GhoaeOaZZyKmtM2fP5/BgwcDgSjL9evXRxRaOnva+f1+HnvssYjCV+/evVm2bJmy/eKLL0ac44SEBG677Tbl+nz//fc5cuRIxPGtW7dOEeO//vprvvjii7BcgBUrVtCrVy8gIERu3rw5Iveyyy5j/PjxAP9QBUy73c7DDz8cUfjKzs7mhhtuAALz9uijj0ZMc46JieHnP/85mvZKXN3NG8CaNWtISUlBlmU+/PDDiPMGgXPdIS7u3buXnTt3RuROnDhRmYvi4uJAQYIIwle/fv24/PLLAWhubuaVV16JyE1JSWHZsmVK9OTzzz2HL8K8adRqVl13HRqNBlmWee2VV7CEiyRtP9bixYuV1PMP//Y3qsNVRm3nTp06Vbku9nz9NScjFDoAGD5sGKNGjwbgxPHj7OtSsbczcjMzmTJjBgA1NTUBH8cIc2HQarly8WIEQcBms7Gl49oMwxWBBYsWodFo8Pl8bHnjDbzhnl/bjzVjxgyMRiOyLLPtrbewdJNKOnbECAr69UOWZXZ98AG1Ye7HHeibns7wn/wEWZY5+uWXFHcTKZuu1jBp7hxkWebcsWMcjFD1FUDv8zF93jyQZVpravh6/34lbRT5fIqqQKAy6SWTJ6PWaPA4HHz91VdKWlrn1NSO/18wcCDxKSnIksT+nV/hbV97nOe1p54C+cnJpPXqBbLMqX37cKhUXdoN/CPIMilqDdkD+oMsU3X8BObOKbLt3I52Y/0SOf37AzItZeW0ej3B7Sn/B7XfT06vAmQZHE2NNDscwfMANKek4IyLQ+31cvvatcRnZf3dgpPqnk6O8v8t2LBhwz0dN9koAlAJApNTEliWncLIhDjmZCRxf58cZqYnBS1+07RqtjaZafFG/iZkbGIcoiBg9X1/kl6r18+uNhvbm80ctkausgRw0Gyn2evjqNXBm3WtfNYa/oPRLclY/H5mpgVUVrvPz8xDJSFiE8BZhxuPJHNx8vmH2N+dreGpqia8nR6EXJLMV21WYkSRsUnnjYYPmO3MPVzCCZsLryzjB6pcHrY0tJGuVTMs4fwix+bzs/BIKS/VtlDh8lDv8XLM5uT12hayYzQM6WK2/pfyeq4/eY79ZjsnbS72mGy8VNNMtk7D4C7cr1utXHGohHcbTewz2/m8xcIL1U2katUhJu41Lg9XHyvjoXP1fNpiYWuTmReqm4lViYxMjAviuvwSvymp5paiCt5rNPFeoykgCsqB89zVR+aFmmauPX6OF2qaeau+lRdqmnH4JS5Mig8RUb5qtbLqRDkPltfzem0Lz1c30+z1cmFSvCJwdqDU4eLWokp+caaKF2ua2VDdRLXLy7ik+BDRx+T18buzNaw5VclTVY08U9lEqcPNBQlxIeKXT5J5vKKRm09V8Odz9TxR0cApu5NB8XqSw1RcfLehjTWnKrj7bC3rKxo4ZLHTKzaGLF1oKuc3Jht3nK5i3elKHq5oYI/JSqZOQ74+1MjwrMPF3SU13FJUyZ/L6/m0xYxBpaJ/GCGyxePjwfI61p6q5P7SWj5oClS4GmbQhyw2XX6Jp6saWXe6kvvO1vJWfSt2n8QwQ2yIACbLMm/Vt3LHmWruLqnhtdpmmjw+Bhv0YUW4na0Wfn2mmjtLqnmuuolzTjf94/UkhhECT1gd/P5sDb8qrubpykZO2pwU6HVhU2CrXR4eLKvjl2eqeKyikW9NNjK0GnrqQxfpZq+PJyob+cWZKh4+18AXLRbi1SJ9YnUhc+GRJF6paeZXZ6p5sLyeDxtNCMCgeH3ItSnLMh80mbizuIYHymp5u74Vu19icLw+rHC4p83K787WcG9pLa/VtNDo8TEgTk9smHkrsjm5v7SOu89W80J1M2VON31idSSFud5qXR7+cq6eu4preLqqkaNWJz1jtGSEud4sPj9PVzbym+JqHq9oYE+bjRSNmrww15tHknittoU7S6p5+Fw9nzRb0IkC/eJiQuZNlmU+bDJzV3E1D5bX8X6DCa8sMzA+BnWYB8s9bVbuOVvLA2V1vFHXQpvXx4C4GGIizMUDpXXcU1rDKzXNVLk89I2NwRDmGqpxeXj4XD2/O1vD8zXNFNmc5Ot1pIYR1c1eH89WN/GnejObLB4OOzxkaNXkhBHr3ZLEm41mHrf6eE/WcVzUYVCrKQxzDcmyzKc2Ny/5dXwcm8LpxFTi4uMZEBd6DQGckFW8rU3ii7SenM3OJykzmyEJcWjDiGWN+ni2JWawK6cvlb0HkdynH6MyUsOK5d6kZHan5bLPmEVVXh8SLhjDhN69SA4zF/q0dI5k9+KgPoHajBzUI8cxZcxoesaF3luSMzIp7VnIEUFLY2IK3n6DuWTaNIZmpIVwMzIzacgt5LgPWvVxOHvkMfKymUwZ2C9kLjIyMnDk9uKky4tZpcaZnEbeRRNZOPkn6Lqc69TUVFT5vThpc+HwS7jj4kkYPJylc+aQYgg2+TcajSQWFHLKbMPldOJXaxDzejF37lz65wZHpxkMBjIK+3CqzYTPYgFkfKkZTJg6jUvGjA7ixsbGkt+3H6eaWvGYA/cJf4ye3qPGsPiKy4PETq1WS9/+/SlqbMZtMrX7fwgk9+nLtYsXER93/jNVpVIxYMAAihuacLS10TFL2tQ0lixcQM9OVYs6qmiWNTZhbW1RuIIuhkunT2fUiBFBfe7duzfVLa20NTUpXASBQSNHcfn0y4L6nJ+fT7PVSlN9fVAb6T1zWbpoUZAgmp2djcvrpaZLJFCMwcCKZcuCUl9TUlJQa7SUlZcFcUW1mquuvFJZSENArDIajZwpLlYWLB24aMJExo0dq7z/tFotWVlZnCwqQu6yQO3Vuw9XXH7+nIiiSF5eHsdPnsLfRYhMSklh8aJFis8SBAS2k0VFeLqk9mp0OpZefXXQ+HJzczlTUoKjq1m9IDB37lwlqq9j3iqrqjCHqUA5btw4Ro8+f82lpqbS2toaNvW0oFcvpk+frozPYDDg8/nCij1JycnMnzdPOX9arZa4uDiKi0O/JNZqdSxcuFARcEVRJDU1lRMnT4acD6F9fJ1F58zMTI4dP44URpSZOHEigwcPRhAERFEkKyuLEydPhhUM+/btyyWXXIJarUalUpGVlUVxcXHYggBpaWnMmTMHvV6PRqMhPT2d6urqIH+/gNMPxOj1LFywgKSkJHQ6HUajEavVGhQp18EVBIF58+aRnZ1NTEwM8fHxqNRqysrKunYBCAjUAwYMIDY2lri4OBKTkjh58mQoURAYMWIEEyZMID4+nvj4eDIyM
zl69GhohJggkJOTwxVXXEFiYiIGg4Gcnj05dvQoXp8vpOpjgsHAwkWLSE1NJSEhgdy8PE6dPBkQybtw1SoVi5YuVarV5uTkUFlejsliCSs6zZ03j759+2I0GsnIyMBqMlEXoYLpxRMnMnrMGJKTk0lNTUWtUlFaXh5Maj/GgF69uGzWLFJSUkhNTcWYmsqJrpGkHRUw4+NZuGIF6enppKWlkV1QwJGDBxVz9c7cGEFg2U03kZOTQ3p6OvkDBnB47158neagA6Iss2jFcvr060dGRgaFAwdyfNcuXJ3a64xZP/kJwy+8kMysLAr69aP8228xyXLA40ilQlapAhUSVSpG5udzyYIFZBcU0LNvX1pOFVHt8+FTq/Fptfg0msCPTkcPvZ6r1q0jp39/eg4YgK+5idMOBx6dLvATE4NHp8Ot1xMjiiy/+27yR4wgd9gwYtVqDjY344qNxRUbi7PTj1+rZcmttzJgyhTyR48mPSeHveXl2BMMOOLjlR97fDyO2FhmXT6LUUuX0uvCC8kfeQG7jh6lzWjElmDAlpCAtdPPmL59ueSOn9P74ovpN3Uq3+zZQ0NaGpaEBCxJSVgSE/FpNCAIDJFkhs6Zzb333lt3zz33RFbIOyEqOP3IEKdS0Tcuhr5xMWEXS4IgUKjX8V5jG+F06Z/2TOeZQfnc0DONNblpvFrb8r0peH8vbH6J/WY7u9psnLR378dwyubitdpmXqtt4fnqJhrCeEh14KjVgUoQOGV38Y3JyqMVkf0mDprtXJaaiF8OLFRWnygP608F8HWblZU9UpXF+n2ltWxtCv3WTQY+b7EwP8NIYvuic1uTiV8Xh4ZvS8AnzRampSYoi84Kp5s5h0uw+YPPiB/4tMXCqMRYReRwSxKzDwUEss7wtlcszNNrg6Kt1hZV8FZ98Ln2yjK7TbYQ8W1DdRO/O1uLs9MDoVeW+dZsp83rZ2qniK/dbVauPlYadF68sswhi4NTNidz05OUh80Kp5tZh0o42anPPhmO2Zx83WZlQUayIlDZ/X6uOlzKpy0WfO0PTX7glN3FR01mrswwKte1LMvcXFTBCzXNytzJQInDzZaGNmakJQaJTuvPNfCbkmqaOomt5U4P79S3MjYxPkgQ2d5kZtmxMspd5x+wql1etjS0ka/XMrDTHJ+yObniYAlH2qPCABo8PrY2mRGAC43n57jJ4+WKQyV81mLFIUnIQIvXx+ctFiqcHmakJirz5pEklh4rY2NdKyafHz9g8vnZbbLxrcnG3HSjMm+yLPOr4oAQU+/x4pNlrO3vt+1NZuakG4PuBy9UN/HTU5Wcc3lwSzJ2v8RRq5MtDa1cmpIYFFn3ZYuF+UdKOWl34ZQkHJJEkd3FG3WtjEqICxJEiu0uZh0qZq/JjtUv4ZQkSp1u3q5vJVsXLMq2eHzMPlzC1iYzbT4/TkmiyuXhg0YTbklmUicR2SNJrDhWzgs1zTR4fDgkiXqPl09aLJyxu7g87bywLssyd5XU8IfSOqpcHux+iWavj6/brHzVamVuelKQYPBSTTM3nKzgrCMQqdfm8/Ot2c4HTW3MSksKihz8ssXCvCOlHLM5sfgkzD4/R6wO3qprZaLRECRcnmmfi91tNtp8fiy+jnlroV/7PboDzR4fsw+V8EGTmWavD6s/MG/vNLQRIwqM6fQ+dXeaizq3F6s/MG9bm8zUub1MS0kIMtT9VXE195fVUenyYPUH5u3zFgvfme3MSU8KEoefq2riplMVlDjcmH1+mrw+dpsCXyhckZ4UJJ580mxmQftcmHx+Wr1+DlgcbG5oY3KyIUiMPGVzMutgCbtNNlq9fkw+P8dtTjbVtjDcEBsUzdro9nLFobN82GTm/7V35nFyVWXe/55au7t63/fse4gQIBAEZEBRAUFQcEEdBFFxHJ3xVWccfRV13EYdd9kVRVRGRkUEWZVFCQESlixkJQmdpJPuTu9bVVfVff+4t6trOedUxWnHyuvz/Xz6k15+OXXuee75Peeee+65vbE4Q/EEO8Yn+fmhfhpCAY5PuwkwkUjy9hde4odeWwzFE+ydiPHrnkFG4knOqq3IaIt/3tbFl/cc4uXJGEPxBAeiU9zfN8ym0Qne0FCdsc/gd/cd5oMvvsyu8SiD8QSHY3EeHRjh4f5hLmqszpiAu6d3kLc8v5tNo5MMxBP0Tbnn0K8OD/Da+ipq0nzoueFxLty4kyeGxjis/BxWfp4bneBn3f2sqY7QkTapdnAyxgUbd/K7/hG6AiUcCJexNZbgZ91H6Mjy+rGEe0Pk9p4hXiopZ295Ddv9YX7ZM0jCcVKPvoO7IvQftu7jGwf62V5Wxa6aRrZHqrl/aIKdY1HOb6jKmHT6+p5DfHxvL5vLa9nW0MaLNU08NqV4bGA0pz/96vAAV+7o5tnKejY3z2FzUyfrgxHu6R/hvKz+9PTQGJdtO8C6inqeb1/Ac+0L2FDZwF1DE5xeU05LeKYt9k1EuWzbAR6O1LOxczEbOpfwbGMHv43BwrKSjMfwh+MJLt9+gLtKa3mufRGb2hbwdOdiHgyUU+L3Z+S9eNLhwy8d4tZgFZvb5rOjqYOn5i7j0epmDifdNwOnT1x+bf8RvkaEbS1z2FvXwvMdC3m0dSFbkn4uaqzJmNC+s2+YT0TDbGvu5GB1Hdua5/DIgpU8HYhwQUN1xg2UJ4fH+dCYjy2NHfSVV/NSQyt/XHgcj1U2ck59FY1p/WnPZIx/GIaN9W0MlZZzoKaBp+Yt46GWBZxQW8mCspm2GIwn+NBggj/VtjAWLqWnooZNbfN5YP5xtNZUZ9ykiiWTfGowzu8qm5gIhhiIVLKrsY2Hl5xAoraBV2d5y3eG49xeVk80EGI0XEpXbROPLT6eg82dXNhUnTGhfddEkm/5q4kGgkwGQvRU1rB+3nI2zV3KG5uaAsZaAAAgAElEQVTrMvLT0zGHf6eCsUCQpM9Hf1kFL3QsZN2iV3Bee1NGftqbgE8nIwz4gyjHYTRcyvamTh5degKnz+nIyE+DDlybiHDAFyCQSBANhthX28yjS45n8cJFrErLT1Mo/sMp40XHTzg+RdLn43BlLevmr6R0+XGclbUC/hYV4U9xHyVTMXyOw2BZORs7F3Nk1Ylc2Jz5gp37/BF+GVOUxaIEE3HGQyVsaZ3HjuPW8KY5LRne8nywjJujfkpik5TFokz5A+xqaGfDyjVctGR+xo2O/aFSvhUL4I9OUjE5jqNgf00j65eu5u9WrcyYsB8Nhfl6PMzU5CRVE6P4gL5IJevnr+C4k9ewIq0tEsEg3yHC4NgY1ROj+ByHsVAJz3UspPrUMzijfuaRa5/fz4+D1bw0Mkr1+Ah+x2HK52db8xxGTjmTN7Q3pbxFKcVDkTrWD4xQMz5CwBsT7a1rZu/qV3Lp4nkZ3rK1sp67+0epHh8mlEiggMMVNWw6bg2XHL8yw1t6qur48eAEVSNDhBPumG+4pIxnFx7HuaedmuEtE+WV3DCWpHR4kNIpd8w3GQjyQsciTjzrbJak+awqi3DDVAAG+olMuZOcCeVjR1MHTWe/hrX1NSlt
IBTmJ/4II319VES91T1K8XJNI9HTX835c1pT/cnv9/NgWR27e3qonhhzJ52Voi9SSdeaM7lsxZKUtyil2FLXzLqDPdSOjaQmqEdDJWxbeTKXnnJShrf0NLby2+4+6kYGmG7NKZ+fLfOWc/6rz6YxbdwSrW/kpz1D1A704fOWxjhKsaN5DmvOfwOLymfOC39NLT8ailLVcwh/aodzRXdFDW0Xvok1DbUpbbi8nJ/GQ5Ts35ehHQuEiL32Qs6b15ExQX1/pJapXTsJpO0+nnDg4Mmv4tKTj095i8/nY1PLHHq2bCGYvlN5MsmBuUt40wXnZXhL74KlbN/4HMH0q6Kkw0B5DedecQX1aW2RWL6SZ57aQDARz5hYjCfg+Pd+gHkVM95ZsnQZjz/zHMHoBEyfs0oRmIzSctnlnNA2M9lbMW8+j72wjcDIIE7amCo0OUlozemcc8rMgqLypibW7z2I6jlEIjDT1wOxGL76Fs5/62UpbwmVl7NjeIKp3buYSrvp4EskKIsmeN1HP0KJ93nK7+dIOMLIcxuJlqbdvFKK2r5+zvj4x6ktjxzVhJM8UidoeXJwlC+/1M2TQ+5diI6SENd0NPDutvqMwdVX93Tz9b36mXEfcMfxCwgoxeHoFJ/dfYDuqP0Z8mON6oCf6qAfP4o9E1HtJN0080tDLImU4lfu6pgjmhVZ0yyJhDm7thK/UvxpYNS6Omx+aYgr2xtQwAsjE9xxyLwxbGMwwOcWteJTPg5FY3x6l2bpr0eZT3H9irmU+X1Ekw7v3bI39QhiNgq49bh5NIeDKOAft77M9nHzpOF3l3VyQmUZCsWXXjrI3ZqJumk+vaCFi5tqUChuP3iEr+49ZNS+p62e/zPPfURl/eAoV2zea9SeW1fJ9SvmAnA4GuOV67cZ47ekLMzDJ7t7SMSTDiev20KPIX6Vfh/PvnJF6uL7kmd38cSgfmNfBaw7dVlqwvATO/bzQ8tjpz9bNT/1KOetB/q0k5bT/PuiNt7T7q5ceGJglEue22XUXtFWz5cXtwNwKDrFSeu2EDekhtOry7nzhIWA+6juSeu2GCd828JB1p+6PDVpcfGzO1k3qH/VeUgpNp62IrWi5V+2d/Gjg+a3hv3uxMWc4E0u3LK/l0/u1O/fAu759mbvRQeP949w6fO5b3Cb5sNzmvjEfDf575+Mcco68wsXzm+o4paV7p4l0WSSE5/YSp9hdejishIeXbMk5Z8XbNjBM5pHhgEq/D6ePW1FalD4z9te5mfd+n6tgMfWLGWRd0F93cs9fHa3uV//6Lh5vNa7AHjoyDDveEF/lxfgk/Nb+Mc57iMye8ajvHL9i8Y+cllzDd9e5q4AGE8kOfGJLQwYVsGurizj3hPdRywcx+HcZ3awaVS/nL4uGGDD2uWpi6wPbN3HLw/r32jnB55cuzw1KfONvYf4yh6zX/ziFQs4w5u4/G3PIO/Zsteo/cKiNq7y+tO2sQnOemq7Ufvutnq+5PWn4XiC1U9syblhME16f3IchzOf2sZOw1tiW8NBnkrrT1du2sO9fXrvDCnFhtOWpyb2vvRSN9/ap8/VAPesXpSaXLjzUD8ftLzZ9htLO3hbSx3g3tB5reUx/H/obOT/LnD3ADoSi3Piui3Gm1Tn1lXy41XuKpmE47D2yRd5eVL/mNX80jB/PGVp6uL0bc/v5g+GVdFlPh/PnrY8dcPn0zsPcON+8/5Cvz95SeqmwW0H+/jYdrPP3rBiDhc1uheRTw6O8sZnzT770bnNfNTLT4eiU5y8bmvGSut0Lmqs5gYvP8WSSU5et9Xos8sjJan8BPDGjTtTY7dsqgJ+Np62PJWfPra9i9sMPquAP52yjPnehO+NXT3WMcNtx83jNZ63/OHIMG+zeMunF7Tygc5GwJ0sXPuk2Vve1lLLN5Z2Au4k8mqLt5xUWcZv07zltRt28MKI3lvqgwE2nLY8tYr6g1v3cafBWwIK1p+6PLXlwbf2HuZLe8z7TqXvX3pv7yBXWsYiX1rczrvb6gH3RsRZT20zvgHvyrZ6vuh5y0g8wQkWbzmjppxfHD/jLa96ajs7psdlzsxFPeTm6qs27+Ge3pk31SknSdLnnjPZ3vLll7r5puctvmSSQDJBzB9IlX3v6kWs9rzlvw/18w+et/iSScLxGNFAiKQXg28u7eCtGm/xJZOUxSaJBYLEAu7npntL/1Sc1U+43qKcJBWTE8R9fsbDbl40eYtyHCon3L4yVBoB76b74zpvcRyqJkYJJJMMlpaT8PvN3uI4VE2MEY7HGCotJxp0zxuTt1RMjBGJTjBaUsZoiTuuMXlLZHKcqokxJoMh+iOVoJTRW0pjk9SNDjPl99NTWYOjfDnesmbdixyKTRGaitE0MoCD4lBVLXF/gBXlJTx0Uq63BBJxWoaO4Esm6amsYSJUYvQWXzJJ62AfocQURyJVDJWVG71FOQ4tg32UxSYZLo3QU1EDSum9xXFoGu6namKM8VCYAzUNOMpn9Ja6kUEaRgaJBoN01TYR9weM3lI1NsKCnv3EfX52tnQyESoxekvFxBir9u7An0ywtWMBfZU1Rm8pmxxn7fYXKI1F2dnSyfa2uQR8SustpZMTnLXpaSonx+murufxFSfg+PxabymZnODcZ9dROzrMcGmEB084lZFIhdZbSiYneN0zf6R56AjRQIhHV5zI7vY5Wm+Jj45x/tOP0XHkMEmfj+fmLOKPq07mlXWV/OL4hSilZA8nmXCaHQan4kSTDg2hgPaRgmgyyZWb9vJwf+b+Bn7gG8s6uax5ZgY73wXyFxa1cVp1OYPxBDd09XBfn/m102U+HwvKwowmEhyOuisaBKFYUGlf+R489SsIen0r32rBgIKwz4fCvai3nfU+SD26NJ5IGi9spqkJ+FEKJhNO3v5UG/TjQzHlOAzlebS2JuAn5FMkHIyTMdNUBnypwcqh6JT1tdMRv49q7/h6YnHr8YWVoiHsDgj7Ywnr8fkgdad3KJ7Ie3wdJUF8KMYTyYzVcTpawkHCPsVU0uFA1L7Za0MwQEXATxKHfRMxa1tUB/zUeQPelydj1rYo8/loLXEH7N3RKeMkMkBQkVoBcCQWN17kgXuuzy8N41PuJItt1SnAnJIQIZ8imnSMkwrTtISDlPt9JB3YbXlJBbgTVNNt8dLEpHHiFNyJvek72fujMcYtbRH2KeaUuG3RG5vK2xbTj30OTsXztsX80jBBn2IikczbFu0lQcr9fuKOwy7DxNQ0jaFAahXnzrFJqxdVBfypFXj7JqJMWLyo1KdS58Wh6BSDlrbwQ2oydKCAtlhYFiagFGPeyjwbnSUhIn4fUwW0RXMoSE3QvXe+Y2zS6p01AT/NXlvsmYhafTni99Hp+cVBbxWdiYCCRd7qoiNTcXrytMXisjB+b/uC/Xn8Ym5piFKfj1jSydtHWsJBqgNuW2wfm7R6S13Qn1o9tXs8mtoDVEe535ea7N3vrZg0EVKKBWXT/SluzQ0KWBwpwYfrLfm8c35piLDPx2QyyZ4J+znUFg5SGfCTdGDHuL0tGkIBGqb703jU6rOVAR/
tnre8PBkzTgoBlPgU86f7U2xKuz3END5wH40GBuIJuvO0xcKyMCHl5qe9BfSncr+PuOOwI09/agrN+Oz28UkSloarDvhp9frT3omYNf+W+nzM81aTd0ftPuuH1MrF/qm48WmEaRZ73jJagM/OSfMW0w2AaZpDQWqDM/3J5i21ad7yUgHeMsfrTwcK8JbFnrf0FeAtS8pK8CtSK6BtzJv2lgJ8ttXzliSFeEuAJu8G4+6JKFFLW1SkeUtXAd6y0POWngK8ZYnnLUMFeMuC0jBhn2IymeSlPN7S7nlLAjfn/P/mLetPXcbcshKZcJIJp/89Eo7DfX1D/OrwAEPxBEsjJbyztT7jkRBwV0NcuXkPD2reUndxYzXfS3tL3Z7xKK96aptxcPPttMms54bHed0G8x3WxWVhvri4ncmkw86xCT6723xXSuHeqQ8oxcBUwnj3eJrWcJAyvzvIy5fABEEQBEEQBEEQBOFY5c7jF3BGbWXBE076V1wJwlHgV4rzG6o5v6Haqgv6FD9cOY87DvXz8+5+DkZjdJSEeEdrHZc01WSsoJpXFubGFXO5ZuvenDutH+xs5NKmmeegj68s49LmGn5xKHcpdFAp/mNJB6d6ezK8uq6SR/pHeXRAv/T+0uYavrVsZmPItz63m0cM2vpggCdOWZZ61ON9W/ZyV0/uBpLT3HfiIlaUlxF3HK7dtZ8fHTQ/+vbPcxp5Y1MtScfhZ9391uX/Z9aU84HORpIOPDE4wndfNmubQ0H+bX4zDoq9E1G+YXnEIqDgY3ObKfH7GJ5K8I19h613bi5vqaUpHCSRhBv391jvkL+qppylkVIc4JeH++mzzLovLSvh5Gp3OfbjAyPstdxVaAgGUhvEbxmd4EXLXmBhn+I1dZUoFAcmY2zMs6n9WbUVhH2K4XjC+FjYNCdUlFEddFch/HFg1HpnY0FpmNaSII4DTw2NWe8gN4UCzPPuVmwdc/cJMlHh97GsvBTHgX2TUesdr4CCVeVloNzVG12T9jsbKyIlBHyK0Xgy7930+aVhIn737tj2PHuztYa9VQgObCvgTmFTgasQynw+OktDOED3ZIxhyx2hgIL5pTN3TfOtyppfGsav3D3o8t0Rag8HKSlwgrohFKDS765w2ptnhVNV2gqnrjwrnEp9Ppq91V75VocGFKm7iv1T+Vd7dZaE8CkYjSfztltLOEhIKWJJh+6Yvd3qgwHK/D6SjpN3pUdVwJ/a2P7AZMy6qqfM50s9wtkTm7KeQ0GlUnemB6bi1ruKCmgrCaJQjMYT1rv04LZFQCmiyWTeO9P1wQAlfkXScVfU2KgK+KkI+MDT2vpTmc9HbchbMRiNW30opBSN3jk0MJWwrpJTkFrdMJpI5j2HmkNB/Mpd7Wl7eQm4bRH2KRKOk3d1Q1XAT7mXqw/O4urJkFI0eOdQ/1QiY4/DbHyQWkU2Ek9YfQhP6wMmkknrnWlw/SKkFHHHybuKrCbgp8zv7sCS7xwq9/tS/elwbMq6YjDsU9R7PnRkKm7tT35I9aeheMLan2CmLca9ffRsNIYCBD1vybfqdLotkpDXvyv8vtQ+QIeiU1ZvKfGplCf3xeJELedQQEGTt4psMG7vT+D2J0Vh/akpFPC8xcnrybVBP6U+HwnHXQ1ho9Lvo8JbJdedpz+V+VRqn7remN1bgkrRGJrxFlt+UrjnhcJdqTNcYFtMJpPWLS3AXX1T4iusP017SyFt8ed7S9w6pk73luF4wroCCGZ8thBvmfbZKcfJm5+qA34iBfYn8ZYZ0r2lO0+uLhZvaQ3nvqTFhqxwEoqavlicXxzqZ9f4JPWhIJc01WRs/jlNPOnw7ZcPc8v+vtQg9ZSqCJ+c35KxoS64FwtXbd6bs6/OefVVfHf5nIwN5A5Mxrjk2V3sy7o4jPh93Hbc/IwNn/dPxnjDhp3ai6f3dzRw7cKZN9Qcjk7xumd2aLUry0v5zepFqXoMTcV5zTM7tBeo5X4f9564OLWabCrp8NpntrPVcGGf/kw4wGXP7eKxAf3+Qtl1tu0js7Y6wi/d53kB+94pjaEA605dlnp06peHB/jA1n1abVApfn/yktQjGc8MjXHBRvMrTn+4ci6v9yY+D07GWLv+ReMy3Y/NbU7t9zSZSLLmya3GZHpBQxU3e3v1OI7D+Rt3stGw/86isjCPrpl5/t+2L0TE72PD2uWpjT2/ufcQX7bsOfPQSYtTbzC8u2eQqy17zly3fA4XexOzW0cnOPtp854z/zSniX/19i0amIpz4hNbjQO98+qr+MFxblskHIdXrn/ROAmY3Ra2fVYifh8b1/7l91lZNzjKxZZ9Vv7P3CY+Ns9ti4OTMdY8udU4EHpDQzU3rZwLuI8Xn7RuK72Gc2hppIQ/pO2z8oYNO3l6WD9x+T/Zw+n7L/fwOcseTul95MG+Id65aY9R+2/zW/iQt4fTS94eTqYRw5ubavjucneyfiyRYPUTW40DllUVpdx/4mKUUjiOwzlPmz2rJuBn42krUi9nsE3s+3D3RJt+3Mu2xyDAT1fN52xvT7RfHx7g/QYfAvjswlbe1+HuC7FldIJzLP3pHS11fG2p+xa1wak4Jzyx1TgRcUpVhLtWLwLczbpfuf5F42NAjaEAz6xdntoo9x0vvMRDmhXD4A40n167PPXI4Gd3HeC6LnN/+tUJC1nr5cqfdh/hI9u6jNr/WNzOu7x9IfJ58tXt9Xx+kbsvRG9sihOf2Gq84DyrpoKfH78AcHPZmie3GgfqHSUhnjx1WWpDVNteeWGfYuPaFanNpPPtD3ffiYtTm8/fvL+XT1n2h/vOsk4u9VZbP9Y/wmWW/eE+1NnIv3l7znRNxjjVsj/c6+ur+KHns5OJJKvXbTFeGC4qC/PYmqUpb3n9MzuM+z5G/D6eO21F6nHrD724j//S3Kyb5pE1S1jqvfHwO/sO84WXzCvEb14xlwsaXW+5v2+Iv7d4y7/Ma+af57r5d9f4JGesN+9bdElTDd+f9pZ4guOf2GK8oD6uvJQHTprxlr97ejvbDN5S7XnL9Fjr6s17ubvX7C1PpO23+B97uvlPi7fcvmo+53je8qvDA1xj8ZZrF7Tyfm/PmU0j47zGsifa5S21fN3bc2ZgKs4JT5j3RFtTFeE3ad5ymiVXZ3vL5c+/lLNVxjTZ3nLtrgNcb/GWXx6/MDVW/unBI3xku9lbvrK4nb/3vOXpoTHeUKC39ESnOGnd7HhLe4m7n1VB3uLtPVmot6TvPfmX9JZT1m01Tlr8Jb3l2dNWpCZOisVbTl+/zagtBm/J3sv1Ky91WxcDFJO3HM0eTvKWOqGoKfP7OLkqwmvrqzijpkL7imwAn1KsrS7nve0NvKWllg/PaeKq9obUJmzplPp9XNZcw1m1lXSWhHhVbQWfWdDK1R2NOa9Erwz4eUtLLfWhALGkQ7M36fWtZZ2sqCjN0b6xqYZY0mHfZJSEA8dVlPGpBS1c09GYsdl6ecDPeQ1VdEen2D0excGdtX5Lcy3fWz4n47XhJX4f5zVUsXN8MmOwsKq8lBtXzmNlWj38SvH6+io2j05kTF
BVB/z8+6L2jD21AF5TV8mW0YmMchXwztY6PruwLePNKWfWVLBrfDLnefaTKyPcsnIekbQ6r6mKcCg6lbMRcEsoyO2r5tNeMvNmmKWREuIOrM/a5DTsU3xv+RzWpk3qtZaEqAsF+MOR4ZzB6UfmNnFF28zrvSsCfpZFSrm3dyhnUH9+QxWfX9SeOr6AT7GmKsI9vUM5E1Qry0u5wds8Hdw3gLyyppz7+oZyVhg1hgLctmp+xhu41laX82j/SM5kVtinuHHF3Iy3vayuLOP5kXHtBefnFramJgrAfVS0Nxbnec2GqJe31PKhOU2pc64hFCTi92lX9q2tjvDVJR2pc7/U7+6Pdm/fYM6AZV5piJtWzktNhPiUYnVlGXf3DObcZakK+PnhcfMy3vZySnU59/YO5iT0oHJjnf6WupOqIjw2MKK9u/jJ+S2pwQfAikgp28Zyz01wVy1+OK0tOkpCjCeS2smeU6oifCWtLSoC7v4luseA55SEuGHlTF8NKOVOFvcO5uxnURXw84Pj5tGc1hYnVkW4u2cwZyIioOD7y+dknBcnVUZ48MiQdlD4qfktGefFqopSNgyPaSeo39pcywc7Z7xofmmYw9E4L2g27F5bHeGLi9pTbVETDBjPoXmlIa5fPjflASGfj4VlYe7pzT2Hqr22mH7zjVKK4yvL+M3hgZxzKKgU162Yw9K0twCdVFXGvb1D2rvZn13Yyjl1M29EOr6yjCcGRrWrON7VWsfV7Q2ptlgcKeGl8ah2AHlmTTmfW9SWevNNYyiIg6Nd6bioLJxx46LE76OjJMh9fUM5nlUb9HPLynmpvKaUYlV5KXf1DObc+Q4pxY0r52a8YezEyjJ+2zOovYP7pcXtGW+eW10Z4RGND4F74TY9gQSwLFLKlrEJdmv606vrKvnUgtaUd7aWhBhLJLQb4C+LlPCtZZ2pVcARv9ufHtD0p8ZQgJtXzkuthPArxYqIvj+V+BQ3r5iX8Yax1ZVl/EbTnxTwtSUdGTeeTqyK8GDfsHYF1T92NnJZy0yeXFleyrPD49r9b85vqOLj81pSE+qdJSH6DJ68qqKUry/tTG0iWxXwUxX083vNBHxrOMiNK+em7v4HfIrFZSX8tncopz+V+33cvHJexlhndWUZv/be5pmOH/jWsk5ekfYmxxMrI9zXN6Tdh+vj85p5Q9oNqlUVZawfHNWuMnxTU6bPLigN0zUZy3j77DQnV0b4cprP1norKB7X3PzqLAlx/Yq5qZwT8vmYVxrm3t7BnP5UFfBz88q5KZ9VSvGKijJ+0zOQMxHh+uxclqV5y4mVZdyjyU/gbnL+mrS3rR1fUcbjA6PalUCXt9Tyvo4Zb1kSKWHneFS7wvf06nI+v3jGW5rCQRKOo930fWFZmO8tnxmLlPp9tJWEuF/jLTUBP7ccN5f60IzPHldeym8M3nLDirksTLuZe1KVm9d1KyK+sLidM7K85ff9w9qbLe9pr09NIIHrLZtHJ7Qro8+ureDTC2bGnW0lIUYSCTZovGVppIRvp3tLwE99KKDN1Q3BXG9ZbvGWm1bMLdhbvro001tWW3L1BzsbeUuWt2wcHs+5oQ3ujb1/mZ/pLabxns5bKgN+7c291nCQG1bMSd3YC/gUiwzeEtF4ywmVZdx1FN7yu1nwlks03vLyZIytBm/50pL21MRpbTBAyOIt162YGcMdrbesqnD7k85bvrd8Tsbbqo/GW044Cm9ZHClhl8FbXlldzr8X6C0LSt1xy/RCgEK9Rd5SJyuchGOIoak4/VMJGsOBjNeK63h5Isq+iRgN4QBLykoyJrGy2TY2weaRCSoCfs6oqchYuZXN5pFx980TSnF2bQWdack2m00j4/z+yAhTjsNp1eWsrY4Y67F1dCI1aFlVUcoFDZmvC09nx9gkdx7qp28qzvzSMG9pqc2YuEln70SUn3X38/JElKZwkLc012YMHNM5OBnj9u4jbB2dpCrg541N1byqpkJb575YnJ92H+GpoTHCPsVr66u4qLE6lcjTGY4nuKO7nz/0u5Nfp9dU8PaW2oxXnE8zmUjyy8MD/LZ3kPFEkhMqy7iirT5jUDNNwnG4u2eQ/z48QP9UnEVlJfx9W33qrlg6juPw+/4Rftp9hP2TMVrDId7eUpvxeup0nh4a49YDfWwfm6Qm6Ofiphre3FST8WrhabaNTXDL/j42Do9R4vNxXkM172ipTQ1U0tk/GeOW/b2ppP6q2gqubKvXTvj2T8W59UAf9/UOMZFMcnJVhPe0N2Qk52nGE0luP3iEX/UMMDSVYGl5Ce9uq8+4kE5vtzsPDfDzQ0fojk7RURLi8pY6LmysznnhgeM4PHhkmFsP9LF7PEpdKMCbm2p4e0ud9vx8emiMm/b38tzwOBG/jwsaqrmyvV4b6+1jk9zU1csfB0fwozi7roKr2xu0fao7GuPGrl4ePDJMLOmwpirC+zoaMibephmOJ/jh/j5+3TPAcDzByopSrmpr4Mza3LaIJZP85OAR7jjUT28sTmdJiHe11fNGQ1vc1TPIjw8eYc9ElIZQgMuaa3lna532vH+sf4Sb9veyeXSCcr+PixpruMrQFi+MjHNDVy9PDo4SUIrX1Ffyvo7G1GN66eybiHJDVy8PHRl23yJUXc77OxpSK/rSORKLc/P+Xu7uHWQ07nrL1e0NqTfOpTORSHLrgT7uPOy2xfzSMH/vtUV2H0k4Dr841M9PDh7h5ckYzaEgb2lx20LXRx7oG+IH+/vYOjZBVcB97f3V7fXaPrJhaIzrunp4ZmickE/xuvoq3tehvymya3yS617u4ZH+EZLA6TXlfKCjUetxPdEpbtjfyz1p3vLe9gZeqekjo/EEPzjQl/KWhWVhrmir58KG3LaIJx1+2n2En3b3u95SEuTtLXVc3lKXc2PGcRzu6R3ih2necklTDe9pb8i4eTLNusFRbuzqZUPKW9y2aNEs0X9xdILrunp4fGAUhXvj45rORu1K54OTMa7v6uV3fUPuisNKtz+dkrXKGdz+dFNXL7/qGWBwyt178sr2el5fX5XTFrFkktsOHuHn3f10R6do97YCeGtzbeqtXult8eueQX50oI+d41HqPW+5sr1em98f6x/hxnRvaazm/R0N2ty3aWSc67t6+ePACH6lOLu2kms6GzImIad5eSLK9V293N83xJTjcEpVOdd0NKTeFpZO/7AiznsAABz8SURBVFScm7p6uatn0PWW8lKubK/n3LSLoGkmE0l+dLCPO7r7ORyLM7fUbYu3NNfmeEvScbjz8AC3HTjCSxNRGj1vuaKtPrViMZ2Hjwxz8/5eNo1MUBHwef2pIbV6JJ1nh8e5vquHdYOjBJXiNfVVXNPRoM2pe8ajXNfVw8NHhol745b3dzbyCo239MamuLEr11um3w6bzngiyQ/293Ln4QF6Y3EWlIV5V2sdb2qq0XrLHd393DbtLeEAb22u411tep+9r3eIWw70Zoxb3tvekFoNnc7TQ2Nc39UzM26pq+L9nXqf3Tk2yfe7PG9xHM7w+tMKjbccjk5xfVcP9/QOpbzlfR0N2vw7G
k9wy37XZ/unEizyvOUijc9Oed5ye/cRDkxO0RoO8vbWOt5h8Jbf9g7xgwO97BiLUuuNW642eMsTA6PcsL+HjcPjKW95v8Vbvt/Vw+P9oyg17S0NqVU36RyYjHF9V4/nLU7KW07VeMvQVJyb9vfxq8MDDMZnvOU8g7f8OMNbgryjtY63NdcZveXWA33sGo9SFwxwaXMNV7bVZ9zwneZoveW6rl7+5HnL39VW8IHORq237PO85QHPW9ZURbimozH1ltN0pr3l1z0DjMSTrCgv5SqLt9x6oI87DvXTE4szpzTEOy3e8otDA9x2sI89E7HUuOXdBm956MgwN3e545aKgI8LG2t4r8FbNg6Pcb03bgkqxavrKvlAZ2Neb5m+JrpmFr3lF4cH6EuNW4rPW+QtdTLhJAiCIAiCIAiCIAiCMKsczYSTecmDIAiCIAiCIAiCIAiCIPwZyISTIAiCIAiCIAiCIAiCMKvIhJMgCIIgCIIgCIIgCIIwq8iEkyAIgiAIgiAIgiAIgjCryISTIAiCIAiCIAiCIAiCMKvIhJMgCIIgCIIgCIIgCIIwq8iEkyAIgiAIgiAIgiAIgjCryISTIAiCIAiCIAiCIAiCMKvIhJMgCIIgCIIgCIIgCIIwq8iEkyAIgiAIgiAIgiAIgjCryISTIAiCIAiCIAiCIAiCMKvIhJMgCIIgCIIgCIIgCIIwq8iEkyAIgiAIgiAIgiAIgjCryISTIAiCIAiCIAiCIAiCMKvIhJMgCIIgCIIgCIIgCIIwq8iEkyAIgiAIgiAIgiAIgjCryISTIAiCIAiCIAiCIAiCMKvIhJMgCIIgCIIgCIIgCIIwq8iEkyAIgiAIgiAIgiAIgjCryISTIAiCIAiCIAiCIAiCMKvIhJMgCIIgCIIgCIIgCIIwq8iEkyAIgiAIgiAIgiAIgjCryISTIAiCIAiCIAiCIAiCMKvIhJMgCIIgCIIgCIIgCIIwq8iEkyAIgiAIgiAIgiAIgjCryISTIAiCIAiCIAiCIAiCMKsox3H+2nWYdZRSvcA+zZ/qgb4Ci/lLaYulHseatljqUQzaYqlHMWiLpR7FoC2Wehxr2mKpRzFoi6UexaAtlnoUg7ZY6nGsaYulHsWgLZZ6FIO2WOpRDNpiqcexpi2WehSDtljqUQza/+16zHEcp6Gg/+04zt/MF/DMX1tbLPU41rTFUo9i0BZLPYpBWyz1KAZtsdTjWNMWSz2KQVss9SgGbbHUoxi0xVKPY01bLPUoBm2x1KMYtMVSj2LQFks9jjVtsdSjGLTFUo9i0BZTPbK/5JE6QRAEQRAEQRAEQRAEYVaRCSdBEARBEARBEARBEARhVvlbm3C6sQi0xVKPY01bLPUoBm2x1KMYtMVSj2LQFks9jjVtsdSjGLTFUo9i0BZLPYpBWyz1ONa0xVKPYtAWSz2KQVss9SgGbbHU41jTFks9ikFbLPUoBm0x1SOD/y83DRcEQRAEQRAEQRAEQRD+evytrXASBEEQBEEQBEEQBEEQ/sLIhJMgCIIgCIIgCIIgCIIwu/xPXnF3LH0BrwO2A7uAf7XofgD0AJsLKLMD+APwIrAF+LBFWwI8BTzvaT9bQPl+4Fngt3l0e4FNwHMU8NpCoBq4E9jm1X2tQbfEK3P6axj4J0u5/+wd22bgZ0CJRfthT7clu0xdDIBa4EFgp/dvTR79pV7ZSeCkPNqvem3xAvAroNqi/bynew54AGjNd94AHwUcoN5S7rXAgbS2Ps9WLvCP3vm8BfiPPMd3R1q5e4HnLNrjgSenzyVgjUX7CmCdd+7dDVTa+oUuhhZtTvws2pz4WbQ58TNpdfGzlGuKn7Hs7Bhays6Jn0WbEz+LNid+GHwKmAes92J3BxCyaD+I67Opc97mgcDtXjtsxj3PghbtLd7vXsD1sHKTNu1zvwOM5qnDrcCetHY+3qJVwBeAHV6bfsiifTytzIPAr/PU4xxgo6f/I7DQoj3b024GfgQETLlDFz+LVhs/gzYndhZtTuzy5br02FnKzYldHn1O/CxabfwM2pzYWbS22O0lK59jyH8GrSn36bSm3KfTmnJfjtaS+3TlXoveO7XlYs59urJNuU+nNeU+ndaU+3LGVpbY6bSm2Om02thZ9Kb4GceDmvjpyjXFT1uuLn6Gck2x02lNsdNpTbHTjnd18bNodeMWk1Y3bjFpdeMW6/iczHGLqVxT7IxlZ8fPUrZu3GLS6sYtJq0pfjnXHxjynkFrGrfotLa8p9Nrc59Oa8p9hnJvRZP7DFpt3jNoTeMWndaW93R6be5Dc02I2Tt1WpN36rSmvKfTmnzTdg2b7Zu6cq9F0/dsZaP3Tl3ZJu/UaU3eqdNq+16hXwULj+Uv3MHebmA+7oXS88Byg/ZMYDWFTTi1AKu97ytwO7KpXMWMyQRxTfDUPOV/BPgphU041eerb5r+R8B7vO9DpA1U8rThIWCO4e9tuMZX6v38X8AVBu1K70QuAwLAQ8AiWwxwE9u/et//K/CVPPpluAnrETLNR6c9lxnT+8p02QZtZdr3HwKut503uBf79wP7mDEfXbnXAh8t5HwE/s5rs7D3c2Oh5y/wdeDTlrIfAF7vfX8e8IhF+zTwKu/7K4HP2/qFLoYWbU78LNqc+Fm0OfEzaXXxs5Rrip9JnxNDWz2y42cpNyd+Fm1O/DD4FG5/fqv3++uBayzaE4C5ZPmSRX+e9zeFOyixlZ0ev//EPY+M3gqcBNzGzISTqdxbgTdntbVJ+27gx4AvLXZ5/R34b+BdecreASzzfv8Br1467WlAF7DY+/3ngKvSPisjd+jiZ9Fq42fQ5sTOos2JnUmri52l3JzY5dHnxM9WD138DOXmxE6nxV1Zboudrt21+c+gNeU+ndaU+3RaU+7L0eq801Lutei9U6e15T5tPbK901K2KffptKbclzO2ssROpzXFTqfVxs6iN8VPOx40xE9Xril+Oq02fqY6GGKnK9cUO51WG7usz0uNd03xM2i18TNojfHTaLWx02lNsTOUq42dRW/sf7p66OJnKFcbP4NWN27RXn+gH7eYtDl5z6LV5j2LXjduMV4zkTtuMZV7K7njFpNWN27Je92Gl/cs5WrznkF/JZrch+GaEP01g0mru2YwaXXXDCat7prBeA1L7jWDqdxr0fumSa+7ZrBeS6f3PUu5umsGkzavd9q+/lYeqVsD7HIc5yXHcWLAz4GLdELHcR4D+gsp1HGcbsdxNnrfj+DOGrcZtI7jOKPej0HvyzGVrZRqB84Hbi6kLoWilKrEnTy4xatXzHGcwQL+6znAbsdx9lk0AaBUKRXAPVEPGnTLgCcdxxl3HCcOPApcPP1HQwwuwh084P37RpvecZwXHcfZnv3BBu0DXj3Aneltt2iH036M4MXQct58A/g4abE+ynNMp70G+LLjOFFP01NI2UopBVyGmyBNWgd3pQtAFV4MDdolwGPe9w8Cb/K0pn6RE0OTVhc/izYnfhZtTvzy9OOM+B1Nn8+jz4lhvrLT42fR5sTPos2Jn8Wnzsa9MwczsdNqHcd51nGcvZq2MOnv9f7m
4K7kabdoh9PaonSm2FytUsqPeyfr4/nqkF3XPNprgM85jpP0dD35ylVKVXht+Os8Zevip9MmgKjjODu836f6X3bu8NoqJ346rVc3bfwM2pzYWbQ5sTNpdbEzaW0Y9Dnxy1d2dvwMWq13arR1GGJnwZj/stF5p0WrzX0GrTb3WcjJfbOAMffZyM59BrTxM5DjnZaxVU7sTFpd7Cxabews+pz45RkPZsTvaMaOFm1O/PKVmx47izYndhatdtySRfp4N1/fS2kL6Hvp2nx9L12br+9lj89tfa+QsbxJn6//5ZRt6Xvp2nx9L11ril/29Uc3hryn0R405T2DVpv3LHpt7tNpTblPp9XU1abV5j1budl5z6C1xS5bP4Y+95muCXV9T6s19D2TVtf3TFpd37Ndw2b3Pev1rgaTXtf3rGVn9T2TVhc/k7YQ7zTytzLh1IY7qzrNfiwXiX8OSqm5uDPk6y0av1LqOdxHkx50HMeoBb6Je9ImC/h4B3hAKbVBKfXePNr5QC/wQ6XUs0qpm5VSkQI+461YBmuO4xwAvga8jGv0Q47jPGCQbwbOVErVKaXKcGdVO/J8fpPjON3eZ3Xjzu7+JbgS+J1NoJT6glKqC7gcd+bYpLsQOOA4zvMFfvYHlVIvKKV+oJSqsegWA2copdYrpR5VSp1cYPlnAIcdx9lp0fwT8FXv+L4GfMKi3Qxc6H1/KZoYZvULawwL6UMFaHPil621xS9dmy9+mjpY45elt8bQcHza+GVprfHL0mrjl+1TuKtDB9MSdMo/j9LTrHqlVBB4J3CfTauU+iHu3c6luMvOTdoPAr+ZPucKqMMXvPh9QykVtmgXAG9RSj2jlPqdUmpRAW1xMfBw+uDFoH8PcK9Sar/XFl82xOQpIKiUOskr7s3M9L/s3FGHIX4arQ2jNjt2Jq0udgatNnaWOuTEzqLXxs92fOTGT6fVxk6j7cMcO9Dnc5N3Hk3uz6dN906t1uCdOVqLd5rqoPNOndbmm7bjy/ZOndbknTqtzjtNYytd7I5mHFaINj12Rr0mflqtIX62emTHz6TVxS/f8aXHzqTVxc6kzTtuIXO8m2/saR0bF6jVjTsztIa+l6PNN27R1CHfuDNdn2/sqTs+07gzXZtv3JmuzYmf7voD2IAm7x3NtUo+bXbes+mzc59Fm5P78tQjI/dZtDl5r4C2SOU9i1ab9wwx+S/0uc90Tajre0dz/ViIdrrvGbWavqfVGvqerQ66vmfS6/pevuNL73smra7vmbSFeKcZ5yiWQx2rX17D3Jz28ztxO7xJP5cCHqlL05fjmtslBeqrcfdUWWn4+wXA973vzyL/I3XTz5Q24j4ueKZFexIQB07xfv4WeZbF4S5F7sPt/CZNDfB7oAH3DvyvgXdY9FfhPsf7GO4SxW/YYoCbONL/PlBIzNAsbbZoP4n7PK8q5FzA7Zif1WlxZ/PXA1Xez3vJfLwo+/iacJcN+3Cfs/6BRbsZ+Dbuct41uMtW89YZuA74P3na+du4K13AnRl/yKJdirsccwPwGeCIrV/YYpitzRM/k1YXP2Pf1MQvpS0gftnHZoyfQW+MoeX4dPHLLtcWv2xtvvhN+9QZuCtEp3/fAWwyaFem/S6jzQrQ3wR8s0CtH/g+8G6D9kzc/QSml02P2srFfexQAWHcO2nZy//TtaPTcfDOlccLqO/vpuOSpx6/ZMabP0Za3tJo1+LutfAU8O+4ewTl5A5cT86Jn06b9Vmp+BWgTcWuAG0qdob6tupiZyrXFDuLPid+BdQ5FT9LuTmxs2hzYpf2WTn5HIN36rRpmkfIfCzLps3wTps22zsN9dV6p0Gr9U6D1uabtuPL8E5D2VrvNGhzvBPD2EoXO5NWF7sCtNmxyzvGm46fQftVXfwsx5cTP4tWF798x5eKnaXcnNhZtPnyXsZ4Vxc/k9bU9/JodeMW45ib3HFLSkv+cUv2seUbt2Trbf3PdHy6cUt2ubZxS7ZW1/d01x/vRJ/3rNcqZPpVPm3GmKUAfXru02nfhT73actFk/ssWl3ey1ff9LxnKlc7ZrHotbkPzTUh5rxnvH4kN+/ZtNneme+6ND3v6epryns6re2aT6fX9r08x5ed93TlmvKeTmv1znxfBQuP5S/vBL8/66T5hEU/lwInnLyOdD/wkaOs02cwPDsNfAl3Nn4v7oz4OPCTAsu91lSu9/dmYG/az2cA9+Qp8yLggTyaS4Fb0n5+F95Au4A6fxH4gC0GuBultXjftwDbC4kZBU44AX+PuxlaWaHnAu7z5Jt1WuA43NUIe72vOO5Mf3MB5WYfe/bP9wFnpf28G2jIc3wB4DDu40q2zxpixnwVMFxgWywGnrL1C1MMdVpT/ExaXfxs5WbHL1tri18B5Wa3qa4ttDG0HF9O/AzlauNXQJ0z4pf2+8/gDiL6mBkEZfhplvajaT/vxb6nSkrvff9rvP0F8pXt/e5V6Pfa+Yz3dSgtfknSBp95yj3LUu5HcTebnJvWxkN5jq0Od1Bse4nCdDvvTvtdJ7C1wDqfi3v3UJc7btfFz6D9SVqZqfjZtNmxy1dueuwM2gFd7AosNxU7k14XvzzHlxE/g/YeXewKrPO5wH8Zzotrcc85a/5L16b9/AiafWSytRhyn6lc73cZuS9L+3+x5L485c61lPtR8uQ+w/Fpc5+mbGPuy1PnxbgXT9qxlS52Jq0udjatLnb5yk6Pn0H7sCF+xxVQ7lxLufcY4rfCcnwZsbOUmxO7AtshJ++RNd7Vxc+ktfU9nVYXP1u5ur6XriX/uNNW7lxyx43ZbWHsf4bjM407s8u1jTttdZ7ue7rrj+vQ5z3rtQqZec+oRTNmyVe297vp3KfT7kGf+wop9yxLud9Hn/dsx5ed90xtrB2zFFhnbe7DuyaksLyXcf2IPe+ltOTPe7rrUlPe+yLuJtuF5L2817uGtsib+7KOL1/emy63kLynq7P2msH29bfySN3TwCKl1DylVAh3eeZv/qeFKqUU7vPhLzqO8595tA1KqWrv+1Lg1bgGkIPjOJ9wHKfdcZy5Xl1/7zjOOwzlRpT7nC3ecuFzcRO+FsdxDgFdSqkl3q/OwR0Y23gb+ZcMvwycqpQq89rlHNy9YrQopRq9fztxZ9vzlf8bXIPA+/euPPqCUUq9DvgX4ELHccbzaBel/Xgh5hhuchyn0XGcuV4c9+Nu3HzIUG5L2o8XY4khbpI72/t/i5m5A2Tj1cA2x3H259EdxE2IeJ9hfPwuLYY+4FO4s+C2fpETw6PsQ1qtLn4WbU78dFpT/HANXFeuNn6W4zPF0NQWGfGzlJsTP0tb5MTP4FMv4q6qebP3X6djV7CneRqtXin1HuC1wNscb38Bg3a7Umph2vG/wfv/Ou0Gx3Ga0+I37jjOQksdWtLKfSOw2XJ8qdh5bb0jT1tcijsJMpmnLV4EqrzzAeA1wIuWOk/HL4x7/l9vyB2X6+J3NHnGpNXFTqcF3qmLnaHcGl3sLHXIiZ2tzrr45WmLjPgZju8iXewsdc6JnfezKZ/rvLPg3G/SGrzTpNV5p077tME7Rwz
l5nin5di0vpmnLbK906TVeaepLXK80zK2yond0YzDTFrTuMWiz4mfQbvRMHbZZCg3J36W49PFb6ulLTJiZyk3J3aWdtCOW9LIHu/axp6FjI21WlP8DFrbuDOlLWDcmV1uvnFn9vHZxp66tjCNO7O1tnFndp118dNdf2xFk/cMWtO1ilary3t59Dm5z6D9T13us5Sry32m48vJe3naInvcYmrjnLyXpy1MuU93TajtewatFp3W1PcMWm3f02h/bOp7hnKNfc9wfKbcZ2qLnL5n0Gr7nqHO+bzTztHMTh3LX7jPIO7AnRX8pEX3M9znTae8E+Yqi/Z03Of7p1+ZmPFqwyztKtxHHl7wTqxPF1jvs7A8Uof7nPrzzLwy23hsaf/neNzXH77gncQ1Fm0Z7ix3VQHlfha3M27GfcNC2KJ9HNesngfOyRcD3Nn2h73O8DBQm0d/sfd9FHeS4H6LdhfuHl/TMbzeov1v7/hewH0tZFsh5w2Zd0105d6Gu9z3BVyTbbFoQ7h36jfjLnk8O9/5i/s2i/cX0M6n4y6XfB53eeiJFu2HcfvUDtzntqdnybX9QhdDizYnfhZtTvws2pz4mbS6+FnKNcXPpM+Joa0e2fGzlJsTP4s2J34YfArXZ57y2voXuEu4TdoPebGL4yaz6SXWJn0c15en6/ZpnRZ36fGfvHbejLtyp9JUblb8RvPU4fdp5f4E9/FDk7Ya9676Jtw7ZK+w1QH3jtvrCskHuOf9Ji9+j3jtbtJ+FXdwt52s1/Jm5w5d/CxabfwM2pzY6bSm2BWS69A/Dpleh5zY5dHnxM9WD138DOXmxM6i1cYOQz5H750mrc47TVqdd5q0Ou/MO/5gxjtN5eZ4p0WrzX22epDrnaaydd5p0ppyX87YShc7i9Y0btFpteMWi940drGOB8kcu+jKNeU+ndYUP20dsmNnKdc0btFptbHz9DnjXUv8dFpT/HRa07hTpzXFzjo+z4qdrlxt7Cx6U/y09TDET1euKX46ranv5Vx/YMh7Bq1p3KLTGvOeQa/NfTqtKfcZytXmPoNWm/dMdUA/btGVa8x7Br0p9+VcE2Luezqtqe/ptKa+p9Oa+p7xGlbT93Tl2vqeTm/qe9p6oO97unJNfU+nNXpnIV/THVUQBEEQBEEQBEEQBEEQZoW/lUfqBEEQBEEQBEEQBEEQhP8lZMJJEARBEARBEARBEARBmFVkwkkQBEEQBEEQBEEQBEGYVWTCSRAEQRAEQRAEQRAEQZhVZMJJEARBEARBEARBEARBmFVkwkkQBEEQBKGIUUo5SqmFf+16CIIgCIIgHA0y4SQIgiAIgnAUKKX2KqUmlFKjaV/f/WvXSxAEQRAEoZgI/LUrIAiCIAiCcAzyBsdxHvprV0IQBEEQBKFYkRVOgiAIgiAIs4BS6gql1J+UUt9RSg0ppbYppc5J+3urUuo3Sql+pdQupdTVaX/zK6X+TSm1Wyk1opTaoJTqSCv+1UqpnUqpAaXU95RSyvt/C5VSj3qf16eUuuN/8ZAFQRAEQRCMyAonQRAEQRCE2eMU4E6gHrgE+KVSap7jOP3Az4AtQCuwFHhQKfWS4zgPAx8B3gacB+wAVgHjaeVeAJwMVAIbgLuB+4DPAw8AfweEgJP+0gcoCIIgCIJQCMpxnL92HQRBEARBEI4ZlFJ7cSeU4mm//hgwBXwRaHO8AZZS6ingO8AjwF6g2nGcEe9vXwJaHMe5Qim1Hfi44zh3aT7PAc5wHOeP3s//BWx0HOfLSqkfA5PA5xzH2f8XOFxBEARBEIQ/C3mkThAEQRAE4eh5o+M41WlfN3m/P+Bk3s3bh7uiqRXon55sSvtbm/d9B7Db8nmH0r4fB8q97z8OKOAppdQWpdSVf+bxCIIgCIIgzCoy4SQIgiAIgjB7tE3vr+TRCRz0vmqVUhVZfzvgfd8FLDjaD3Mc55DjOFc7jtMKvA/4vlJq4Z9XdUEQBEEQhNlDJpwEQRAEQRBmj0bgQ0qpoFLqUmAZcK/jOF3AE8CXlFIlSqlVwFXA7d7/uxn4vFJqkXJZpZSqy/dhSqlLlVLt3o8DgAMkZvugBEEQBEEQjhbZNFwQBEEQBOHouVsplT6x8yBwF7AeWAT0AYeBNzuOc8TTvA24Hne10wDwGcdxHvT+9p9AGHcD8HpgG3BxAfU4GfimUqrK+7wPO46z539yYIIgCIIgCLOBbBouCIIgCIIwCyilrgDe4zjO6X/tugiCIAiCIPy1kUfqBEEQBEEQBEEQBEEQhFlFJpwEQRAEQRAEQRAEQRCEWUUeqRMEQRAEQRAEQRAEQRBmFVnhJAiCIAiCIAiCIAiCIMwqMuEkCIIgCIIgCIIgCIIgzCoy4SQIgiAIgiAIgiAIgiDMKjLhJAiCIAiCIAiCIAiCIMwqMuEkCIIgCIIgCIIgCIIgzCr/D4vulOoifivTAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "#Plot training loss over Epochs:\n", + "color = sns.color_palette()\n", + "#Draw Weight Variance Ratio\n", + "dataplot3 = {\"svrg_mse\": [], \"sgd_mse_lr_0.001\": [], \"sgd_mse_lr_0.0025\": [], \"sgd_mse_lr_0.005\":[]}\n", + "with open('sgd_0.001.json') as sgd_data, open('svrg_0.025.json') as svrg_data, open('sgd_0.0025.json') as sgd_data_2, open('sgd_0.005.json') as sgd_data_3:\n", + " sgd = json.load(sgd_data)\n", + " svrg = json.load(svrg_data)\n", + " sgd_lr = json.load(sgd_data_2)\n", + " sgd_lr_2 = json.load(sgd_data_3)\n", + " for epoch in range(100):\n", + " dataplot3[\"svrg_mse\"].append(svrg[str(epoch)][\"mse\"])\n", + " dataplot3[\"sgd_mse_lr_0.001\"].append(sgd[str(epoch)][\"mse\"])\n", + " dataplot3[\"sgd_mse_lr_0.0025\"].append(sgd_lr[str(epoch)][\"mse\"])\n", + " dataplot3[\"sgd_mse_lr_0.005\"].append(sgd_lr_2[str(epoch)][\"mse\"])\n", + "\n", + "x3 = list(range(100))\n", + "plt.figure(figsize=(20, 12))\n", + "plt.title(\"Training Loss Over Epochs\")\n", + "sns.pointplot(x3, dataplot3['svrg_mse'], color=color[9])\n", + "sns.pointplot(x3, dataplot3['sgd_mse_lr_0.001'], color=color[8])\n", + "sns.pointplot(x3, dataplot3['sgd_mse_lr_0.0025'], color=color[3])\n", + "sns.pointplot(x3, dataplot3['sgd_mse_lr_0.005'], color=color[7])\n", + "color_patch1 = mpatches.Patch(color=color[9], label=\"svrg_mse_0.025\")\n", + "color_patch2 = mpatches.Patch(color=color[8], label=\"sgd_mse_lr_0.001\")\n", + "color_patch3 = mpatches.Patch(color=color[3], label=\"sgd_mse_lr_0.0025\")\n", + "color_patch4 = mpatches.Patch(color=color[7], label=\"sgd_mse_lr_0.005\")\n", + "plt.legend(handles=[color_patch1, color_patch2, color_patch3, color_patch4])\n", + "plt.ylabel('Training Loss', fontsize=12)\n", + "plt.xlabel('Epochs', fontsize=12)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 2", + "language": "python", + "name": "python2" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.15" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/example/svrg_module/linear_regression/common.py b/example/svrg_module/linear_regression/common.py new file mode 100644 index 000000000000..14a144f40ce2 --- /dev/null +++ b/example/svrg_module/linear_regression/common.py @@ -0,0 +1,117 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+
+
+import mxnet as mx
+import logging
+from mxnet.contrib.svrg_optimization.svrg_module import SVRGModule
+
+
+def create_lin_reg_network(train_features, train_labels, feature_dim, batch_size, update_freq, ctx, logger):
+    # fit a linear regression model with mxnet SVRGModule
+    print("Fitting linear regression with mxnet")
+    train_iter = mx.io.NDArrayIter(train_features, train_labels, batch_size=batch_size, shuffle=True,
+                                   data_name='data', label_name='label')
+    data = mx.sym.Variable("data")
+    label = mx.sym.Variable("label")
+    weight = mx.sym.Variable("fc_weight", shape=(1, feature_dim))
+    net = mx.sym.dot(data, weight.transpose())
+    bias = mx.sym.Variable("fc_bias", shape=(1,), wd_mult=0.0, lr_mult=10.0)
+    net = mx.sym.broadcast_plus(net, bias)
+    net = mx.sym.LinearRegressionOutput(data=net, label=label)
+    mod = SVRGModule(symbol=net, context=ctx, data_names=['data'], label_names=['label'], logger=logger,
+                     update_freq=update_freq)
+    return train_iter, mod
+
+
+def create_metrics(metrics):
+    metric = mx.metric.create(metrics)
+    return metric
+
+
+def create_logger():
+    logger = logging.getLogger('sgd_svrg')
+    logger.setLevel(logging.INFO)
+    formatter = logging.Formatter('%(asctime)s - %(message)s')
+    fh = logging.FileHandler('experiments.log')
+    fh.setFormatter(formatter)
+    logger.addHandler(fh)
+    return logger
+
+
+################################################################################
+# Functions below are for benchmarking purposes: they calculate the expectation
+# and variance of the gradients per epoch for each parameter. These calculations
+# are helpful when benchmarking SVRG optimization against other optimization
+# techniques, such as SGD. Currently they only calculate the expectation and
+# variance for a single context, but can be extended to multi-context in later
+# iterations.
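+#
+# A minimal usage sketch for these helpers (assuming `mod` is a module that has
+# already been bound and initialized, and `train_iter` is the matching data
+# iterator; both names are placeholders for objects created elsewhere):
+#
+#     grad_dict = {}
+#     num_batches = 0
+#     for batch in train_iter:
+#         mod.forward_backward(batch)
+#         accumulate_grad(grad_dict, mod)
+#         num_batches += 1
+#     calc_expectation(grad_dict, num_batches)
+#     calc_variance(grad_dict, num_batches, mod._exec_group.param_names)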
+################################################################################ + +def accumulate_grad(grad_dict, mod): + param_names = mod._exec_group.param_names + + for index, name in enumerate(param_names): + if name not in grad_dict: + grad_dict[name] = mod._exec_group.grad_arrays[index][0].copy() + else: + grad_dict[name] = mx.ndarray.concat(grad_dict[name], mod._exec_group.grad_arrays[index][0], dim=0) + + +def calc_expectation(grad_dict, num_batches): + """Calculates the expectation of the gradients per epoch for each parameter w.r.t number of batches + + Parameters + ---------- + grad_dict: dict + dictionary that maps parameter name to gradients in the mod executor group + num_batches: int + number of batches + + Returns + ---------- + grad_dict: dict + dictionary with new keys mapping to gradients expectations + + """ + for key in grad_dict.keys(): + grad_dict[str.format(key+"_expectation")] = mx.ndarray.sum(grad_dict[key], axis=0) / num_batches + + return grad_dict + + +def calc_variance(grad_dict, num_batches, param_names): + """Calculates the variance of the gradients per epoch for each parameter w.r.t number of batches + + Parameters + ---------- + grad_dict: dict + dictionary that maps parameter name to gradients in the mod executor group + num_batches: int + number of batches + param_names: str + parameter name in the module + + Returns + ---------- + grad_dict: dict + dictionary with new keys mapping to gradients variance + + """ + for i in range(len(param_names)): + diff_sqr = mx.ndarray.square(mx.nd.subtract(grad_dict[param_names[i]], + grad_dict[str.format(param_names[i]+"_expectation")])) + grad_dict[str.format(param_names[i] + "_variance")] = mx.ndarray.sum(diff_sqr, axis=0) / num_batches diff --git a/example/svrg_module/linear_regression/data_reader.py b/example/svrg_module/linear_regression/data_reader.py new file mode 100644 index 000000000000..d56ae03a5f4f --- /dev/null +++ b/example/svrg_module/linear_regression/data_reader.py @@ -0,0 +1,45 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
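As an aside on the benchmarking helpers just added in `common.py`: `calc_expectation` and `calc_variance` compute, per parameter, the mean of the per-batch gradients and the mean squared deviation from that mean (dividing by the number of batches in both cases). Here is a self-contained NumPy sketch of the same arithmetic, independent of MXNet and using made-up numbers purely for illustration:

```python
import numpy as np

# One gradient row per batch for a single parameter, mirroring how
# accumulate_grad concatenates the per-batch grad arrays along axis 0.
grads = np.array([[1.0, 2.0],
                  [3.0, 4.0],
                  [5.0, 6.0]])
num_batches = grads.shape[0]

# Expectation: sum over batches divided by the number of batches.
expectation = grads.sum(axis=0) / num_batches                         # [3., 4.]

# Variance: mean squared deviation from the expectation, per element.
variance = np.square(grads - expectation).sum(axis=0) / num_batches   # [~2.67, ~2.67]

print(expectation, variance)
```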
+ + +import numpy as np +from sklearn.datasets import load_svmlight_file + +# Download data file +# from subprocess import call +# YearPredictionMSD dataset: https://archive.ics.uci.edu/ml/datasets/yearpredictionmsd +# call(['wget', 'https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/regression/YearPredictionMSD.bz2']) +# call(['bzip2', '-d', 'YearPredictionMSD.bz2']) + + +def read_year_prediction_data(fileName): + feature_dim = 90 + print("Reading data from disk...") + train_features, train_labels = load_svmlight_file(fileName, n_features=feature_dim, dtype=np.float32) + train_features = train_features.todense() + + # normalize the data: subtract means and divide by standard deviations + label_mean = train_labels.mean() + label_std = np.sqrt(np.square(train_labels - label_mean).mean()) + feature_means = train_features.mean(axis=0) + feature_stds = np.sqrt(np.square(train_features - feature_means).mean(axis=0)) + + train_features = (train_features - feature_means) / feature_stds + train_labels = (train_labels - label_mean) / label_std + + return feature_dim, train_features, train_labels + diff --git a/example/svrg_module/linear_regression/train.py b/example/svrg_module/linear_regression/train.py new file mode 100644 index 000000000000..b3d942973f19 --- /dev/null +++ b/example/svrg_module/linear_regression/train.py @@ -0,0 +1,45 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + + +import argparse +import mxnet as mx +from common import create_lin_reg_network, create_logger +from data_reader import read_year_prediction_data + +parser = argparse.ArgumentParser() +parser.add_argument('-e', dest='epochs', help='number of epochs for training phase', type=int, default=100) +parser.add_argument('-f', dest="updateFreq", help="update frequency for SVRGModule", type=int, default=2) +parser.add_argument('-b', dest="batch_size", help="define the batch size for training", type=int, + default=100, required=False) +parser.add_argument('-m', dest='metrics', help="create eval metric", type=str, default='mse') +parser.add_argument('--gpus', type=str, help='list of gpus to run, e.g. 0 or 0,2,5. 
empty means using cpu') +parser.add_argument('--kv-store', type=str, default='local', help='key-value store type') + +args = parser.parse_args() +# devices for training +ctx = mx.cpu() if args.gpus is None or args.gpus == "" else [mx.gpu(int(i)) for i in args.gpus.split(',')] + +logger = create_logger() +kv = mx.kvstore.create(args.kv_store) + +feature_dim, train_features, train_labels = read_year_prediction_data('YearPredictionMSD') +train_iter, mod = create_lin_reg_network(train_features, train_labels, feature_dim, args.batch_size, args.updateFreq, + ctx, logger) + +mod.fit(train_iter, eval_metric='mse', optimizer='sgd', + optimizer_params=(('learning_rate', 0.025), ), num_epoch=args.epochs, kvstore=kv) diff --git a/python/mxnet/contrib/svrg_optimization/__init__.py b/python/mxnet/contrib/svrg_optimization/__init__.py new file mode 100644 index 000000000000..6e70009983c9 --- /dev/null +++ b/python/mxnet/contrib/svrg_optimization/__init__.py @@ -0,0 +1,22 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""SVRGModule, SVRGOptimization import. +""" + + +from . import svrg_module +from . import svrg_optimizer diff --git a/python/mxnet/contrib/svrg_optimization/svrg_module.py b/python/mxnet/contrib/svrg_optimization/svrg_module.py new file mode 100644 index 000000000000..5d6b5dd5720c --- /dev/null +++ b/python/mxnet/contrib/svrg_optimization/svrg_module.py @@ -0,0 +1,578 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# coding: utf-8 +"""A `SVRGModule` implements the `Module` API by wrapping an auxiliary module to perform +SVRG optimization logic. +""" + +import time +import logging +import mxnet as mx +from mxnet.module import Module +from .svrg_optimizer import _SVRGOptimizer + + +class SVRGModule(Module): + """SVRGModule is a module that encapsulates two Modules to accommodate the SVRG optimization technique. + It is functionally the same as Module API, except it is implemented using SVRG optimization logic. 
+ + Parameters + ---------- + symbol : Symbol + data_names : list of str + Defaults to `('data')` for a typical model used in image classification. + label_names : list of str + Defaults to `('softmax_label')` for a typical model used in image + classification. + logger : Logger + Defaults to `logging`. + context : Context or list of Context + Defaults to ``mx.cpu()``. + work_load_list : list of number + Default ``None``, indicating uniform workload. + fixed_param_names: list of str + Default ``None``, indicating no network parameters are fixed. + state_names : list of str + states are similar to data and label, but not provided by data iterator. + Instead they are initialized to 0 and can be set by `set_states()`. + group2ctxs : dict of str to context or list of context, or list of dict of str to context + Default is `None`. Mapping the `ctx_group` attribute to the context assignment. + compression_params : dict + Specifies type of gradient compression and additional arguments depending + on the type of compression being used. For example, 2bit compression requires a threshold. + Arguments would then be {'type':'2bit', 'threshold':0.5} + See mxnet.KVStore.set_gradient_compression method for more details on gradient compression. + update_freq: int + Specifies the number of times to update the full gradients to be used in the SVRG optimization. For instance, + update_freq = 2 will calculates the gradients over all data every two epochs + Examples + -------- + >>> # An example of declaring and using SVRGModule. + >>> mod = SVRGModule(symbol=lro, data_names=['data'], label_names=['lin_reg_label'], update_freq=2) + >>> mod.fit(di, eval_metric='mse', optimizer='sgd', optimizer_params=(('learning_rate', 0.025),), + >>> num_epoch=num_epoch, kvstore='local') + """ + + def __init__(self, symbol, data_names=('data',), label_names=('softmax_label',), + logger=logging, context=mx.cpu(), work_load_list=None, + fixed_param_names=None, state_names=None, group2ctxs=None, + compression_params=None, update_freq=None): + super(SVRGModule, self).__init__(symbol, data_names=data_names, label_names=label_names, logger=logger, + context=context, work_load_list=work_load_list, + fixed_param_names=fixed_param_names, state_names=state_names, + group2ctxs=group2ctxs, compression_params=compression_params) + + # Type check update_frequency + if isinstance(update_freq, int): + if update_freq <= 0: + raise ValueError("update_freq in SVRGModule must be a positive integer to represent the frequency for " + "calculating full gradients") + self.update_freq = update_freq + else: + raise TypeError("update_freq in SVRGModule must be an integer to represent the frequency for " + "calculating full gradients") + + self._mod_aux = mx.mod.Module(symbol, data_names, label_names, logger, context, work_load_list, + fixed_param_names, state_names, group2ctxs, compression_params) + + self._param_dict = None + self._ctx_len = len(self._context) + + def _reset_bind(self): + """Internal function to reset binded state for both modules.""" + super(SVRGModule, self)._reset_bind() + self._mod_aux._reset_bind() + + def reshape(self, data_shapes, label_shapes=None): + """Reshapes both modules for new input shapes. + + Parameters + ---------- + data_shapes : list of (str, tuple) + Typically is ``data_iter.provide_data``. + label_shapes : list of (str, tuple) + Typically is ``data_iter.provide_label``. 
+ """ + super(SVRGModule, self).reshape(data_shapes, label_shapes=label_shapes) + self._mod_aux.reshape(data_shapes, label_shapes=label_shapes) + + def init_optimizer(self, kvstore='local', optimizer='sgd', + optimizer_params=(('learning_rate', 0.01),), force_init=False): + """Installs and initializes SVRGOptimizer. The SVRGOptimizer is a wrapper class for a regular optimizer that is + passed in and a special AssignmentOptimizer to accumulate the full gradients. If KVStore is 'local' or None, + the full gradients will be accumulated locally without pushing to the KVStore. Otherwise, additional keys will + be pushed to accumulate the full gradients in the KVStore. + + Parameters + ---------- + kvstore : str or KVStore + Default `'local'`. + optimizer : str or Optimizer + Default `'sgd'` + optimizer_params : dict + Default `(('learning_rate', 0.01),)`. The default value is not a dictionary, + just to avoid pylint warning of dangerous default values. + force_init : bool + Default ``False``, indicating whether we should force re-initializing the + optimizer in the case an optimizer is already installed. + """ + + # Init dict for storing average of full gradients for each device + self._param_dict = [{key: mx.nd.zeros(shape=value.shape, ctx=self._context[i]) + for key, value in self.get_params()[0].items()} for i in range(self._ctx_len)] + + svrg_optimizer = self._create_optimizer(_SVRGOptimizer.__name__, default_opt=optimizer, + kvstore=kvstore, optimizer_params=optimizer_params) + + super(SVRGModule, self).init_optimizer(kvstore=kvstore, optimizer=svrg_optimizer, + optimizer_params=optimizer_params, force_init=force_init) + + # Init additional keys for accumulating full grads in KVStore + if self._kvstore: + for idx, param_on_devs in enumerate(self._exec_group.param_arrays): + name = self._exec_group.param_names[idx] + self._kvstore.init(name + "_full", mx.nd.zeros(shape=self._arg_params[name].shape)) + if self._update_on_kvstore: + self._kvstore.pull(name + "_full", param_on_devs, priority=-idx) + + def _create_optimizer(self, optimizer, default_opt, kvstore, optimizer_params): + """Helper function to create a svrg optimizer. SVRG optimizer encapsulates two optimizers and + will redirect update() to the correct optimizer based on the key. + + Parameters + ---------- + kvstore : str or KVStore + Default `'local'`. + optimizer: str + Name for SVRGOptimizer + default_opt : str or Optimizer that was passed in. + optimizer_params : dict + optimizer params that was passed in. 
+ """ + + # code partially copied from mxnet module.init_optimizer() to accomodate svrg_optimizer + batch_size = self._exec_group.batch_size + + (kv_store, update_on_kvstore) = mx.model._create_kvstore(kvstore, self._ctx_len, self._arg_params) + if kv_store and 'dist' in kv_store.type and '_sync' in kv_store.type: + batch_size *= kv_store.num_workers + rescale_grad = 1.0 / batch_size + + idx2name = {} + if update_on_kvstore: + idx2name.update(enumerate(self._exec_group.param_names)) + else: + for k in range(self._ctx_len): + idx2name.update({i * self._ctx_len + k: n + for i, n in enumerate(self._exec_group.param_names)}) + + # update idx2name to include new keys + for key in self._param_dict[0].keys(): + max_key = max(list(idx2name.keys())) + 1 + idx2name[max_key] = key + "_full" + + optimizer_params = dict(optimizer_params) + if 'rescale_grad' not in optimizer_params: + optimizer_params['rescale_grad'] = rescale_grad + optimizer_params["default_optimizer"] = default_opt + optimizer_params["param_idx2name"] = idx2name + optimizer = mx.optimizer.create(optimizer, **optimizer_params) + + return optimizer + + def bind(self, data_shapes, label_shapes=None, for_training=True, + inputs_need_grad=False, force_rebind=False, shared_module=None, grad_req='write'): + """Binds the symbols to construct executors for both two modules. This is necessary before one + can perform computation with the SVRGModule. + + Parameters + ---------- + data_shapes : list of (str, tuple) + Typically is ``data_iter.provide_data``. + label_shapes : list of (str, tuple) + Typically is ``data_iter.provide_label``. + for_training : bool + Default is ``True``. Whether the executors should be bound for training. + inputs_need_grad : bool + Default is ``False``. Whether the gradients to the input data need to be computed. + Typically this is not needed. But this might be needed when implementing composition + of modules. + force_rebind : bool + Default is ``False``. This function does nothing if the executors are already + bound. But with this ``True``, the executors will be forced to rebind. + shared_module : Module + Default is ``None``. This is used in bucketing. When not ``None``, the shared module + essentially corresponds to a different bucket -- a module with different symbol + but with the same sets of parameters (e.g. unrolled RNNs with different lengths). + """ + # force rebinding is typically used when one want to switch from + # training to prediction phase. + super(SVRGModule, self).bind(data_shapes, label_shapes, for_training, inputs_need_grad, force_rebind, + shared_module, grad_req) + + if for_training: + self._mod_aux.bind(data_shapes, label_shapes, for_training, inputs_need_grad, force_rebind, shared_module, + grad_req) + + def forward(self, data_batch, is_train=None): + """Forward computation for both two modules. It supports data batches with different shapes, such as + different batch sizes or different image sizes. + If reshaping of data batch relates to modification of symbol or module, such as + changing image layout ordering or switching from training to predicting, module + rebinding is required. + + See Also + ---------- + :meth:`BaseModule.forward`. + + Parameters + ---------- + data_batch : DataBatch + Could be anything with similar API implemented. + is_train : bool + Default is ``None``, which means ``is_train`` takes the value of ``self.for_training``. 
+ """ + super(SVRGModule, self).forward(data_batch, is_train) + + if is_train: + self._mod_aux.forward(data_batch, is_train) + + def backward(self, out_grads=None): + """Backward computation. + + See Also + ---------- + :meth:`BaseModule.backward`. + + Parameters + ---------- + out_grads : NDArray or list of NDArray, optional + Gradient on the outputs to be propagated back. + This parameter is only needed when bind is called + on outputs that are not a loss function. + """ + super(SVRGModule, self).backward(out_grads) + + if self._mod_aux.binded: + self._mod_aux.backward(out_grads) + + def update(self): + """Updates parameters according to the installed optimizer and the gradients computed + in the previous forward-backward batch. The gradients in the _exec_group will be overwritten + using the gradients calculated by the SVRG update rule. + + When KVStore is used to update parameters for multi-device or multi-machine training, + a copy of the parameters is stored in KVStore. Note that for `row_sparse` parameters, + this function does update the copy of parameters in KVStore, but doesn't broadcast the + updated parameters to all devices / machines. Please call `prepare` to broadcast + `row_sparse` parameters with the next batch of data. + + See Also + ---------- + :meth:`BaseModule.update`. + """ + self._update_svrg_gradients() + super(SVRGModule, self).update() + + def update_full_grads(self, train_data): + """Computes the gradients over all data w.r.t weights of past + m epochs. For distributed env, it will accumulate full grads in the kvstore. + + Parameters + ---------- + train_data: DataIter + Train data iterator + """ + param_names = self._exec_group.param_names + arg, aux = self.get_params() + self._mod_aux.set_params(arg_params=arg, aux_params=aux) + train_data.reset() + nbatch = 0 + padding = 0 + for batch in train_data: + self._mod_aux.forward(batch, is_train=True) + self._mod_aux.backward() + nbatch += 1 + for ctx in range(self._ctx_len): + for index, name in enumerate(param_names): + grads = self._mod_aux._exec_group.grad_arrays[index][ctx] + self._param_dict[ctx][name] = mx.nd.broadcast_add(self._param_dict[ctx][name], grads, axis=0) + padding = batch.pad + + true_num_batch = nbatch - padding / train_data.batch_size + for name in param_names: + grad_list = [] + for i in range(self._ctx_len): + self._param_dict[i][name] /= true_num_batch + grad_list.append(self._param_dict[i][name]) + if self._kvstore: + # If in distributed mode, push a list of gradients from each worker/device to the KVStore + self._accumulate_kvstore(name, grad_list) + + def _accumulate_kvstore(self, key, value): + """Accumulate gradients over all data in the KVStore. In distributed setting, each worker sees a portion of + data. The full gradients will be aggregated from each worker in the KVStore. + + Parameters + ---------- + + key: int or str + Key in the KVStore. + value: NDArray, RowSparseNDArray + Average of the full gradients. + """ + # Accumulate full gradients for current epochs + self._kvstore.push(key + "_full", value) + self._kvstore._barrier() + self._kvstore.pull(key + "_full", value) + + self._allocate_gradients(key, value) + + def _allocate_gradients(self, key, value): + """Allocate average of full gradients accumulated in the KVStore to each device. + + Parameters + ---------- + + key: int or str + Key in the kvstore. + value: List of NDArray, List of RowSparseNDArray + A list of average of the full gradients in the KVStore. 
+ """ + for i in range(self._ctx_len): + self._param_dict[i][key] = value[i] / self._ctx_len + + def _svrg_grads_update_rule(self, g_curr_batch_curr_weight, g_curr_batch_special_weight, + g_special_weight_all_batch): + """Calculates the gradient based on the SVRG update rule. + Parameters + ---------- + g_curr_batch_curr_weight : NDArray + gradients of current weight of self.mod w.r.t current batch of data + g_curr_batch_special_weight: NDArray + gradients of the weight of past m epochs of self._mod_special w.r.t current batch of data + g_special_weight_all_batch: NDArray + average of full gradients over full pass of data + + Returns + ---------- + Gradients calculated using SVRG update rule: + grads = g_curr_batch_curr_weight - g_curr_batch_special_weight + g_special_weight_all_batch + """ + for index, grad in enumerate(g_curr_batch_curr_weight): + grad -= g_curr_batch_special_weight[index] + grad += g_special_weight_all_batch[index] + return g_curr_batch_curr_weight + + def _update_svrg_gradients(self): + """Calculates gradients based on the SVRG update rule. + """ + param_names = self._exec_group.param_names + for ctx in range(self._ctx_len): + for index, name in enumerate(param_names): + g_curr_batch_reg = self._exec_group.grad_arrays[index][ctx] + g_curr_batch_special = self._mod_aux._exec_group.grad_arrays[index][ctx] + g_special_weight_all_batch = self._param_dict[ctx][name] + g_svrg = self._svrg_grads_update_rule(g_curr_batch_reg, g_curr_batch_special, + g_special_weight_all_batch) + self._exec_group.grad_arrays[index][ctx] = g_svrg + + def fit(self, train_data, eval_data=None, eval_metric='acc', + epoch_end_callback=None, batch_end_callback=None, kvstore='local', + optimizer='sgd', optimizer_params=(('learning_rate', 0.01),), + eval_end_callback=None, + eval_batch_end_callback=None, initializer=mx.init.Uniform(0.01), + arg_params=None, aux_params=None, allow_missing=False, + force_rebind=False, force_init=False, begin_epoch=0, num_epoch=None, + validation_metric=None, monitor=None, sparse_row_id_fn=None): + """Trains the module parameters. + Parameters + ---------- + train_data : DataIter + Train DataIter. + eval_data : DataIter + If not ``None``, will be used as validation set and the performance + after each epoch will be evaluated. + eval_metric : str or EvalMetric + Defaults to 'accuracy'. The performance measure used to display during training. + Other possible predefined metrics are: + 'ce' (CrossEntropy), 'f1', 'mae', 'mse', 'rmse', 'top_k_accuracy'. + epoch_end_callback : function or list of functions + Each callback will be called with the current `epoch`, `symbol`, `arg_params` + and `aux_params`. + batch_end_callback : function or list of function + Each callback will be called with a `BatchEndParam`. + kvstore : str or KVStore + Defaults to 'local'. + optimizer : str or Optimizer + Defaults to 'sgd'. + optimizer_params : dict + Defaults to ``(('learning_rate', 0.01),)``. The parameters for + the optimizer constructor. + The default value is not a dict, just to avoid pylint warning on dangerous + default values. + eval_end_callback : function or list of function + These will be called at the end of each full evaluation, with the metrics over + the entire evaluation set. + eval_batch_end_callback : function or list of function + These will be called at the end of each mini-batch during evaluation. + initializer : Initializer + The initializer is called to initialize the module parameters when they are + not already initialized. 
+ arg_params : dict + Defaults to ``None``, if not ``None``, should be existing parameters from a trained + model or loaded from a checkpoint (previously saved model). In this case, + the value here will be used to initialize the module parameters, unless they + are already initialized by the user via a call to `init_params` or `fit`. + `arg_params` has a higher priority than `initializer`. + aux_params : dict + Defaults to ``None``. Similar to `arg_params`, except for auxiliary states. + allow_missing : bool + Defaults to ``False``. Indicates whether to allow missing parameters when `arg_params` + and `aux_params` are not ``None``. If this is ``True``, then the missing parameters + will be initialized via the `initializer`. + force_rebind : bool + Defaults to ``False``. Whether to force rebinding the executors if already bound. + force_init : bool + Defaults to ``False``. Indicates whether to force initialization even if the + parameters are already initialized. + begin_epoch : int + Defaults to 0. Indicates the starting epoch. Usually, if resumed from a + checkpoint saved at a previous training phase at epoch N, then this value should be + N+1. + num_epoch : int + Number of epochs for training. + sparse_row_id_fn : A callback function + The function takes `data_batch` as an input and returns a dict of + str -> NDArray. The resulting dict is used for pulling row_sparse + parameters from the kvstore, where the str key is the name of the param, + and the value is the row id of the param to pull. + validation_metric: str or EvalMetric + The performance measure used to display during validation. + """ + assert num_epoch is not None, 'please specify number of epochs' + + self.bind(data_shapes=train_data.provide_data, label_shapes=train_data.provide_label, + for_training=True, force_rebind=force_rebind) + if monitor is not None: + self.install_monitor(monitor) + self.init_params(initializer=initializer, arg_params=arg_params, aux_params=aux_params, + allow_missing=allow_missing, force_init=force_init) + self.init_optimizer(kvstore=kvstore, optimizer=optimizer, optimizer_params=optimizer_params) + + if validation_metric is None: + validation_metric = eval_metric + if not isinstance(eval_metric, mx.metric.EvalMetric): + eval_metric = mx.metric.create(eval_metric) + + ################################################################################ + # training loop + ################################################################################ + for epoch in range(begin_epoch, num_epoch): + eval_metric.reset() + tic = time.time() + if epoch % self.update_freq == 0: + self.update_full_grads(train_data) + + train_data.reset() + data_iter = iter(train_data) + end_of_batch = False + nbatch = 0 + next_data_batch = next(data_iter) + + while not end_of_batch: + data_batch = next_data_batch + if monitor is not None: + monitor.tic() + + self.forward_backward(data_batch) + self.update() + + if isinstance(data_batch, list): + self.update_metric(eval_metric, [db.label for db in data_batch], pre_sliced=True) + else: + self.update_metric(eval_metric, data_batch.label) + + try: + # pre fetch next batch + next_data_batch = next(data_iter) + self.prepare(next_data_batch, sparse_row_id_fn=sparse_row_id_fn) + except StopIteration: + end_of_batch = True + + if monitor is not None: + monitor.toc_print() + + if end_of_batch: + eval_name_vals = eval_metric.get_name_value() + + if batch_end_callback is not None: + batch_end_params = mx.model.BatchEndParam(epoch=epoch, nbatch=nbatch, + eval_metric=eval_metric, 
locals=locals()) + for callback in mx.base._as_list(batch_end_callback): + callback(batch_end_params) + + nbatch += 1 + for name, val in eval_name_vals: + self.logger.info('Epoch[%d] Train-%s=%f', epoch, name, val) + toc = time.time() + self.logger.info('Epoch[%d] Time cost=%.3f', epoch, (toc - tic)) + + # sync aux params across devices + arg_params, aux_params = self.get_params() + self.set_params(arg_params, aux_params) + + if epoch_end_callback is not None: + for callback in mx.base._as_list(epoch_end_callback): + callback(epoch, self.symbol, arg_params, aux_params) + + # ---------------------------------------- + # evaluation on validation set + if eval_data: + res = self.score(eval_data, validation_metric, + score_end_callback=eval_end_callback, + batch_end_callback=eval_batch_end_callback, epoch=epoch) + for name, val in res: + self.logger.info('Epoch[%d] Validation-%s=%f', epoch, name, val) + + def prepare(self, data_batch, sparse_row_id_fn=None): + """Prepares two modules for processing a data batch. + + Usually involves switching bucket and reshaping. + For modules that contain `row_sparse` parameters in KVStore, + it prepares the `row_sparse` parameters based on the sparse_row_id_fn. + + When KVStore is used to update parameters for multi-device or multi-machine training, + a copy of the parameters are stored in KVStore. Note that for `row_sparse` parameters, + the `update()` updates the copy of parameters in KVStore, but doesn't broadcast + the updated parameters to all devices / machines. The `prepare` function is used to + broadcast `row_sparse` parameters with the next batch of data. + + Parameters + ---------- + data_batch : DataBatch + The current batch of data for forward computation. + + sparse_row_id_fn : A callback function + The function takes `data_batch` as an input and returns a dict of + str -> NDArray. The resulting dict is used for pulling row_sparse + parameters from the kvstore, where the str key is the name of the param, + and the value is the row id of the param to pull. + """ + super(SVRGModule, self).prepare(data_batch, sparse_row_id_fn=sparse_row_id_fn) + self._mod_aux.prepare(data_batch, sparse_row_id_fn=sparse_row_id_fn) diff --git a/python/mxnet/contrib/svrg_optimization/svrg_optimizer.py b/python/mxnet/contrib/svrg_optimization/svrg_optimizer.py new file mode 100644 index 000000000000..0f695a1b2ff0 --- /dev/null +++ b/python/mxnet/contrib/svrg_optimization/svrg_optimizer.py @@ -0,0 +1,171 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""A `_SVRGOptimizer` encapsulates two optimizers to support SVRGModule in single machine and distributed settings. +Both `_AssignmentOptimizer` and `_SVRGOptimizer` are designed to be used with SVRGModule only. 
+""" + + +import mxnet as mx + + +@mx.optimizer.register +class _AssignmentOptimizer(mx.optimizer.Optimizer): + """_AssignmentOptimizer assigns gradients to weights for SVRGModule's full gradients + accumulation in the KVStore. It is a helper optimizer that is designed to be used with SVRGModule only. + """ + def update(self, index, weight, grad, state): + """Assign the gradients to weight for accumulating full gradients in the KVStore across all devices and workers. + + Parameters + ---------- + index : int + The unique index of the parameter into the individual learning + rates and weight decays. Learning rates and weight decay + may be set via `set_lr_mult()` and `set_wd_mult()`, respectively. + weight : NDArray + The parameter to be updated. + grad : NDArray + The gradient of the objective with respect to this parameter. + state: any obj + AssignmentOptimizer will not need to be associated with state. + """ + + weight[:] = grad + + +@mx.optimizer.register +class _SVRGOptimizer(mx.optimizer.Optimizer): + """_SVRGOptimizer is a wrapper class for two optimizers: _AssignmentOptimizer for accumulating full gradients in the + KVStore and a default optimizer that is passed in as a parameter in `mod.init_optimizer()` + The _SVRGOptimizer is designed to be used with SVRGModule only. + + This optimizer accepts the following parameters in addition to those accepted by :class:`.Optimizer`. + + Parameters + ---------- + default_optimizer: str or Optimizer + Optimizer passed-in when invoke on mx.mod.init_optimizer in SVRGModule + """ + + def __init__(self, default_optimizer, **kwargs): + # Reconstruct kwargs to identify additional params for default optimizer + base_param = self._check_params(**kwargs) + super(_SVRGOptimizer, self).__init__(**base_param) + if isinstance(default_optimizer, str): + self.default_opt = mx.optimizer.create(default_optimizer, **kwargs) + else: + self.default_opt = default_optimizer + self.aux_opt = mx.optimizer.create(_AssignmentOptimizer.__name__) + + @staticmethod + def _check_params(**kwargs): + """ Reassemble kwargs to identify additional optimizer params for default optimizers. base_params contains + all the param names in base class Optimizer. + + Parameters + ---------- + kwargs: dict + Parameters for the default optimizer + + Returns + ---------- + default_params: dict + Optimizer parameters that are defined in base class Optimizer + """ + + optimizer_param = dict(kwargs) + base_params = ['rescale_grad', 'param_idx2name', 'wd', 'clip_gradient', 'learning_rate', 'lr_scheduler', 'sym', + 'begin_num_update', 'multi_precision', 'param_dict'] + + default_params = {} + for key, _ in optimizer_param.items(): + if key in base_params: + default_params[key] = optimizer_param[key] + + return default_params + + def update(self, index, weight, grad, state): + """Updates the given parameter using the corresponding gradient and state. If key contains 'full', update with + `_AssignmentOptimizer` otherwise will use default optimizer. + + Parameters + ---------- + index : int + The unique index of the parameter into the individual learning + rates and weight decays. Learning rates and weight decay + may be set via `set_lr_mult()` and `set_wd_mult()`, respectively. + weight : NDArray + The parameter to be updated. + grad : NDArray + The gradient of the objective with respect to this parameter. + state : any obj + The state returned by `create_state()`. 
+ """ + + name = self._check_index(index) + + if "full" in name: + self.aux_opt.update(index, weight, grad, state) + else: + # use the default optimizer + self.default_opt.update(index, weight, grad, state) + + def create_state(self, index, weight): + """Creates auxiliary state for a given weight. + Some optimizers require additional states, e.g. as momentum, in addition + to gradients in order to update weights. This function creates state + for a given weight which will be used in `update`. This function is + called only once for each weight. + + Parameters + ---------- + index : int + An unique index to identify the weight. + weight : NDArray + The weight. + Returns + ------- + state : any obj + The state associated with the weight. + """ + + name = self._check_index(index) + if "full" in name: + return self.aux_opt.create_state(index, weight) + else: + # + return self.default_opt.create_state(index, weight) + + def _check_index(self, index): + """Check index in idx2name to get corresponding param_name + Parameters + ---------- + index : int or str + An unique index to identify the weight. + Returns + ------- + name : str + Name of the Module parameter + """ + + if index in self.idx2name.values(): + # index is a str + name = index + else: + # index is an int + name = self.idx2name[index] + return name diff --git a/tests/python/unittest/test_contrib_svrg_module.py b/tests/python/unittest/test_contrib_svrg_module.py new file mode 100644 index 000000000000..d9e0abaebb27 --- /dev/null +++ b/tests/python/unittest/test_contrib_svrg_module.py @@ -0,0 +1,307 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
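Before the unit tests, a small self-contained sketch of the variance-reduced gradient that `_svrg_grads_update_rule` in `svrg_module.py` computes: the current batch's gradient at the current weights, minus the same batch's gradient at the snapshot ("special") weights, plus the full-data gradient at the snapshot weights. Plain NumPy with a least-squares loss is used here purely to illustrate the rule; it is not the contrib API, and all names below are made up:

```python
import numpy as np

def grad_mse(w, X, y):
    # Gradient of 0.5 * mean((X @ w - y)**2) with respect to w.
    return X.T @ (X @ w - y) / len(y)

rng = np.random.RandomState(0)
X, y = rng.randn(100, 2), rng.randn(100)
batch = slice(0, 10)                       # the current mini-batch

w_curr = np.array([0.5, -0.3])             # current weights
w_snap = np.array([0.0, 0.0])              # snapshot weights from the last full pass
full_grad_snap = grad_mse(w_snap, X, y)    # recomputed once every update_freq epochs

# SVRG rule: g = g_batch(w_curr) - g_batch(w_snap) + g_full(w_snap)
g_svrg = (grad_mse(w_curr, X[batch], y[batch])
          - grad_mse(w_snap, X[batch], y[batch])
          + full_grad_snap)

# One SGD-style step with the corrected gradient (lr matches the examples above).
w_next = w_curr - 0.025 * g_svrg
print(g_svrg, w_next)
```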
+ +import mxnet as mx +import numpy as np +from common import with_seed, assertRaises +from mxnet.contrib.svrg_optimization.svrg_module import SVRGModule +from mxnet.test_utils import * + + +def setup(): + train_data = np.random.randint(1, 5, [1000, 2]) + weights = np.array([1.0, 2.0]) + train_label = train_data.dot(weights) + + di = mx.io.NDArrayIter(train_data, train_label, batch_size=32, shuffle=True, label_name='lin_reg_label') + X = mx.sym.Variable('data') + Y = mx.symbol.Variable('lin_reg_label') + fully_connected_layer = mx.sym.FullyConnected(data=X, name='fc1', num_hidden=1) + lro = mx.sym.LinearRegressionOutput(data=fully_connected_layer, label=Y, name="lro") + + mod = SVRGModule( + symbol=lro, + data_names=['data'], + label_names=['lin_reg_label'], update_freq=2) + mod.bind(data_shapes=di.provide_data, label_shapes=di.provide_label) + mod.init_params(initializer=mx.init.Uniform(0.01), allow_missing=False, force_init=False, allow_extra=False) + + return di, mod + + +def test_bind_module(): + _, mod = setup() + assert mod.binded == True + assert mod._mod_aux.binded == True + + +def test_module_init(): + _, mod = setup() + assert mod._mod_aux is not None + + +def test_module_initializer(): + def regression_model(m): + x = mx.symbol.var("data", stype='csr') + v = mx.symbol.var("v", shape=(m, 1), init=mx.init.Uniform(scale=.1), + stype='row_sparse') + model = mx.symbol.dot(lhs=x, rhs=v) + y = mx.symbol.Variable("label") + model = mx.symbol.LinearRegressionOutput(data=model, label=y, name="out") + return model + + #shape of the data + n, m = 128, 100 + model = regression_model(m) + + data = mx.nd.zeros(shape=(n, m), stype='csr') + label = mx.nd.zeros((n, 1)) + iterator = mx.io.NDArrayIter(data=data, label={'label': label}, + batch_size=n, last_batch_handle='discard') + + # create module + mod = SVRGModule(symbol=model, data_names=['data'], label_names=['label'], update_freq=2) + mod.bind(data_shapes=iterator.provide_data, label_shapes=iterator.provide_label) + mod.init_params() + v = mod._arg_params['v'] + assert v.stype == 'row_sparse' + assert np.sum(v.asnumpy()) != 0 + + +def test_module_bind(): + x = mx.sym.Variable("data") + net = mx.sym.FullyConnected(x, num_hidden=1) + + mod = SVRGModule(symbol=net, data_names=['data'], label_names=None, update_freq=2) + assertRaises(TypeError, mod.bind, data_shapes=['data', mx.nd.zeros(shape=(2, 1))]) + + mod.bind(data_shapes=[('data', (2, 1))]) + assert mod.binded == True + assert mod._mod_aux.binded == True + + +@with_seed() +def test_module_save_load(): + import tempfile + import os + + x = mx.sym.Variable("data") + y = mx.sym.Variable("softmax_label") + net = mx.sym.FullyConnected(x, y, num_hidden=1) + + mod = SVRGModule(symbol=net, data_names=['data'], label_names=['softmax_label'], update_freq=2) + mod.bind(data_shapes=[('data', (1, 1))]) + mod.init_params() + mod.init_optimizer(optimizer='sgd', optimizer_params={'learning_rate': 0.1}) + mod.update() + + # Create tempfile + tmp = tempfile.mkdtemp() + tmp_file = os.path.join(tmp, 'svrg_test_output') + mod.save_checkpoint(tmp_file, 0, save_optimizer_states=True) + + mod2 = SVRGModule.load(tmp_file, 0, load_optimizer_states=True, data_names=('data', )) + mod2.bind(data_shapes=[('data', (1, 1))]) + mod2.init_optimizer(optimizer_params={'learning_rate': 0.1}) + assert mod._symbol.tojson() == mod2._symbol.tojson() + + # Multi-device + mod3 = SVRGModule(symbol=net, data_names=['data'], label_names=['softmax_label'], update_freq=3, + context=[mx.cpu(0), mx.cpu(1)]) + 
mod3.bind(data_shapes=[('data', (10, 10))]) + mod3.init_params() + mod3.init_optimizer(optimizer_params={'learning_rate': 1.0}) + mod3.update() + mod3.save_checkpoint(tmp_file, 0, save_optimizer_states=True) + + mod4 = SVRGModule.load(tmp_file, 0, load_optimizer_states=True, data_names=('data', )) + mod4.bind(data_shapes=[('data', (10, 10))]) + mod4.init_optimizer(optimizer_params={'learning_rate': 1.0}) + assert mod3._symbol.tojson() == mod4._symbol.tojson() + + +@with_seed() +def test_svrgmodule_reshape(): + data = mx.sym.Variable("data") + sym = mx.sym.FullyConnected(data=data, num_hidden=4, name='fc') + + dshape=(3, 4) + mod = SVRGModule(sym, data_names=["data"], label_names=None, context=[mx.cpu(0), mx.cpu(1)], update_freq=2) + mod.bind(data_shapes=[('data', dshape)]) + mod.init_params() + mod._mod_aux.init_params() + mod.init_optimizer(optimizer_params={"learning_rate": 1.0}) + + data_batch = mx.io.DataBatch(data=[mx.nd.ones(dshape)], label=None) + mod.forward(data_batch) + mod.backward([mx.nd.ones(dshape)]) + mod.update() + assert mod.get_outputs()[0].shape == dshape + + dshape = (2, 4) + mod.reshape(data_shapes=[('data', dshape)]) + mod.forward(mx.io.DataBatch(data=[mx.nd.ones(dshape)], + label=None)) + mod.backward([mx.nd.ones(dshape)]) + mod.update() + assert mod.get_outputs()[0].shape == dshape + + +@with_seed() +def test_update_full_grad(): + def create_network(): + train_data = np.random.randint(1, 5, [10, 2]) + weights = np.array([1.0, 2.0]) + train_label = train_data.dot(weights) + + di = mx.io.NDArrayIter(train_data, train_label, batch_size=5, shuffle=True, label_name='lin_reg_label') + X = mx.sym.Variable('data') + Y = mx.symbol.Variable('lin_reg_label') + fully_connected_layer = mx.sym.FullyConnected(data=X, name='fc1', num_hidden=1) + lro = mx.sym.LinearRegressionOutput(data=fully_connected_layer, label=Y, name="lro") + + mod = SVRGModule( + symbol=lro, + data_names=['data'], + label_names=['lin_reg_label'], update_freq=2) + mod.bind(data_shapes=di.provide_data, label_shapes=di.provide_label) + mod.init_params(initializer=mx.init.One(), allow_missing=False, force_init=False, allow_extra=False) + mod.init_optimizer(kvstore='local', optimizer='sgd', optimizer_params=(('learning_rate', 0.01),), + force_init=False) + return di, mod + + di, svrg_mod = create_network() + + # Calculates the average of full gradients over number batches + full_grads_weights = mx.nd.zeros(shape=svrg_mod.get_params()[0]['fc1_weight'].shape) + arg, aux = svrg_mod.get_params() + svrg_mod._mod_aux.set_params(arg_params=arg, aux_params=aux) + num_batch = 2 + + for batch in di: + svrg_mod.forward(batch) + svrg_mod.backward() + full_grads_weights = mx.nd.broadcast_add(svrg_mod._exec_group.grad_arrays[0][0], full_grads_weights, axis=0) + full_grads_weights /= num_batch + + di.reset() + svrg_mod.update_full_grads(di) + assert same(full_grads_weights, svrg_mod._param_dict[0]['fc1_weight']) + + +@with_seed() +def test_svrg_with_sgd(): + def create_module_with_sgd(): + train_data = np.random.randint(1, 5, [100, 2]) + weights = np.array([1.0, 2.0]) + train_label = train_data.dot(weights) + + di = mx.io.NDArrayIter(train_data, train_label, batch_size=10, shuffle=True, label_name='lin_reg_label') + X = mx.sym.Variable('data') + Y = mx.symbol.Variable('lin_reg_label') + fully_connected_layer = mx.sym.FullyConnected(data=X, name='fc1', num_hidden=1) + lro = mx.sym.LinearRegressionOutput(data=fully_connected_layer, label=Y, name="lro") + + reg_mod = mx.mod.Module( + symbol=lro, + data_names=['data'], + 
label_names=['lin_reg_label']) + reg_mod.bind(data_shapes=di.provide_data, label_shapes=di.provide_label) + reg_mod.init_params(initializer=mx.init.One(), allow_missing=False, force_init=False, allow_extra=False) + reg_mod.init_optimizer(kvstore='local', optimizer='sgd', optimizer_params=(('learning_rate', 0.01),)) + + svrg_mod = SVRGModule(symbol=lro, + data_names=['data'], + label_names=['lin_reg_label'], + update_freq=2) + svrg_mod.bind(data_shapes=di.provide_data, label_shapes=di.provide_label) + svrg_mod.init_params(initializer=mx.init.One(), allow_missing=False, force_init=False, allow_extra=False) + svrg_mod.init_optimizer(kvstore='local', optimizer='sgd', optimizer_params=(('learning_rate', 0.01),)) + + return di,reg_mod, svrg_mod + + di, reg_mod, svrg_mod = create_module_with_sgd() + num_epoch = 10 + + # Use metric MSE + metrics = mx.metric.create("mse") + + # Train with SVRGModule + for e in range(num_epoch): + metrics.reset() + if e % svrg_mod.update_freq == 0: + svrg_mod.update_full_grads(di) + di.reset() + for batch in di: + svrg_mod.forward_backward(data_batch=batch) + svrg_mod.update() + svrg_mod.update_metric(metrics, batch.label) + svrg_mse = metrics.get()[1] + + # Train with SGD standard Module + di.reset() + for e in range(num_epoch): + metrics.reset() + di.reset() + for batch in di: + reg_mod.forward_backward(data_batch=batch) + reg_mod.update() + reg_mod.update_metric(metrics, batch.label) + sgd_mse = metrics.get()[1] + + assert svrg_mse < sgd_mse + + +@with_seed() +def test_accumulate_kvstore(): + # Test KVStore behavior when push a list of values + kv = mx.kv.create('local') + kv.init("fc1_weight", mx.nd.zeros(shape=(1, 2))) + kv.init("fc1_weight_full", mx.nd.zeros(shape=(1, 2))) + b = [mx.nd.ones(shape=(1, 2)) for i in range(4)] + a = mx.nd.zeros(shape=(1, 2)) + kv.push("fc1_weight_full", b) + kv.pull("fc1_weight_full", out=a) + assert same(a, [mx.nd.array([4, 4])]) + assert kv.num_workers == 1 + + # Test accumulate in KVStore and allocate gradients + kv_test = mx.kv.create('local') + _, svrg_mod = setup() + svrg_mod.init_optimizer(kvstore=kv_test, optimizer='sgd', optimizer_params=(('learning_rate', 0.01),), + force_init=False) + svrg_mod._accumulate_kvstore("fc1_weight", b) + assert len(svrg_mod._param_dict) == svrg_mod._ctx_len + assert same(svrg_mod._param_dict[0]["fc1_weight"], b[0]) + + +@with_seed() +def test_fit(): + di, mod = setup() + num_epoch = 100 + metric = mx.metric.create("mse") + mod.fit(di, eval_metric=metric, optimizer='sgd', optimizer_params=(('learning_rate', 0.025),), num_epoch=num_epoch, + kvstore='local') + + # Estimated MSE for using SGD optimizer of lr = 0.025, SVRG MSE should be smaller + estimated_mse = 1e-5 + assert metric.get()[1] < estimated_mse + + +if __name__ == "__main__": + import nose + nose.runmodule() diff --git a/tests/python/unittest/test_contrib_svrg_optimizer.py b/tests/python/unittest/test_contrib_svrg_optimizer.py new file mode 100644 index 000000000000..f7d90d12872f --- /dev/null +++ b/tests/python/unittest/test_contrib_svrg_optimizer.py @@ -0,0 +1,101 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import numpy as np +import mxnet as mx +from mxnet.test_utils import same +from mxnet.contrib.svrg_optimization.svrg_module import SVRGModule +from mxnet.contrib.svrg_optimization.svrg_optimizer import _SVRGOptimizer + + +def create_network(): + + train_data = np.random.randint(1, 5, [1000, 2]) + weights = np.array([1.0, 2.0]) + train_label = train_data.dot(weights) + + batch_size = 32 + + di = mx.io.NDArrayIter(train_data, train_label, batch_size=batch_size, shuffle=True, label_name='lin_reg_label') + X = mx.sym.Variable('data') + Y = mx.symbol.Variable('lin_reg_label') + fully_connected_layer = mx.sym.FullyConnected(data=X, name='fc1', num_hidden=1) + lro = mx.sym.LinearRegressionOutput(data=fully_connected_layer, label=Y, name="lro") + + mod = SVRGModule( + symbol=lro, + data_names=['data'], + label_names=['lin_reg_label'], update_freq=2 + ) + + mod.bind(data_shapes=di.provide_data, label_shapes=di.provide_label) + mod.init_params(initializer=mx.init.Uniform(0.01), allow_missing=False, + force_init=False, allow_extra=False) + + return di, mod + + +def test_init_svrg_optimizer(): + _, mod = create_network() + + kv = mx.kv.create('local') + mod.init_optimizer(kvstore=kv, optimizer='sgd', optimizer_params=(('learning_rate', 0.01),), + force_init=False) + + assert type(mod._optimizer).__name__ == _SVRGOptimizer.__name__ + + +def test_svrg_optimizer_constructor(): + kv = mx.kv.create('local') + svrg_optimizer = _SVRGOptimizer(default_optimizer='sgd', learning_rate=-1.0) + kv.set_optimizer(svrg_optimizer) + + assert svrg_optimizer.default_opt.lr == -1.0 + + +def test_kvstore_init_aux_keys(): + param_idx2name = {0: "weight", 1: "weight_full"} + + svrg_optimizer = _SVRGOptimizer(default_optimizer='sgd', param_idx2name= param_idx2name, learning_rate=1.0) + kv = mx.kv.create('local') + kv.set_optimizer(svrg_optimizer) + + # Use default sgd optimizer + param_weight_init = mx.nd.array([0, 0, 0]) + param_weight_update = mx.nd.array([1, 1, 1]) + + kv.init(0, param_weight_init) + kv.push(0, param_weight_update) + kv.pull(0, param_weight_init) + + param_weight_full_init = mx.nd.array([1, 1, 1]) + param_weight_full_update = mx.nd.array([2, 2, 2]) + + # Use AssignmentOptimizer + kv.init(1, param_weight_full_init) + kv.push(1, param_weight_full_update) + kv.pull(1, param_weight_full_init) + + # updated weights using default sgd optimizer + assert same(param_weight_init.asnumpy(), np.array([-1, -1, -1])) + # updated with AssignmentOptimizer + assert same(param_weight_full_init.asnumpy(), np.array([2, 2, 2])) + + +if __name__ == "__main__": + import nose + nose.runmodule() From ac4ef212f6269469f3f3827da49e43fb42f1398f Mon Sep 17 00:00:00 2001 From: Vandana Kannan Date: Mon, 10 Sep 2018 11:59:01 -0700 Subject: [PATCH 112/160] ONNX export - Clip operator (#12457) --- .../contrib/onnx/mx2onnx/_op_translations.py | 24 +++++++++++++++++++ .../onnx/export/onnx_backend_test.py | 3 ++- 2 files changed, 26 insertions(+), 1 deletion(-) diff --git a/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py b/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py index 0960776251c4..3ffac96a14e1 100644 --- 
a/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py +++ b/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py @@ -1057,6 +1057,30 @@ def convert_flatten(node, **kwargs): ) return [flatten_node] +@mx_op.register("clip") +def convert_clip(node, **kwargs): + """Map MXNet's Clip operator attributes to onnx's Clip operator + and return the created node. + """ + helper, _, _ = import_onnx_modules() + name = node["name"] + input_idx = kwargs["index_lookup"][node["inputs"][0][0]] + proc_nodes = kwargs["proc_nodes"] + input_node = proc_nodes[input_idx].name + attrs = node["attrs"] + a_min = np.float(attrs.get('a_min', -np.inf)) + a_max = np.float(attrs.get('a_max', np.inf)) + + clip_node = helper.make_node( + "Clip", + [input_node], + [name], + name=name, + min=a_min, + max=a_max + ) + return [clip_node] + def scalar_op_helper(node, op_name, **kwargs): """Helper function for scalar arithmetic operations""" diff --git a/tests/python-pytest/onnx/export/onnx_backend_test.py b/tests/python-pytest/onnx/export/onnx_backend_test.py index 19bf6993e7cd..01ae09402ef5 100644 --- a/tests/python-pytest/onnx/export/onnx_backend_test.py +++ b/tests/python-pytest/onnx/export/onnx_backend_test.py @@ -89,7 +89,8 @@ 'test_operator_exp', 'test_operator_maxpool', 'test_operator_params', - 'test_operator_permute2' + 'test_operator_permute2', + 'test_clip' ] BASIC_MODEL_TESTS = [ From c8c3b04996113bc05c1384e597b6a80df544177b Mon Sep 17 00:00:00 2001 From: Sam Skalicky Date: Mon, 10 Sep 2018 15:56:21 -0700 Subject: [PATCH 113/160] [MXNET-853] Fix for smooth_l1 operator scalar default value (#12284) * changed smooth_l1 operator implementation to not use helper macros since they do not provide enough support for checking for arguments and setting custom default values * added testcase for smooth_l1 operator scalar default value * fixed whitespace * added curly braces for if/else to match mxnet style * added more curly braces --- .../elemwise_binary_scalar_op_extended.cc | 35 +++++++++++++++---- tests/python/unittest/test_operator.py | 4 +++ 2 files changed, 33 insertions(+), 6 deletions(-) diff --git a/src/operator/tensor/elemwise_binary_scalar_op_extended.cc b/src/operator/tensor/elemwise_binary_scalar_op_extended.cc index 9870342ea402..a0c4149d5c5f 100644 --- a/src/operator/tensor/elemwise_binary_scalar_op_extended.cc +++ b/src/operator/tensor/elemwise_binary_scalar_op_extended.cc @@ -83,7 +83,7 @@ MXNET_OPERATOR_REGISTER_BINARY(_backward_hypot_scalar) .set_attr("FCompute", BinaryScalarOp::Backward< cpu, mshadow_op::hypot_grad_left>); -MXNET_OPERATOR_REGISTER_BINARY_SCALAR(smooth_l1) +NNVM_REGISTER_OP(smooth_l1) .describe(R"code(Calculate Smooth L1 Loss(lhs, scalar) by summing .. 
math:: @@ -98,17 +98,40 @@ where :math:`x` is an element of the tensor *lhs* and :math:`\sigma` is the scal Example:: + smooth_l1([1, 2, 3, 4]) = [0.5, 1.5, 2.5, 3.5] smooth_l1([1, 2, 3, 4], scalar=1) = [0.5, 1.5, 2.5, 3.5] )code" ADD_FILELINE) -.set_attr("FCompute", BinaryScalarOp::Compute< - cpu, mshadow_op::smooth_l1_loss>) +.set_num_inputs(1) +.set_num_outputs(1) +.set_attr_parser([](NodeAttrs* attrs) { + if (attrs->dict.find("scalar") != attrs->dict.end()) { + attrs->parsed = std::stod(attrs->dict["scalar"]); + } else { + attrs->parsed = 1.0; + } + }) +.set_attr("FInferShape", ElemwiseShape<1, 1>) +.set_attr("FInferType", ElemwiseType<1, 1>) +.set_attr("FInplaceOption", + [](const NodeAttrs& attrs){ + return std::vector >{{0, 0}}; + }) +.add_argument("data", "NDArray-or-Symbol", "source input") +.add_argument("scalar", "float", "scalar input") +.set_attr("FCompute", BinaryScalarOp::Compute) .set_attr("FGradient", ElemwiseGradUseIn{ "_backward_smooth_l1" }); MXNET_OPERATOR_REGISTER_BINARY(_backward_smooth_l1) -.set_attr_parser([](NodeAttrs *attrs) { attrs->parsed = std::stod(attrs->dict["scalar"]); }) -.set_attr("FCompute", BinaryScalarOp::Backward< - cpu, mshadow_op::smooth_l1_gradient>); + .set_attr_parser([](NodeAttrs *attrs) { + if (attrs->dict.find("scalar") != attrs->dict.end()) { + attrs->parsed = std::stod(attrs->dict["scalar"]); + } else { + attrs->parsed = 1.0; + } +}) +.set_attr("FCompute", + BinaryScalarOp::Backward); } // namespace op } // namespace mxnet diff --git a/tests/python/unittest/test_operator.py b/tests/python/unittest/test_operator.py index 9842a69e18d4..55a46ca2e93c 100644 --- a/tests/python/unittest/test_operator.py +++ b/tests/python/unittest/test_operator.py @@ -5956,6 +5956,10 @@ def test_unary_math_operators(): lambda x: np_smooth_l1(x, 1.), lambda x: np_smooth_l1_grad(x, 1.), -2.0, 2.0], + 'smooth_l1_sig_default': [lambda x: mx.sym.smooth_l1(x), + lambda x: np_smooth_l1(x, 1.), + lambda x: np_smooth_l1_grad(x, 1.), + -2.0, 2.0], 'smooth_l1_sig2': [lambda x: mx.sym.smooth_l1(x, scalar=2.), lambda x: np_smooth_l1(x, 2.), lambda x: np_smooth_l1_grad(x, 2.), From 20a087ff96cd982d3da3e0eec1b61f1daaba9f0a Mon Sep 17 00:00:00 2001 From: Aaron Markham Date: Mon, 10 Sep 2018 16:53:34 -0700 Subject: [PATCH 114/160] update C++ example so it is easier to run (#12397) * update example so it is easier to run * updated per feedback; removed CPP prerequisite --- .../predict-cpp/README.md | 54 +++++++++++-------- 1 file changed, 32 insertions(+), 22 deletions(-) diff --git a/example/image-classification/predict-cpp/README.md b/example/image-classification/predict-cpp/README.md index b3433ff2f824..69f63d706006 100644 --- a/example/image-classification/predict-cpp/README.md +++ b/example/image-classification/predict-cpp/README.md @@ -1,13 +1,23 @@ -# Image Classification Example of C++ -This is a simple predictor which shows how to use c api for image classfication. +# Image Classification Example Using the C Predict API +This is a simple predictor which shows how to use the MXNet C Predict API for image classification with a pre-trained ImageNet model. -It uses opencv for image reading +## Prerequisites -# How to Use +* OpenCV for image processing: `USE_OPENCV` is set to true by default when [building from source](https://mxnet.incubator.apache.org/install/build_from_source.html) -## Build -1. Edit image-classification-predict.cc file, change the following lines to your model paths: - ```bash +## How to Use this Example + +### Download the Model Artifacts +1. 
You will need the model artifacts for the Inception ImageNet model. You can download these from http://data.mxnet.io/mxnet/models/imagenet/inception-bn/ +2. Place them into a `model/Inception/` subfolder, or if not, you will need to edit the source file and update the paths in the Build step. + +* [model/Inception/Inception-BN-symbol.json](http://data.mxnet.io/mxnet/models/imagenet/inception-bn/Inception-BN-symbol.json) +* [model/Inception/Inception-BN-0126.params](http://data.mxnet.io/mxnet/models/imagenet/inception-bn/Inception-BN-0126.params) +* [model/Inception/synset.txt](http://data.mxnet.io/mxnet/models/imagenet/synset.txt) + +### Build +1. If using a different location for the model artifacts, edit `image-classification-predict.cc` file, and change the following lines to your artifacts' paths: + ```c // Models path for your model, you have to modify it std::string json_file = "model/Inception/Inception-BN-symbol.json"; std::string param_file = "model/Inception/Inception-BN-0126.params"; @@ -16,41 +26,43 @@ It uses opencv for image reading ``` 2. You may also want to change the image size and channels: - ```bash + ```c // Image size and channels int width = 224; int height = 224; int channels = 3; ``` - + 3. Simply just use our Makefile to build: ```bash make ``` -## Usage -Run: +### Run +Run the example by passing it an image that you want to classify. If you don't have one handy, run the following to get one: + ```bash - ./image-classification-predict apple.jpg + wget https://upload.wikimedia.org/wikipedia/commons/thumb/f/f4/Honeycrisp.jpg/1920px-Honeycrisp.jpg + ``` + +Then run the `image-classification-predict` program, passing the image as the argument. + + ```bash + ./image-classification-predict 1920px-Honeycrisp.jpg ``` -The only parameter is the path of the test image. ## Tips -* The model used in the sample can be downloaded here: -http://pan.baidu.com/s/1sjXKrqX -or here: -http://data.mxnet.io/mxnet/models/imagenet/ -* If you don't run it in the mxnet root path, maybe you will need to copy lib folder here. +* If you don't run it in the MXNet root path, you may need to copy the `lib` folder here. 
-# Author +## Author * **Xiao Liu** * E-mail: liuxiao@foxmail.com * Homepage: [www.liuxiao.org](http://www.liuxiao.org/) -# Thanks +## Thanks * pertusa (for Makefile and image reading check) * caprice-j (for reading function) @@ -58,5 +70,3 @@ http://data.mxnet.io/mxnet/models/imagenet/ * sofiawu (for sample model) * piiswrong and tqchen (for useful coding suggestions) - - From d789101eba03f971d1f1d86c28efb19a7d99cce0 Mon Sep 17 00:00:00 2001 From: Aaron Markham Date: Mon, 10 Sep 2018 16:54:03 -0700 Subject: [PATCH 115/160] Add Python API docs for test_utils and visualization (#12455) * adding missing python modules as tools folder for docs * adding missing python modules as tools folder for docs * adjusted paths after testing build --- docs/api/python/index.md | 12 +++++++++++- docs/api/python/tools/test_utils.md | 27 ++++++++++++++++++++++++++ docs/api/python/tools/visualization.md | 27 ++++++++++++++++++++++++++ 3 files changed, 65 insertions(+), 1 deletion(-) create mode 100644 docs/api/python/tools/test_utils.md create mode 100644 docs/api/python/tools/visualization.md diff --git a/docs/api/python/index.md b/docs/api/python/index.md index 15d1045a93e4..8f60bcd0f13c 100644 --- a/docs/api/python/index.md +++ b/docs/api/python/index.md @@ -177,4 +177,14 @@ Code examples are placed throughout the API documentation and these can be run a :maxdepth: 1 symbol_in_pictures/symbol_in_pictures.md -``` \ No newline at end of file +``` + +## Tools + +```eval_rst +.. toctree:: + :maxdepth: 1 + + tools/test_utils.md + tools/visualization.md +``` diff --git a/docs/api/python/tools/test_utils.md b/docs/api/python/tools/test_utils.md new file mode 100644 index 000000000000..e29d9b99a844 --- /dev/null +++ b/docs/api/python/tools/test_utils.md @@ -0,0 +1,27 @@ +# Test Utilities + +This module has a variety of tools that help using and testing MXNet. + +```eval_rst + .. currentmodule:: mxnet.test_utils +``` + +```eval_rst +.. autosummary:: + :nosignatures: + + mxnet.test_utils +``` + +## API Reference + + + +```eval_rst + +.. automodule:: mxnet.test_utils + :members: + +``` + + diff --git a/docs/api/python/tools/visualization.md b/docs/api/python/tools/visualization.md new file mode 100644 index 000000000000..6faa24980dd8 --- /dev/null +++ b/docs/api/python/tools/visualization.md @@ -0,0 +1,27 @@ +# Visualization + +This module contains visualization features. + +```eval_rst + .. currentmodule:: mxnet.visualization +``` + +```eval_rst +.. autosummary:: + :nosignatures: + + mxnet.visualization +``` + +## API Reference + + + +```eval_rst + +.. 
automodule:: mxnet.visualization + :members: + +``` + + From acede67649bf40bdbcf0081d4aea0a13aba8f500 Mon Sep 17 00:00:00 2001 From: Aaron Markham Date: Mon, 10 Sep 2018 16:54:17 -0700 Subject: [PATCH 116/160] fix subscribe links, remove disabled icons (#12474) * fix subscribe links, remove disabled icons * update slack channel --- docs/community/mxnet_channels.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/community/mxnet_channels.md b/docs/community/mxnet_channels.md index 18dc1bc55ec8..98cce941e236 100644 --- a/docs/community/mxnet_channels.md +++ b/docs/community/mxnet_channels.md @@ -2,9 +2,9 @@ Converse with the MXNet community via the following channels: -- [Forum](https://discuss.mxnet.io/): [discuss.mxnet.io](https://discuss.mxnet.io/) -- [MXNet Apache developer mailing list](https://lists.apache.org/list.html?dev@mxnet.apache.org) (dev@mxnet.apache.org): To subscribe, send an email to dev-subscribe@mxnet.apache.org -- [MXNet Apache user mailing list](https://lists.apache.org/list.html?user@mxnet.apache.org) (user@mxnet.apache.org): To subscribe, send an email to user-subscribe@mxnet.apache.org -- [MXNet Slack channel](https://apache-mxnet.slack.com): To request an invitation to the channel please subscribe to the mailing list above and then email: dev@mxnet.apache.org +- [Forum](https://discuss.mxnet.io/): [discuss.mxnet.io](https://discuss.mxnet.io/) +- [MXNet Apache developer mailing list](https://lists.apache.org/list.html?dev@mxnet.apache.org) (dev@mxnet.apache.org): To subscribe, send an email to user-subscribe@mxnet.apache.org +- [MXNet Apache user mailing list](https://lists.apache.org/list.html?user@mxnet.apache.org) (user@mxnet.apache.org): To subscribe, send an email to dev-subscribe@mxnet.apache.org +- [MXNet Slack channel](https://the-asf.slack.com/) (Channel: #mxnet): To request an invitation to the channel please subscribe to the mailing list above and then email: dev@mxnet.apache.org Note: if you have an email address with apache.org, you do not need an approval to join the MXNet Slack channel. 
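As a usage footnote to the `test_utils.md` and `visualization.md` pages introduced a couple of commits above, here is a minimal sketch (not part of any commit in this series) of how `mxnet.test_utils` and `mxnet.visualization` are typically exercised; the toy network, shapes, and tolerances below are illustrative assumptions rather than values taken from the patch.

```python
import numpy as np
import mxnet as mx

# Toy symbol to summarize with the visualization module.
data = mx.sym.Variable('data')
net = mx.sym.FullyConnected(data=data, num_hidden=64, name='fc1')
net = mx.sym.Activation(data=net, act_type='relu', name='relu1')
net = mx.sym.FullyConnected(data=net, num_hidden=10, name='fc2')

# mxnet.visualization (aliased as mx.viz): text summary of the graph.
# plot_network() is the graphical alternative when graphviz is installed.
mx.viz.print_summary(net, shape={'data': (1, 100)})

# mxnet.test_utils: numeric comparison helper used throughout the test suite.
a = np.random.uniform(size=(3, 4))
mx.test_utils.assert_almost_equal(a, a.copy(), rtol=1e-3, atol=1e-5)
```
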
From 9ec4879e4abc16e4dff010cb2648f88625509046 Mon Sep 17 00:00:00 2001 From: Stephanie Jingyi Yuan Date: Tue, 11 Sep 2018 00:41:14 -0400 Subject: [PATCH 117/160] Temporarily disable flaky tests (#12513) --- tests/python/unittest/test_contrib_svrg_module.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/python/unittest/test_contrib_svrg_module.py b/tests/python/unittest/test_contrib_svrg_module.py index d9e0abaebb27..dd4618e2e82d 100644 --- a/tests/python/unittest/test_contrib_svrg_module.py +++ b/tests/python/unittest/test_contrib_svrg_module.py @@ -20,7 +20,7 @@ from common import with_seed, assertRaises from mxnet.contrib.svrg_optimization.svrg_module import SVRGModule from mxnet.test_utils import * - +import unittest def setup(): train_data = np.random.randint(1, 5, [1000, 2]) @@ -134,6 +134,7 @@ def test_module_save_load(): assert mod3._symbol.tojson() == mod4._symbol.tojson() +@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/12510") @with_seed() def test_svrgmodule_reshape(): data = mx.sym.Variable("data") @@ -289,6 +290,7 @@ def test_accumulate_kvstore(): assert same(svrg_mod._param_dict[0]["fc1_weight"], b[0]) +@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/12510") @with_seed() def test_fit(): di, mod = setup() From 597a637fb1b8fa5b16331218cda8be61ce0ee202 Mon Sep 17 00:00:00 2001 From: Jake Lee Date: Mon, 10 Sep 2018 22:15:00 -0700 Subject: [PATCH 118/160] Change the way NDArrayIter handle the last batch (#12285) * 1. move the shuffle to the reset 2. modify the roll_over behavior accordingly * refactor the concat part * refactor the code * implement unit test for last_batch_handle * refactor the getdata part * add docstring and refine the code according to linter * 1. add test case for NDArrayIter_h5py 2. refactor the implementation * update contributions doc * fix wording * update doc for roll_over * 1. add test for second iteration of roll_over 2. add shuffle test case * fix some wording and refine the variables naming * move utility function to new file * move utility function to io_utils.py * change shuffle function name to avoid redefining name * make io as a module * rename the utility functions * disable wildcard-import --- CONTRIBUTORS.md | 1 + python/mxnet/io/__init__.py | 29 ++++ python/mxnet/{ => io}/io.py | 280 ++++++++++++++++--------------- python/mxnet/io/utils.py | 86 ++++++++++ tests/python/unittest/test_io.py | 122 ++++++++------ 5 files changed, 328 insertions(+), 190 deletions(-) create mode 100644 python/mxnet/io/__init__.py rename python/mxnet/{ => io}/io.py (82%) create mode 100644 python/mxnet/io/utils.py diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md index 8d8aeaca73e4..1c005d57c4a6 100644 --- a/CONTRIBUTORS.md +++ b/CONTRIBUTORS.md @@ -178,3 +178,4 @@ List of Contributors * [Aaron Markham](https://github.com/aaronmarkham) * [Sam Skalicky](https://github.com/samskalicky) * [Per Goncalves da Silva](https://github.com/perdasilva) +* [Cheng-Che Lee](https://github.com/stu1130) diff --git a/python/mxnet/io/__init__.py b/python/mxnet/io/__init__.py new file mode 100644 index 000000000000..5c5e2e68d84a --- /dev/null +++ b/python/mxnet/io/__init__.py @@ -0,0 +1,29 @@ +#!/usr/bin/env python + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# coding: utf-8 +# pylint: disable=wildcard-import +""" Data iterators for common data formats and utility functions.""" +from __future__ import absolute_import + +from . import io +from .io import * + +from . import utils +from .utils import * diff --git a/python/mxnet/io.py b/python/mxnet/io/io.py similarity index 82% rename from python/mxnet/io.py rename to python/mxnet/io/io.py index 884e9294741a..2ae3e70045fb 100644 --- a/python/mxnet/io.py +++ b/python/mxnet/io/io.py @@ -17,30 +17,26 @@ """Data iterators for common data formats.""" from __future__ import absolute_import -from collections import OrderedDict, namedtuple +from collections import namedtuple import sys import ctypes import logging import threading -try: - import h5py -except ImportError: - h5py = None import numpy as np -from .base import _LIB -from .base import c_str_array, mx_uint, py_str -from .base import DataIterHandle, NDArrayHandle -from .base import mx_real_t -from .base import check_call, build_param_doc as _build_param_doc -from .ndarray import NDArray -from .ndarray.sparse import CSRNDArray -from .ndarray.sparse import array as sparse_array -from .ndarray import _ndarray_cls -from .ndarray import array -from .ndarray import concatenate -from .ndarray import arange -from .ndarray.random import shuffle as random_shuffle + +from ..base import _LIB +from ..base import c_str_array, mx_uint, py_str +from ..base import DataIterHandle, NDArrayHandle +from ..base import mx_real_t +from ..base import check_call, build_param_doc as _build_param_doc +from ..ndarray import NDArray +from ..ndarray.sparse import CSRNDArray +from ..ndarray import _ndarray_cls +from ..ndarray import array +from ..ndarray import concat + +from .utils import init_data, has_instance, getdata_by_idx class DataDesc(namedtuple('DataDesc', ['name', 'shape'])): """DataDesc is used to store name, shape, type and layout @@ -489,59 +485,6 @@ def getindex(self): def getpad(self): return self.current_batch.pad -def _init_data(data, allow_empty, default_name): - """Convert data into canonical form.""" - assert (data is not None) or allow_empty - if data is None: - data = [] - - if isinstance(data, (np.ndarray, NDArray, h5py.Dataset) - if h5py else (np.ndarray, NDArray)): - data = [data] - if isinstance(data, list): - if not allow_empty: - assert(len(data) > 0) - if len(data) == 1: - data = OrderedDict([(default_name, data[0])]) # pylint: disable=redefined-variable-type - else: - data = OrderedDict( # pylint: disable=redefined-variable-type - [('_%d_%s' % (i, default_name), d) for i, d in enumerate(data)]) - if not isinstance(data, dict): - raise TypeError("Input must be NDArray, numpy.ndarray, h5py.Dataset " + \ - "a list of them or dict with them as values") - for k, v in data.items(): - if not isinstance(v, (NDArray, h5py.Dataset) if h5py else NDArray): - try: - data[k] = array(v) - except: - raise TypeError(("Invalid type '%s' for %s, " % (type(v), k)) + \ - "should be NDArray, 
numpy.ndarray or h5py.Dataset") - - return list(sorted(data.items())) - -def _has_instance(data, dtype): - """Return True if ``data`` has instance of ``dtype``. - This function is called after _init_data. - ``data`` is a list of (str, NDArray)""" - for item in data: - _, arr = item - if isinstance(arr, dtype): - return True - return False - -def _shuffle(data, idx): - """Shuffle the data.""" - shuffle_data = [] - - for k, v in data: - if (isinstance(v, h5py.Dataset) if h5py else False): - shuffle_data.append((k, v)) - elif isinstance(v, CSRNDArray): - shuffle_data.append((k, sparse_array(v.asscipy()[idx], v.context))) - else: - shuffle_data.append((k, array(v.asnumpy()[idx], v.context))) - - return shuffle_data class NDArrayIter(DataIter): """Returns an iterator for ``mx.nd.NDArray``, ``numpy.ndarray``, ``h5py.Dataset`` @@ -601,6 +544,22 @@ class NDArrayIter(DataIter): ... >>> batchidx # Remaining examples are discarded. So, 10/3 batches are created. 3 + >>> dataiter = mx.io.NDArrayIter(data, labels, 3, False, last_batch_handle='roll_over') + >>> batchidx = 0 + >>> for batch in dataiter: + ... batchidx += 1 + ... + >>> batchidx # Remaining examples are rolled over to the next iteration. + 3 + >>> dataiter.reset() + >>> dataiter.next().data[0].asnumpy() + [[[ 36. 37.] + [ 38. 39.]] + [[ 0. 1.] + [ 2. 3.]] + [[ 4. 5.] + [ 6. 7.]]] + (3L, 2L, 2L) `NDArrayIter` also supports multiple input and labels. @@ -633,8 +592,11 @@ class NDArrayIter(DataIter): Only supported if no h5py.Dataset inputs are used. last_batch_handle : str, optional How to handle the last batch. This parameter can be 'pad', 'discard' or - 'roll_over'. 'roll_over' is intended for training and can cause problems - if used for prediction. + 'roll_over'. + If 'pad', the last batch will be padded with data starting from the begining + If 'discard', the last batch will be discarded + If 'roll_over', the remaining elements will be rolled over to the next iteration and + note that it is intended for training and can cause problems if used for prediction. data_name : str, optional The data name. 
label_name : str, optional @@ -645,36 +607,28 @@ def __init__(self, data, label=None, batch_size=1, shuffle=False, label_name='softmax_label'): super(NDArrayIter, self).__init__(batch_size) - self.data = _init_data(data, allow_empty=False, default_name=data_name) - self.label = _init_data(label, allow_empty=True, default_name=label_name) + self.data = init_data(data, allow_empty=False, default_name=data_name) + self.label = init_data(label, allow_empty=True, default_name=label_name) - if ((_has_instance(self.data, CSRNDArray) or _has_instance(self.label, CSRNDArray)) and + if ((has_instance(self.data, CSRNDArray) or has_instance(self.label, CSRNDArray)) and (last_batch_handle != 'discard')): raise NotImplementedError("`NDArrayIter` only supports ``CSRNDArray``" \ " with `last_batch_handle` set to `discard`.") - # shuffle data - if shuffle: - tmp_idx = arange(self.data[0][1].shape[0], dtype=np.int32) - self.idx = random_shuffle(tmp_idx, out=tmp_idx).asnumpy() - self.data = _shuffle(self.data, self.idx) - self.label = _shuffle(self.label, self.idx) - else: - self.idx = np.arange(self.data[0][1].shape[0]) - - # batching - if last_batch_handle == 'discard': - new_n = self.data[0][1].shape[0] - self.data[0][1].shape[0] % batch_size - self.idx = self.idx[:new_n] + self.idx = np.arange(self.data[0][1].shape[0]) + self.shuffle = shuffle + self.last_batch_handle = last_batch_handle + self.batch_size = batch_size + self.cursor = -self.batch_size + self.num_data = self.idx.shape[0] + # shuffle + self.reset() self.data_list = [x[1] for x in self.data] + [x[1] for x in self.label] self.num_source = len(self.data_list) - self.num_data = self.idx.shape[0] - assert self.num_data >= batch_size, \ - "batch_size needs to be smaller than data size." - self.cursor = -batch_size - self.batch_size = batch_size - self.last_batch_handle = last_batch_handle + # used for 'roll_over' + self._cache_data = None + self._cache_label = None @property def provide_data(self): @@ -694,74 +648,126 @@ def provide_label(self): def hard_reset(self): """Ignore roll over data and set to start.""" + if self.shuffle: + self._shuffle_data() self.cursor = -self.batch_size + self._cache_data = None + self._cache_label = None def reset(self): - if self.last_batch_handle == 'roll_over' and self.cursor > self.num_data: - self.cursor = -self.batch_size + (self.cursor%self.num_data)%self.batch_size + """Resets the iterator to the beginning of the data.""" + if self.shuffle: + self._shuffle_data() + # the range below indicate the last batch + if self.last_batch_handle == 'roll_over' and \ + self.num_data - self.batch_size < self.cursor < self.num_data: + # (self.cursor - self.num_data) represents the data we have for the last batch + self.cursor = self.cursor - self.num_data - self.batch_size else: self.cursor = -self.batch_size def iter_next(self): + """Increments the coursor by batch_size for next batch + and check current cursor if it exceed the number of data points.""" self.cursor += self.batch_size return self.cursor < self.num_data def next(self): - if self.iter_next(): - return DataBatch(data=self.getdata(), label=self.getlabel(), \ - pad=self.getpad(), index=None) - else: + """Returns the next batch of data.""" + if not self.iter_next(): + raise StopIteration + data = self.getdata() + label = self.getlabel() + # iter should stop when last batch is not complete + if data[0].shape[0] != self.batch_size: + # in this case, cache it for next epoch + self._cache_data = data + self._cache_label = label raise StopIteration + return 
DataBatch(data=data, label=label, \ + pad=self.getpad(), index=None) + + def _getdata(self, data_source, start=None, end=None): + """Load data from underlying arrays.""" + assert start is not None or end is not None, 'should at least specify start or end' + start = start if start is not None else 0 + end = end if end is not None else data_source[0][1].shape[0] + s = slice(start, end) + return [ + x[1][s] + if isinstance(x[1], (np.ndarray, NDArray)) else + # h5py (only supports indices in increasing order) + array(x[1][sorted(self.idx[s])][[ + list(self.idx[s]).index(i) + for i in sorted(self.idx[s]) + ]]) for x in data_source + ] - def _getdata(self, data_source): + def _concat(self, first_data, second_data): + """Helper function to concat two NDArrays.""" + return [ + concat(first_data[0], second_data[0], dim=0) + ] + + def _batchify(self, data_source): """Load data from underlying arrays, internal use only.""" - assert(self.cursor < self.num_data), "DataIter needs reset." - if self.cursor + self.batch_size <= self.num_data: - return [ - # np.ndarray or NDArray case - x[1][self.cursor:self.cursor + self.batch_size] - if isinstance(x[1], (np.ndarray, NDArray)) else - # h5py (only supports indices in increasing order) - array(x[1][sorted(self.idx[ - self.cursor:self.cursor + self.batch_size])][[ - list(self.idx[self.cursor: - self.cursor + self.batch_size]).index(i) - for i in sorted(self.idx[ - self.cursor:self.cursor + self.batch_size]) - ]]) for x in data_source - ] - else: + assert self.cursor < self.num_data, 'DataIter needs reset.' + # first batch of next epoch with 'roll_over' + if self.last_batch_handle == 'roll_over' and \ + -self.batch_size < self.cursor < 0: + assert self._cache_data is not None or self._cache_label is not None, \ + 'next epoch should have cached data' + cache_data = self._cache_data if self._cache_data is not None else self._cache_label + second_data = self._getdata( + data_source, end=self.cursor + self.batch_size) + if self._cache_data is not None: + self._cache_data = None + else: + self._cache_label = None + return self._concat(cache_data, second_data) + # last batch with 'pad' + elif self.last_batch_handle == 'pad' and \ + self.cursor + self.batch_size > self.num_data: pad = self.batch_size - self.num_data + self.cursor - return [ - # np.ndarray or NDArray case - concatenate([x[1][self.cursor:], x[1][:pad]]) - if isinstance(x[1], (np.ndarray, NDArray)) else - # h5py (only supports indices in increasing order) - concatenate([ - array(x[1][sorted(self.idx[self.cursor:])][[ - list(self.idx[self.cursor:]).index(i) - for i in sorted(self.idx[self.cursor:]) - ]]), - array(x[1][sorted(self.idx[:pad])][[ - list(self.idx[:pad]).index(i) - for i in sorted(self.idx[:pad]) - ]]) - ]) for x in data_source - ] + first_data = self._getdata(data_source, start=self.cursor) + second_data = self._getdata(data_source, end=pad) + return self._concat(first_data, second_data) + # normal case + else: + if self.cursor + self.batch_size < self.num_data: + end_idx = self.cursor + self.batch_size + # get incomplete last batch + else: + end_idx = self.num_data + return self._getdata(data_source, self.cursor, end_idx) def getdata(self): - return self._getdata(self.data) + """Get data.""" + return self._batchify(self.data) def getlabel(self): - return self._getdata(self.label) + """Get label.""" + return self._batchify(self.label) def getpad(self): + """Get pad value of DataBatch.""" if self.last_batch_handle == 'pad' and \ self.cursor + self.batch_size > self.num_data: return 
self.cursor + self.batch_size - self.num_data + # check the first batch + elif self.last_batch_handle == 'roll_over' and \ + -self.batch_size < self.cursor < 0: + return -self.cursor else: return 0 + def _shuffle_data(self): + """Shuffle the data.""" + # shuffle index + np.random.shuffle(self.idx) + # get the data by corresponding index + self.data = getdata_by_idx(self.data, self.idx) + self.label = getdata_by_idx(self.label, self.idx) class MXDataIter(DataIter): """A python wrapper a C++ data iterator. @@ -773,7 +779,7 @@ class MXDataIter(DataIter): underlying C++ data iterators. Usually you don't need to interact with `MXDataIter` directly unless you are - implementing your own data iterators in C++. To do that, please refer to + implementing your own data iterators in C+ +. To do that, please refer to examples under the `src/io` folder. Parameters diff --git a/python/mxnet/io/utils.py b/python/mxnet/io/utils.py new file mode 100644 index 000000000000..872e6410d7de --- /dev/null +++ b/python/mxnet/io/utils.py @@ -0,0 +1,86 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +"""utility functions for io.py""" +from collections import OrderedDict + +import numpy as np +try: + import h5py +except ImportError: + h5py = None + +from ..ndarray.sparse import CSRNDArray +from ..ndarray.sparse import array as sparse_array +from ..ndarray import NDArray +from ..ndarray import array + +def init_data(data, allow_empty, default_name): + """Convert data into canonical form.""" + assert (data is not None) or allow_empty + if data is None: + data = [] + + if isinstance(data, (np.ndarray, NDArray, h5py.Dataset) + if h5py else (np.ndarray, NDArray)): + data = [data] + if isinstance(data, list): + if not allow_empty: + assert(len(data) > 0) + if len(data) == 1: + data = OrderedDict([(default_name, data[0])]) # pylint: disable=redefined-variable-type + else: + data = OrderedDict( # pylint: disable=redefined-variable-type + [('_%d_%s' % (i, default_name), d) for i, d in enumerate(data)]) + if not isinstance(data, dict): + raise TypeError("Input must be NDArray, numpy.ndarray, h5py.Dataset " + + "a list of them or dict with them as values") + for k, v in data.items(): + if not isinstance(v, (NDArray, h5py.Dataset) if h5py else NDArray): + try: + data[k] = array(v) + except: + raise TypeError(("Invalid type '%s' for %s, " % (type(v), k)) + + "should be NDArray, numpy.ndarray or h5py.Dataset") + + return list(sorted(data.items())) + + +def has_instance(data, dtype): + """Return True if ``data`` has instance of ``dtype``. + This function is called after _init_data. 
+ ``data`` is a list of (str, NDArray)""" + for item in data: + _, arr = item + if isinstance(arr, dtype): + return True + return False + + +def getdata_by_idx(data, idx): + """Shuffle the data.""" + shuffle_data = [] + + for k, v in data: + if (isinstance(v, h5py.Dataset) if h5py else False): + shuffle_data.append((k, v)) + elif isinstance(v, CSRNDArray): + shuffle_data.append((k, sparse_array(v.asscipy()[idx], v.context))) + else: + shuffle_data.append((k, array(v.asnumpy()[idx], v.context))) + + return shuffle_data diff --git a/tests/python/unittest/test_io.py b/tests/python/unittest/test_io.py index 4dfa69cc1050..ae686261b818 100644 --- a/tests/python/unittest/test_io.py +++ b/tests/python/unittest/test_io.py @@ -88,80 +88,88 @@ def test_Cifar10Rec(): assert(labelcount[i] == 5000) -def test_NDArrayIter(): +def _init_NDArrayIter_data(): data = np.ones([1000, 2, 2]) - label = np.ones([1000, 1]) + labels = np.ones([1000, 1]) for i in range(1000): data[i] = i / 100 - label[i] = i / 100 - dataiter = mx.io.NDArrayIter( - data, label, 128, True, last_batch_handle='pad') - batchidx = 0 + labels[i] = i / 100 + return data, labels + + +def _test_last_batch_handle(data, labels): + # Test the three parameters 'pad', 'discard', 'roll_over' + last_batch_handle_list = ['pad', 'discard' , 'roll_over'] + labelcount_list = [(124, 100), (100, 96), (100, 96)] + batch_count_list = [8, 7, 7] + + for idx in range(len(last_batch_handle_list)): + dataiter = mx.io.NDArrayIter( + data, labels, 128, False, last_batch_handle=last_batch_handle_list[idx]) + batch_count = 0 + labelcount = [0 for i in range(10)] + for batch in dataiter: + label = batch.label[0].asnumpy().flatten() + # check data if it matches corresponding labels + assert((batch.data[0].asnumpy()[:, 0, 0] == label).all()), last_batch_handle_list[idx] + for i in range(label.shape[0]): + labelcount[int(label[i])] += 1 + # keep the last batch of 'pad' to be used later + # to test first batch of roll_over in second iteration + batch_count += 1 + if last_batch_handle_list[idx] == 'pad' and \ + batch_count == 8: + cache = batch.data[0].asnumpy() + # check if batchifying functionality work properly + assert labelcount[0] == labelcount_list[idx][0], last_batch_handle_list[idx] + assert labelcount[8] == labelcount_list[idx][1], last_batch_handle_list[idx] + assert batch_count == batch_count_list[idx] + # roll_over option + dataiter.reset() + assert np.array_equal(dataiter.next().data[0].asnumpy(), cache) + + +def _test_shuffle(data, labels): + dataiter = mx.io.NDArrayIter(data, labels, 1, False) + batch_list = [] for batch in dataiter: - batchidx += 1 - assert(batchidx == 8) - dataiter = mx.io.NDArrayIter( - data, label, 128, False, last_batch_handle='pad') - batchidx = 0 - labelcount = [0 for i in range(10)] + # cache the original data + batch_list.append(batch.data[0].asnumpy()) + dataiter = mx.io.NDArrayIter(data, labels, 1, True) + idx_list = dataiter.idx + i = 0 for batch in dataiter: - label = batch.label[0].asnumpy().flatten() - assert((batch.data[0].asnumpy()[:, 0, 0] == label).all()) - for i in range(label.shape[0]): - labelcount[int(label[i])] += 1 + # check if each data point have been shuffled to corresponding positions + assert np.array_equal(batch.data[0].asnumpy(), batch_list[idx_list[i]]) + i += 1 - for i in range(10): - if i == 0: - assert(labelcount[i] == 124) - else: - assert(labelcount[i] == 100) + +def test_NDArrayIter(): + data, labels = _init_NDArrayIter_data() + _test_last_batch_handle(data, labels) + _test_shuffle(data, labels) def 
test_NDArrayIter_h5py(): if not h5py: return - data = np.ones([1000, 2, 2]) - label = np.ones([1000, 1]) - for i in range(1000): - data[i] = i / 100 - label[i] = i / 100 + data, labels = _init_NDArrayIter_data() try: - os.remove("ndarraytest.h5") + os.remove('ndarraytest.h5') except OSError: pass - with h5py.File("ndarraytest.h5") as f: - f.create_dataset("data", data=data) - f.create_dataset("label", data=label) - - dataiter = mx.io.NDArrayIter( - f["data"], f["label"], 128, True, last_batch_handle='pad') - batchidx = 0 - for batch in dataiter: - batchidx += 1 - assert(batchidx == 8) - - dataiter = mx.io.NDArrayIter( - f["data"], f["label"], 128, False, last_batch_handle='pad') - labelcount = [0 for i in range(10)] - for batch in dataiter: - label = batch.label[0].asnumpy().flatten() - assert((batch.data[0].asnumpy()[:, 0, 0] == label).all()) - for i in range(label.shape[0]): - labelcount[int(label[i])] += 1 + with h5py.File('ndarraytest.h5') as f: + f.create_dataset('data', data=data) + f.create_dataset('label', data=labels) + _test_last_batch_handle(f['data'], f['label']) try: os.remove("ndarraytest.h5") except OSError: pass - for i in range(10): - if i == 0: - assert(labelcount[i] == 124) - else: - assert(labelcount[i] == 100) - def test_NDArrayIter_csr(): # creating toy data @@ -182,12 +190,20 @@ def test_NDArrayIter_csr(): {'data': train_data}, dns, batch_size) except ImportError: pass + # scipy.sparse.csr_matrix with shuffle + num_batch = 0 + csr_iter = iter(mx.io.NDArrayIter({'data': train_data}, dns, batch_size, + shuffle=True, last_batch_handle='discard')) + for _ in csr_iter: + num_batch += 1 + + assert(num_batch == num_rows // batch_size) # CSRNDArray with shuffle csr_iter = iter(mx.io.NDArrayIter({'csr_data': csr, 'dns_data': dns}, dns, batch_size, shuffle=True, last_batch_handle='discard')) num_batch = 0 - for batch in csr_iter: + for _ in csr_iter: num_batch += 1 assert(num_batch == num_rows // batch_size) From 4ee866fc75307b284cc0eae93d0cf4dad3b62533 Mon Sep 17 00:00:00 2001 From: Hao Jin Date: Tue, 11 Sep 2018 10:20:41 -0700 Subject: [PATCH 119/160] Add support for more req patterns for bilinear sampler backward (#12386) --- src/operator/bilinear_sampler-inl.h | 11 +++--- src/operator/bilinear_sampler.cc | 43 +++++++++++++--------- src/operator/bilinear_sampler.cu | 52 ++++++++++++++++++--------- src/operator/mxnet_op.h | 27 ++++++++++++++ tests/python/gpu/test_operator_gpu.py | 18 +++++++++- 5 files changed, 110 insertions(+), 41 deletions(-) diff --git a/src/operator/bilinear_sampler-inl.h b/src/operator/bilinear_sampler-inl.h index e0b4db7b367c..499d23396207 100644 --- a/src/operator/bilinear_sampler-inl.h +++ b/src/operator/bilinear_sampler-inl.h @@ -95,19 +95,16 @@ class BilinearSamplerOp : public Operator { Tensor gdata = in_grad[bs::kData].get(s); Tensor ggrid = in_grad[bs::kGrid].get(s); Tensor grad = out_grad[bs::kOut].get(s); - if (req[bs::kData] != kNullOp && req[bs::kGrid] != kNullOp) { + if (req[bs::kData] == kNullOp && req[bs::kGrid] == kNullOp) { + return; + } else { if (req[bs::kData] == kWriteTo) { gdata = scalar(0.0f); } if (req[bs::kGrid] == kWriteTo) { ggrid = scalar(0.0f); } - BilinearSamplerBackward(gdata, ggrid, grad, data, grid); - } else if (req[bs::kData] == kNullOp && req[bs::kGrid] == kNullOp) { - return; - } else { - LOG(FATAL) << "Have not implemented the data req combinations! 
gdata_req=" - << req[bs::kData] << " ggrid_req=" << req[bs::kGrid]; + BilinearSamplerBackward(gdata, ggrid, grad, data, grid, req[bs::kData], req[bs::kGrid]); } } diff --git a/src/operator/bilinear_sampler.cc b/src/operator/bilinear_sampler.cc index 3365d98bb4db..a3b7d5764245 100644 --- a/src/operator/bilinear_sampler.cc +++ b/src/operator/bilinear_sampler.cc @@ -78,10 +78,12 @@ inline void BilinearSamplerForward(const Tensor &output, template inline void BilinearSamplerBackward(const Tensor &gdata, - const Tensor &ggrid, - const Tensor &output_grad, - const Tensor &input_data, - const Tensor &grid) { + const Tensor &ggrid, + const Tensor &output_grad, + const Tensor &input_data, + const Tensor &grid, + const mxnet::OpReqType data_req, + const mxnet::OpReqType grid_req) { DType *g_input = gdata.dptr_; DType *grad_grid = ggrid.dptr_; const DType *grid_src = grid.dptr_; @@ -104,8 +106,7 @@ inline void BilinearSamplerBackward(const Tensor &gdata, DType top_left_x_w = 1.0 - (x_real - top_left_x); for (index_t c = 0; c < static_cast(o_c); ++c) { index_t grad_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w; - int data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w - + top_left_x; + int data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x; // calc 4 vertex value in input data DType top_left_v = 0; DType top_right_v = 0; @@ -113,22 +114,30 @@ inline void BilinearSamplerBackward(const Tensor &gdata, DType bottom_right_v = 0; // calc input grad if (between(top_left_x, 0, i_w-1) && between(top_left_y, 0, i_h-1)) { - *(g_input + data_index) += *(grad + grad_index) * top_left_y_w * top_left_x_w; + if (data_req != mxnet::kNullOp) { + *(g_input + data_index) += *(grad + grad_index) * top_left_y_w * top_left_x_w; + } top_left_v = *(data + data_index); } if (between(top_left_x+1, 0, i_w-1) && between(top_left_y, 0, i_h-1)) { - *(g_input + data_index + 1) += *(grad + grad_index) * top_left_y_w - * (1.0 - top_left_x_w); + if (data_req != mxnet::kNullOp) { + *(g_input + data_index + 1) += + *(grad + grad_index) * top_left_y_w * (1.0 - top_left_x_w); + } top_right_v = *(data + data_index + 1); } if (between(top_left_x, 0, i_w-1) && between(top_left_y+1, 0, i_h-1)) { - *(g_input + data_index+ i_w) += *(grad + grad_index) * (1.0 - top_left_y_w) - * top_left_x_w; + if (data_req != mxnet::kNullOp) { + *(g_input + data_index+ i_w) += + *(grad + grad_index) * (1.0 - top_left_y_w) * top_left_x_w; + } bottom_left_v = *(data + data_index + i_w); } if (between(top_left_x+1, 0, i_w-1) && between(top_left_y+1, 0, i_h-1)) { - *(g_input + data_index+ i_w + 1) += *(grad + grad_index) * (1.0 - top_left_y_w) - * (1.0 - top_left_x_w); + if (data_req != mxnet::kNullOp) { + *(g_input + data_index+ i_w + 1) += + *(grad + grad_index) * (1.0 - top_left_y_w) * (1.0 - top_left_x_w); + } bottom_right_v = *(data + data_index + i_w + 1); } // calc weight grad of top_left_w, then multiple -1 is the grad of grid_src @@ -139,9 +148,11 @@ inline void BilinearSamplerBackward(const Tensor &gdata, (top_left_v - top_right_v - bottom_left_v + bottom_right_v) * top_left_y_w); } - // calc grad of grid - *(grad_grid + grid_src_index + o_h * o_w) += top_left_y_gw * (i_h - 1) / 2; - *(grad_grid + grid_src_index) += top_left_x_gw * (i_w - 1) / 2; + if (grid_req != mxnet::kNullOp) { + // calc grad of grid + *(grad_grid + grid_src_index + o_h * o_w) += top_left_y_gw * (i_h - 1) / 2; + *(grad_grid + grid_src_index) += top_left_x_gw * (i_w - 1) / 2; + } } } } diff --git 
a/src/operator/bilinear_sampler.cu b/src/operator/bilinear_sampler.cu index e1f205258a24..2e6be3e1ef3e 100644 --- a/src/operator/bilinear_sampler.cu +++ b/src/operator/bilinear_sampler.cu @@ -79,7 +79,7 @@ __global__ void BilinearSamplerForwardKernel(const int i_c, const int i_h, } } -template +template __global__ void BilinearSamplerBackwardKernel(const int i_c, const int i_h, const int i_w, const DType* grad, const DType* data, const int o_n, @@ -114,22 +114,30 @@ __global__ void BilinearSamplerBackwardKernel(const int i_c, const int i_h, DType bottom_right_v = 0; // calc input grad if (between(top_left_x, 0, i_w-1) && between(top_left_y, 0, i_h-1)) { - atomicAdd(&g_input[data_index], *(grad + grad_index) * top_left_y_w * top_left_x_w); + if (Req1 != mxnet::kNullOp) { + atomicAdd(&g_input[data_index], *(grad + grad_index) * top_left_y_w * top_left_x_w); + } top_left_v = *(data + data_index); } if (between(top_left_x+1, 0, i_w-1) && between(top_left_y, 0, i_h-1)) { - atomicAdd(&g_input[data_index + 1], *(grad + grad_index) * top_left_y_w - * (1.0 - top_left_x_w)); + if (Req1 != mxnet::kNullOp) { + atomicAdd(&g_input[data_index + 1], + *(grad + grad_index) * top_left_y_w * (1.0 - top_left_x_w)); + } top_right_v = *(data + data_index + 1); } if (between(top_left_x, 0, i_w-1) && between(top_left_y+1, 0, i_h-1)) { - atomicAdd(&g_input[data_index+ i_w], *(grad + grad_index) * (1.0 - top_left_y_w) - * top_left_x_w); + if (Req1 != mxnet::kNullOp) { + atomicAdd(&g_input[data_index+ i_w], + *(grad + grad_index) * (1.0 - top_left_y_w) * top_left_x_w); + } bottom_left_v = *(data + data_index + i_w); } if (between(top_left_x+1, 0, i_w-1) && between(top_left_y+1, 0, i_h-1)) { - atomicAdd(&g_input[data_index+ i_w + 1], *(grad + grad_index) * (1.0 - top_left_y_w) - * (1.0 - top_left_x_w)); + if (Req1 != mxnet::kNullOp) { + atomicAdd(&g_input[data_index+ i_w + 1], + *(grad + grad_index) * (1.0 - top_left_y_w) * (1.0 - top_left_x_w)); + } bottom_right_v = *(data + data_index + i_w + 1); } // calc weight grad of top_left_w, then multiple -1 is the grad of grid_src @@ -140,9 +148,11 @@ __global__ void BilinearSamplerBackwardKernel(const int i_c, const int i_h, (top_left_v - top_right_v - bottom_left_v + bottom_right_v) * top_left_y_w); } - // calc grad of grid - *(grad_grid + grid_src_index + o_h * o_w) += top_left_y_gw * (i_h - 1) / 2; - *(grad_grid + grid_src_index) += top_left_x_gw * (i_w - 1) / 2; + if (Req2 != mxnet::kNullOp) { + // calc grad of grid + *(grad_grid + grid_src_index + o_h * o_w) += top_left_y_gw * (i_h - 1) / 2; + *(grad_grid + grid_src_index) += top_left_x_gw * (i_w - 1) / 2; + } } } } // namespace cuda @@ -174,10 +184,13 @@ inline void BilinearSamplerForward(const Tensor &output, template inline void BilinearSamplerBackward(const Tensor &input_grad, - const Tensor &ggrid, - const Tensor &output_grad, - const Tensor &input_data, - const Tensor &grid) { + const Tensor &ggrid, + const Tensor &output_grad, + const Tensor &input_data, + const Tensor &grid, + const mxnet::OpReqType data_req, + const mxnet::OpReqType grid_req) { + using namespace mxnet; DType *g_input = input_grad.dptr_; DType *grad_grid = ggrid.dptr_; const DType *grid_src = grid.dptr_; @@ -196,8 +209,13 @@ inline void BilinearSamplerBackward(const Tensor &input_grad, dim3 threads_per_block(kMaxThreadsPerBlock); CheckLaunchParam(num_blocks, threads_per_block, "bilinear sampler backward"); cudaStream_t stream = Stream::GetStream(input_grad.stream_); - cuda::BilinearSamplerBackwardKernel << > >( - i_c, i_h, i_w, grad, data, 
o_n, o_c, o_h, o_w, g_input, grid_src, grad_grid); + MXNET_REQ_TYPE_SWITCH(data_req, Req1, { + MXNET_REQ_TYPE_SWITCH(grid_req, Req2, { + cuda::BilinearSamplerBackwardKernel + <<>>( + i_c, i_h, i_w, grad, data, o_n, o_c, o_h, o_w, g_input, grid_src, grad_grid); + }); + }); // post kernel check cudaError err = cudaPeekAtLastError(); CHECK_EQ(err, cudaSuccess) << cudaGetErrorString(err); diff --git a/src/operator/mxnet_op.h b/src/operator/mxnet_op.h index f11a497c564c..e77569671ebb 100644 --- a/src/operator/mxnet_op.h +++ b/src/operator/mxnet_op.h @@ -111,6 +111,33 @@ inline int get_num_threads(const int N) { } +/*! \brief operator request type switch */ +#define MXNET_REQ_TYPE_SWITCH(req, ReqType, ...) \ + switch (req) { \ + case kNullOp: \ + { \ + const OpReqType ReqType = kNullOp; \ + {__VA_ARGS__} \ + } \ + break; \ + case kWriteInplace: \ + case kWriteTo: \ + { \ + const OpReqType ReqType = kWriteTo; \ + {__VA_ARGS__} \ + } \ + break; \ + case kAddTo: \ + { \ + const OpReqType ReqType = kAddTo; \ + {__VA_ARGS__} \ + } \ + break; \ + default: \ + break; \ + } + + #define MXNET_NDIM_SWITCH(NDim, ndim, ...) \ if (NDim == 0) { \ } else if (NDim == 1) { \ diff --git a/tests/python/gpu/test_operator_gpu.py b/tests/python/gpu/test_operator_gpu.py index 1fc2c8e922d9..d201a2e09c6d 100644 --- a/tests/python/gpu/test_operator_gpu.py +++ b/tests/python/gpu/test_operator_gpu.py @@ -1945,7 +1945,7 @@ def test_bilinear_sampler_versions(): exe.arg_dict['data'][:] = test_data exe.arg_dict['grid'][:] = test_grid exe.forward(is_train=True) - assert_almost_equal(exe_list[0].outputs[0].asnumpy(), exe.outputs[0].asnumpy(), rtol=1e-3, atol=1e-5) + assert_almost_equal(exe_list[ref_idx].outputs[0].asnumpy(), exe.outputs[0].asnumpy(), rtol=1e-3, atol=1e-5) out_grad = np.random.uniform(low=-0.01, high=0.01,size=data_shape[:2] + grid_shape[2:]).astype(np.float32) for exe in exe_list: @@ -1975,6 +1975,22 @@ def test_bilinear_sampler_versions(): assert_almost_equal(exe_list[ref_idx].grad_dict['data'].asnumpy(), data_grad + data_initial_grad, rtol=1e-3, atol=1e-5) assert_almost_equal(exe_list[ref_idx].grad_dict['grid'].asnumpy(), grid_grad + grid_initial_grad, rtol=1e-3, atol=1e-5) + for req_dict in [{'data' : 'null', 'grid' : 'write'}, {'data' : 'write', 'grid' : 'null'}]: + # Mixture of kWriteTo and kNullOp + exe_cpu_mix = sym1.simple_bind(data=data_shape, grid=grid_shape, ctx=mx.cpu(), grad_req=req_dict) + exe_gpu_mix = sym2.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req=req_dict) + exe_cudnn_mix = sym3.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req=req_dict) + exe_list = [exe_cpu_mix, exe_gpu_mix, exe_cudnn_mix] + for exe in exe_list: + exe.arg_dict['data'][:] = test_data + exe.arg_dict['grid'][:] = test_grid + exe.forward(is_train=True) + exe.backward(mx.nd.array(out_grad)) + if req_dict['data'] is 'write': + assert_almost_equal(exe.grad_dict['data'].asnumpy(), exe_list[ref_idx].grad_dict['data'].asnumpy(), rtol=1e-3, atol=1e-5) + if req_dict['grid'] is 'write': + assert_almost_equal(exe.grad_dict['grid'].asnumpy(), exe_list[ref_idx].grad_dict['grid'].asnumpy(), rtol=1e-3, atol=1e-5) + def test_context_num_gpus(): # Test that num_gpus reports at least one GPU, as the test is run on a GPU host. 
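To try the new request-pattern support outside the test suite, the following is a minimal sketch (not taken from the patch) that binds `BilinearSampler` with a mixed `grad_req`, mirroring the `{'data': 'write', 'grid': 'null'}` case the added test covers; the shapes and random inputs are illustrative assumptions, and the snippet needs a build that includes this change.

```python
import numpy as np
import mxnet as mx

data = mx.sym.Variable('data')
grid = mx.sym.Variable('grid')
net = mx.sym.BilinearSampler(data=data, grid=grid)

data_shape = (1, 2, 4, 4)   # (N, C, H, W)
grid_shape = (1, 2, 4, 4)   # (N, 2, out_h, out_w), sampling coordinates in [-1, 1]

# Request only the data gradient; the grid gradient is skipped (kNullOp).
exe = net.simple_bind(ctx=mx.cpu(), data=data_shape, grid=grid_shape,
                      grad_req={'data': 'write', 'grid': 'null'})
exe.arg_dict['data'][:] = np.random.uniform(size=data_shape)
exe.arg_dict['grid'][:] = np.random.uniform(-1, 1, size=grid_shape)
exe.forward(is_train=True)
exe.backward(mx.nd.ones(data_shape))
print(exe.grad_dict['data'].asnumpy().shape)   # (1, 2, 4, 4); grid gradient is not computed
```

With `grad_req` set to `'null'` for one of the inputs, the backward pass now simply skips that gradient instead of failing with the old "Have not implemented the data req combinations!" error, which is the behavioral change these kernels introduce.
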
From 35ca13c3b5a0e57d904d1fead079152a15dfeac4 Mon Sep 17 00:00:00 2001 From: Stephanie Jingyi Yuan Date: Tue, 11 Sep 2018 16:18:00 -0400 Subject: [PATCH 120/160] Temporarily disable flaky tests (#12520) --- tests/python/unittest/test_contrib_svrg_module.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/python/unittest/test_contrib_svrg_module.py b/tests/python/unittest/test_contrib_svrg_module.py index dd4618e2e82d..79407d15fd7f 100644 --- a/tests/python/unittest/test_contrib_svrg_module.py +++ b/tests/python/unittest/test_contrib_svrg_module.py @@ -94,6 +94,7 @@ def test_module_bind(): assert mod._mod_aux.binded == True +@unittest.skip("Flaky test https://gitsvrhub.com/apache/incubator-mxnet/issues/12510") @with_seed() def test_module_save_load(): import tempfile @@ -162,6 +163,7 @@ def test_svrgmodule_reshape(): assert mod.get_outputs()[0].shape == dshape +@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/12510") @with_seed() def test_update_full_grad(): def create_network(): @@ -204,6 +206,7 @@ def create_network(): assert same(full_grads_weights, svrg_mod._param_dict[0]['fc1_weight']) +@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/12510") @with_seed() def test_svrg_with_sgd(): def create_module_with_sgd(): @@ -267,6 +270,7 @@ def create_module_with_sgd(): assert svrg_mse < sgd_mse +@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/12510") @with_seed() def test_accumulate_kvstore(): # Test KVStore behavior when push a list of values From 3d83c896fd8b237c53003888e35a4d792c1e5389 Mon Sep 17 00:00:00 2001 From: Sandeep Krishnamurthy Date: Tue, 11 Sep 2018 16:20:40 -0700 Subject: [PATCH 121/160] Fix broken URLs (#12508) --- docs/architecture/rnn_interface.md | 4 ++-- docs/install/index.md | 2 +- docs/install/windows_setup.md | 4 ++-- docs/tutorials/onnx/export_mxnet_to_onnx.md | 2 +- python/mxnet/contrib/onnx/mx2onnx/export_model.py | 3 ++- python/mxnet/contrib/onnx/onnx2mx/import_model.py | 3 ++- python/mxnet/contrib/text/embedding.py | 2 +- 7 files changed, 11 insertions(+), 9 deletions(-) diff --git a/docs/architecture/rnn_interface.md b/docs/architecture/rnn_interface.md index 42338763ce63..dc0b6a7958ef 100644 --- a/docs/architecture/rnn_interface.md +++ b/docs/architecture/rnn_interface.md @@ -1,6 +1,6 @@ # Survey of Existing Interfaces and Implementations -Commonly used deep learning libraries with good RNN/LSTM support include [Theano](http://deeplearning.net/software/theano/library/scan.html) and its wrappers [Lasagne](http://lasagne.readthedocs.org/en/latest/modules/layers/recurrent.html) and [Keras](http://keras.io/layers/recurrent/); [CNTK](https://cntk.codeplex.com/); [TensorFlow](https://www.tensorflow.org/versions/master/tutorials/recurrent/index.html); and various implementations in Torch, such as [this well-known character-level language model tutorial](https://github.com/karpathy/char-rnn), [this](https://github.com/Element-Research/rnn). 
+Commonly used deep learning libraries with good RNN/LSTM support include [Theano](http://deeplearning.net/software/theano/library/scan.html) and its wrappers [Lasagne](http://lasagne.readthedocs.org/en/latest/modules/layers/recurrent.html) and [Keras](http://keras.io/layers/recurrent/); [CNTK](https://cntk.codeplex.com/); [TensorFlow](https://www.tensorflow.org/tutorials/sequences/recurrent); and various implementations in Torch, such as [this well-known character-level language model tutorial](https://github.com/karpathy/char-rnn), [this](https://github.com/Element-Research/rnn). In this document, we present a comparative analysis of the approaches taken by these libraries. @@ -93,7 +93,7 @@ The low-level API for recurrent connection seem to be a *delay node*. But I'm no ## TensorFlow -The [current example of RNNLM](https://www.tensorflow.org/versions/master/tutorials/recurrent/index.html#recurrent-neural-networks) in TensorFlow uses explicit unrolling for a predefined number of time steps. The white-paper mentions that an advanced control flow API (Theano's scan-like) is planned. +The [current example of RNNLM](https://www.tensorflow.org/tutorials/sequences/recurrent#recurrent-neural-networks) in TensorFlow uses explicit unrolling for a predefined number of time steps. The white-paper mentions that an advanced control flow API (Theano's scan-like) is planned. ## Next Steps diff --git a/docs/install/index.md b/docs/install/index.md index 4a6af31cee3c..3a697ae20eeb 100644 --- a/docs/install/index.md +++ b/docs/install/index.md @@ -272,7 +272,7 @@ Follow the four steps in this [docker documentation](https://docs.docker.com/eng If you skip this step, you need to use *sudo* each time you invoke Docker. -**Step 3** Install *nvidia-docker-plugin* following the [installation instructions](https://github.com/NVIDIA/nvidia-docker/wiki/Installation). *nvidia-docker-plugin* is required to enable the usage of GPUs from the docker containers. +**Step 3** Install *nvidia-docker-plugin* following the [installation instructions](https://github.com/NVIDIA/nvidia-docker/wiki). *nvidia-docker-plugin* is required to enable the usage of GPUs from the docker containers. **Step 4** Pull the MXNet docker image. diff --git a/docs/install/windows_setup.md b/docs/install/windows_setup.md index 99ce7f63e850..c974eeb858b0 100755 --- a/docs/install/windows_setup.md +++ b/docs/install/windows_setup.md @@ -55,7 +55,7 @@ These commands produce a library called ```mxnet.dll``` in the ```./build/Releas Next, we install ```graphviz``` library that we use for visualizing network graphs you build on MXNet. We will also install [Jupyter Notebook](http://jupyter.readthedocs.io/) used for running MXNet tutorials and examples. - Install ```graphviz``` by downloading MSI installer from [Graphviz Download Page](https://graphviz.gitlab.io/_pages/Download/Download_windows.html). **Note** Make sure to add graphviz executable path to PATH environment variable. Refer [here for more details](http://stackoverflow.com/questions/35064304/runtimeerror-make-sure-the-graphviz-executables-are-on-your-systems-path-aft) -- Install ```Jupyter``` by installing [Anaconda for Python 2.7](https://www.continuum.io/downloads) +- Install ```Jupyter``` by installing [Anaconda for Python 2.7](https://www.anaconda.com/download/) **Note** Do not install Anaconda for Python 3.5. MXNet has few compatibility issue with Python 3.5.   @@ -69,7 +69,7 @@ We have installed MXNet core library. Next, we will install MXNet interface pack ## Install MXNet for Python 1. 
Install ```Python``` using windows installer available [here](https://www.python.org/downloads/release/python-2712/). -2. Install ```Numpy``` using windows installer available [here](http://scipy.org/install.html). +2. Install ```Numpy``` using windows installer available [here](https://scipy.org/index.html). 3. Next, we install Python package interface for MXNet. You can find the Python interface package for [MXNet on GitHub](https://github.com/dmlc/mxnet/tree/master/python/mxnet). ```bash diff --git a/docs/tutorials/onnx/export_mxnet_to_onnx.md b/docs/tutorials/onnx/export_mxnet_to_onnx.md index a9c03bed8b12..dc34bd520b43 100644 --- a/docs/tutorials/onnx/export_mxnet_to_onnx.md +++ b/docs/tutorials/onnx/export_mxnet_to_onnx.md @@ -55,7 +55,7 @@ Help on function export_model in module mxnet.contrib.onnx.mx2onnx.export_model: export_model(sym, params, input_shape, input_type=, onnx_file_path=u'model.onnx', verbose=False) Exports the MXNet model file, passed as a parameter, into ONNX model. Accepts both symbol,parameter objects as well as json and params filepaths as input. - Operator support and coverage - https://cwiki.apache.org/confluence/display/MXNET/ONNX + Operator support and coverage - https://cwiki.apache.org/confluence/display/MXNET/MXNet-ONNX+Integration Parameters ---------- diff --git a/python/mxnet/contrib/onnx/mx2onnx/export_model.py b/python/mxnet/contrib/onnx/mx2onnx/export_model.py index 33292bf664a7..e5158051d6f4 100644 --- a/python/mxnet/contrib/onnx/mx2onnx/export_model.py +++ b/python/mxnet/contrib/onnx/mx2onnx/export_model.py @@ -36,7 +36,8 @@ def export_model(sym, params, input_shape, input_type=np.float32, onnx_file_path='model.onnx', verbose=False): """Exports the MXNet model file, passed as a parameter, into ONNX model. Accepts both symbol,parameter objects as well as json and params filepaths as input. - Operator support and coverage - https://cwiki.apache.org/confluence/display/MXNET/ONNX + Operator support and coverage - + https://cwiki.apache.org/confluence/display/MXNET/MXNet-ONNX+Integration Parameters ---------- diff --git a/python/mxnet/contrib/onnx/onnx2mx/import_model.py b/python/mxnet/contrib/onnx/onnx2mx/import_model.py index e190c3bdadc0..b8d3bf28ee2f 100644 --- a/python/mxnet/contrib/onnx/onnx2mx/import_model.py +++ b/python/mxnet/contrib/onnx/onnx2mx/import_model.py @@ -23,7 +23,8 @@ def import_model(model_file): """Imports the ONNX model file, passed as a parameter, into MXNet symbol and parameters. 
- Operator support and coverage - https://cwiki.apache.org/confluence/display/MXNET/ONNX + Operator support and coverage - + https://cwiki.apache.org/confluence/display/MXNET/MXNet-ONNX+Integration Parameters ---------- diff --git a/python/mxnet/contrib/text/embedding.py b/python/mxnet/contrib/text/embedding.py index 38defb4b90bc..277f78222922 100644 --- a/python/mxnet/contrib/text/embedding.py +++ b/python/mxnet/contrib/text/embedding.py @@ -490,7 +490,7 @@ class GloVe(_TokenEmbedding): License for pre-trained embeddings: - https://opendatacommons.org/licenses/pddl/ + https://fedoraproject.org/wiki/Licensing/PDDL Parameters From 90599e1038a4ff6604e9ed0d55dc274c2df635f8 Mon Sep 17 00:00:00 2001 From: Hao Jin Date: Tue, 11 Sep 2018 20:35:45 -0700 Subject: [PATCH 122/160] further bump up tolerance for sparse dot (#12527) --- tests/python/unittest/test_sparse_operator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/python/unittest/test_sparse_operator.py b/tests/python/unittest/test_sparse_operator.py index b4783bd23820..57808248b081 100644 --- a/tests/python/unittest/test_sparse_operator.py +++ b/tests/python/unittest/test_sparse_operator.py @@ -1307,7 +1307,7 @@ def test_infer_forward_stype(lhs_shape, rhs_shape, lhs_density, rhs_density, tra rhs = rhs_nd.tostype(rhs_stype) out = mx.nd.dot(lhs, rhs, forward_stype=forward_stype, transpose_a=trans_a, transpose_b=trans_b) - assert_almost_equal(out.tostype('default').asnumpy(), out_np, rtol=1e-3, atol=1e-5) + assert_almost_equal(out.tostype('default').asnumpy(), out_np, rtol=1e-3, atol=1e-4) lhs_var = mx.symbol.Variable('lhs', stype=lhs_stype) rhs_var = mx.symbol.Variable('rhs', stype=rhs_stype) out = mx.symbol.sparse.dot(lhs_var, rhs_var, From 7ea05333efc8ca868443b89233b101d068f6af9f Mon Sep 17 00:00:00 2001 From: Anton Chernov Date: Wed, 12 Sep 2018 10:47:47 +0200 Subject: [PATCH 123/160] Revert "Fix flaky test: test_mkldnn.test_activation #12377 (#12418)" (#12516) This reverts commit 445967e6c316a91876efb60b6a5ef52ec1837d73. 
--- tests/python/mkl/test_mkldnn.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/python/mkl/test_mkldnn.py b/tests/python/mkl/test_mkldnn.py index 17fc29c81114..e597d0f5fc58 100644 --- a/tests/python/mkl/test_mkldnn.py +++ b/tests/python/mkl/test_mkldnn.py @@ -281,6 +281,7 @@ def check_pooling_training(stype): check_pooling_training(stype) +@unittest.skip("Flaky test: https://github.com/apache/incubator-mxnet/issues/12377") @with_seed() def test_activation(): def check_activation_training(stype): @@ -291,7 +292,7 @@ def check_activation_training(stype): in_location = [mx.nd.array(data_tmp).tostype(stype)] test = mx.symbol.Activation(data, act_type="relu") - check_numeric_gradient(test, in_location, numeric_eps=1e-6, rtol=0.16, atol=1e-4) + check_numeric_gradient(test, in_location, numeric_eps=1e-2, rtol=0.16, atol=1e-4) stypes = ['row_sparse', 'default'] for stype in stypes: From 619bc3ea3c9093b72634d16e91596b3a65f3f1fc Mon Sep 17 00:00:00 2001 From: Sandeep Krishnamurthy Date: Wed, 12 Sep 2018 08:45:54 -0700 Subject: [PATCH 124/160] Remove regression checks for website links (#12507) * Remove regression checks for website links * Add redirection ignore regex --- .../JenkinsfileForBLC | 4 -- .../broken_link_checker_test/README.md | 5 +- .../broken_link_checker.sh | 3 -- .../check_regression.sh | 46 ------------------- .../test_broken_links.py | 6 ++- 5 files changed, 5 insertions(+), 59 deletions(-) delete mode 100755 tests/nightly/broken_link_checker_test/check_regression.sh diff --git a/tests/nightly/broken_link_checker_test/JenkinsfileForBLC b/tests/nightly/broken_link_checker_test/JenkinsfileForBLC index 782bf74c9ccc..4c3f05319976 100755 --- a/tests/nightly/broken_link_checker_test/JenkinsfileForBLC +++ b/tests/nightly/broken_link_checker_test/JenkinsfileForBLC @@ -34,11 +34,7 @@ core_logic: { timeout(time: 60, unit: 'MINUTES') { try { utils.init_git() - sh 'aws s3 cp s3://mxnet-ci-prod-slave-data/url_list.txt ./tests/nightly/broken_link_checker_test/url_list.txt' utils.docker_run('ubuntu_blc', 'broken_link_checker', false) - } finally { - sh "echo Storing the new url_list.txt to S3 bucket" - sh 'aws s3 cp ./tests/nightly/broken_link_checker_test/url_list.txt s3://mxnet-ci-prod-slave-data/url_list.txt' } } } diff --git a/tests/nightly/broken_link_checker_test/README.md b/tests/nightly/broken_link_checker_test/README.md index a925d1b43816..c39abd0d6175 100755 --- a/tests/nightly/broken_link_checker_test/README.md +++ b/tests/nightly/broken_link_checker_test/README.md @@ -1,6 +1,6 @@ # Broken link checker test -This folder contains the scripts that are required to run the nightly job of checking the broken links. The job also checks whether the link that were published before are still accessible. +This folder contains the scripts that are required to run the nightly job of checking the broken links. ## JenkinsfileForBLC This is configuration file for jenkins job. @@ -8,6 +8,3 @@ This is configuration file for jenkins job. ## Details The `broken_link_checker.sh` is a top level script that invokes the `test_broken_links.py` and `check_regression.sh` scripts. The `test_broken_links.py` invokes broken link checker tool (blc) from nodeJs and reports the list of URLs that are not accessible. -The `check_regression.sh` scripts downloads the file `url_list.txt` that contains links that are publicly accessible from s3 bucket -The scripts merges this list with the output of `test_broken_links.py` and checks whether all those links are accessible using 'curl' command. 
-The updated `url_list.txt` is uploaded to s3 bucket. diff --git a/tests/nightly/broken_link_checker_test/broken_link_checker.sh b/tests/nightly/broken_link_checker_test/broken_link_checker.sh index 2107c96d257b..450cd65b8d96 100755 --- a/tests/nightly/broken_link_checker_test/broken_link_checker.sh +++ b/tests/nightly/broken_link_checker_test/broken_link_checker.sh @@ -28,6 +28,3 @@ echo `pwd` echo "Running test_broken_links.py" python test_broken_links.py - -echo "Running check_regression.sh" -./check_regression.sh diff --git a/tests/nightly/broken_link_checker_test/check_regression.sh b/tests/nightly/broken_link_checker_test/check_regression.sh deleted file mode 100755 index c21577f3ab3d..000000000000 --- a/tests/nightly/broken_link_checker_test/check_regression.sh +++ /dev/null @@ -1,46 +0,0 @@ -#! /bin/sh - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - - -echo "Running the check_regression.sh script" -cat blc_output.txt | uniq | grep -Eo "(http|https).* " | sort| uniq > unique_current_urls.txt - -cat url_list.txt unique_current_urls.txt | sort | uniq > new_url_list.txt -regression=false -while IFS= read -r line -do - err=$(curl -Is $line | head -n 1 | grep 404) - if [ "$err" ]; then - if [ "$regression" = false ] ; then - echo "FAIL: REGRESSION" - regression=true - fi - echo "BROKEN $line $err" - fi - unset err -done < new_url_list.txt -mv new_url_list.txt url_list.txt -rm -rf unique_current_urls.txt -rm -rf blc_output.txt -if [ $regression ]; then - echo "FAIL: Found Regression in broken link checker" - exit 1 -else - echo "SUCCESS: No Regression found" -fi diff --git a/tests/nightly/broken_link_checker_test/test_broken_links.py b/tests/nightly/broken_link_checker_test/test_broken_links.py index 593e008d5082..b1cbac7375e3 100755 --- a/tests/nightly/broken_link_checker_test/test_broken_links.py +++ b/tests/nightly/broken_link_checker_test/test_broken_links.py @@ -31,6 +31,8 @@ def prepare_link_test_result(command_output): # Whitelisted broken links patterns HTTP_403_REGEX = "(HTTP_403)" HTTP_401_REGEX = "(HTTP_401)" + HTTP_409_REGEX = "(HTTP_409)" + HTTP_3XX_REGEX = "(HTTP_3" BLC_UNKNOWN_REGEX = "(BLC_UNKNOWN)" HTTP_UNDEFINED = "HTTP_undefined" FALSE_SCALA_API_DOC_LINK = "java$lang.html" @@ -53,8 +55,8 @@ def prepare_link_test_result(command_output): current_page_broken_links = "" if line.find(BROKEN_PAGE_START_REGEX) != -1: - # Skip (401, 403, unknown issues) - if HTTP_403_REGEX not in line and HTTP_401_REGEX not in line and BLC_UNKNOWN_REGEX not in line and HTTP_UNDEFINED not in line and FALSE_SCALA_API_DOC_LINK not in line and FALSE_SCALA_API_DEPRECATED_LINK not in line and FALSE_PAPER_LINK not in line: + # Skip (401, 403, 409, unknown issues) + if HTTP_403_REGEX not in line and HTTP_401_REGEX not in line and HTTP_409_REGEX not in line and 
HTTP_3XX_REGEX not in line and BLC_UNKNOWN_REGEX not in line and HTTP_UNDEFINED not in line and FALSE_SCALA_API_DOC_LINK not in line and FALSE_SCALA_API_DEPRECATED_LINK not in line and FALSE_PAPER_LINK not in line: current_page_broken = True current_page_broken_links += line.split(BROKEN_PAGE_START_REGEX)[1] + "\n" From 46a5cee2515a1ac0a1ae5afbe7e639debb998587 Mon Sep 17 00:00:00 2001 From: Jake Lee Date: Wed, 12 Sep 2018 08:48:20 -0700 Subject: [PATCH 125/160] [MXNET-580] Add SN-GAN example (#12419) * update sn-gan example * fix naming * add more comments * fix naming and refine comments * make power iteration as one hyperparameter * deal with divided by zero problem * replace 0.00000001 with EPSILON * refactor the example * add README * address the feedback * refine the composing * fix the typo, delete the redundant piece of code and update the result image * update folder name to align with others * update image name * add the variable back * remove the redundant piece of code and fix typo --- example/README.md | 1 + example/gluon/sn_gan/README.md | 44 ++++++++ example/gluon/sn_gan/data.py | 42 +++++++ example/gluon/sn_gan/model.py | 138 +++++++++++++++++++++++ example/gluon/sn_gan/sn_gan_output.png | Bin 0 -> 404415 bytes example/gluon/sn_gan/train.py | 149 +++++++++++++++++++++++++ example/gluon/sn_gan/utils.py | 49 ++++++++ 7 files changed, 423 insertions(+) create mode 100644 example/gluon/sn_gan/README.md create mode 100644 example/gluon/sn_gan/data.py create mode 100644 example/gluon/sn_gan/model.py create mode 100644 example/gluon/sn_gan/sn_gan_output.png create mode 100644 example/gluon/sn_gan/train.py create mode 100644 example/gluon/sn_gan/utils.py diff --git a/example/README.md b/example/README.md index 6b9a086ff5e1..2123104a1487 100644 --- a/example/README.md +++ b/example/README.md @@ -95,6 +95,7 @@ If your tutorial depends on specific packages, simply add them to this provision * [Gluon Examples](gluon) - several examples using the Gluon API * [Style Transfer](gluon/style_transfer) - a style transfer example using gluon * [Word Language Model](gluon/word_language_model) - an example that trains a multi-layer RNN on the Penn Treebank language modeling benchmark + * [SN-GAN](gluon/sn-gan) - an example that utilizes spectral normalization to train GAN(Generative adversarial network) using Gluon API * [Image Classification with R](image-classification) - image classification on MNIST,CIFAR,ImageNet-1k,ImageNet-Full, with multiple GPU and distributed training. * [Kaggle 1st national data science bowl](kaggle-ndsb1) - a MXnet example for Kaggle Nation Data Science Bowl 1 * [Kaggle 2nd national data science bowl](kaggle-ndsb2) - a tutorial for Kaggle Second Nation Data Science Bowl diff --git a/example/gluon/sn_gan/README.md b/example/gluon/sn_gan/README.md new file mode 100644 index 000000000000..5b2a750e4efb --- /dev/null +++ b/example/gluon/sn_gan/README.md @@ -0,0 +1,44 @@ +# Spectral Normalization GAN + +This example implements [Spectral Normalization for Generative Adversarial Networks](https://arxiv.org/abs/1802.05957) based on [CIFAR10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset. + +## Usage + +Example runs and the results: + +```python +python train.py --use-gpu --data-path=data +``` + +* Note that the program would download the CIFAR10 for you + +`python train.py --help` gives the following arguments: + +```bash +optional arguments: + -h, --help show this help message and exit + --data-path DATA_PATH + path of data. 
+ --batch-size BATCH_SIZE + training batch size. default is 64. + --epochs EPOCHS number of training epochs. default is 100. + --lr LR learning rate. default is 0.0001. + --lr-beta LR_BETA learning rate for the beta in margin based loss. + default is 0.5. + --use-gpu use gpu for training. + --clip_gr CLIP_GR Clip the gradient by projecting onto the box. default + is 10.0. + --z-dim Z_DIM dimension of the latent z vector. default is 100. +``` + +## Result + +![SN-GAN](sn_gan_output.png) + +## Learned Spectral Normalization + +![alt text](https://github.com/taki0112/Spectral_Normalization-Tensorflow/blob/master/assests/sn.png) + +## Reference + +[Simple Tensorflow Implementation](https://github.com/taki0112/Spectral_Normalization-Tensorflow) \ No newline at end of file diff --git a/example/gluon/sn_gan/data.py b/example/gluon/sn_gan/data.py new file mode 100644 index 000000000000..333125dbe9fe --- /dev/null +++ b/example/gluon/sn_gan/data.py @@ -0,0 +1,42 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# This example is inspired by https://github.com/jason71995/Keras-GAN-Library, +# https://github.com/kazizzad/DCGAN-Gluon-MxNet/blob/master/MxnetDCGAN.ipynb +# https://github.com/apache/incubator-mxnet/blob/master/example/gluon/dcgan.py + +import numpy as np + +import mxnet as mx +from mxnet import gluon +from mxnet.gluon.data.vision import CIFAR10 + +IMAGE_SIZE = 64 + +def transformer(data, label): + """ data preparation """ + data = mx.image.imresize(data, IMAGE_SIZE, IMAGE_SIZE) + data = mx.nd.transpose(data, (2, 0, 1)) + data = data.astype(np.float32) / 128.0 - 1 + return data, label + + +def get_training_data(batch_size): + """ helper function to get dataloader""" + return gluon.data.DataLoader( + CIFAR10(train=True, transform=transformer), + batch_size=batch_size, shuffle=True, last_batch='discard') diff --git a/example/gluon/sn_gan/model.py b/example/gluon/sn_gan/model.py new file mode 100644 index 000000000000..38f87ebddc8a --- /dev/null +++ b/example/gluon/sn_gan/model.py @@ -0,0 +1,138 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. + +# This example is inspired by https://github.com/jason71995/Keras-GAN-Library, +# https://github.com/kazizzad/DCGAN-Gluon-MxNet/blob/master/MxnetDCGAN.ipynb +# https://github.com/apache/incubator-mxnet/blob/master/example/gluon/dcgan.py + +import mxnet as mx +from mxnet import nd +from mxnet import gluon +from mxnet.gluon import Block + + +EPSILON = 1e-08 +POWER_ITERATION = 1 + +class SNConv2D(Block): + """ Customized Conv2D to feed the conv with the weight that we apply spectral normalization """ + + def __init__(self, num_filter, kernel_size, + strides, padding, in_channels, + ctx=mx.cpu(), iterations=1): + + super(SNConv2D, self).__init__() + + self.num_filter = num_filter + self.kernel_size = kernel_size + self.strides = strides + self.padding = padding + self.in_channels = in_channels + self.iterations = iterations + self.ctx = ctx + + with self.name_scope(): + # init the weight + self.weight = self.params.get('weight', shape=( + num_filter, in_channels, kernel_size, kernel_size)) + self.u = self.params.get( + 'u', init=mx.init.Normal(), shape=(1, num_filter)) + + def _spectral_norm(self): + """ spectral normalization """ + w = self.params.get('weight').data(self.ctx) + w_mat = nd.reshape(w, [w.shape[0], -1]) + + _u = self.u.data(self.ctx) + _v = None + + for _ in range(POWER_ITERATION): + _v = nd.L2Normalization(nd.dot(_u, w_mat)) + _u = nd.L2Normalization(nd.dot(_v, w_mat.T)) + + sigma = nd.sum(nd.dot(_u, w_mat) * _v) + if sigma == 0.: + sigma = EPSILON + + self.params.setattr('u', _u) + + return w / sigma + + def forward(self, x): + # x shape is batch_size x in_channels x height x width + return nd.Convolution( + data=x, + weight=self._spectral_norm(), + kernel=(self.kernel_size, self.kernel_size), + pad=(self.padding, self.padding), + stride=(self.strides, self.strides), + num_filter=self.num_filter, + no_bias=True + ) + + +def get_generator(): + """ construct and return generator """ + g_net = gluon.nn.Sequential() + with g_net.name_scope(): + + g_net.add(gluon.nn.Conv2DTranspose( + channels=512, kernel_size=4, strides=1, padding=0, use_bias=False)) + g_net.add(gluon.nn.BatchNorm()) + g_net.add(gluon.nn.LeakyReLU(0.2)) + + g_net.add(gluon.nn.Conv2DTranspose( + channels=256, kernel_size=4, strides=2, padding=1, use_bias=False)) + g_net.add(gluon.nn.BatchNorm()) + g_net.add(gluon.nn.LeakyReLU(0.2)) + + g_net.add(gluon.nn.Conv2DTranspose( + channels=128, kernel_size=4, strides=2, padding=1, use_bias=False)) + g_net.add(gluon.nn.BatchNorm()) + g_net.add(gluon.nn.LeakyReLU(0.2)) + + g_net.add(gluon.nn.Conv2DTranspose( + channels=64, kernel_size=4, strides=2, padding=1, use_bias=False)) + g_net.add(gluon.nn.BatchNorm()) + g_net.add(gluon.nn.LeakyReLU(0.2)) + + g_net.add(gluon.nn.Conv2DTranspose(channels=3, kernel_size=4, strides=2, padding=1, use_bias=False)) + g_net.add(gluon.nn.Activation('tanh')) + + return g_net + + +def get_descriptor(ctx): + """ construct and return descriptor """ + d_net = gluon.nn.Sequential() + with d_net.name_scope(): + + d_net.add(SNConv2D(num_filter=64, kernel_size=4, strides=2, padding=1, in_channels=3, ctx=ctx)) + d_net.add(gluon.nn.LeakyReLU(0.2)) + + d_net.add(SNConv2D(num_filter=128, kernel_size=4, strides=2, padding=1, in_channels=64, ctx=ctx)) + d_net.add(gluon.nn.LeakyReLU(0.2)) + + d_net.add(SNConv2D(num_filter=256, kernel_size=4, strides=2, padding=1, in_channels=128, ctx=ctx)) + d_net.add(gluon.nn.LeakyReLU(0.2)) + + 
d_net.add(SNConv2D(num_filter=512, kernel_size=4, strides=2, padding=1, in_channels=256, ctx=ctx)) + d_net.add(gluon.nn.LeakyReLU(0.2)) + + d_net.add(SNConv2D(num_filter=1, kernel_size=4, strides=1, padding=0, in_channels=512, ctx=ctx)) + + return d_net diff --git a/example/gluon/sn_gan/sn_gan_output.png b/example/gluon/sn_gan/sn_gan_output.png new file mode 100644 index 0000000000000000000000000000000000000000..428c333150234cc5e586bf0f8dd43023bc9ae092 GIT binary patch literal 404415 zcmYIPXEb<+~3Y~o%_Cj>q#~?dO?4i{Wb{+3B8`Kwh0Ld8Pos1)MPgm7W*%o zB%~xHdfFPMAwRb-`_L|n?V{9ak{dSRFCrk8fQ{weOHR?Yly~ovD98rXV$x=*28a3w zhRB&k9@_9VtLx_JvWm_yQFD=#^Q4*uYZ0TC0*nCoJF1ap(NiJ5d_O9I(&SLZCC~JO>XOh(X^w5+;3Z=zlw*FN(RX~r9p09;)qFdeS zHhNF7!0UCw(Imz%x@)I*r=Wmi@R=h|rth9!1$X~~>qw%ud{$B%#jR=18M0q4cgdjM z&JPAmswrPr{};XA_s^d6Z{L~Pg*~%Mx~VOc7IS=Yl}o(bTUoi5g4IYR4PQ>eu~?tA zs~rNY@A3>DeSEaz1ILQDx3+w!<4<*FHb08^eYz2LMK~Sp z!ygV&>#Ecv^z+jQ6GHc#m#*+f1hwlkY~S$_R!#W$cyDi|?|L88cNJ+{lWe_yeG>g| z<hfVL~0aw@YYftiXZ@)5yip%9whCY9d zKTE$7zh;cN9=#@93u+bde}%4^e)a@URs{vWj(y|mX7ZrWv_r#F!PL#ke1IkaGJ6@u zUe?6amEr=|r)qm+Vn8)cza^B1psDTpAj@2OoBr11!x(CL0p!EV&MyxwuMe-cqS^YR ziG9avMM3V~1!=5~sV%{sTY0j6aKoX>>qy(#ovbGlA?2x#z~z;b%j*-h^SdM$cdwGJZ9lox z0$J$rct_IjrJiggQo8Jq#Gv(@8>g4&qbt`}n6>@2yrgQEz&tv*!;VWB>M=J&Cf61wRRDd4*cf#H{*hYbITd{wQ4p! zd~_3f!^)K%By{=eny7f%tb}OFmw0V}LM|zDjWOXK0YT>LRR*7!WhR?sKJfDB-eF}V z|FkqvPA7u!psyD@+gPu0D09txt=+{a`Z)W8aeHBP4#ZV%x=N@X2{p}isT|POvA!1q zg%vM@V%`SGrb+ex$s+U2&dByRhtpMWv&IV{W&Vg&>AE}9@ws>dI_oK7o9;N7#Ww~^ zG7eJ8H8jFTOt}D`J>Gnl_$rwF!X4S^&dFTkODX5TqGj~Xl&98{Z9J0$p;7U{QFox< zjLlWxXhADqgT4TO6UwAWcJ#lllt=6FZd-<>yTnqelv@oW*)(d1nU%Y>LT z`+B*#A){*)!S}$tVvwUi|3Esf$bM>u5Iq$DU%~aZ^Nzw+~4H8JV zfz-Jj(mHzM0dfFvBxgxbhE#VbV~%gp|0y5&>}~xy!f)Ca`h3vlt3{Hpv14H6xYduI zgM9HE>W{71zj-UvwXZ_K4J7@)MakM{79aY0(WQ#D1_?u+=?cq_tHGNaEVr4%##(iIfP8J3bd=$60nPj||qOvCPLP`}kJ z9mk+8nO+mj1(UP1A5%Q0oNhFbDyaQ9Mumb-AobmLJ)e7wmkXjOJ0Ct^S^IQVMDNqhw7wq)fj6ls!~!d2cevmlVv+0P5^zibBGw|Mim6v`wm z%eV!SHk=suyyx#Zh+rmh3m9w4k40@CirHZjJ&Gi`(8PR9Xw=2Y{@Qu^=$ho<+5A>w zB^gOgf!_ydqJojEQu!pnP|IDOLJx|0=)^aks2^b0vba-m-x(mmU#w9Kb=7(@;e%QG)aYqQgv}@f z^C$8@V)tds9``$m&C|k2)FPN`=(oL3#n32gFa2H={`uT*7c8GsKz^j;3Va8Dhqb;5 z@>hQmu$6c&IFU_)y%7N@AR7i`Oc2p*;GySWRTS!Rvl;2KXn>F4re=mWfP`PN)QGmh zmR`V^UxK^s77v6@D4)5~Ctg;qZ^fvEgT7(Cb5=$#;*#1q#YoOdqK!hOJ|FGx_ejG=N!L1dXf;W$DS6e~2qo!f^_IyrUSD)ko>nORrh8k_Xhx0>SpirBr*{(y94 zMtoy<_2=q#|HPS)$q~Doq&HsUFn@G9VXw4x@&dfuDV-xN`}1R(tFyKbJS6`kHh9`6 zg@9QpG9d{ue0g6?daK`?p7fD9x9#~K7RIqA`J#TMsKj~W(A>A%pH<3*H%mm zp?J$9!B@GhgjOuUfbIH;7ag2j#E^gBERT}C|9}+h|8`7&XWz||?CzAuogM~7a~$vV z4rJJx+s$o&QJSqXgxGkUZPf?KaHG}aYopo0N;#c*(G)o%!uMS9% zeSMuZfVVc54wq2M6j;l|NH5bUja`%oc-fXvp(hO%6o?ryk$%ay6#rYIjf0^ubf1#jj3+ux|xmcGQ5J2EL4W(XzRc zjz=?@UPW5lPdZd3eHKWxD;S-o^=6Xl)Lvj0Wd|}#o#3e^1f`}SAP0v}Ffk^*&@fIr zK#t$sfWAtqenS!C@oZX$c}CG-LQ#}1t zs0|3cPsb>!JeX8k*lG$j^Xs^qVkS(R=~muiRD|9*yx0r{2Pa^@>3p7;b6bKh@V0Zm z=beIprwVI=Qx*{|p%vrX0W#gweEm%KU9{!}2QB*ZS#BdLThF}hrktipWg%G*Sk(uM z7#0LTFYoXz6T*u_WRZ2360WY#ubcH4HSf5z+1a~u8^I*;HaeZqjrf3fmY^o5my(qMt4^t1-B%g8G`F!|FJocfq_Q8B0p8~_hX>t z4-cI8Cm}{C6LK!5UR{HKEUTQFn^i=f>VeD~2ijO^ggOJ|ZI$P2oy`!cS$guU0}M63 zM^#qVQAmNFN%=Ny^8H8j)Z1Haq~zMCZeP(wf7*RiB|u%H)FdFh%vr1zN$aZxN_@&Z z6VJ&YFh9mS!xb1GL4Qx8>H7LJns3(gUcJjFKH1l<+{T$@&sptLvR-OGa9Ec3?$S2@ zD3&7CSK#7M8=PC21xp*V_cDv))%ehIh_$$v!d-{&Mtt6;{ypbLsFo{pT~_G|Equk4 z%m(^H(jqgHc}(!9FxYY9E6l31@1H~5nf;__;14%DN64u23cj&DS$Yo|a@D5Ad|aHa~h_|4}Rrcz0!pjJdI?Ug@w&kD(E>a3N=Rqe*l;w#w-HM8B!r3eZ`6UFHCjr|6t zw^VXH?9W*iSW^4jACuh%S9c;gZo17BJ@~KuA$G%*KcM*;KXHQUf#98Nk1!QvD-@tu z4-9G)&zSy%yhotTn)3*x-84w4mF7+bWm>PPFTZ~J&kD-R4vvH3 
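For orientation, below is a minimal sketch of how the modules introduced above (data.py and model.py) might be wired together for a single forward pass. It is not the example's actual train.py (the training loop ships as example/gluon/sn_gan/train.py in this same patch, per the diffstat); the import paths, context, batch size, and latent dimension here are assumptions taken from the README defaults.

```python
# Hypothetical driver for the sn_gan modules shown above; not the patch's train.py.
import mxnet as mx
from mxnet import nd

from data import get_training_data      # assumes the sn_gan directory is on the path
from model import get_generator, get_descriptor

Z_DIM = 100        # matches the --z-dim default documented in the README
BATCH_SIZE = 64    # matches the --batch-size default
ctx = mx.cpu()     # or mx.gpu(0) when training with --use-gpu

g_net = get_generator()
d_net = get_descriptor(ctx)
g_net.initialize(mx.init.Normal(0.02), ctx=ctx)
d_net.initialize(mx.init.Normal(0.02), ctx=ctx)

# One real batch (downloads CIFAR-10 on first use) and one generated batch.
real, _ = next(iter(get_training_data(BATCH_SIZE)))
noise = nd.random.normal(shape=(BATCH_SIZE, Z_DIM, 1, 1), ctx=ctx)
fake = g_net(noise)                              # (64, 3, 64, 64)

# The spectrally normalized discriminator scores both batches.
real_score = d_net(real.as_in_context(ctx))      # (64, 1, 1, 1)
fake_score = d_net(fake)
print(real_score.shape, fake_score.shape)
```

Each call into d_net runs the power-iteration step inside SNConv2D._spectral_norm, so the convolution weight it uses has already been divided by its estimated largest singular value.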
zvM4q^o5HC3K;@W24|HN^5R@kA7^D}xF#udtVQ8?`F^(&lm0>lcp&)3W@D4#|Z~KxQ zIK@TGMvV+@+b@ai>;Mh!-MnORe8(}-#ujzK-UAYXi-RX^{~+!5x+{!$&N}gUmB$GQ zjIz|#0#iDvZ}io!jnB_F)}Un()41gBj4h4;cme=A0$uD2ID4B5nfiL**lLd%#aSE@ z5hKJscS8({7v#yapw7&YWdkk zzmmb|-ilov5G6R--?H-RnI7Yy;}i|N-om47hHy+|aafUpxOxvzsmY)6A0nPEN-4C|#%n3${^W}4o{%i@8u|MB)?xQjsmC-y#pRQA zKjbt%?CZAJBeNu67Gf+Bk)Yj$&8YT=OfSd$mg$^6-;v)8L+{j}K&e-wpJ6cc=f zG@YIaQrS{jf|-0Wb{7pqw9V_qV>H(dgIZtZx~oIjDTTzqS&SO|rbf6$25v+%0IrQJ z2;V)%`+x+NExRFH1%n(y9M0?y`2M8oCA>x%pe*7Qg~YeBju?0-E2YHR+l=d>+Ic$@ zp-bT+_l<}oPimpMvX`r6%Pkb$H$&CI>Vtw1cP4-1lwqrt7lx}8Jr|P z^7hs@^0!#W$SK>w$mO+7Ig^6J|2kwe!{^Kigla^DxQA~~yOTS_oK5?q@b8-%x(?(ZIx23xyCIxX| zq>hM(Ky^ld01Gp%?1NfN=M)sTUTwD2Dn6R{)gS7x?A!jefChv)PS|CVHuXT}A9(ft zyaCgIa+4kx-<$cQfDq=r;={8IVxRr7K-HTRj0U2p_YNOi z%UL6631Vu>vf~YUe`QVa3#Wbo63&%0 zCVw&V#q`wDb@bKs<3##WBHa4R~TS$E0TO{(&%4V?^(EbW_Tkul*$o zosV{E@xhIl2m>c4ZBiWi*QUnwHd%`x&AHMpdiP5=vW}}>E~lL{cM@rleg#daL)r3W zo429t`^YLuC5+;W03y(&bdW#F$DOC-y5#KNH({P(p(}++l~#YybZI$Mytps4hHaQ5 zPf$y~^{lh=uPRUGv$hM^qCyA@#m|fDtAd|roo8P7b%nx1`O~YN1{~&u@b}VkX5(+f zQfX0&`{LV<{R)!9oM8I|&u>1&460!pPu+I8v&qloRE-*F77GVs7HVPR$TilBDo0h~ zKAU1tEA3W-cn^fUBxTE^Y9a?(O*Pd8xy{h!2)EpGj7u-+!X%NKfyUhf{DAp<^6h;6 z*1dc2b$f|qZaEGI8i2LJ;f}kk!26Shi9!ZQaLqeq!wz%$3l=phwO=GGCW0;>B0WCQ=pbKX4FP8&% zeW<#YerwibU}ev|c=Af_2=GaCxpEq~|Ho&Fu^X~V!2;~cx|eX`%!~o`lRdxXyHvkbo}J4M41Bl}F6nkX zEo!=_ziy&f?k=q-C&W>d9WaWWtU>|X_=cv+CDo=Zs#dY}RuYyE5A%vmYGQxmNftV4 z38;LJLg@*s?u`E#GKpJ#Xv3oA9F0iLE$?caw#~wVA=c}^RYYJeX-H76DX&7<`AF-? zOR$rgVivW%4ry99O{$9p5X_eYVB7}s#&@Ap2ncmG9T(f=ZY79<95YDH1XeoKqNCs- zq-CT|D%cxhtm)PD)+c=9*!!~o!}HG_J|XLiuVl`{UWv`f%}^N|*PJ&sCcQm+bC3D~ zVEH04l_+yK2PKMDxH}eNXbz5|E>4!8|K#}zd;r(Z{yO~jU6|_hm}BHMcI*vzrl;i_ zloYK)mJk>WNFG-<&QbmzRd5!`yj)4w)Ixb^E> z*6v|CbN#sSoQKYO37JU^H9@erX|}JvmZYY~J@cF8;;lg47vm3WG~ea#V-U@lx(6*Q z3%4PX*r^x$eYO3t&Ebi!dKI#=*)@G!_2Z7Q3(a$R1Jc?3~(e55sm1nk> z==K%v@!@K%#etRX6l+Z-7d11fTXN(ou~jXN8RkJ#Be7%;V)JxuLBO&elT}nkH1$)q zZS8l4a{qqaxycG)F<$)e_hV8uR<|R>*ziHDPebXT%IM0*h< z4eyQU^KU%YzwZ!#X}`}ft|ru}Ig1Fz3y?4kI24;t$|@E6X0hwH5|Dvk@*D#;YxJL{ zZ$w>8B_84bfDqbdFHz*x>M8ElNj(ZeV}f6+9x%*V3CV;}K3#|vr;^8y{&3ugpldeN zHC4>tW)ha!v>!b&b;e_kTkNHV$Wq!#BU+oU|9I(;i$~S67Z)3S{)M4XA)u#Myn+rNzv~Cc$+1 zBQ0=<+Z$c!xXw#U|F1I*t8B^>{9@%={!$oS(S*#ZSv9#M7hATd#$_qmQ9W;D`K^rp zZ|oQ=^W-Q?ow7E|Yya$Gcd{~(g!)mTr94|nQ!~G0`y+quX921+#1i$qG@$r*6xYwQ zlFv2(lfU9BF(N@nZOmCc1BZ00goUFKMXCFU>U^gZJtUs0_U&|H zL0L8$<m|-2NhMs-}lMv7q4XjrYT+6j?w)>c0s;Zc&+o#(zy2;GZCcQ~0wz z??Q1-EwH!)&<|u!;k|+RY3?Qc#hpzD2cL)}D&qwa%ZW*gZvL6#I-`=* z+Y0SFV+)6og&23q3lcFK@D_1Ar=kx#nvDM5=bFXk#J;AK1b{+lHR*QX4f1p}K{a*7 zn9Z9Hi#(osId>=Wc^PF=F-0%5gk#QnTx4!;>}A$ndJb8#h~eSzS|HA#mElCq+@8XY z@xIEM0W`j7?=!E>DEeIH&@ueQ_pP&^L>ko*Pp7)~zs0OxnozhjANCX67HpRegq2Ar zO-${xuzu}-@7w9+nF+K9laP{-=xM1#F8&|xp(IJ{8MIjl>i3>+T|7;`O80T*ze6IU z4bv>HD}>vGlv|qG{ZSIL_++#0?D_mhVxu<67o*e`tEt_a>1r#;2#INPQppAPR+WA| z!3qhH(MMquIOF+d9CwJhM(-q2^(ig9q8bdXza112?Z^GZLW5X{5ywnF&d%`RAv4RR zN;6^8-J$cp8|gn){Rb6meMhu0U!JT@r}x(5wmnytAO1QI*GkwwfYfVz@z3K({|jse z_Tt@C#rtupP6{%9iS1s)o<@N@@RB)tN+hQ4->hESg3`rFapRNe)&N`Hd*`tQ_{lJS zXL1*RzTcP2EI4fpCw%9LM=L@3?sdf7*F_=EYn+|u`pi_jX`UJ=i3_b5A-kLP6vT>4 z78>Uj?|&lTU`@|^7=u&%OX)qV25}-1KnO`U_TAq41?#ET)*07L@RpCyWmM~hw*Yn| zbD8j1mHX!3<@xfMfCmN?0g(ftp0miX>09`sSt5s5I(_U;BfA&OhMpj~?HT<8gY=3b zr(HdaeTPrvOfKLv8I0sFdw^)z01MpYM2)U5_lkL>LKKFBDN^#%ucpK$+xo9 zmi$YdVDH)`H6y$N2jrr^y(e3aC$k!4A2d(r_*B0*5CZt)@fksx z%HZ7az~Wr*7AYC)6G++lyW``CtHP5>ukZ4wM|g*b=%b?9CH5B9;VI4^)3FI5xBQ(- zDD3gS5-&qW^Ti z*NW%W$yCc;)SFFKJiBk5OzT0;_`7f+t)=nSYTAV9%b&gU?H?*IIu`E~I~X0iidd=C z)wJ6EBh|%SeG`+vg500-z&}fL7JxAs2u4SIhms4l=hG2UV(_!`B9iAtp=7G(tCNVg 
zs;b;GyGkw%Z*}U7XNNKgG&{B?)yLt8p>Mf!NA1l<2obe{2Sy?q>RRaJ|& zszqt(;;(A&tpim`Q88=p5qs~dRVy~JRZ%g5RAQ?cBZ)mCLRE>1QDQd6a~_|(^&;b( zbKk%F`d;6S^>x1zd4M35rmU+rJ$+T`a7d!O&y12I2j&BBV7xug?4#0DHRZWAo2oQqZL5H7^i_T|?AdneP32?yFRqmu^ z!`YWV_OCU|l~h47U<8=QKaMlqm{87}p|4NEA_F>vCQhq`J!E%%R*eTuH)o`#TSy|-w^ z4ER&RgtqCqGfW?z{4#A%N=!{kq=Hql@fa zMmJxZeqEe;dg0pYMN@*Fcnj8BY(3-lDSjk)>nfu+p}(%iUOZTK#*le3v9`(-ot9E& z7&;pp-lVcCgm2Hrwws{1xwe^-;Ccf6v z*MlO8jEgh+ln{738t3+k%1Ye~3lFbAALrKpp)Q`C$yso7L-J$k=YO605pH&X1=^y4 z$@^hvsPbr0kqqz)rw40-S5P*>7ttZmc2fiCsaY{|5wC0b%gu;7)_Be z_Z?82Tk`$m_RZJv`Sg?5bN-{ougNGs@%K`1Hl;fkL5JhtUXKo1k z;=&L?mz`~$S%MXM`kmEvk|Y1+kXY4@%1PV#3Ggj;wyUzgpv!Ep-Zu0~;S0I@Aw)Ml z#o6v$tmw!j`r<|0t?wef1MBP^^xK+^#7EJ!H1+DL1tWCka_{~v|3GLMQ>UHDunmNU z_^GYpU80*!o|JZHLHwn$sS1FnmAL5wA!o?DQ`fesfm_?$By!ZX_ORP5Q2u~5az2hy zS5VCoSjWaAc2%%a<738H$e;VV78Aka9xuc@7fN07ES@ADNV)!pl=kABwdMk8hOgA_ zXE{oltz8bJ$)930Vly&+WO@C@aHNUZl-|@#eklrC&!4XRY>_I54v=-F()s|U}?0$YAvOfr^nwYF&ajr4dJ5{iG z!0PSie;GO=mrOa9gA~?6lePq8DP$LB`!{9>lh0OT)1a@u-pK8bLs3uN!+|*ofe;D- zv_bTUt{5o6nz!o3We|=&tsWSmAh%IWQabnZI^7An(MO&gTvij(i^$o#Ye*3wxqzxC z8ogKXJ%km!s=t;FT+SBXtnkOB)O z3I2~CvEoTICP(XcDoMvC+{*Ecu@j8bu}!<%U8i-#9Ilt zo)jqg;`M1~`kJQeLv)EA2?#>Rt64ry}4dXJ{u#c0*GSB1u%Qt+b%;J zE!U^fmAM5|X8#=poE)RMEl)PNx9Xt;e&-TG)<(z2JIW@EM&Bni@laBJ=RzpMvm4X7Z32j|$g?oN&=(E`< z$tel_k=qa{_nKRr&s&a_-2A9nKmn3x;mFnuh7Bxwfvv{-tigOYWXfUsU?hKp=jX;9 zW>J@fFW!qiuZ5MuYd%AJre&OJnyifC`e9If9XM(Oyu3x7J-iz1<9pDL?V7Eoy^6oX ziOY?&0*UL!49Su`^S_`&b9jgn4%5{e30m{czWhs_Eq257MpH27x|QNa#+QvhTNH-L zGCOvvUqlgC2b}2k(k}GGAPH;Ib!OFu(4?Wkbpuk?z9@;0yDMp7SpK!CaVE(8fJ3mV zl7#ru2p+4!zkPpqNEah;BpQsch+NPEN%DsrV3hrf`WVc&rY|)h5G>&32C`>q-YGC_fZ_tkiVKY zTlTr|8QD{@vKOddmvD0{ZKwc|eutC6`o8d)wimRFc=FTcmp;R9{;F1Z!c6phSv&MM z&9u5K-3mTvR!tq2tgW&cC4K3M85^{cFoNS^-ZY+FGt)aXF}tv9^OP;4c2lhGN4@}C+>l-DuyR@-R`O#`oqeO5 z&KFfkBBRkH?(oy&WjFvfWFiC@1uY()N6)3IfVEw6!jlT=p?2bB#^0u8TjN%LkHi8@ z-{Cd^^IZj0jKbB)+AC|pAW>vMbF=DfB=bn%DEnpcm z`GIBag0Z&B6I%f-_kY_<(uX?FMy?l-A{4Buyx+zqcDq)rG2xDBx-~eyuxe^C`im`e z5IfGpI=BMcR`D1WVO=lTOMnQ1jJ7z9^DbZXUkthW@4_l_5P46U&3V2TN-EK=$1>Uj zGT+W~)~zNlv%sf{ik5VrS_YoFiWNw*_;8?`YBR`>VsFbjjNM#aT{pB-hAexJqh|%^ z@SuI*5eUCsndvYn$07;OUi4(G?wNM0t&#{fFn9A3D)LG3c*tp8nv10Qz?OgdOXwvX z3q}z`?5@_RZ}51@n(l`QSC)wb}3#tcgrgil$)W^7y zs$vQ{M_*XCjDifyA6UEExDsYVIav?EA&cAYHm$7?)jQFn_(~-Q+%3zV`t6QmdJ=2K zyiKCwL7wXEHLK-S6x9tS&AEK$C$Ib#P0ESY$AiRvZv;wg%4Bn-k<&?=;GWX_dt9K< zN8Vg(@X*aps6NR!!RED`Su}24;EL7x2=cr^G+awIn~zCi6w}j=?vKeGU$?+*ILAq_ zq}@kMX?B0#tZ(J~2@q^9!EcP7)=Xjo@3Qx}`!BxM5u3%-<`R8VbtitT!V8Q8%waD~ zYyT@|iwWgs780!T@t0bWGcT1lREXmsKHOfC?|IYdG$R2idT$a=s z))97ca=a6$gm_1=PI#7Yai_RC25fuU0yc`0q>g`0X=krai1i5_>j8`Q;DDo^!tk?W zQc?KX{_#=HlnMiDoq>f$kzK8u?NcGy+304|r2NaSG7T{P!NR^7@`21qwMNv~O`jBo z1HJd{bn-)bGqNLS!0`;Ej36T?E#UcDe{;^32Ps6YhN)-9H)eOkQrrE!Xf(y>ohy9b z{Wr<3pB}vN(I<|$HuPqQ)AWIi+qZ!U9Va2W5JDx)cp!0!juUx85~=@Vv*Eto`R8O{ zW@M<8Yl_UFdP%M$h;xKHT#_1chPO%F&f}Dy+LZO%+zvRGcePY*^-@wEp~#Xn&BTJw z4^W}=A>sZs-&`-JPDWE3n5v4TN9hU{L!J(?i}4eSq+?x?M4FMig$#!HsdS9 zRy_;hzh@VgRwDrJfeZ(z6C2%gdtk`OyIb`ffM@IrO`9+_mv$8(6ouCRy4VS?B(JAq&(69M( z8U_XEr^5GR`Kdc{BZ1a&e$3Cmtqb>(R(~TSu%Ui#I285h;9wKXExp*b@_+;_I^B4T zVt$NC(s3Xe505i}0S@6Y@^XhHDvo(?j8V||T_vJ4aB=I>LH=t4d@9ru3bRlUp-S>s zSvZh5DAf_m^oBQ;FeHpORGP9s{K~ss3K!U|9|2@8hRlZo5e(N?{VaX1A!DryYw(iX{I`tsAzN&qZ9CTkn}! 
z`)-FBEGB*GV7u)%YTMtPoi#K(#rB_GK{m*6)Ln17-(i>pp~U-qT*Tvr@ks=D=W_c*;faZ zU$T6YrZqNK9f2b`gq?((9_0kumnUK8^5fOgV#@J+`|Q#fi}bZJHsc}*Xx_vc-Ay;4 zS!8X_kP;Uh=%)gn5<8Tg04{iQz-_ulxmogUVV_T`&B71lsOwN&>#kmB4oe=%(Wk(s zVn3uE81Dw=2sYmng+M`QvaxYN#n&N)5ga$!JgOmCFWrQ3A1rq<;3HohUWpQ^WoRY(>t&b6Rdbgh3 zTOU^dZ5S@?uM3g80Ux&~7aA&#LRndTzic=SzmBD%OyAvuhv6}}I#0qD0lCS%T-Bi9 zz$Q2+?oEho7M3UtstL^nmwQdnb5i>>{TKo>bbS1!QCl%i2}>`2Z>||bD&|^r%4Dl9 zx?}W68<)NPy}4I)oikzl4hA6qyx>z5vHv(|5Obk<&H&z-`Yw#=Q^hYIaw2Rd#FvyCxuH3_|!K(oO`arcqXs>;84z13j~ zMIg1|dT<2J+y1v*@8q@M?>}Js(`kr=Rqg6{hHP+qb}&BVj|bbFa{*#%^qQ|g<=t-V z%TBsjnxxDxN&Rmes*rW5BqeJjcZr^Q(fK<|->;RvUD{GZ-JFxv_ny0OJ25nY+Of^) zPkv!y+6dd@C!46N>o(TDvp4uAtp1-liLx9RqG0j5km=}oFbgQxg%fP^4HsAQFA;8g zNDerdTCTW@XMK#hM>C_?llVgvf+msS%I8Njz-4lB;%QqO3A5sP0#$tBFd<;iwo>bQ z{A-{q@%jFY>715ZV%Dsg@4@nM0D$wK2M{nU>$v3*MWw=#8F7fss;;-e6b5HZ+7Krx zRCFYR!1+-RDh9;-*|BCWB8oKjRlKN4md{(D+qYm|_a8E*$+_~a@$O^3hjr!jRr3A2 z=l27GgnZg#2aE@c9ZOPfZXkv$UBrv;2^)P>XYX|5`NqZ0a4L59__qW(kSqNFly>y; zc1uWvu0v;wWJNDg_VGr?4dZ+?#j~xDo13)kaDPOBYebGjZfig9fDjJPfeNo>^ZH|{ zg+*YW%JrE-fpbGxAg{MYo0t{}(qpX{EIvvMs8FB)e~|Fg;yxZ??( z1%(9`!bi|(v?p%4@#~q`I)x5AMZW!abfORz zro?sQn;$`KgUgM6rSlDIXDD@g*KHVK4rl80JKO8c3Huk zbSy`z-YCzLZ#p$x?DuTTV!wC`+Vq^_2o=U8)_@4k%6S(*$Z>MOn5Hdi+}x*!>IhCA z78cSbKa6Y;@-ZRr2jrSwDeSL*rY#Vb#Ggu$C#7JUYzvtwL#MX3;_+~b}+!&5V%8b$k zR&we6jfi)tX>r!n)VHF4c)x1QCM|K??%so`frJ^IapCa&h~@xs_|@tNxzMVP3A!P{ zHu?E^5w#}Dqak_qfSj&Bt~zh3x+BCph>M+_#0CHPmz*qS;$&YR?%cJymu!Tq(Rm-I zv!+PZuLAQ)WM5dC-KUK1z76|l40OY=sXHQzzlJSNm*F>Z=3QTe(LFB}{_k26pQ-HdXft?JhY4cNP%c9Zfe@uZ!g z(36w;?Mt;XrvZvZ@c(b#(aV{c1^D(^r?sz+c*XQ14ZW2hz3{2v%cjyi4ANZ+tQnOv z?E!x-bMX4qgd*wnv+qXuNQZ!%Qv;C|?og-*@^IGFV991aoGRZ7pbWEg-E*<6!!JX% zMO%$N_Bu*)bD>WCI`@yUyOwr0aqo96{FE=XZECxbPabhd%)1{FHc`QC?K87D(j_RLrRY%mkI5U6z}Y;U zM1m*-I)CN!e?aGjTASh-$tFD98&%K5P_I|;{m+<^E2qkG$bBFI;j}5Z(7B^&Iqh;t zjQXH8DlLuQs-<5zm^B=}VQXkn>~D%U4~b5=YE+dy%8HDwXzekvyTDQZMef?0b@G6d z_7%B&>>u{5_o0Qy`KO+KJZ`4Nt@d0HgnhG#0_|&v{=g<0&YD zArjV7_QZ;dYc*l38FM|;?0)?dh0^mpqC3}r7{gb?M9;xiEZ+-{?Jvvge=M(}{=wK% z7x-@>L&WY+3rO9aWVRfpkr-3SXwd8t{$qkp3quTGKYPYs0uXTrsO-=V_$OO z+Z#SswK9GX)~&o)sZaMR4QKoZYhezgva(U2yw<=8w$j@Va?4;@9LABLOD$n+iw9Qx zxG$2>)+Wvv?-AtcilWoS>Jn2+D`-!0|3&_pGZK`rd`{~*0;R^@Q zDv1(N@UG0`z_8@wcRzoVs|CJc3X=uCzg@f4=vf7M4OUPW?GAoDYT2}nv4H;#KO+}$ zb!-#ZdlXrdFb*%Csg4@ULY9}8f$~pEQTXXTnj#r|H*~f}tf%^V%jDM)))44}HdfVT z6KBme*YDNO$1FwNSZxxkxr-#d&yJ3>i^VHam6esbA&_NXjBzOF4Xga{V8jiXkDrD4 zrsC0BRA-ipj|BUL$L>eyz0qHZo{kP`9M4o6j+v87Uj>7w9BA(Ucgk1j7kvN7b3LDr zyKc-Tq%}NzJaqo-Z1Y%Y!><)6gIBG~1eqbCo!`f4FaPS>Aj~$mQx>jR+LErGa!%j3 zXO)UMz<&7IPuL`EpHmmz!p~y?;OR%el&nR0-JF@5qAAcc`!`UjVVtomBGzj11HMrP z!96m$^ACMyT#u(R&33;=+3kBO{r~bn7o7uli!-=#_{lK|($;xMJ|1stCw4$rPtRUC zpzD}-{2&y<&Ka3QI43L1$N5b9(Vbx+REj)a4Plden|OpcDMhsz$}S ztVCE}RrNWFm{NLZUi5mj5f}JNlQk+^cA`g`*4hds26O4f#XmEf=Hp_V%iFJ?g}l8w zzV+ic2%B5RWz_)}464HWwzpi!C%s6YZ=vQp6qVIpeDxp0=%c%%iDahn*NvuEVguRO zwJF8#2Nmu%up}<^BD-X7n=8$|VpTd&JOJQP$CG>LKPpW$*ZfLEbvtZbBC|HTY-*xR znWPDaTAUb0ECP0(r+G3E^I0(41YF-nF)QxUG`Yp@ed&kP8$WzobM-SGre)PF^kw6eli zrA*dHiMGjM^V743(_C__o?xVJl*YD)6*R7kX+mcl$j-j8_wt0{~Pf9>W>sAz=9qTzxQ9{q49L1UDM-fTDe zI^FvRx>G4Sf0CV>g0(aEz5iKJ;L2nRIA#Cl!b0N8w$9X9R#6d&w0wNJb%-Ti%Dpv- zhYj%u`HIG1mUOlI{)TcFsI*k_j?KL5a*M70`Tj{ayZ!cp^i&OZ;E9vsubqHlG&Vi> zon#55@Hl91q-cMqGnupvcYjNGh`wu&o1`qmkB_s%&wM8-2Ri{o<&85_yw)Bb0Be-a zP9PBFvt!`#iY5ngxT8*MAPys(mzI!VbLRt1J0Z^UZR!7)@9k_(la4i{tr9uUH-$^{ zLPld7on#5~%}Mo}N{MWp2>8wOJ5#25b$wRoA;&ii%bRltiY$tV7J5GmvpThsJF9Bq zQ1t#KH33>GwE@li-o?ZrTNqOHUV92Db|A9+K`)wj=3D=_L!4F 
zia)}5b6n#reKSVi{o+IT<=u?9t*F7?r6g=HK`fg72O5h4d-Ra6E)u1C=*^YbBm!|`r^v7$1>L{?UzSKeG`v;~kZyOGZUUJ&IZ&pKr!M!q0RThJf^ zG5PO&fiM}~!U6(Vzp3HY2JN_Mfy&mFI_o3fj#b%`KXlR;CEoCTZ!i;^<*+Gfb?|ez z^W7a7WN+U@RLRK`uwyPS8?)5Nml#p1Hma}OqVRJOH^d7&ngdlnq}kIq)zPsjsG2#1 zh1U5YOEIr6Og9dt-BR8$II0-9Nxw7}uWQr%9)LLF2DFz*Jjl&To-zQmz z`0d`n$ketJ^(o2LRRwbgq>f6pj0QZ|CA6HD)T}Udfc=v??l<3JOH(0-qyu3Eqi<)V`C~0i zVPuB%A9rvkliR~%`wMxzS#i!cY{Um>J)!%NrnC!~`^VjeVu~cnn%Cd_0TzkmJn7y2 zBJ}YNoWp%&-r4z6M<{n)P(H%*1Q%j}2fnX;?trs`@ZIzM2GbgE>rW1UMvBg6!h5;H z@15`byHrb{?xB90N4x|MtVrnPWylQa-RSw@`Of)S(ovRh$jQKG9I2giDD2D_4#`3r zyJI@T6V?||gd-q=B_N3I%6|L7t$sKbmua~PG>rE;+mgJwGKc9J>2t}ReYbQjP_4i2 z?|vV(ez0fBCh}2j#4dl^jVa>e^VBQ8(KiQUY|RGn;?!>{$fEgqj+B}297YX2mh$`p zHJxn%lpIZ(ihhj2jO%!Y(Zd)3?!5I>Rtt8~KGJCZb`c`B5-S}4zER+1ov5Ek|LE)u z8)*9D?@SE02pwzVd5~gRr}iMEvz~}d0O)ajyyCOtT38{JQf+h9_&lN9i8nDS8`U*K z{Uzr8k-YRxSSwE1cjUP0xdbYI*8JAqI~)YAY+|Q znbbeDPOOc;(rS1mM~|6HNo5L8%Fo1`PR37OxNvr|!ve3SfXsX6z(Hb4=-4jaXacVjPx@$6lwq`ReggMjXkgCk%pc<(DGn=t&C+|w<*x)FL zRe#x@iCa@6Sx8C6jvdrW7M9>zBN~ zjO~x$zX07P?h4p40lJL}2msEIH?BORC?r$%?_Gq%i%y3YfP7po(cqm;lW5YJmDV<= z@9hc8*mK{=Fyf7Un`EG>44JWs3oi&GGwQqcY*CQWzfup@)!TOS&rhEHn9&Y72{{)0 zb2+H7!22X%;p}MUfCO}zZB#dJVg;A9h6}1I^Fx%DPwp)rfT;nChiCsKdk^Rj$a2Gs ziTD)bGW3T09yKpvyPgOkg*46>8a@E^3uqcoR&QjK1ICrG9E`$SeCX`0;p}Yu{YgTKaX>=z1`84=0j-h#Z3SJZ6x<>>RT6B^L6H8n!*- zxwO5{e-M% z*lWc=Zb@-|pmn=CUfrhMn?%l^y}Gu%{Am@=;6c##dYbpdmlV|LOX27A_Izln`ErgH z)RLry1Ls(QarFk&;znBUZ)+BpdS)D?K7-R~32`r(Dgs-DEedNU-uB@0eGUA5(jQhX zO*zV`2uUXp= z@|Yi9&a~2q{rxwxs{Vu!M(f!QXcAl@r?q+Fe1(Z= zAzJjPCF;51H(G}N96zd!CI89A9|GpK)_ns~H z$3}CS?PDnhPA)f(1g_vVaN*8D!N}fL9f9ySv=vC^f(e1#^{qxw_-l?6EY|{N;>CS^nPS>78v_&$679{sWI-k3*jQnw71wGBc{AfrAIXFz&t(C?K{doz( zDv-W@Jpj|bseocyGWzh) z8{?yz7FdEscdq2k4fC-F>*Ed24O5eR;=eKrBKlw#ulm!jS#5|NUP#)wP5kM0c+sKN zaWi;+!F}C5q{Bqvx|M3gn?d;YBmE5aj_q|l(?YY?hk{_N$jqSM^0zv`?E`Ebw=Okp zkAZ6E7~v2SP}t--xAzE1RK)b?zVyc#SL1lh9GNNV%U{KS&!vNg!eDD)=Hg|K<0+4E zwErBLJr`TEnS%QrR|Alq*1Btk3qQp;pr?_$1rEWj(8(2Cx>t1!WKwpGEvpziA0?I! 
zyPEy>xwLJ$)QxI|K@R;wV+iT)@`O8TWUy1IDEuVg^hiNj331O%41vFl5TNxfPI>Oh zt=`Du<^?fgxL7QCK~Noc;ofXWf`xH{;B;v?RQYR14$OsHby`&j9VsOnt3g+rPds%ZB0s1% zj?Jwpsr$E#sYOx$qJY=WFh}9m&w1$8xcWfXt>j`irNQ+;eG@X>HuJ|inmM#aOz~%r zqsyu5pi6p`ATxMTchDcA@v}4u(}MS2ncA=NH+d`#divQ3{w@TE#fBOjCDTlJGu)Z* z0wscUpyY3!j=R9LzqQO6)?yT}!)oYnWjU4kXvmP^p?ZJREml-n>88+teEp2n_0Fly zno}*Cn|t&jjlFqpKVZzZcK%~RpVLjC4l}=`5tt7=t1~nW5iNS#20o<^g|sqsz@S_# z2Iu>J3Fxpy9il=P^JHK$Y=7=e!}wp{{W*G5YLp2TO0X%WRu+zsazUj8+Djp2TAgw7 zw;J^#zu&PiGEN5BU+VOkk+{Z7eI)odclBGXz)uYhzS=&| zM8ajz%1jGyO#sV=a;i+sw$Jssyq^?1unx4C8W#I+P;mAeR^2hBOxBT@V3t$YSj&(e zGP`bMTlgsNrmn#kf5}5E@Mdlp`8gHBXAng|%hU{s6nJlq7tQ+XO;ntdieb*`+h)^Q zdrz2F+}6@keKd4hWANV0`d~`Bh1o}Nvm)$J~Eu9Wz9s{oT8Ivz|#_8>0cb zh0lZli*pjh8ML*yC4Wd@U9Jj=j4b~sbYtAfhJ<8>d7G#Rt|hCGd4J6ujz$<52qGdM z+AJ33ed<9gP88k|ii0ONfgljz1nJB3?mK=dL^SwKy zZgQV513A=l!5BM3;O4e$|2Sg3&|&|p*nmgf5PEj&0J$A8u@fSfOT>z~{0o}yo2jg; z1~U)Tv_yU}72p+~ebycyBRk0usNV3<1}w{7Kvr0+b7X8O|pj+wZ569wE*?yzbx+>|DX?bTN2KdD+LL*G9eeP$-#UD#P~ z$36Sttj6;6^~ZQSRAaPLWGWY~Un(Sl`LRAvsfH?A!7he3GAYZy6ufe$pNJ3RK3n#d zZ%xKo+jHR|>6wA{Wx3BZ5Pl344pvtS_g(NZ;Naw>Qk%e3b}l@Mh+5iqCy>3lmmt6} zJxEE(*Zy5e(2{Vpp@vsg7?9$u9T*`bBZ!kUHLSpu4=Q9iVjt4s@IJs*d}Ja+UaX& zJU$~TeSXM-)rl4Y5#&A>!!1qTREd@tz-N1=xFr!QC4~ql>{k6fe};w4PDY<=6d)|Q zNm$%G1Di_q{(-cb&k$uuZz)>vyYoJ+f9B0-j3*S;<%7s_0~qMtpS z#urS9i|SsfkF6}J5 zLkbBt{Vb(nM$QkF?SJ6`dO@F~<5*b!256U=1+c}GyjISp+nF5!54hB_OW0;LrdX4P zOPGDoyVinIv0z}Bi8?*o1RES`qAQmNxJL}njsg_9o_;k5sVi~Ll#oXWhFv@c_%5h> z4>As5{&@vsz$j|X;uf`I*rts->T1hZ?9FGtYTLb9@MlX6`G~^`bT9qWIEJVIb6~SyXyVJ`81yS#tm^72X@gk zH+IVaD~&9kH2l>cBR-37?{wUNwqEhX=KjD#lQ3M&&2d6p4KHqR3Nj?nxg<7N&SGV* ze44&@NycE#rrR8$|6OirllMQ_!3H;D)Q?7&tDFbh-AYK&?;Fkuv|nDUPerVHFHhg8uYQt1Ym9cH{&du{aZpq#s1ikDKOk$*Dd z6=V*Ttn*F3c&RDi@-43RSY`XX$@I-?`C1ntD}0OCsEXj>`w&x^8RZugal6K8^*VwJ zB>S#rK{nbI0-SVUE#!QP z%&Sq{~ay>}v z19()OlQ!DX?%@Y}QNP8|n?aWD9p(<<{iOUb?VUES*NzTs1`opyhEzU`rz|b0-9>0{n)FB$CGa;Zkk_O z{jKAT{c8WIrCK6K3v_5FJqX9X-Ddx#KuMm3AVf>EWdylowml^y^PAq(HHn+Wuo-G; z*AQ2U{bPY>EZ*e$o-oaArpMOYo{N^l>g^ekk$64foWU=jAK7)poKN`{q60f%&p>sO z?@RiuA>r-aV6e}F*RijAf6h)R_E(9usrk05;xe6`UV;=uBy;m4uR7Q$j)r<)#<(X6 zB014G`e<==i@BFDwu$EA2Dz5xE%El`*sGGLQk!t+z584avp?jHqQqH6ydN%bT89^S zO!(_MB&O$flz_fx(Y&(u)*NV(PneZz6xG`Xf?_Zosek0CXSrX8pio%^ z%b3W`=QDH++Z& zowOTGVT?C5-@K-G`yNeQX`IYlpA5P8P8r##r)2T&=2vN!f%F47@JrucJ|+RYf0z=R z5~<;vfLrt!-+z6LF0>{vr0>#AeV*dga4o$g@a=&ufttL2j@M?&qyM-+a0+}>qr(zU zT?*@-TSPJlidSG%Zr>NWxM$GXEVNb>-7K!&P`qMX>O|8n^mz5V$Gg9ql(QYVO$xqh zB+hG+@4d9%B>wy?O}Gf*rTyoX-6v<$^P673|DK@T=a8rFo#*Z&XedP1u71 zbEb3Udy>DqMI|U|rDN&;o4_)>NicRQSF5SsSIco~;$5=s?gl40*rPJWkRQd1>^nE; zCCR|y5^!hsu5I}$SfsJ9s>_!tSJlwa!fDuga}*XWBrOsb8B@X+iMRvXSGXAsEX)({ zF4e8=#T|H>Yz$?~o2}4f6RtFOPXJ;e@w{CotByr90R?~;RO*@3FE%vFDN)g#0Os7I zZyw9JW0RnlGKI)W)c}?G!07!=?W3yijA(w64xn_2_8)K&1d&=cuQ%eXRxRsz%uJlV zX{;CGNrB%|fr$OtcoX-Ic5i52;VyB2y`%BQ(SWb*6C+A?smDLR9QzH4oNQ^6K1VW5 zfmwxr&7O23hEiq{;THY91UxH>$7924l#8@ewedXwP*#+7n^`;6{>X; ztE0U|9lhAuVOW;P7w5a{SjXWM#L?^Nt@)TKR!}CQ6(+<> z4IJt>RB`vK=ah+bvzvNbrT9Coq*crawH5%Un*S?bp1|2YDpMM^`EMQtmX``Y^;ws> z<~HCeZCCR~xZCV<2g3wg$%tM;Sk#*sQ%;e+qK>e#;Q-VxI&M^S@an= z;eC7G1I)m!f(4!*Ed*?8WR*IE*k-pp{#FZGL|D0U5Wq6JQIZYafqLhIASm z81(G?XEbVsP#P0{N`_@Loqh%NDzPm~N5xVpc&+k<5)sqo9FHjBvL+YPT zK4Rf}u5wGl`I@-AH+UEoiUP8`o$X1~)$|J~j_CO<(I`1FCu}C|dm8 zFLG)L>1+?qMm4Nu-tyreTy>>L`g{y(pfO2Gm>6IF=IwAX zUNFfB7*m`uqp8X{vx>^Mig9yv z=+i7>Xg)-bmvoi!)ARRPp=~PPH+i4Gr(?>3Y11PYl# z`{uoPPk@Eg=b@@T6ziW|;o9f{FNiPehHGXEe=Re!4$UvNUz7w#nQqK{E$Y=0Ok|H= z%+XWVa3F>4kFVgaArBEN-07}kUsZ3Z;`A{ztIBaDISD8PNx7*mK`5q6ocW8-H?Jx5 zSBI2;`P%)!3HJeK&WGe;kehKSky$m3Fi`RDE3bGCstw1`YhO6QBX 
zp=1bEtrEG1SAkofQsDuK`?xeU(-?#h5NWl`l%1Kv^{3=+y?ppK^Y6{vo+&M7gJyXh z#!r&|&T$Xbf}fjsI(*pVU?1%a;(XbqYyOEg!NZReYR1bf8G-zcy%g+L7%DNfPNO@) zE;SsuZ0wfzRKS|({S3knXDwhQoObM>f*ZXzoJ#q#KYz2^@ZI9eZR8Ib_{hu{Yh0@; z(ziwT1mpZ5O}x`B3FXXJpIfQ!@h57f{&g`Qygu^xu^Q6qjY{_B8}QoY;P)u~Cn)0ovAInms@2tf zPFVFAYt3#gk}okfM#qxPTH)enimGb0WnPG3w)vbA)b+geF7Ket;VBo&y`wW})1F|O zmvnTa;}kM83;Tx;8V->0q0A7nT~p&HoliB zOC@-wUWPO3Oyzq!Elh5_WvZ%$^2F=&t1Mn|h^>Gk5RtqmM6CFl8SpZ*GBvNnq; zpv7QmA)t}3S)r^+eZk)9Od=Usw#M+;gJy zEr&UXhG8c{a}d}(q1?|rm@Q*xqgxd@4Imm>O9@ELD`=8q&;3%md_Se5#1-F*R*rXP zRb%}J+23}5VDWi>l~GGgCe6|j^vkZE>9NRdtF7ywPS@wY*FKYkJ9Pk^bl|^gw#qW^cSXXANU~k-k3g(rg!NtGk22V2I_eIq+Y??o2nl9EB~{cJA1ND0Kn$ z+$4bBqK%RYa=NBs(@Ve0THgw0rEM`;^RO_Sg|nvug7IkA;HxvIGt&;?^$%)kTRhE> z*hA6ToA_uEe+2z@SzdFFn4l$3ujyYa&E3g5{@-m!1}_|XGabZw|FR2#*Z44q_aLH^ z4*Sr7vCBN{l;b%AGoy5}hgu;y02IbnWn@TPS-{jC&kv6HO!&pvl zT|@i$$Id2WcEC2o_(I@e9o^Fv01Nv z64!ptu3H67i+|pOjPQNmEMjG58l{F?gH2cgmyw#N)fR4?vx?{Vc-%Ya(ZchX63zre zf|V}m{hEN&g6?KUE{-f(tYt*1R+qBj-mPmhNB^bF`lSXkGaGKkSXT*zHzcpd>aksd z2;ws6U~%cL7s>Lv`cl_BNtx5ui-Y2VE)a$y$*+@;NJELeb}>YWFx3)@_M(Q}N8~)` zlz`!0!_I93QcATC!oEfd-{0@Uv<-4wOPT3C3tX-kf+o(0oPn4`>-YGkcq{2OxJFSnbXP&Rq zv=9~Fj`-W4ThBBlJMADcBc7}o8+dZtUpC=4bSC}m=V4p& zm}5JTvfZ-GgN~O+_EEacd(``P#YnlP!VUA_mv6&unAsR^VGFq01v5t263}MmaU2T* z7WA>1(^MT6hG7UZr=@6x{;K{+sJX7JDWWla_qc7l`EFs&p85QIJpcUhcpWcQ&+g%Q z_YIL8F~-=+j`NtAd3<&_JvQ!HdA_E@D$(;S<*z60DrT}Tm|hd>I}&tu4`>*+5&PTC z+|N0C=86d~HDU9@|M~UDEq*Knn`iy_`T0CKwwu7$>uli|O-qUd1jcH`QvzZ+@0aIz=~Jz*9ae#hTw-j&Y{iN)J5|r~HS_GAPL(Mf6v}-#yP*>5 zly>_%8@|2Y+~omoV7KI`0$j?pfKF&9)u^gdJ;4&oHL>1})iXN}=2RX~3RBe8pzdm8 zbi<5mfsYc+m9Pkwhok2zGrGHn`4|HZ`VtiqtM%Uag0fJ8nFBKaC8~e`OE|%X!69)N zh48TTL9aGgwLlg+%moKQm2)H@W7lf%3$UtA=g<|;zBSsKZhpkLUs*?lN2KSm|Z&hbn6nxg$^5r~Dr)v)tOlpcF z8R=+9442nPG#j@mo?a6$>=1J|%1rkxJO|UN@h#i13o>d5#Ksd=^=(?z+gopU_g!fg zMwMddiwq4LZWdd(`zn|&r&xrM=n{aOQ@z9#ZuAJFX?Sez!z0*k=&I~KbC$Y07wIII z6=IQHw5D3;9gX*O(IRJZCpz zHdzZ!TW8Oyv%3wNYWAS7Zvau*%C$VovP%;I0Zu2<#`w2>`zwP)u4ET1%xOkpwLn17 zmed4a&$`7)q+vyhZFNGh_7>~v!i9ic^d{&^bgjkj3JI*jKi1ft1%s`Vt5V&VM}B@h ze|~%%uQPKRgGQR%u2G;W^9;>j`CGi*7cfR3%tIIRE?SaiVV9C(^_d;&sTT6Iw3$r} zrJbjJ9{Tyv=Q)q;qOy~Y?2b1U*1>pVd9j&J+3SN|YIBU%v~kHJn)tW60MN$6eJu ztW4t2EXk{Na=cVe`Y7$(w(Ovjh1gtYk7)Oa8TbT9Mf)$)*cN>tXMEocG--=LAkPvWV2= z8l_ivPNLnzH)|@9DUa5yT6c+kzwS)8eS_5Pq*%rl2s+KpN0=DSF>D_rwvpX3ChK_~ zb(};E`d+uGwSlE9xxc^NVh@WUdC!^I z$1!In1^P1g(Nv19)w1osyfy^V8y0k1KL*yXZ8hrsPPIW!LueA^@-b@+DPbH&HCG#gd~X#h^{L6x}x4UT2-hnXgo}W*e!J(NZEq1sK>e zZY^4v1cR$-4wfv2iwqal=;~ZLKsR%bx|>?q#wOG4{% zGhooDwl<*!TX$-~RLZ&-ek(TQ?*Hy>_(hQE^&f%Gi_~+q#A}Vt&8^q3EycA-CbZ(( zqM11{LSfuissvue&dy>g>+#~p13p0eN@&1gjRuQO>ruT6uc|y}A1BV)1TTV%lg8no zb*r*bo%-6)G#%Cj@0O}?oHkEzTMqMKVPiKZx`@guWgaunnXglos5HNg@O|Ik$Nk&> z_CDV3w(W|5ts_(Qx`(bNt!xn{2qN6D3Ex%E_Udd^Qf)?=*>FT)xCnGrJ!U?YFIAx% z-6peN*}P?d2s3kEr3N>lp)n~svJ}2$p@W5KyiFYPC`~a=OFFycy0_TdDA9E*qV{N5 zA|O@_DTlmT)4ZWv3DoV@6ykde>08UpH??wG-ylb!Ib?&kVLoUZ)+2jP-=n^6e!2$w zy@E4K)mMG_(rURC%?UFLTjg;GPXO*4PnVHRv9pCn2QvPf;GFfY;IP4eU#Yy zZk@XY%gt(a+APl^FKLd^#dXl!7)m@b9HSJ1J+W-srCnnL&TMKl{K zyOM|eg_WJh^LQT5=i_;vN6n&PjB$IvfB*h%?_GdZn|W*La_buSw+Or6-sh~>$MbO> z=b2p%E#^&+;cvHb+dS}Vgp1>R3D=^&*59C7Yzjg*`WKR&8UbsF?zGJUoot}98?P!= z=d3fau*g=6>MwyVw0oe_88DbpRm3V$k(eExc)^w(RyH*DF>W^Y@Zk}*n2#dY4Qi1C z_;sr4oB-RlFr2EOWls}I9dM($RLBBO)+D;q+O=+cYIr0H#;gmU2}28r3mH(7`PwY%M6<K zW!MZ=x~c1!{XF}aX8(Wo{-4*jEz9TyCPrOVYFLp|q zsm#cman4?A&M|s#?e{aWYJWZ5Xh_vsYpSef{e&pUV!aaeD#^3(e7Zuk!~O6v!cb(c zYG|$Jbrm3 z{joypX*EAv8FUU0JC5TVJRMcJv?k`GvsBxCY*_PWExFj`uwgNVxYeC9hiC(#T;`It z^0$3YGRIL}8u&;v4cXqdG%)0n2YSJe4#7tC0uPvsr*c-0#fCYNz}ZAQftm7x?dwOi 
zNjoAUVKhuO`XFOeV7H@bsG73d>rb%?YA)!I!3kuwQr$0-D=1@_1%0@gSz80$oofM> z;%%fMD<6G5EU##|4LIE!s%kdTK-D!T3#-!|yQ!Oy-|QXlmDzdOtWs)rS7i&s#^K{U zj?-y~cC}SWm2Pk|jMoGAKl%^6wE&HcQs=X~2L%ufG-u)AL(9Rq5&F}4$BCpbu= zWI#pFeuIxdgm&4QvS<;u7b3N`K^LbZ3 zuFsEp&Q6AGz~G~*tr*VmtMt>0mmC3#v50Q3)iukR7GZRZ*q_w~bmh!?Ui0&D z&Dur&LA%BAE#AK$-~KSZ|1#d+{X7`aVbIQ30A@t*+4w$Ew~-D6xQ{irqxpSH%Cetw zgbf`bcO2BpW~;BNiCWCERh^CGPfZh4XdO*^)q&e>Hxi=I0fR_CTImLLnFlM$qgU}z zS2Ik#Sr;_7YS3?Ov|;KO@4-Ek;yA5wxb5B&M0%(M-YB_;R>2Od4jWaXyGrmOsX16e zplZ5#FY!?4mo;R^XDchwlp4LM7u_2PXRtB8DiOP_j7CeXs+^Empyo9nYd-ohk+oBa z)OK6$2ZTgPmF~*m;xgaeiAle98j0lh6CMK~nvfZ%c81{8%`Vant zfB$hke;XgU`s24BD409-I;Lw65VCWwxh9R@-VU=k;^;#`BGZ(WTC1N{HLv5o-4fFFgFxLBunEl#V1M|jrh9aD&i?%I@#lZ@kH7u)+w=L%OpxJm z%QD*g?dBdH@s-VPb3+pi(vCQM3>(&^bG*@h%eAgKcLG7VdPNKyJR-oKbV=-Xv;($5 zm|F?5ckj)_h9SP}<7(%YDemZ3aMhi|p9C3dLkC-n9*H#fR>NKFFA`)IUIi@>rHaG+ zjpH_Onz^-=ImThPF~(`(^jFt!pQsJ5e-8JY@9TlP1QX=47F>HuhLy{JE;n-lhnL%V+-K+cYxVtsG9W>jR9|nycXo+rii(%syL(jTQ`D|TM zhapU+)LKaBzJ0Ufwg(RWa`lq5&8ic6?WY#ZU~XYXREjx&C?x7QbAd*ew>ddPu=e3?WAC*(Rj6hN}b+cZ~tNj8eT5@*_bFh}o zVvM5$x!Fct7LL-+1koLQ(Xe2fQ72WSgDYvTuKVBri$C7)Z(4T$@oE0T!U|ez0mw`T zx@OLK&C89$+B^uIW5n%H&d2?u)_TkqY2l~Yt~ajisH{NOE?L9xwBgO`bnw5Z|mb{{LEOsQ+tnpulsG8z5Y;d!8!EH9y^r&Vf5xOb z-2Cuxc<&B-@4OSWM?c*!Jq|bUz+PtY!i$^oO0hVaRrP#6fBgRV{P?_Pf;Ps@X(J+z zao%p{xS70`XsFcya%QeHZU#F^kNHQVm zs9I(NYIpa&dEPn@ZOP&?Gwd-h)6B!eE$t8exLHOkQZ5u~|vE8Lv zO*6QB4DLk4a2tC%LJQEmVW5|G-^yrM=5)8-$s+mqxo7HEFNmhP+%2^N(OBk~*+Ed# z%u)p!Eyv+|i`zLFB-)!L9>ZRDRZU}Zim z!s2G`ZR=r$p2~7NoMYdv2OZXKe|3p#s~>wStWqdv3o@6{=!aTpYglX!9eg{+s!8tZ z7D^=x8$Qls9K!%z*$s1qMH~V5glDq=cgtGOCxfPNy9Ko^gJy7A(ZU>qSNYZJybIvH7kK!K$wedW5n%tzI~757V#Ei zY!}-$L7KVm1E)KcJI1*=Z%#Ks7TpqDy~Hp-c$m45+b!blj3W@VUUIczEv@W1H^Wms z>Gs!nf^(q;TGByps~5MR&!V)8<-T`&UjnWMQPbw|$aw!RZ^mw+;7}-6`$S zI!Od=3&V%km`}gJ*-J;gg^dVRb=zJ6zYZcle*Nw9dW9Z0eih1V-tYI%`;W)t>XtFwY za;P_m&4x7YG?;C3jDGH!EUKMCw*_0mh-!sfZ{|m>rZoT?>I#NQa98at6T+gq9k!oz zEQO;RjxebfI;Z7RVDyt+duF|F*g;7Zv~4&DFg%QIesep_yvPHOCuPH|aGEFxCU?f$ zhLHj=240uec5WfXj`TESve4_e2W>Ng5nDI#vU6|z+>YwW=^hatuQbi8`)kJ$c8qFU z;+56V%2|~G^buwUdB|c1WPM;16tID8*6TBFW_!;B{lw*eZD`WY?rbRwG=c6m+#)P! 
z-r{Ph)^n~opU=6ZJ@VP#HUlkuSPaIAV=x9{2ctI^d%NFWcY4O_gTNrHd*dhe-HV$O z>fCAN<1OCaY=j-wPxS__RddbGg(b9F?)WQ>m?BGB?QTNRLTWR#5cJaC2x?a%V$;?9 zi|E`mv#j=DWt07Kly+}gA1yY%vAO~4f_bc+u1|woc6T$YG%PDyz?7_4H_a-m>6vYV zy^@19r-!4u!}Yv>b<4(fjgkx#!i<&Ko7nE^-62)Cx;Edgh3x88) zA+x)RNAzxZpjus!YY~s0j-jlna}(E^W)?Gd95`!0FfYR!^b(!bKHzpb9S~)q@%KO zt-LOx*D;^>>p0JG+zj!!KmPI$fBy0NU#@FPtv%L}(6Y|zT0Q$~r0`$-%YQ8(RqU61 zq}`1Q`f%s)a0El-{%Z+uBOA2AF%(c;>-ppH@p&ygmsVPt7I}!pa3SU1!d~W+tc|e* z&6QMyc))MLFuKFHdiWJ)b5a|VzhiX_w+MIl!`tJFNd>W;Pjwht?b;YcZ=?a)<>0C` zi`^ARKM7NJm>bf+4$$Zf8IUg9+92C-`+D}gf#O)`+W zTOw5ax^1{SlwAVboMXheo#S@1F<{mW)SB12uE+iHTx)fe471^1@av^*7;xhthk-@_ zrK%SX`<3oP*r4sMqEYsGUDT$Jvg*YuFvN)S@b?jK#vx6t?KG=ig{y2fmeXec1TuQi zIi*9Tu3=sR$ySs5d#>tbdv+AAPTQL!+sqqw?MG@6y`!61wZHzPcdh*i1lgIOg5;b-{N2gs|Q zkai!vHX=ku2IDjv1FGuvPhK0DX`!`0301GXg}qRP5I`tj(MQ5mEu;3oa^uBX4b`eS z*Zt%E`FVd{&&+z6KK8Hu>r9uWxw6%f2}f6|TA75pyQC7XRb74m@%a76ub&^E*;~2& z3MxZ_*x4n|uNm&Y{{Q}2L>Vk)JKB8)O70dSPq&xBo9%&B@HTQ{OTRQJA}08n)+nCo(k`NexN z!OKiS)t)l>aG)Jjmx*0vPE^4umV>+629()gi}A`3wTf_b2MwuVBX%SuuX=upjl3#r zRcDrYLq!)GY_4q_Wf?Xpy3=bo)DalYS#}uRLy#O6 zXLYr*_&Uin)L;Mo1~=?*1SLiSqUVOPin;HQgicD_P)jN99J9B=3Cn~xiPG_2Ius`dQ*xIdn2U9(Z> zNtsF-ZI#l-34v{rsg3OYx6m}S9JEc3G-3-gWDr_l<7YI5wQD}4VlWOn2gC8jF4W3e z>Q%jZWFs3Me~l5tc9CjU9>Z$$Y&Og&OG2;SxT~GFcDe}oCit4NX{_xZ%WkM<1GYCP zjW&8$!#hh9D@%^*C01XpVVR|_oLPZYfKbnDQx(FaRdTDWaO-PnSOJLWdH46z^QUiv z5clW8{H3E)Gs3%ltG&z|U}u%H728a)nd4XIK8t)_Ggk%2ar$w*jbpqtH91?k&!&+= zH|ccu;T|JwR5I(CJ33xH#!J<<_rItZXt3MO>EXsD!FqQjev&M{|1xe<;OFx}s#k@( z2an^}A-Ah5NeK>n)f+v+m8)v4HOn?_fe3TbC7@pE?all(w-_;w^SCv#ETYZ%nsMw^ z7u0RU-j>IpIh#G#nro4A#&C!6xQ+3C_z|$4b*<<8C$EjidD(Hh(T z>a@*<*t6^2PeU}aaz3x;HSeE~=kqydRyFuq{%$qPT5FeZHwIZ~n#?T>VeX{Ge#3iK z-9MinAHV(ncs|xF?28s*Y_?d5ezh#WQVRd-U;Zl$-*0asJ%SIqIn}mPVC;RQEXkTx z`6}l-TivKqj*{At%+<90dvU4upBhFfwZHpxWtwbuQcdrH=)7_Pdk&BrObF1WH1kZ6 zF-iuT+(`g|!0MlMzv$u@xUB0ku+dJF%4x0QhL6iy6=eAu@KPFB4aiyBpPFWDPRd)$ zIAjg$)~t0s=LezZy5^3S)C$gF-3CKf?q97Lku7vxaU9)r9&~AO1nh8A)75CU(47B{ zZ=-sjSEXL7)>_YX<(g|vRlkgPT`sWTFsUGvnxGLz7daweGPfO&EE=Q~KpwX-Qh`S<{ag?itMa4udW4SLkV?t=Umskdd>Yl*lOD9?PI1|>i(&=o!3JE-EMEU zZ@+v$Z{Ijhv)D14naX_r{&|1gKYq+Bt5oTQA_nZL^wO+;D^)J!DHpgoklU3EG+CQk z4H|6QhPEQNg(A9M$BrgN_u(8dj(zhHqU1>QLO*j|sH#>|&V2mrZGcEs)eLxnHygbw z{3T+$*3|w0!>X3k_vC8rS+$Dty>Hw9@LAcCx=34@W=<4kE2{-%cH>&JrPhV{cuuS*5jJF3cDl^e``x*&^c~sT?lR@PbE3;lOc$r#iQB8?Qf6Z_|fBWIQowt~lHkjx*ygF$_frhSzb*`cj zQ)N;q4u=Src1#|MF(6W@wck3xZgR#@i*Pl{SOR@;ZU;t=)83Cb{Rld?vK{CNnbsC& z?#NrFlT64}sY3U)7BrvwDI#U^U{gKds-^*I?-pv%P`P(^-XtmpfI0!Oi~)6!>~e$9 zZl*4$?RNaJE*1l(#kvum+eF-#^n*sO@F)hw@+4QTP;l7)Fyo4OK- zcGT-Uay>6nW>#4%*Y&)fIoI=QRnL|IB|1v**vqJmjTBlQ2EvEA7)Gc!rA{R^bEPpv z!mYb^!KRyD-Jk_T?2lzGspu%O3?-?>HI=JX@Skcl{G8fq%L44n>z>y${CIo&<^7jG zeEZ{{;=HxCxub$h?=7OL6JVci2%IBQdc3TRXRz6l5N2_b` zpy&fj3aT2Ex-;9YSAL1V7>;8e$L)=5Gy)9cIbuXeO0yBBjM#w*x0b5*n!GCE!@@mw zheD{-7tGKs!aU-PH<06GOrP^w+Fn$1Kiu}{4q{bC*k&pgI=ip+SaSkBBDO>DJkPO> zO6scm=e~{Vy#?>gHLqFqQ~Qohi!if@7%|-UKSdrs#u2-7(KWAneBOWk^~dMO$K$zb zk8XvkGPs8yW6X8jcIVK%rOf$!KEMV~H^ts5htqu5Pu_p|w~yz2c9pcn6QROPB!bAF)=50(W$g(?Vlwsm)}+KMol_~JXENv4+3p*FFt98~Yq$0DO^xhNAF zxiaCaR<>q#wVsJ;cEbTiPIwIUlssIZ$>4<~0S< z4?pVy3pCxU}s50*RVk_F<8j?TK0Y6h`T2QWYlz=6$JNfe@4&-HK?IfFVwZJ=gf80N@UN~EMqKw#S?Os zfo*%zngVB)LAaXTR~7q3q{-Fc(KCnpYe-0EbE&&(WvX&^?J9?fDHw6(qPU0_ltSw2 zQnPXaS`9Zxw>z6@KBT0v1PxX-3`-)tdYYhuk?5=Cz2?tI_gqtG?cz%`59ryVHU#=8^^dE=j}M$VqQ;l61G(f z`w8FDHS@WyYbC)Wjxiz#ikYeI1%22ZoZH0Lz71w z77jbu6*9tTx&hE zq1@x8S?=DcHm&Pgtn2xDRlkMV1|)YKKDHh)m)s*WjqH*cSos3o_~F$KgJd-8+8toC z=xp0A^kJ$5J8^Yw>Y1{&38f>5Lhz;hmCW6PyI{rB1F*wJ&dwIKW;NU6c&?`vC7B=V 
z`JC4b&ev_@`FwQc>RMN~a$UJz`5vOQa~*IhVL`|^g1~S(%)){^oMCVis#o=THhh_m z5o4FT3ff5GQtlh?euxw{6c(UTa4SkMsjVTjK`*zO#Kuo#4(gRn#80olus+@1&$rv# z=_4c~va)(z>;8P+KY#qVp7-bT$10U1awPp6A%uZismJFt-mYJ+bDicey8U!GL#38p z%Va`;Yp*&tMhYdo=nNDPCJ&6kaqxCvcy;Bvo{z`%{G87xE4#=xcDo!ufu-G)jqcqH zZxv~s4XU%v)zvWTY8bK-HhPr>^IBt#tEp_kZ9sY5u5FVi}sgsx89{$bh9K4MmNvwvFbQZ zl9HQ8jC0(w0Aar962?tk@-fD79OrqQr+ZY3*PNNp`;Yt6e0Ut^w{PG6zUuAmtp&PA zWD9d%_s<{q`(v#O5`Ck%oZgO5x4zQEfBM6pNHApML`~RXO!E_69*492IM5B6>gg7Q z`8dpR-foAF<90?^97AHu=JQ&!x>w4;S66etnayEnb0gX#jBqnlYjVwMbfS7=tyVqf z^SbhJU6uQZ_A11znu1qmHRhW2g$XRLOS@rDB>f9-eV?YKhlZmv2!U!|n%(1g~gT`RV{2;SeHW+w5 z@8|G-prKyR)RR3g|H{4G@Ap=%MptDe0okTa6o>5@fV%osLkjp#N!#!-oWo$spv+s{ zRp?%|_8{bJsZ5b6dsJ&yQ)R?vcGxSp+Z)-DFPf5UQ*U0ob|nRI=lu0LFfn&7e7(LO z!@snat-PLUteHI@*Yo4L|M>mm$H(K>kL$iL7d!kcw%jIoGiF^b%_lK#kH?SO?Hpl> z(90zsW(Mu;%1d|6-6CwGyFm@#pwQima(cvZ#M|3(yT$u)eDC|`^?5yh|8;(T)OBZ7 zStDR@^t3N)h+YPl3TO4@aCI52mAfC~w*Q`3D(6^PPOnbVGbhE=1YBKwRehOj60DVw zbuR5qI790)^Xf7`q>6AE%m*?(9MsO_0I{&#ZCy#4wA{RE8m&8FQMGv3%6oiy-9_hD zDhxzZP0;d^-94`*)C;$-p=dA7*TI`!J93{{D75-$@^trCOEu`(v%uVs|~SXQ7Yw zh%ngKVE1PLSKF|=@jo!LeJPN7pW}M3^^i3C!k)M-i@DZqkzSQ+m6;vm1dNQ8tGbM4 z^l|K$#u4L)2)d&cW1Pp4*T~L!RrTfO*V{!}f0YO;d*yXKpP6fS>-Ovm5G^3U<7>5n z+c5;#u!dRONYD>%iX{=lxStzngdsPUu;{Eg*8yFsGF!{d=as#qiROkj8up1SU&L7t zpgYh&_*OWWCvO!#GwN+il)4#@>*YoN^bI)EODMp}S9{X&fev!Sy+|7>| zV~lVc?&o2H3J@wYR++)pZkU74ogLYl>n%(u0WlofYhIYWx<(LwYVI1KZHeN`fw^JH zUC@STlAYacKOGNIvX%Ygel^=r3$uDYpR4YV-#_p7kH>wgsuS)((fpLcz$}HSvbyJ! z_xszA5yOKq{4|Ffop5~B<-f2zfDv-SpjXw@f!d7(j==~Hvv3P`tvMf`KmPIl+pqfk z?5prH<49a;|H&QbEN^%3X2MV{AEqwz?UJEZ8BrUjt)fsZ2D;WO`DN-(GbjtNo=Kr{ z-$Is|I-7yaG=tRRaJ9m{wLO^aW^A~JN;L1r{^tZ(S;tONnWR*Q_ZJ*< z*tTukKIoR{yWa9z&?#e2MslY+fqwu=q(R~lWx>U7S z3%j>?949p1Zl^Q03A_jsZ+3QwRZkq@>MD@gP(ex`C%zt1zkht(zUh~5dA@!7`+x6G z;r#XA{`vX2x@)dnYppD8lv88PlCAly`(x$Q`xzDk=G9o$*UDL~1O^Q(AuAux=asW6 ze}V{aBSg!r%)T;Z=Im~*Tn8z^h8=M@V%2qBZ};O<)$TqQ)cyLMc7y(N1pQOfWO!XJ zc3i3{${bR8=j!fx&DB+#h_GuaqPbo6bOT@85osJTj@xlwpP!G#3CVq&xAS)1VvLtK zmN4?|JoKkOo`*lKYvmH@?%iFPDXq2c_iL@y$}JB*j`3n7mjsbbi|xUC{b3w!5O=RX z`2uMcyBb^zfbBKfMr!R)T0ggv(A=y=W9-(=^KsR+vQw@m8kLz_79xQcw8?JU__ui! zJ6tA}O4-$2-Mflf=>3)_tx5*(?9Kl&b7+m@b*j3d+6>+#U!)J;F28p3nRP>`#l{yN zAdOMoZUo9{KpCUZ?(FPgW~QK{(}}8CwbU8LRvNNvsn)W&E1X+k5VmDE1{MP7w83gO zEg^<_qFFUtJ1lG-1NY;reNrn0T_6RwHsh;hhJtyk2@^ZaVT|GD84&|2GmX&c^!7Ui_KuuewE8e%B@M;Di;D@_S2Vc^>xdWh_0?zHK{jm;&q$W7aYgq^`|&H z`MBTz?Z39aL|Ny;mFn!tbRLTk(upRicxzd+b;*&cAGM++#7nXOa1we|M>gA{m1)1{-qw5Wi^{!sMLPL71)ne1Yt6;WVUs|V3k#NvBv$3_Waoms1<|@I z7*#ZmsuZo48?3sVkws&4tuDK2)6SdR5DJ$T7`^kgD7ZORf893B-6(F+*9)ymfVB}e zyCm=m>7sUfi)uKiEQxKl0RV;_GOV6hBe{AN+@jf`uGMa1S&vqh!hE4ZHH$~Y;)>R) zK0OvQz$bOs$Swn(Rl~+oAHeDk+GK@#RuB3@4RT6@p1nf2x(>wb6~d!(z@A#8F}rVm zEc7W}rQvoZ4%qBI-LAxK{3Hm;EC9*8yUYWoEeEs1v`<&n;gOXZwjbCDFxZrPRQG=9 z#LZg$0%`Glaj9;^tP1$E*8x4N4%ofskgmkdY*T)`H7-v2j`xloo$Tsr%=Ed=ZO&(Cw!QPuUhx#Ly)pp zWoN?}I~HR0g-Iro*{ZcJK-Je=z4nIAY{dC~+-~Rbb{uD5cR6bFC~6;#vS!Y;QeCU< zbm_WQx*jvX_qDX#SV(sqr^64Aa~!d*%N=lM_f^XE&+2|PQMDDMFO&O5%)Hzb(WC5X zz|O){R->Cc8T$GvauXlUbKEN3ecNDa^{Vxp^A5U&g_-jRKgYNo<2>Rx#&I02u4+j} zld2nLI7b*7fE$MoBUEhbFyB2x`d^zR06srHcc#2l8$pSx_HFPmZs3Lmpw4Q_pFy#6 zB?|XpY80vg+kjOAp~)%wln6~iz@=oNEG6bXhFL*+mb&Z7>eg&@cd=Wzy4C%x*kYJrDL#Gj47;JMq>I4Z91=? 
zSjiH)3_&CJ@1p<{s$1ZK0IZ&dn0Rts&n7h7`XGDQ;is8~F~Y(OP6FzUkEPk(aZ4}4 z;O+wRb(r3f?EQY&P39N$O0WNH#l+WhLF!E|XBmLln9Kc%xZ2+by9&;;RJE#= zH(HYpYWR(>5}p~f_ReA|{4mMYnseTN{NrEhTKD_s``g=j#ChD}SS03imfFqE^SqrE z=gmFL*{f!*d9CWiYW2#M$2h-#`&DN5$MyN=e|Y>C|ML3tpa0O=wE|UabE`0`Tzq{m zuKRP%Y7eRF1J6QCH@2SIWFSEGs7~>LM(ZJ^bZ4rn8dd(o+Il-W&AiciO4)is6qDJs zvT3qUJ|NFXP!|B%bP>Ax65zw}UU;Yxk zO;Zli#FotDd)B$*RGpg@8WDprjFOgeK4(M2K;K$q)m7_ybR#_8^cL{6iP)!y*PVOM zG}(kuS0h;HSG>&j8_LxJ3fYaaIL_X3Fk~K!8@T3Xtgo}JM+0y#s?|N`TvtA>hw2rs z0~N3sBZkx1v}(HL)cXARy|3~8?cK~&pq%h=__w$7`ybxFe}6yUZ{rBD-fq|Pnzgc9 z=qv{M;yKO9e9mQVs zVCi+LqX9Cy?V*mUdMn#&mn)jmDbRe;kE9fDGs6y40D3b1z2wwinfX2Ck6>!Yu%e&ZEduijaqGV^Ht_7g_$8srYf?% zDmRQy^s1~ac8X)9_M4vOR@H6$<)mIB)W+A&fN0QzT_CJB8MK;W!v$0pfud2kRvXo1 zIIDV>%FXw-YP@~jHZH%NIX!V43Ac^=mC?+jTS{2D1G{)`Y5?y*5WlmiZgsHt4bl1) z#*lTRz%(LM10F659_)d%5U^@3({vpIvY zOm?wtexVGv@@@zUY@PvMXkDNid+T7>l%Iyw=~Wfh@^+NE!b}?*wtx9A@Q4|=L|OH! z=RUK*? z4Lzra$!TUa8x9%6sVdR-dPjM|j(7Es{q25Uk5X`04BuWBwQXzo{tQ4{k7{BJA8zKG zOk;quuwfabnM_rks%jQ1x{g+NXR2EpEl9elU(GeDsu?PEL8dK-+<1smH&ezDCJ(zV zwo6X~-5oBOu=*#B&`z~SLbe32r^sF+Q+KbpAa%`scg+TF<5oA64o|64)efm1xC~=P84un zj=%lUwnKmFeU z_>ccP{}qno7VqH)T+Mven(O-gdU_elj;A|GRJ#BE>;3=z|M^$>=s4g0aD4mrFW!Io zCF0vK(><^GxqkcYxqiQz>;2%jzk55zf%9Pd^O0KOu?;{9+WG!IH17AT&rYRDe%_84 zIB^`bah`M>HjcD2M)c*)*`NRBp?`C&-+%k2@%@*BHvZH9+y4u||M5Tj&zw-UJIy@K z!)>^G17vNkfvOOMjTqtX7O1sdcwhi+Ms|}fDk2DhA@#;Pfey4qknRXp+P3=o3r$kF zR-xD2*M7*^8l=<>7kXeX!MrP9p%fZ)nb!KR|2O`t0RGqioBus_1n5h$xZU%RWWa8{ zf#x9Gpbeu1%zf9nq^6;$hOyd6IRWD;2u*?EQf!2lFu4{m7oH#e{PDOhv|`xdZbr0L z^{jbMrh3MkLP_pp6hyCYfU{8U!)}i*_HRlqh@8d znPVJp=ll5f_U-%k+wJ=~4uId@;+OMwI{$nB{r@h2|K@-EUyGsvqs>AN!gbiNHxG9h z0%pU5yT99)=8k7ahZzT)v;$$*A$C=?n_F0L7|mr8n5asvZEIFhwxI)sLJ8Zh614rV zZ|2VZiMFlUjn-1t?8ye%tRc*W@xT1v{$KwcCY90H9@Z)vn#SQf0k-L!($12TjwU6R zuq9@?Rr<5@#twydgdS}Hrdigj) z3vQ=ixATBcMn0`pWZny1EWYEHG3&IyFI!(g7f&lV6%0cUnz05|#m$8La-`5u1GIws zrha-05!TjW%EqwH?r`hkFqC9Km%C$Uvr!!fK)ETi!g?i5VlB7Y`@Smkp;~Q*DjkL@ zVb^-CtIa_b4j?VQ*08dWEs8I(uv69IivSGurZd4`R&7cOpjA$yI@ncBST7ihBuy%r z|1t>7ES4~ajkmluyM z&JY^rhL)FCksu2mEC@w5+)Ulj)>A-x+aEX|;o+)o z0lhh|+-3qZtKPix4Lxi~R`S>1h*aX{Aga{e$7BArC}*#(rA)>qg*M62Ww5B#`=M)O z7ig^1CN!IQcgt8HJbE?VDtpK7XqGL8cNYE2&{s{`Dp*){!)-MfUR^d^ZE?nK@0I%N zfDlJ!&pw<(hRpuAJGc_hxz5GFzf|%Z%HBxa|o$M~)+ow;lEIh%l3n zf5-m3N#fwJF*jL9HAXZo#%QBCstNN1&CpFobjoQ0;ZdTQtwPXSf>{Yp%Z+X1UgJaL zVrQdXZP7O4{-_Ju31jEB71+jhl-RPRrcrD#y3sxwZ5AqppW}0nOkR+AzX;6GcJL+H6y9cbV1->a0p+S+2E+<>A2tBkcY5)_;8d)^`+W?L#ye zZEtetRY#bGzkNH5Z^NID$12QgHqOC_W5jb_aIQ?5Ae?cCZMQE|MbA~MTQ_&<6t2H; z&g0j|$BO=C_QBG8!YpUN-|$wyoaeYw!-K%EK|jx1-lyKa`2xS&IhUP?B;U`2alCn4 zSH|#EgxeF#M_xIju^JrxoaTn7t~WoH>@;8Ed7cq(e%j}_w_m?MerMvae!IaR$J^Wc ziq`x2^9o40bCK>0myazwXY41exT`9AMGR|FaG}%&XKyygK1Y;5Nt7{Rdu`lAmjNnC z!K-~2C*?|5m(*w)M6d`Vb|kyek|W8D|0_eiE~9MI;z`uk*JINc1EpP}Xqi}$!J!ub zoiOz#Wx^0@0K1ZDs3g#s5E|7~LtB&9LbK2&y&99*%IC_@#Z~6CF&MFRwhqbL zvvbbpLkoVqeIJL7BiFWs?^W#p~Fq= z4VemKnam`^_E31dWUnp62H1k7c1My%71Eq(bOPu=?5|#-nsxQ+k|Z}PkkZs5db8%T za+uTHvHwJxFGf#qMYX+@E4E9v3+et(>wa{wX%Iw*ljh-0-z+$Hp3<8%_T=j2lR|HT zIs|Q@OvAoXSZeQONkfhIv^d;Ye>lZ&^ZA+AIu496#`)HY)zzz%ReBw=&SP}8xkKg< z!D9@M-2^rQo28>JJC2{DHn4et7Qz`CSI0ZBdVISDe;nWAotGAC67)d#w_iq!jslG0 zsv52&Edb;D`Q!nM!G6JX{lvrxgiRwIgq9DW3S9Op64+Zcs@ z)AKX!P*m1&*m;ZFFE^X6+bz#8;0sN@TCl@J!?0pm_-+bowZaL~QEJ;RHAAE5%x*Uo z3So_Hk6MZsdbQTVu$IhVwaf;)%~U-x2KJX}O~tG7P@R2v(+$+T+CoOUdaicQm)$6B z*Ca>J;`oZSk=EWzB&i+dW@}^n`0|MFYeNfW*4pv7mSGpdyQ|r)s;ugj(5m`^Wvi-$ zQbSo;SL^DMa&m)Ekr&fm#Jar9f@B zXI0xqT(e7Z^J;`)^Kthdv5QBz8pF`7VWw;+I^jVS+^t*ghGq-~?RFfeM?^?PGFNWv z^q#4PSj8i}OQ#vC!){;nJUB0ieaH39^D$Jz*@fQh 
z@K!2Uu6aFXKJ)oG=Y3xHc|GUz*(+b%G>;uULN>Ci3o+E-=758MS^4Bt2Mh-1IgaYd+>v zcHq1nx3_Oy{`==gX{}nIt-PuNGTRB;;NWC)RP* z_<7ez+xov|j4-?`6Qr@Xj{^pKL5$Y6;kC(>O;D*qK}PdEl&FOls%I-LHY*&!EVDp1 zVEs(Ul!XLpmjU5TEO9Rq!WWVTIib zBw%K20=DOwyT;#BrOoN7Rr9$XSIw(uGs9d^p~M8K&Xvzqb9F%-rt@~Zos7e+BxSbk zSq82BFpYkGeA>Fw{t$elolcXzI${K0Cd~b)I&Lu9INW&{gCe%kbrZJ%^AgLU4nkt> zIlI~Jh`csAy{~sIgT2a0FtY&N6^9{&LrENj5~Ckpy=<$`XkBSz zKV?i^W}C(AKpEL+2CEZ}RBeugMKs#S7z4L)zP+8{p{{Pr-lUV&>{Yd;jDE1uW^UYJ z{WSae#b}e68d;{m*3axaMAKle+bTprvaJEa>;Ca#2B`7MgKsa@>U=)u z1gjGa@A?{lzN}Cdq(QYQ#oH^kpa2v9?6%?V8^-O1LEHUYb?@WG8F7Md1yekmyL=VI zm&K)_G8RFjZy(j^d>7u3-*6p!YiGtdOSkXezCVrSMKi*=d5MKE#&$4j9~#We!iIx~ zMZirO76U%uS2o;KRdiJSoZ)uC`|9F2zV-U{2eW1Dq0%k>^p<~~3eWnmtcH`?oQaw> zXqAoZTm-zTA=Win%}P+KD?EW~gp-?6B_n3mmQ{{8)aJ&n6x6*`N3Dr53V!O=Il_~& zTYM|y+k5*%H0O8iL%smS2bl)(Ny;O#JPeDMdJ%Vh=@j5nk8m+nkS_{Jo_#PFR zqc$}o8>+50po%s)})+ogbVht$rhH15$-bkDYpPud?l?TZ5hS*{+(m&wJ5 zw$|pucZy)w7J>oC^Eorud|qol^KsQ&xLO^mDIUc#1B?By%XB^dfO+|#R}zWc-*8MDI)V!*<9zV&se4^5UGFli9>fnGSl zEg%BBV;gpCUu-rFmVm`;F5}qG$WFxZb7J`Mo8F+Uc>{?~HM(+DSC<$x z+U*=|y^@*9{Z=3vBG?k?-TT)J(s#2+Er(aPTM!I$r^kp9aXXLU;lotOTDh{RS}k<1 z7NN$l05Hrb3m>ekZJGW0TF9Ac6-ve+b^$^pS4P@tX8Af1^0jM+Xy#+unwcy>)l1t znlVl@+}sZjE2jCt_FV40VRg$S(!!k{9zHB?w0GcLkF!?vs+^o195?sx@9#8zuDN>l z{ZWu);&^=iI>&`E)pnoHQa*apR1xhyQAhbwbz(EY)?=12wE|xAaWiJ{ zEqp^*uC>xy!r=jj+cy~ZC7YWWAx&f^#c+FOgOKKdK0%FnM? zxH;s5O>Had&(Vq5j;_2$q}cit+ei#j4a(Gb?O#b3oK6luT&lKSwG4*tWBWArOc>VP zCP}@AM@HWula#r^ntMg!TPukMaBO0-)qZ(d+tA(`e%&{!ebd495@QM_ZE1qNo};}V za~oq`ow+pUocHnQQi{ZWpaBF(efa z!L=9Upk7FH0u)|I%spf`8ZgRk;sy3$c4HsC4*5VCy-M%vou&+X|RcbbX7 z{@!k5vYA^h@^w!C?f2h>(uRx;8+%s8eP4k9-yaj8y4`#r&hxm9n@S%ayD`5l>l+c) zF9)=n(Ad6lqttY^+RR`Gk1(F+?fvccc6+}aL&E)Wf3C;%xK^!1s#RJNi*R?5V`l>y z4psT{t3CebLH$zbphSEg_dR>=404#8}ILL-@ku*fBzN{z6IsBr~h!h&^NE& zUNFptMTCzCGZ<7|dzg9paoo-kHy&fe5$+BqrrDOlxW8y+`ywS3_Y0pnHxZ8loMt9y z!%O|E*)F=#)JZdkNxiQ^55Dqtk4Jazv#aybZ619j9_uKp7t#hiL4_vuwC4^xy zB5ubx+@0#oUKRW35imm4`?QMY;eis}m8~s;dAKvq z4OdH5myQwDg{9A$I@(l6MbF0f3F{c;ct51sLO@w|JLBgE%Z4H$Q=s}4(Se&fWkwVn zl3=cGx_6fu8VUEV1P7D=RZFp>Rb^)8QTc$N6iumiZ>Y??t7NRzv=)m_tt73h<>-PO zt!@LlC0Dn?b}B+&n+A-2v>p9*^FxbWYN0cC`#(46U~71Nn8^)$r`~8aYR&7puFuEk z=l${f{o~{QxaMQ6ly{;#+DJeAwQ0syHq&FjVFTbkLy4-r&gVgB>x3l2(MU@M9ZS+^V}A^ae195Et};kV;>e}6y5;j}qdWzNT1S!oP6ytwEd5CVACblB3fCftW`a;XDe%yJu17p z)Z4024WKJ~hpKW_ZmW4qbCtj_3mXyV?YO<&&iC`Yo#X8=(l)R)h{h&F+0Q%}VXdG! 
zeAo!HFhggH-5BoSe)thF+#@26^G4DpQANCJ-CH~e?KZoLTh6UU3FzK8D>_}+lN&k> zVvubOMD;$k(DqRYL`#jTcA~URtU#luI+bfSy}oXKUvQCYqLma4%ZaYXEH}gLgdNt1 zv;z!=huwlnCa^?-VKfi7Xp03dbj_^w%H6LB|%!Nj4Q`XDOvM$Gs>Ud51yoEjP zK94Z-+i~9BLvMNi%Oy>PTU*r(!@7!d{R6K9w{OR~9ZG4`{jP5^;#0p)!tGQug8UJ5v^uh6D_ATTCM7I+CJw~e!dM8n@6up)hM}<3uf%u2Fq^L zmIDwa7B+H4-PD%0lLof6#6os+ORJkQqP?Cxmh$U?TWlFrwQ{a&t(;xi%9hL`;utta z7;974vRf;!=XHHPK7RZ0@#FsS@x11I&dlCgj!lX+Y$q-~WB-3CYy@%9tkfPs!|gb3Z|B?XJV%_z5gyfj zzu)hV$B)mCnKQSW3cX*Z{(An7bBxOT|5DiCK8V$79Esxw3<}bhYatjO_F>Y_~ zG=BoMCS)|4?VBsM)~Q-ul0b7no6_Bz#7J0dWTx#cMSXp8_Ry2GRue3BwsHg7YNxSV z*kZ5lUL`apzxGUDw~cwJ>DxkaQl*nH*e%A*{0+Qy9fdJ4%r(5J1$$&jM zF^&^b?ING))+x?EP?v5OctM^jQX6Ep3Hg$^>72ywab{_RF=d9XUFrP#W0>`xg~;hD z((&?Mq~MSMw_)Rtzx?rE{N3OE{h$8sIL`UFujdtJPIG#yoGumM)ot`HnY$XKtKc3c z3y&j?_w#l;j&p?3RK1?-`FK1(pOvLzxT#HW2| zmB^Pd>C0-RqB&LA=_$G-!Lj#eo%x!_k=>b{bInz2Ue{XB=i@%F$8%omT6xXNYGA9n zR#u~wEwI-fGnzY4*6v0-Zf|ej-rm2zzkPcj=i6~RV}ysB(ZYb8^4h9n{(SYB8)XK% zdXUELMdl!?6h;?DcXS;*kBIYpgL$s?vz@s87LqD^X6CHDVFOY7z$65qmcNGB+=;Ij zzO6-ouZbd^u8oGTex(<|CdYW;`YsO z)Uwsy^E?LHMNIf;EU_DF+Oy-)c1hV%UiSQvpC7(H9pyVZ-s+ji`^?hu?#J6Nw{bAC zYBqZ-&a;((Dre5ovvVUOQ` z{Bl}-7O2OcFJ1Tbe15V!1RDn}+y~VjC%nzP=L6xDpiDXGfu80eWtZm@7QN;SbE8EJ zqOJPlw`XO}UOsG9M|)>EG@tYT$JO60Ns=5*mLSWIs+oI4W>%p9^w&dsx$pl@J43VE zAAqXLj0kr(Qxy^33-w40ng~}35Gtd>-Aq-4`5d!mAi}S$z4W>tyKvr*N6e8AD!XfJ z`}P?$lqzq6;!1Uh!U$D`2*stIaaM>J$R>tmvr7ns0i}ZG>H>wa&K$>`QpkkJE-R#_ z6;G3@sIa5~C@Q3)5G!(`Q2KKPi<%{|iqOzPV=WC~*7FtX%=lc1|9JZ+Eo+`p;~ZyX z@M$`MW}CUqI-@SFnnE+j|aQ;}fh^&*f`f(QH0Pi`!YyUv~sn@^j_KUZ_5KXKD;-+hHT##ipG;@Vl^}u6f zl_ZUu7m$DPBgvTo)d{CdkYdjhXI5hAMFe0{g{WF*Toa9|&s%u+O{DO|2p3FC3bt+u z5)ooNfBlcY|Mf3_d%0Y)@=WH8%v!KV)z#ZjtO)0NAWGr+a9s5g6``r9hypQZ zWu507&ue2+#CuZ}fg~2To~CN9ee>R}`9fXhj1194q5m77#}F;SOs#cEGL!(TpDb%C ziAq_wIZ8z$$b8COkd-rY#5s<~{c$`#j{AMiV?<`o9FbL3sEQGc1G@(f~ z#pb@*?z)wl+Eom!85uJsE6Iw;$A~c_=fv7d0>yMSsEVK}S3?48tVu~02|njtFk9$P zSuw(878Ht$6Qw2v(WVMBk;+N?&)vq=&7|43NpH46H#KW}>#u#kl)a4clCfnr5eG&6 znHj0dK+>Y*81oTwsAZPs-5&S4y~jM7OPMukMHDlcb4>R1SChVNZBwm^IR-N;7gRR0 z7D05m(J3+F9Aic<2Cplf3kT8aFR>r9Hf>D^=aAppJ+ki9Bdqmk@_x ziiuUQylvAie(9TC&q{3HW%n-_;+`)QnN8S_L(&OnVxRv?@i5E0SDA)EVP3j@$kAxZm&h`?_kxj3Vm^z_B8u z8YD!ZSa~YwQ`p2+mN3(8>(?*W{pGTrnQEr4RII)Oo?8}`f*Nu@Bfl#}Y!r7hn5_yA zbS-zuaG{HAqMORDwry>{Tta+|QMG|esxDIadIioJN6wfT0n>Ha>W1c2RTX8%3fbwR zG4MIjBFfBHhyW`jp5m}sB?7Vpf~u9mYTl6}rIG)++xYtJS5vqB#WuHI*3{JZ*1vk& zXZ3UJGkU6<8cf8(lvPJYO(`Hk=E)g4mBrK;Jnwn`upHdI$*z$KCTnCqBJwz`(63*7 z+eA@ha-4Z0(@JzrT}5?8H71k!7$>u&H+OY2ab3eZ#%S$1aeKf0!Mz)t;L(A3D)Oes>1XZY+nTd&Of#SwGMM~DGG^Mvj5i<{=(js6h%vdU{bvbm= zNL5hkX-!fnL4{_*kq820xvD|>r3ZfwsUC08SwEYSl6{rqqaXi{w(uG^BU{0S>hR@!jG||H^<_>MZXt4Q*LH z*rGJ6mXc7UC<541z`GQ8+vnM^sesTYNZme7#+fNbkly|4*Drtl?Qehk?XSOl{Wa#y zNQG9#m~-SvkW%F8^VXUyuD0iM46Z^H)r(ck<2=%QxuvOU-9c)tpbeH~Au6A!4%K8v z*^DfajF=H4B7tQ7#B$7eR;^I!s<~0bmKTF5B3cQhtnt-y(gE7cr~>eu=XuPJx7*wI zAGh24<2dF#BZBKtVcM*@DN2}$G#FJ>;LplW0YVj+nG4)qy%+l8m6_Sos~2Tz%1?iz zF3arC^(%~Astz%Qlt97MwYM>dmaCeJo4M=O{pIB~#E#>bndYv&wW)C)$D>42Ra6m- zv&Kx4qQw{K7wOGpRzPDJ8&`d*SbXNEwU`RYqEN_E6?3D=3J8ojmljM!zz{WXi2dh~ z>g$(ZTkpDEeKSa|5w*KCaFg1B9dM?(hzYtb)}C@cArYlgi7Fw<$Oz8V2*#v{Fj`-K zbB?UXn2~9?74+(>qZCV8zRn~}vPwX;TKk(?r_Q7RCeqrTXc|M0o-vNPA%qym_$~A!Kqts#jZv%szJCxg<9s)y4fz2l87t~ z@=OuS0Tr25Tk8>F(#RB-5nP62x>kb5i|q!lbUEu7yAI2M%Wg+b_7UUrR8@GETU&yz z3kXIMFoGp60K}A0FjSVPXhNF-qHarWm?_$dGoeB(uT_ai7S#{}Z64H8WFm!6u{l$f z2|^LUw8}{J(xRwF3RJ5Sh7koXVOc}6SwobV{9MDH%vlU7ZM8>1C~b*Zr3y}3gb;O{ z=W!lmgtcaFTklrbJSL0~Bt;~H5X~xG@;#*py-63_b?e?uFodYgT1BiyI9P({A~VI* zkSa6doXHiabzhOhjFB}9ZsI@TB4Q~5NC{|?%N(0!RmL@SUoRi~>Q*{QLGF#cTG%+jBL~Aueu9Gy` 
zrdo>WO0)?nw)VcrShn`3X87fzv8*Dcrbv{KQD_YX6@7hq`P*Or{@ZWAzP`Nli)Rj^ zR?|2mVuZM@&DV1~QmZLJU4^?#Hy86voacBv?h!?4LS2oR_26G!&=MB)m~hZ4`Z5Nt&KjS2UxAv+ z(#=u-J@j-}fvG-?lTR`MxqgZ3`PxKW#Z9fLyUvo^alXGl7R|G(K(ttlkSb)NGDgg* zENNXss!~gcV6Ea5sa(7gEDAt@qL@q*eu7U#^|{r{dj2n;5jCiAnz+<|GO50N`PTb( zdAY*voX51P2F)<1`)oj0PC6aeeaK^-EbTwus`bMJV zHQLy2hwVM5eUTTZ@V}1uasGZQmFX2FG4nZwkT}P1vz+OD6ZPF2fL_`r#Wq|Twu`J- zBp%SnoGbOFCg5yVfk0Dgz#OV-HO?8MGP8QWZuj%*+x|^Be@{Nn$k92+;}IyCxSw7> zZNn^WLYbu|h}8&(i%3uk3<`lLrBu>kD3}l>3So+i2~-6LqAFvt!WL(bDvD4y<6Pq~ zN)eY{B_VX5vG0+B!}Q7Tn~LR6F~D#Fw`#IyL>Bh5IepNbqnh^nXV+e`~E z&`eM`MvNF|WHH^gt@mwfPT6M6J~Gy&Gu4sx9F9UV3ngmJx4v&}cX-5@D{2?T5?9-s zuXJspW)Z=a9^j%4xHa`A-O*g0#^p3^ApB?7eHGcuBq@MshyyyIS*TTaXNp?ISwiPj zHxYQ#^(#w+@GZ{|6~pYX%e=X@-TS87t2Tpet9YKAfpOrdIv;#!R@oeENkti;Nefiy z4!xFtE4{$Kh%{o3hL&m6}&&XG}t%*sT8SPOVQ6SqPpR0^N>4-t!W#ee%)NlC{e5AQovbi^vd`QX~N)qAGLbJkIm} zxc&I^$L;RZu8hU1DkER$_$)WlDp_XEv|9KM75cB#lVIfkgQ;?1hD}RyToG#rEN zXo@CmtT77}PblT0No5J9bT7*)WP$5uG-sSg&J&9;6Qw0tLRPJmKj^i*z!XYTD@lb4 z)zp;Iv?a@c%IA4x)Yea6jMAQ8(ac&G(W{D}+%x^WXqWSdCVJLTHLw~P!IdSl5C!(l zHh5QSVoe3r3O&P1G^$)2lO9rMxdp4FIi*^Y5Js1vrrP5ifUvUbwg-EYOV3SXh9NDE z#xrd{Y14)LbS3Vra}lRO4Qfjgc5+OM3k;s7FZBx#)JV?R9RV9CZLR( zfG8(KSI1_o=3rGBA~RQEUQjFkAH>3u3P8BZ%mUXvMv7UYH6f0QBA_|_N|x=PA*3+} z7v-&}0~TJQ(eLb@ra-J}X#YmVuN3Ywo@U=wZPQY?a4p9(3$RamLccVcv5Zw~u++@=ZM$5q z*UROyUteEezrK9guiLDc=kfObcY64S|*TUc00dv-t9A1xvVVHB=u zu!&L>YyvZ)=A7s8xPAP%F~@Apkli%Kh%pyI9$B1ewMLdbB6CFOtWglSVNJ_cik7u- zNF`v^&kC0jA|k9ZFtZj-SVgK<&T1-5J`JSL*T#>3{9|6Wh}^$@>z7M!I$}~z$ijb& ziqsOFf@};|NRbD6a7DNV)HRz>5U$Oro2eNJth^`eAQKV@1B?*SsDx0J($z1*>h-^L9RZX65;^ zOKca z6`F$3H7dtqNzs-GxvH?zc@zsknUe;=Bq)fI0#*-$RO%WMG6|3Y0oDK%D5fw=faggu z$^eEyGU1!(&%OSfLsZ1tnr0Xm%MU4Kt7fzY+so%6$%Qg>S8W1Sg}S#*d}u*);So~Z zt887T*;n#LZD9zhTC8)_I46YNdbe%Y7EDFj8s<#3;BFV++{8EA_P&`q3nSuj#{K-T zC+O~HJ?p6mdDh6ywRhVt#Yde{d6!zxjeB#}!unk9*1SxQkl#x3=l`eWEN`~}-LUcPDD!Gu|q2+At)`BRvt zbQNipInJ|t|LxabfBEGvzkd7Wa(OXPx7OT+IBv(s?c?{~|M>pnj~{Q}@Avo2sZPpL zb@M+u;UX)G#Y%`4%G9V-6;Wt|F8dh#EMSp zTqR4vD&>hQP-0r^Z1=ps4pA{Gt5{MMs96j*P*XAx}RZE$z=Du&e_r7)SeWi^- zl?s+aQV5kYqx_tRG}RTeUb7+nywg%dVI{Mu!t#OGH;gFif0`qMqXhl}a|Z5;I`ttdd-OYx(ToK#D2U$`m+Mg%aztS0T?zR#i=wmW1hl zCaK>4`A1y(Y5rpV;!eeR9&(;nq)FLQ5+tfUtJ+nWEJ2x4k)paAPi1FhhEgP@QB8H? znKI8R!bP+-6JbKulmiG|ST$WtPb?h15DF)$0#{n|WyVzslLe}(3#IMynNRn2@tY!! 
zC}T8TRb4e#iz#8cIbwL9G5Qlcz~Y6l|LrfhYHwbD+zrfW!O z=lc70_AjcJIUPp~JL+ioa=n_!m#ux-{V!MXmhUmnlufr$m~-R})XpsJFZk^AiU^$% z3sjjRkSG82`4%ygN~nQJH3T7Ml~9)?LsGmePHSX#ZR$g96I4B)A9;pW~`z`7@FtOsUF@ zMSW8fQ&oWq;A&>g+ghjyi>9a)t}LvHnVDIE$2>8L8j-AsO6=~tweFjlan_SMM_~jK z-K=$Mu->+xc%a=HB$Nj+lBgfDp1{)fy znwlid6bs;-;%-s3LQblbgpizv&}vNWo^q;rW+mb*npDJUr%?oBl8Q|ACYfbA3PbH~ z{EqD<;#eb~>Mww%tVo6+o2Uq?XQ3P>zCJWecNjUg&A?2If&Gvt`_*r7ibktB@6n*XBgJa?Ad6BMCNi`sIJ+K;>nLHBMs#!mvoG-xNdgc zE|TO-QX|n+Rp2TxUeussqFEwhUck(ha_##3^5^qmTV$24Jlpf1-WL65o$py?!1H%n zB`ae_MP+O5|66b4`yYQCm;G|RR^QWGA;16mr`O}_#hZznkG$CN>-=emg@yo z25BlT^s1aw>NG8N(UMXQ&D2UwGv8H5*$f32kyu5wS?lHnts;|^bH!##vPsJGIEktx zg-D91P`Bo8;?H+rr)u}ID@>qLQ;yrvZQGg&J#r+Uuw_Jwt4Uzfn5G(%!>pT%TUQgH zj)}6YF4~ovhPF;Wah8*f+SSU8X4O+)vHzy}BE6vn^laZs9JZUjZtaD-b*3`!K>Dv&jtX3ic;YxUc3-TyXLqQ42GMFqd099=bw#k6QNg;%VghZxN zfLgr%=C<%Q6{R#qIT?gQvWkVZdIU>FQP#VwNJd5!l9da^%Y`viAQOs;Gj|!&f1Yq% zmZRkv^O1$!Oj(mNHG`?Hi&n(sYNkBnDxwe-cQa8H7cOXB)*4o*V@*jx=9uqqKgQX0 zv&g~V4*Rmby!QQSn*x=ia%C$b0Vz0CRGPG=-qy`GvT~ZhTq9?ErlH+e%Ul-Y#?XNm z>)Z9Wzl)qWZ;v1U`2LUo>-OW%8CgYm|We*fe9?{9D4=lQ6Wf8!EGEsjX3tz?>H2A+Jw zwdc?khb?AON1QWXO0sKzX}g=xue7~?`|{eijpT^DTwmJu{X9ki)YPT3GUYP`Q*_Ua zpE*f@b&`>l6H%;zOkgUffGEYO2#Kii7&A{{HlaGC1ZXTwOtAQQ4_mDfg_ycPGlf+- zYOZy8&AB23L#lVFl&YF1#Ga^C2#~48JVq9TDzfeUdVMi(3NfmxqSOoXGl0wIfohq_ zO2o(zQL+@W;v8hEh$)>ku&caW_OHKO_pMc=`$9&CSE-aKqJ|*OWR|i)A%&GAOhV`K z^<`FZ<|JS&(RD1jsbFQS(mBVv-NY<%R**pE3Kzq+i+-+QU$0{4g`l9WW~~&{12s@NV?>Rt zqF8BfyK1uSX{kQ^oDOfLoAxf&L^VNX0q7<~S5HE`_0k&X)UVbyY2}yE>v)-bIq?Gf z!fo>%c8OB4m;G{W{^Hi7_$D9sX;W6A&y3v}ny`+7+FqZdHkP7A65;?$e93dT1URUI z3j^xPqBL2^Qj%=s1WdC^n&epPN{R|+i7GSDtQ69bQE;nSYhp=5PST_@0p=_*W|cYM zrY19GtwRY37FCL3s+lN76};6kYyV8yH)~M1taku`<+xNJDj|w`Di11iW^lbNKoA8H zh?ssNr6`K3db-sVKv2w(Qjw3xG0xMw_3Ng9B)YZNwtv~Kn>wXtF)6nAe=HR>SA!e1 zxv5I9R$eGGSy}RPx`S>7sG6vXLR=fXUpD{pb?**4ZjZ|gaxnPz{ymQ)y#o|6%o3Lb z)g>mHDk&~Wnxj(0v}HjBlTb)j0gQyWWIi+8Y7!+P9tBewAp$SK=H#02NA5jGeOGg# zTk9btdKHrD`Ey%FNhsvv+pk~$^4qV!USD3kc^qTRaU74^?eX#c{^Q5npWpxd@#DvN z-YaI?Tvic=vZA!>{;LpX>bi5RJGrQosOgH$S1>2%81syLjo$oneG%98@ykrc`PsM`j-ib_yr z#S&dZl10PhBtO&jc;=QDT2Y@0wP%Z^Xr&?$N+E?SpfW|cHMw4{Zi$2{nZY=O(-b1I z=}P?iNUVUGDkV$cYohoJ4&-v2mAFG_W{VBSBo$o4oPwD?QP5Qm%v@~a674@(ZT?w# zZ`KY@1rEq!3S?MHWEOii~+4bIz)fkxY)r zIR_!;?)|wH_}kwcg9uafZHL3E5H&xJd7gZq$6S)cW*1Xw{RFE^J#1IZkDE)=wXw)h z+JdK1p(ZZqv?izq_I4G2VQaEyZ?fg%dYdojl>3WtyKdL+MD4x5zP`M?Y`Z%i@%9g! ze?0DQ$0Lt}6+N?sT$M6`9X8<4mtQTC7!+NHKh*ypFHxL^J)&?%_Bsm5>a0UK?r`=z zE8|@D%qS~++#MN*i?cE>JF*>0MmeLLQG{;^A^h(5AABC4$NT+yz22|q>W%g0P9$FR zdYo0w{e@gy#iR4bQyVy7JeMYjwiEAn1VH@|=A9{yx+zyDVw2$?X96C~NtwLjG&Ler z@wPDMKo;q;Co0YU;@<)|W6O1nr|w2d_Feq5yq4>-$@ z<(Odh-b{|~UnJPf5?YH`CaKe{2umM|+;Y^Z=GjIxEe3Dd_%x(2ord$%ywnc`VkH?m6FFI4z>KxkEUbL&xs z!@<^Eel;m-^E0XTuzL5^(5vVD9G z=0C3!v8$m=8zW?kY%G+?ES`|4qGjhxR*m`9j>oOHjTMS9dEhK>F56%R*fB5W#v^e$&_@X$J96)&ffXY z!F0r!tBHQxVr6lGdwVGG^tr+3gUx_2x31^=1`b>ch2CMjzZ8w_a_~Lx#Tx7g*F8-SR#B%* zo@U1F?b6KL-;v$NY+fhpmK2As4nMSa_t4M17vF}iOa~7=s zcZ1VK2!p?cQ-h)H+wu=%&6%rSH}3Ujjog_-f2n~8UU#iBw5T$4OJZhb4(i5j%9Ed% zX@U$wZuW6c+qtUpan)-lRZ6d`VtB7YM)v3$n;(4$RC?GrXzti*9YWOgiZhbH57OW> zFoRbs$;W#dedmSkQ20#3K2gS!0Yb2KeiIDJ!;*} z&tD{UIaqI9PntV9?|e11HN@&Zj-m?ZNS0KZ=Q*;{`j04-+sQ3i^=(eOAxWC`CNs@I z<^f1W2JHmwH%1fRxnEp|dl9fzbX6ZhJtFnPu^vBrUZU{Q(tjGXWWN*FWZf%%OMX}k;7Mt92s>Qt7a=I+ui)QcHJCbvO}#mb?Siwl=qLIy zM~P~nv>K>_jFDKRVJ~;@V6NjcUSUX9(N<5XE3iq>wANVq`PZ9mV&3FE6nS{KWS`)? zc=vE;sWaGIn1z)?!5nQ`KkFXl53-sjhc5;S)Kux6EAn=$+#^?=*<4~SIH*|mcp+H-T(fi z(o18BW{**#R;e4$CEmnJWy1#wr70ANHgffG;ChI`Wa%`C! 
zq^Bh3eh9<}_VeGCx8k}Js18@3q@+$^+~nhVs?P)riPlm~3EJb$fCPxpyRQIvs;a6x zd?tI#+Dj=8z>vVe>g)>PfeR7ayBoIHv$Vne7-Cl3>>E!?IabNUGQ5v&Bo zc+bx{pI~@S4&d7(F3*&?H$i+g{jW{*V{og9s=hAjNPcu0Q3y(&qG#WFW9XzvZ_Ue! zo&dBOxN%zWsfu0T93Pz_TNPMyxx-)_u<*Uw{m9*u*6r}+*0D7=O3($D)Zq}F{{~=B z8D=ERdQ&eB&)ggFC5Q()pG|)-$SsN2fyr1nN?2&|O8g1@CTlTKm>tQT8M*vqw>9$D zwww3uUvIa6|8*jfG!>CW2E$N=0FB@t-8f0|Rp=TqRxvw433M+3xu&LyZ5N+$OEU&G z0mNxbVvIMkW?nXxLGHQ8eFE!-72@A z(s|{K)n4nog~Gy;nX*qN+&!XSHS|egK%Z+AXKc06eE?3-ZxxJrD#5u<)7yo|{ej*9 z%se904K<-~n}( z594n29(?dTwcJ|~-ccEpYq#y)6Y*(_U>7`@vX&<|dr(FhahW)Au}}MVYy#&S@V%s- zei(g=_T6v0Y&tqeTSIjbKMCw?e;^;X;Ahto2j`!Uu57-Y3y6(%wq}mOJ=>pQyD@|-v zNOD12C}qKRx;e{zIx$dBv8=tB2Ag#lvZFtOdEb!>H=Zbu1;x;SGgcQ1=vC{1-hmfm z7pj`4)i6%~ChB#vkr)4iPIF*)Kj zW7r&vT&=RoQCF-%4BZI6T31U~*zrH0ATF*huL>EGlctTU;Dp4Bt8-5KR3?)hCB*LX*GuRbz&&pB7Na-whHlF1+heOMRkq zZ<4ppwk6HvGfPUIhPH}J7z2r7HkLC>cNZO6^XH8GYGyar)xG(t8sttD1kSwDtihMB zB`ebent;^tAL+Eay4BLMbP6Y2&(kW-X@WqV^0cc*!3gsYJTzjKf3QU-4FI(tI{9S> zr%4^7I_%Pixb`zl^XKnnTw)t}&=I3rtyoGo@>M-uC2h#y5ejp;FaBB1io)%%$0T7V z#G|g!Gq1?kE%bwhs(OQMFgF-yd?NB9dUdE{;ReiOd9|HMelYa*A@u<20`%3q-!;Ok z_>AqgumImZ1HROZ7q+9%YW?nm&4&0T(lwWEY@`z{3uM602Uat!NAR2cSH^Ruer6e% zTk7t%_dMxPGJ-QQ6pW3v^!P@m1l{f&dRxTgYf|SbF!ZbbJnG-yUCrg?9mivOYW^RhlV&B_w-3dyVlwQ1^dK+h1$ zRM#T-N?o$;V-p#6JN}MB`j(d34kowV{JA+fxq64Xd}i$ntW*`*T!l+?k{`!=x}p|o zalfdvzafb%&f`zeJ%76WPL}Ljqqb*tF{<)zg@@^(H=KIzJxeRIif^wfP|0>Dvnm7( zLSkqGb0YwDxLzA9W3Js_gZCE6d;Azg{+Hi#nx5a))oHujeY#A;nSZ@q2E^qKFl8gE zoMTOM4HFFlbaJsmVuc)1_~|S|H&{8J>bEYS(UX2@pC&O|W+&ef@n$lXfNiP_)rfgD zbSR(4|Bs(%XHL#0jznu70?|?Db3JD#G$Xj9x6!bfEPTpEoj{q=WG)Y+Jf=z+1yW!T zWtNuB2F%)9RuYTFK0bD+sdK)hpSnlp%`IDXHe=HkNZ0{HO3r8pCgvr=MW?y=*^QND zu6mz*N<0fm59<#d`!%Qra_5k$7;SFeH2f{+uVW$d-xriaj0($Rr+<5LM)JbzJT66d zVS=Lpho|!n_V&{?EwkYyb7+^9gT; zqlIhk7ab{0yGPYLE#MAdOl`9oNYayYs%<4Wi#lM?$Xbb=wrQ{KL)C zLV~WDNx!44vp>Z2nd5b`NC^l$01A9QUbXi#lA__qmoBQY2Q#^%CuX!;Cg_x7*OA!l z|7!k?et+RHPj19tb>&tZgAkLqJ^SD&qG1G+E5_hcC$#b* zu{SyKvDzw97ZO&RDvj-oEIF9iU8_9neDq!A1*ldYx0b8py|M%on9NwE0V_=9gqzRb zJ!y#KAzz3y1j)XnfvYg6V&&Jz?g3+gZ&Ppv35iJ0uYiXX_+rdv4Jt+t&>GtOmtD zlzvjIZ``9)VX4(mnH|TaY^?I{@hA)pvuD`vx!Wq166$8T3T36T!Q*ZMH`w1=1>4SZ za(CFa&dr@3c1NFY><(>5l5Zq}bc%ui4)`5lsXddfy96vt-Umx3I@jo5H*a!L>Z_o# zIay^42gym6RtdYDFwuZNxD%+O@m&s6ObcZJr7KL3($fWKz4~nEkctvW_q@Iis7`8Q zqEYH~n`@J>29}n}%GI$M35qq>UVOVlm`qK99sP`%isyX1?{~Sek*5L+7L04M!V2p# zmBKi$41guM2jV4PVmD7gZON!5{O!Gn$#(Yv_vJ72W@8(Y)%fcG?q}Cb12Lu^W^`4p z3y(g&5vviV)1Xl^K!-6p|Y&s92e#Vnbf!$#3(*3INS137Z@j6S_6)Qto(T+to4^DMLHjjdVa11(npDqQO@q9~(8}S) zwjX<6JzCTYLAnnb8AXX5^xwr=@ZI9dtgLgo|D^Ohp^A1aOjLLsGM1QLu=+i}t_!{;k?>ryMtAF}j58n? zHpM()#wIQmQwsp~y;1%(U-X9&`UyD&>A<$k`=>^(hDNBHy56?%xgznHY{0+x2`X1`etq( zDh(x5+v*L|NUM?NF?ozd`O*$tdi`%c^(DXH#NN))LFmxo-gfcf;jX~)pRg~7QJz2Z z(gCl`2R-80Z2Ve_Psh%hNY9iu@MkgX#EdEAdYi_+_cM~(l6;*TH%lYqCLmUchmFp? z#sCatkg)EH?|@WFsC=o^!2y#w_&5O$UDwL)D#!?l#B$o&3JSFsRO7>A?X$?wk3%UT zkwxBVPVP;W1Ww5YaK`O)FE48bT;=TA zIica$lJq6Kez->-Iyu~>dGQ`<_GqSs!-#uT`|y^TL#{}+zg$lCg2ZYKVFIBr zo~GhKa#oL20g2A*l3TXi;v625cxPWN3y~&yBXXvrV;V|*9d#QKFTo*eF>>e4k!q38 zz|`dU8uiRRP%v%9R#k(YQ-L$CCMO#T0~gdS_#^VdkQV*(TyL~iDimENMy1V40dS0W zoVYA-siDDD2K^7}9O8a%wH(3KY0$@v;kJt!pGI0AM_tdPbNIYHfQ}29Z z0>Imhp7sBuLZ_DS@8P=fySqz{^1VDDZ`tnvZ8WGr1l%UDaJ{*00?_LleKWXHbZ$0ty6p2M3@D?f~pza zIQW46ARxS~D4<$-FeX)N8AeWae9-qAqT_J4ro|{(N;Czi;0?R2(0p`>ikbJ8l(Bmp zacZsxTgZLIr!ly9P! 
[GIT binary patch data omitted: base85-encoded binary blob from this patch series, damaged in extraction and not recoverable as readable text]
z?Gl7z2_kcLsyqP^O(*73cV%^eDqcSesvYd>-Yv6;ksn(*us8S3%F;5M>Z5`84(E!~ z(=XHNs|Sw zH{Y!hJi^UF7VNHr7nEXslnKX;L@qU(5ic=aWZ~Xm%G1WL7M6SK`$tE_9^$n6#TozQ z#g{g+W`!&xyd>@K?5m`l1&U%tXGO;HK5_XtZF>_Vda8Lx?4kDVA|S;wkKd}R@s6JG zUN!b(o}k@E4`Qm#(!5^Z$xI&pu~&M#GjIzIq!ZJ_WHp!yNvT?VYdcy-b=J=HCi7;N(HPBI?# z5Ux*9ajd|*APV9}@5L}?$>hc-t9{$>waG@wqdzUf4-(6 z$sJDk@GNQTAseXd?KPG1dmp>J#2{8<#~1(p!Ac5h#D!iy>K9j-W|!Bi@a2+j66&(rWWt?xqQh2FU;T zmHAH1)QJH5_uvJyE;7k;$-OnNhTGEq591dHPd1&>cK5uvuU1Eg7XNb#_3}QJ`I7q1 zo~g4d{^6|cXC`3;@)+=bJa0%k3KO4DU6cGn4fe1H$`bHru|Fa-;x0j2nq4NEUt<5{ zELE!w+4q2H%AP%ka&|@T>fdG1i0b!&YRmrUSU6AG@Zaz^<)ELLXm_c7V?@Fi2+oqO z2IFZf_!6O{4Tj-m-!m(HiK?(=)0CNdUMN8p=Am`_S=*yFKhZ`Bd5~5G;dRG_w?6$3 zdx17IROgG7fLJRY&jIhVnr5-{Vgiq89VCG;ikSN{u-K2_-PHg34nBBy?}L>x|mtoAB%iPu7baqB)^;{od#jam9ITo1Amiv3Ej5t;BWVk+lal330Cj3yI-9j~a5wKyv!i_yD&|!F`+Cd}nV@#jv=qX~zb?50>57LMNsRcC+pcjgH{^f?>ec-%79(dm0H#AR&2@@F;7 zXQlK7mW3&HYt3ubfh7WPvvRpxt7w+FhDd3Y zUAO;3E%{%p@d*dOcUoUvemjrnzx~4JI5jMGEj~CC2!`eNc`GfB3T`APv$AuiyBxB@ zK3C1GXS0%~OO35hoNp zGKv8|BUWNv8IeYl)(DGUFft1Ivn3&SL&TxDCoS{q&X@9x;Tc zmo~Poz~rlMM-=I}&4EJYCMcAn;Azz%x>l2~>X~-2HyH;OplR}J`$1pZs(vW15@I`q z&F}($SAUwvx!JeD}n4g~pg+ z1g#quhW)t_*Y442D#u5E{wv70_K>{i=A0Z58*d7H0fS3gl3a#a4O|eWv{bM9fRpg+ zZ{>|=89>f}=GFOJgETp{Ucl~Df-yd`@*2kb5yPb5n`_Fg}UOLqJ)qAUSBDM;H_S#N9T zAv=4G=StGn(0c$K)fPt}BS~NuA(pJd4#%+r(j&*tnq9|0vlXBbSl$erS6-S9`L$=g z5V0Ye2CD{fa6%_2jhA=H3U<=|@A|?N`MFJdLnbrRlSw3tCtCbr;8f!E_(L$ZDJYn+ z(|KV+y@{R?iGWMW5MqPw?#YXO_TnWdwGz6!e7X{8ZK@e)0&be}5{~!3X@n_z`O1Iv zSI$^oI4N5%(|nO~MymLAE9q92=xwFdjecOkuN9}>USXdWS27r1R3%bo{ z=zjfrfr_YYf7@~jGym1Q``Q2YZ@%URv2gny9%$*p1Suap_%TCCCGz79^-HhMKv`V2 z`)Wh9b5HQ%jC?izQR;1jgj7c+#nj6un#b>x)|!%cTvlH9FF0faP}_!2?(*A+H(Z)u zy@Cy-tn|rpH$j+vUN^VXsf-Xu! zk4XY*-W_JnKf7AxmmMF-S^e^0J>;F*l1~AX!zj7?L2BFcS?MOXanD5| z*^qEzan^^Oz?HqWP{iH6b=A9j5_5iDc<nzBKqT&D|YK{O2 zA3asI`&B3Lr2@;OQW~G7VX3>**JQ^RcumErI+@KUi#|bgS4V;0GCh84fm(oP)Vu_2*X<+afB{k%M5`7p{B8H~6XQvV z7roxbiUz-(tYe|qD`4=Jt1{sd+}3#X?U#p5hi>8D1$dEy3WEMyC#|WU^f?+fo?WR! zSo^G~5rPg{6}uH(Q>}hxSH~(nZ1)w6*%JyKXrGb(kZ&>Fg!YO}ZcbGG_GAr;)OC9B z`f3JMP&0SBe;JR%`a(zUt*|=>VYO)15U@8J@z+8>+}o-}O~KeO zVRJTOJxyhr*wbwjn?UFQ4Gm3k}z-Ay^ zV{BGeCx3+P#2a}%VBJ<~O8^!~NCL9O;m%pW=)?57il`@5sZh((RB+}UKvnYQh3fgo z#Y!*1>cqloIJ0&WqR$5M`{&p0T43Way1oUVHuq+0L~)+rHzF^FZOY3wRgXMBB*XCC zqXXwV#CvvIEv)M{zB!rn#Xjpb_)I*{U~qTOD->f*?fvmpQrJWOJ)aa@mSv^K8=AZH z!boR6{Q1A9gk!&+l_m>``BILKwX>jiE9=C&_X2D54NzUxoK4 z*;1q_k+VthVhAk;dxYB9s>&Q*j@R_jyZDdYJ4j^FBVhyT-hIE1S*t0cYe(BpPoD!_ zpE;EG3OLVF+iNv&_e#u8gywH?-!c_jxT|?+QE+>;we9}XaQ~nzpNG$^+XFw6M76@p z>v*UMI|mU)$}W#JG;Azz{9t_iQ^N^(2-_GX$E+fqyN3?kz`ZP3oPG&Z1|~=2u;yf> zLS*F$1F?F=fQFvke%~i2j1(0)V)fs|CC&JlrR4stPR&_5>8&7OV*Wg#R^D)zOTPoQ zW`-pnfgo81fsfb-``%HF-)97x;kdlAwK2pPeK(I;LAW!DF!2m zBK)E?aV_4p4r_>z8Na?>ZABQ}6&+WO0T}Bz2lPCaP7Az}?(T`;zzp5csmwe7dUH$9 zFFtPq?x-P!n|5wbTAEv11AzNSf|(VL9FX2lr%GNy!IfOR4h^5T-*kWmA>2^Bw^MG1 zp$k$ZlN8zWbnJlq^%s_peC7_*>F5x5+g6Z@{? 
z0*Z$*nck8p;{a419hUuWRR0=U?Q=PgJW}-$FGD<9(b}^vm;TSXKVHjILym($h z5@WTF-*eH!ej0m=gJ+4k{zlEpH8Zu|H$59{iAgW?ggszY1FNUXSI5ZnTmN`R##leR8^jf2J0i)@4Q`n5V)$W%9?8kL4w zeyCg6tVvidq5>gb)NapLhY3FpWeP1p+>w8ZEPYS`fX&7}4vKMI{!nnH_DfJK>qln_ zsdWdBK86ySN7avnmC&ZCwHg)vk?$7H!5RH>vWcF*G2DH%wdM#ar>0_X z@kps&lxY?`or7NT0w0OENLp!aVFAYmc4{)HZP%HAT!DQ}a1#T= zta8%Wk5>Hkr@>~r^c5R<&y+ueMlNr&uzpRjFU@|V*drVdRe7)=L2Y1pF;UI=sI-Jd zFX-^Z^es&W2F79kKJ6w52fv{bYSI9O8Dljyu~DdPY^x)@QTSnm5b%+!;MY$QW&x&g z`Gd@g?t?}y5jH*XH}~S9Z*PBhl$pHz)DHSP-+!ISB2L3}Y0Qhx(40!$>NIofMY2}c zU^PE1o03si{CLlnIL;vHPk0itK;v8zZ!;55#+Dz4f7`Qb4LK_yB*ho5tgMF+>s~o6 z`mcv;dmvys?3Q|n9=ZB+@+dSC5L8BirGAzbBbG2%DTu5CB$!>k{L%t{3ana&uD_8t9Ib>(Y8WgEV5vs!WNPp$hIbW2l1C#(bmT|bN8 zTq2zQ*Oe^zO@xWf(>g$LvY5N#ec?UIT~YQPiVxxZ1w^El18t^)DNDeI)M1EhmMLNe z5-+Tujl6F(Om9t`ej&lC?UtoiDl9HqS^Hq+aqS!1)}?{nPiUG4OOS|*ik;DW3h>&lumzVDzsQxVEcJZh8lw~eqgmVH9RC2~F4FU34 zx%pw{bS}~7;kTeq~ewh*ZNz8X`_K7W73r5P45P4jSuK35d>8FT8O zW5@E{rXycQ=e-5N*ggR>n4n_%gSFrfDi?E;-`jD@pqrec0+f)FFxPJpb9AhSz~wE} z8EEVH1w~rqGB(Q;^We$d8^`dqw6sAH@H~{T$i$27)G@rujrezDf^j+VTiu#y8Q&W? zz27$eJ5nqOKJ}i7{jV%f$KQ#Mn>G@rPXkoy=`t?6ui?G(-ey;8t6k&moawolW-r^j z%$E7Lj5Wd~DI{omm>fXICM+oxAy7TSfxwnDPW-pOy>Ig0af50u;^^}&cWL8q?tcB$ zFRFnC`KQy}`me5s&2CHrHlfZs)tPF~h7g06dR$ z6GyJunZ^WrdAI5_c6S@s-1lcn>`Dn|{+J2Yv3)@cJjZ;MsH#L9zo##n=VaNMFA6xV z3qBCBltiWuXi0larOi<$NwC*@IJIv(F(N@OS?-aS)H33TB;C-$>jfe1E@w1#1|E59 zS{2R)F>A7Kp!kttThKzBIE$`$dCjlWg$b^f=4T~sjjLj3$Ndc}E0Jf4cKm5WOFHJ0 z3Pw~(k}rlZ*^Ido*YU%Wd$+Kb(`ZY9EN(rH37w3kN=Kui&qq;XZ*o4HN8IDBtL)6{ zhe#B9*xFjd#eUH9J&agCeZuAhfVs3!c(H}vvY)lltY#boOJ(L4ET}2@@vjlT%Wycu zq^iTY!o^~<|pG{ z`~$&7Mm3bM7s;rVcOM4xbYZ50m@I0F7I59Od`KyP!mSS|S9;S!XY3^C?>4%lKow3ByzJw4wvP?8CHNPfIF^6?kQd6#jo*W3E0{^frAoAv3xYM<;e{s3XH zKuvr!_j;>y4SzoVS@$iJ9Trl^?=3&D6ezt8^4$x(mfR)PI6pSPSxW3f_d@WaMc8}v z<`MtiLp0*Y0~f9(7m|)vRlSV1j(~Q|fPgSbN;aocRLresIXy|M6;VY9#_)aLdXp~5 zvm$$qDJU#eyQ(7!u}j|6pAksMyQHfv)aGk+Fs`!FZHTMF0Vvh4lxN?1SSe1T*fc&| zRY#E@ACitGkqU~ogYd%n^iChzyDRp|_JZwd*Dd?|S3!vjB`8B`RwncuTiH6-`6j2_ zZU*sTe%tZsW#ER-H^1!;MOJ5~!Tz2u8w!6+Z=dcrk)lGHr6m{3vaW;$At@b6ITJZ+ zow_D8xwIT2h!#Il+$`N0168P!`wd|x+#F+UjsVXUkFgGm$8F2va(NEP$}=fli?E;| z3=2x#b{BKMEm-+od>eiZMT?E6`M7OllZCXzLgXy|_RinFKmPQ~+xPc4GsQN|vjZ#x zK1qdUrK&`Qm?cY9SA_ye8D>y1-^{d1bIv9^n+T9DO4bI4ly*wJ$KwL+bVRj7flzVC z@RDag#?HcmB1=p}Xg-QdZ2w2E4Zvv>5bnO2`X+)5psMCd%34xNmDsR5Y#-Y&8Ky3s z(}VVB$6d=*C>`blspJTuN=;dUgOyy8lVFYFY$&z^E{&+OPW4A1C*ov$e`v-Y&EhlC z#nm{Fm6?&5ACr;ffSK;w9(S2FhJOC?`O8nAKiyvT`(2BEj{9x9-Jb6I9!F5Jl0yiH zZmwb=u5413Dl@M6X$u7%D%bF%#{~smSAont&hz!{y>f^ZYK>vHeKYfn8RUF_Pfqo? 
z?YoFWC1VvUniLoAAJ^E5Di1YP7lFVvGgSqVTB)3p=Q`fwT*x%TZTD5u$M>v7s4F>) zG!+!N){4d4{K-VdaP4u>?8uO)SjSzf3|FYB;kRT(GP5F~L=~B_Se$sDOY5vyh`GIt za>|g*7PbL^<>Q9)xz_WVxA|)LcHCc;VYe^h_ocTK3o0{PDbSze?xD(+&qZ@x6%eIr zkErB>M%9m)9<=;G-1Zc!nN)@Ao+2#(M0be$zkiXfuB`^U?F|TeT>JY1Q>8kR&@Jft zXjpcfxT1o&0&p`%*+#mQYOF#FT81X)dVP6}!2-Mtbc|t6 zEzWV_AF~8c1-o@Q&C=@QUVnbf-@fbfUgu|exUw4C$&Ag38p9VJVKp#Nast)CRfKJ; z9Ah{we57Oy32w5k^+7RgXwcgl)TqKRq1$9^_G|%gqLzRhmtKtziRI)b@;n z{alHIgtg4jF|PaNAP4O@W>@P>buni;W3swCF{?H;!!U)C*Q!p{EETngI5cXaFjL*g z=tRCt(de3*trKi;t{l~X1t+_>*xrg?4hrp>g>&aSgE7YeW#^Rb}{c&jmIi$yzW4#K+fIYTju{hTF4Jnp14SeVfk zF(8-BF&_%C1?W>(Wh)abv@PaZI+9qOt$0l0Ol_Hc88!B8df(ZD9iXNkJ&x)$tJJcQ zI@m2`&jk`g2iqXgs&&qsJ*N(fP>tcfo+az=`SFZmL!-52`q}5_?fmn7e%_C>x;GTc zu+4cl-!uyzR-$|N8Zx($+2!T}mK1Nj^B4YeWy7`Ndu!}!o&_Kb31>Ij-8H>eBsjdm zumTSC`#h=}o#!!c_xpJ=C43nvLcBvBC-Smm&FcN}pgl&kWtxwKVmEpYh z7;CGUGtQIFYEKi-s@XlCAS4^Z`&c)6sXBSi^LBpbJdLi|pP#px$4cy=*+LgqID=q6 zrG~2wYtC+&Q48?b;Ie10JcF@HrIiffn$A~MWtKC1_%?hDhO>+9KuV}tttUM#ZCoz9$27`@V`;i`86*$%S#=_Fb}c~mHfUxnt8;2*R(cE{+_nJxO!U#` z%*>On+0(?<0cEcy4ua`z0-0;ZB2)_Khbs=&K$fUNl_2G_??K;Y=UlUH_s6m0sqh@z zW@S2DwF#p#&%1Ne>)*!yi!R@M-;>)pEjiIEOVxOeOQ>bnc*JJ7Na^V3`{5DSU9)|W zKaoreLidi2tYp;I|~T(=%RhI*mC(biSU z&d%(bInT@^=aHT2S!}GJm1<=gR_=o^(%n`fbhyu~&H7+Cd7kU$q4infO1z+|mQrz5 z`)F(Jy>Pygl?{5B4~ta|p&1!^zQxb?9~xYJTrK)otI=K42#46!$LucClu|jHB3f(U zhjoZwvQ}q5u*EH+a{<~!po=8UlXi4YK?!u$JkGN+kxk4-r<+^~97LX7Srx_T2Vg=t zS2$yVW1e}JRp{aK{rT|GG_=;!i`u{it5T}lJF6O$suusQ2G8ZOkfJf0O=M}Sam?N_ zoutx*EgY>%o#H)})~Xh(gwPAnM@`H&Q=1Y5nJRpBxK9$b$FRxip;tTwICYhdumGT@nvpwMCKk0OY~ zCaA>}(CmZ^Q~XT3=d`x-&~4&8yUkmo2dd0A@PxL0Do5ie}dCsbzrkzUkdFFAeIdhi&(C5de!Ayoi&b;5xtTU@- zpEFNM&WOha9P3t7RV2;e&Z?$at7)DMcRM?Sw$Pcg3aw_mn(y28dbyapGmP#STkLdG zK+PTwDo|@%XJwCKJ*&xYZ*PVP%|?q-p2||6m}k%XoLQi8vK9`196B2ZGpZU1iduxF`ABXj&Mk3A4Z0&JKpwFDBS|ftx ze~#?{RWD6RADxwTNL8IJEgzsHU|1|f zpww)+naK-u#u(VQaXbC+<91He?4G5c8(5fI9*rEd{a|&7s0HJdB+QAcI zQR^)hrx8BfHVL!NIV;n8HMBY_5BBLw>oj0!yY?7rBgkG4O6h@uw`bSAtIF4dud>!! zcTr7R)?KSC&aOGU_?R*DRIg;noEQY1|_A%jL_L z%jLG%W$UE& zSS(?k;nqdM*8PL6m9?p#lh<`C>4yz)aZQ)?0|dGk;6>f5gkha$_5Pei|2R&Uwu#CH zty|}UIHz@MR##S`7K37zQaQ8MjiHGyG$GO5)|j;_b52{9tB0PPP9yzl7x%Dn-j8$k zU1w)?&Xo{bja^-?5IR%l?kjp$GIKPHWXTqzSL&+8O4d5)uaQ&B3f2*dZH#Ndr*bx{hbWkY z$v#n8m;SRI9`VvFKE`cV#!gM5S~u zb}MtGhs&L5T@^F2_b^P@nt_xGzGawkqgk*`*lcS&_FKY3rbq#`BwkNe70m*zRjoxo ziD&R9B4s9z*+0(X&!aw$^Y(d)(jm0gS?@oN+pX$$_JjLo@TLc>SME*1$~fjsG`icC)YpVw`C{6N5zx`A zIJ-r3a%`lMR z_VMxY`915_D%0rLwzx)&F*Y}EU1&Dv^zh@HIcIgB_xpK2vyzHMH|OxCj8KVg)uOG{ zG+J`=tcEOW64px6+&w&UO0__1==C#1A0}>Fb5BpLu>9c}vcR|A=RHDHqbL1osHX7RIHLt?v3MGHYV=yYbv;c&Le78aZ+1=(A%6 ziMc!P0iOcgQ3NelFE$yRyKirCA#Us1hnB5ay z%~F>HweNdoc5iY5G^|H_>$#kNaPtbwm#W9L62?WkN3Z0gTBc%^bzzpe+F8WeJ*DGZ z7QqtSs@!8#Me58Mt8XLgmSH`%s7dp-LD#-*8q2U3V{NRKR7_1YHYZn)MoJk_9x&62 zB^b+|$EkCE-jg3^-;O-zQJ80`P}OIXQkgrgV>7x)&N{dzkUmB{O1m^_R+nz~L%na8 zee(@&4V{_7RIdOg-60j-n^|O5`F8PrfBE|LIwEua{{6Sl+iljY1^J4Q+BDQff=W_Q zwXroB|1n8LI$f%SwFx1u6ZZS6ebuchanDRi|h~i9rAiA7k77@@nHH>$5sDhbmO+mM`|b`A@p! 
zZ@>NhOo#?79jxX%@(=GN)4ERLNv0a&wyb7_g|il@RTXF7@NncZeGS*oJZ@PZx4NH5xy+2-visS>Y|Leo zS#O3jTlcodg&Mx98y1i~3{hrWQm3_wcMtagT=%HFY+>$xeSN#WU0<)m!}H8q&VX#l z+#AK8o!+}H>N)!JKp({_zi}i2*skEKbGPI3{x{xvy!ZX{sJgKdK71eMUkBrOTqPDe zW8I`zx7&Jk3LaDo(H3n7y$neRVm7Bb*ZYB8xtNYxMkjI7t|ms+f2Oz6J0c-+8nBqp(T;lB~Yc3XIaZpeF7|8V}K8x;ndR?Fm|X9@BC5xwwUETTrWH3_fzhj~+^+dq^zWN;y6* znXE)LGgqa$%~&agW@eNb$_Be_74fXs4UZ(EBY0w*Dzk{?X<0@KbU#|m-p*AwQLxFj zNA|C{prp@&^`V}H`_yd$5(*A>TgbD@%uJ76#h^n91f&5AGTaF|qEzN1YXY=hgb2dI z$;z0WZg60PJKM~tFuq%hz9CwlCYZZx$ZzX0zt)>o0%*?eG8Vf8h4@b2h!Xtd+@YrBfPf805)@_+w6M_|0*~WN%xpr?J<8ry|+dj;-&Q_@`WdfU_ z-G!gGjWhm9v1!De$Gf>l)mPi!F2B6}(|`S!|Ih#HzyG)SGXDN=|Lgwmf8KsOe;jvW z8Wpzpm#>$&$M#xx%@v?xdN%(Ro5&)d$VdVsx)miga zd1|WaoOxHBtnR)mKhFEd{eGX%ACKu&cw9HzFWg6PlnQ4Z*>hH*T9`eToYY~k4tF0h z#^z(tyq2eVl^a`KeLK%ny}Sal2RPBqkIsG6ri**LTrRf8G0OAvHd`(mJ{a_Hrv;p~ zEI7W8Z67frd_j;c0=_-)kSi8@HxG9XvxVcb))-A{6T=ziX1#1-0j^P}t!X%WJ(*Vx zf7m*DvZ$v$5mGxXMafslYT@Rqt2CFgBO%bOWYk=FD27oB4ROECI`ekBFU70y2q4vp z2dtN0oqC`H#O7iv;V~eimfNr8N_M$52GzFA@|@rS1D=+ikz>7CyGfKGzs%aj?f@x#B7Lpa~IH zc{|Tb17loXUcQ{?yuE*9PeQ{1RfJFzQT=#DR&0SP#D7duiGUT?`b1rgs?G_Lju;lR zi|%L|81PGMo5ir0nbuHtY-729AW(}huhs*J5l*n_zG&R)UW+7mtlE}Dqq<~HbelPQ zMgCeQttLpTD>ZXTe9e}e8TqvP{rlhkt!LIeWB4{+Uf;fa`TEN*zy5k1FWbJE7<&v0 zQTOYfUH|^KzjY~d=Gm*aF?^6_j#WPGZo#UWv`+YPZ4zj3vy#|Ga`-leZ^O(f&0Lfm zR;s}+G_5H$yH$c!2{8A|cD=agj>=ob^H?P}VPU9I%9b@vHd@hQR#jHcW8QD)@i~v9 z7v^SLR}f`sPR#j8oxoU7nahjCZO8A|&)+V` zp{&c-{W1d5+e`iBm1Eb3)scM^fmo37dVt{D2hA6)u@+nEk%96DFI=Lnq>x(8ZP;ax z2&>sWk6V3D-V3udXW!LRJ$ru6^XBJgPsRE8DsHHddvnSf(aw;=$6%a7H;%JXl`RWv za%@hH;rkeS?E9J#EOdxkHP?tRJFA`Z0Uicu$@PRYk=uqC9ue1lKaTzGqcfcWxZ5!E zFnTjg4%=gF+gRTl59e@qv(38x0lFS+rvgR~V+-y~wYfhiQ*b)EX_RVR3P2+pW>&fK zo5>(1=|IT_bheM~Qw%bJArxUAXF1T_W(;VFK9^T(RTym{=g0cv42OW4kW^I*OD4gn z)wV%BJ2Xq_F0pBR_|^4DbQnWL&(f4Pm;0;HYE!qwDmF%C84c^4F(tZ69bT;%Ml>zD zwRH17TqWaLwcibCV@Zfvj}lBRe;@W{$_!?!A>OnyBEskdHa@?KY_DpFlAF0XHhL+8 zNS!UIl5U+D5m32%2+OT#A`9UrF)X?*VrjwLfuMu9L_};B4+e};;p-S>A#Q8jMQUcA zDU67*#l?MCyRl6@BD^XhghI0m=2q3^!Ji)0=Lt6<>!(|}vggT?B}T_tYlMZSSp()4 z;o;toz-fU>rP+{jiKHIuKC@C8tyT{E!9!c0PrU*3NC^5w6u*SCGVY;g%2CT+IATrMP{?9cH>r_MUE z8v%EY5gr!H`fF6@`gE>4Q`4%_Nr6tyo&-zj;StRzKD^aCE)DpqyRg_sba z36~A*HZBuA>geYOOIB*G(*+gob=hCsHItvm`T2Q!|M+qH_?X8B`ZQ=87Q+dub*9eQ z*%IAWIbqe?9?I!1p=MQSw&v_}7HN-6W+Pm_h~9>gy+TZx>+NHZX~jb&WzKueIa8ux zqKqCZLAE|vZ~tU=(7uSD)9%a}XK$Bp|McJg+n2xmQy6Z?yaI0n|MKeg^5wt%zyIa> z`dTL=_HW;Qk=glqe!m@myw6)P_R&6$q=4Z*MqKugv9k&VK}($+w!70nb!qBg&B=LO z#`vdSziwOTIPUlJ2ao&xonv;+?#y|1-R60maX*5;UU{$UV?%SKZIx4IzNfieU&6L+ zRP9^ayeE*k!aZv@Xk1#35hKQC9`z`TL)BPgpyk+kVC{2xcFl$-M2hM{loN2bm&@gt zS7Mf}c2l^*gdhwR?lEGFVG(W~KFkI^h**gC^;K+RfMKzI!P(tyrnQFvNLS*~ZfGO; z7|||3Mh9U;1XLUg9}fyP3UhrXp6jMH_`JAho7$_m8XaMbeZWF$$Yf#R+rYNPcHNCs zCOc{P$+ncK9`mQC!R5(EUb;Cv7tvT{4apll<;7l-)2ymaA)(oo-BXK*l5+2^_2r4a z#HW!>25YU*+AOM?ZqemP<{FH_q|{25)!Nz84XgRP{`6jEP494z*oaZGp94ctZ73sy z)Izm;gy)%%-EN1v>bylXUGxoI79sc&0O8SNF`-s@2d3_`(uKvY%LurGL99WHmca`n z!nWOGn3)r|&rdr`2AcWU<1&?Zfpm@72GYk+2}_Zys4V`c-bVXCqbpa)cbdtoE$B3a zdvSf9njR6$@CH@(JSSE~$=T{^Ce5&uR9dwY$Sm$6;^ z-1{%D{qLstqYoVEb&1_a?1sxlBjBO)R2!;cgsCFZWWhZ1j=Gf&;G^~VD!%1kUN2jZ zDtunf9|v#DNu=7lGiROI_xs4R8*le|{E!z^2U-37Jc6#P^CdRO!^dX3LTwT=7lYv+ z0^AnVY_cJgsIJmuT;Eto(_Sy&maQxK%CUl|Q;3HdJN$aN5_YN^v?K+_!mGH^+=Cu8 zmj~Q19A*rv{&Df!zy1}An6hfrS!;4*tktP=wm%`VEA`CW0=)r9SL`P#)shM zC9oC`8=%6x=^obYHPLNEb9h`f*jBxOzs9g_1nqw6wP!L;yf|7v>C|c3a4$UHA1kJ&qOcvWk*`djECjHC;(OC+W z%3OK2u~=HYT#glY+*NApaE8$;t81oIg<0Lr22gC+FhLehIJJn?sI#)~w2I+Mg>S<* zD5Gb%xoKd*41~246AasbW>P^=88H^;l9UbG#<`h|fEhW6YKE^z>|%%B&igTsnX}BQ 
zlQyI@+h$L4VMJZs4r|G2t#Vp+(HzQUf}uq&>K2;Lgov&@%!etqK3{b=;hsN~@0xe- z9$OfcXU^2wnLiy5KAbUz)BJuvzW@2d05$uCN;+V$#O&_(KR>_!@yGY?x8poo%TT9P zv2_1AGLySaogixkIC>iW?9++TdjIuC#)y4fx3P_3m;FjRhdU9Db!aoC>a5HYqMN;p zeUIJgefBDCZK?UwQCL8lD*(p0xbB+gZJzV?dH;OB-#%u3E@0li#byf6VSOEhEr!~n z=JX0jwc3Feo$=r(^KndRbk%Ait;MWISj*sH&W2^GJI`}gl_Yl?+eWhHROb5GO*O09 zV7|TV#-|-YQUS&>1<%7$0lnYy-~PwH{g(fFo^$5y#5wCO)sNr)_WrN`^Y!bm|MD;Y z{q4W~x7V@1#LL)x#`tUD@9(#N`{R#yi|g0FTwb@g&G*d$)dV&rsLsbQqE5Sg=6pZS z?{j{59jF`nGfu^Q!}a=d-RZpWOrLPke5f0iwcUEm>Hh&=Q)*m*Mr>eOQ{Vb%moK17(jP-gf;t#t9kw9Urqtb-IiHATY?vrK*D5{ zAoj`zsi8s-uotGq)@*{p#6t9v!lXNE_4o_1bWS0Yxx;<5PiiYB=_AG^%(v<#ohy2c zrETwA^j#7UYRvk>NPIdsp3oiX!6{bvI_^9!dX!O$Ql+fBT0$39#B60Fr6QUWV>Otp ztJ;)38`ee2EXyoV?GEb>9}Wu}Yoh=w_n_m^oCwZ`j|#&^89A(gAd3p3%m%_Rs4){i zyY8ae83Us=tI6tGlbLu21AfY^23lb~6cZye!X-BwLNF?=OY9<=8*OmrBcTR}Hq(El?57v4aM zE(u}2^|*|;FY)=g-S1U*Z-;5kPHAgylRb}Cqf<^(l^#SCcC~@kT5~5iRQB+N%~=^% zTO!#OoMg+4T?nUIqy_uJezke!Ap!Hf&Ks&t9B`s$uGcI7w;m~-K4NnpRdt^C`+aQt zw#DXdB_Lrp9hv|B=imSS+n?{B@Aq4B>2MZ|ZADwiZaPBU-ceN$H5(lzIYI*6mbQXI z`0(8=!gs%10}h1AA3&wf&el0+XOpmt`3T=4)#!-^T(vsu>x0!UtFpDd0fU8X&FrK5 zBy!MV47LyF=Dk_(!OIqm4W?#SYi9B(*=`{0@%P+~1(065B|dqkS`)QJyaB)mY%w#B z%*txXk|~ok&!aP!CQ^X4c;>d{3H$lMQk5DtOcm_i5{|Zcp7(cuzu#_mtGn+C%yg>s zQGeR+x9Q<8jvc#y-N$$f?OSrczw1wHd-eU>)n6~$%7-_>yPht z|2~i3$Nf9!Y4usW0e#T1UGU47F-F&ljW_AAXvCDVbQZGoG0&NgB-L>6^qHEePqfb? zRLuBt8Qahn``{P@_lPs+dGbMKEL8SF2Q(J-Iy-C0O2mhX4~@r-{5iqsQn{EZ7;81j z%=mEA6|yCE6S1f_Er}*0oH7&D3$+_r9X~%&ync1eE+MP@6k>4zqDx4BXveLwFt+Fw z08}kXJP1cm#fl-2-C7uaJf<{c_hRQQX(H?qDP|jq_TBc2nX@LV)Yjr-&W1jL;JO(- zL01c757Xl=`49?--5~1m&lc_v6wqy=YBiC8#?n_!E)qZ+hkNgX%W72#n({1}uV$>k zwTUrzC)~n~V^|O#!^6gSs2O1vR8F#I$;<+_DYo_iD_KD%dJwV@qGc)klskwraRcXL z(}O`H8g8QP7QOcTWmZrpP0Qug5@2)=Zo|jEZ|)AY2lwGMJNt7US($lXRsZJZFbm&} zgRrVT=TyR@+-h7mdQ+oeVb8z(Zla-f1IghIAWTh$Z2|r#0&YG;fRKy?(GuVCxRCK~vbemmjr7pPi(2^|IP{gaM?Nl4eyYgt`NvmYGW( z!=3eViI-rDuDOrRV8h*HDAn1|+|)l#RV6`Au{+V$sZOEF$q{zFY{SS7R6-fVaE=kz zeY3Hdqd0G;vDZwK!kRY7*M`%VN{o2{$}RwA{k`r()nnB841*XgF=CHeanjr|Y zhdJWuX#{ERVe7-1r3NclaQ#%w8`Txvb=I0lnZagf z8x0P)l+2SAeSPym&07Q)?Jc`p zg+`(0?e_a0-+%l4w?F>)oO5K!r+=p9dbf$9F zY=gGguGf9PTz%VQy_F(t&Xp}%vq7#n!V-ErA(%fu0sTZY&>&J~vb9K8s%CYqzYAYY zkB5^|%?GNlA)pMcF5^%9`wD}u0l1vmabq1$-{eL5P|n~|D7ceAZ#FJwuQ+)8sn7Fs z{)iE`O!oQB?!R7lZuiT6AGU3mJw|MAmmR1c=ey=T^Zhe*CvU;~*7u`6^ZsKV_YTco z=$blP?J-;P?AgaTRfd%Jh@;0He$2Ydj(om0wiu~!Bp`^hRk|GwHToPF-gCQzn{V3& zhIMswF?(rT*6sp}-U|rbi{8`?s+M{Q9=@I^(4x`d;itknr7qSo7n0=h)^9AD;%v<< zi{#=+n7O-0c<(>eKpfj5K?a=N^3m{W70!vXaTh~!&|s2wv+kBG0qf=Yz$p+k${tvA zuyX;T2_t6aJ2N{ihskqC>isw@a!MQ}8iyw}WD-BgQbCZo$_#s@bn_{`VOv#plD z1dm7!pW?*oC1OXg*VIhDXvJnGDZ0FIuFgNYB!#RAx3MX8GLV4nvO=;2wq~pZ1H-ze0%%+_y6_# z`;W6|Yu6+;2!^3c@J51YmU*;F45->n-Egl?*kBjjyAFhRHRR}`*b~vCx{Z#qvBj4? ze!a#oSG;ZZ5&(I*?s4l|Ruuqq8bQ)k^JhN&*I$3#Vt@Pkg{Is4ALx5m&-1J?WgC6u zaen{f`yapk{l}m0=keK%7`C!g)dg6!!mL{~udYIus;%d&Hb_bL+0%$>fJ}5kXC8T^ zO3_q5hHR!VnX1qGJnlzj4c8?ifV>}F*tUy$xP@lyvWF3F9e~n9$T>9&Id^y8_suQ$ zS(mOn^E~IFc@KiuEnZ$;-d?Y;tvVxWeK1k71%O?h9oPy*LMyxc5a_MQ8|(q2S}2QN zL*rh6uBMr}#Tfgxt;ZkFQ+k2|*X