diff --git a/.gitignore b/.gitignore index 5f078afb4..f9cf7353f 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ # Temporary files *.swp *~ +.DS_Store TAGS # Compiled / optimized files @@ -16,4 +17,16 @@ TAGS /build/ /dist/ *.egg-info/ + +#Intellij files +.idea/ + +#vscode +.vscode/ + +#.project +.project /.project + +#.tb +.tb/ diff --git a/.gjslintrc b/.gjslintrc index 29e8788d6..81dddf337 100644 --- a/.gjslintrc +++ b/.gjslintrc @@ -1,3 +1,3 @@ --max_line_length=120 ---exclude_directories=3rdparty +--exclude_directories=3rdparty,tb --disable=0121,0220 diff --git a/.travis.yml b/.travis.yml index 343a6b417..eae4ef0bb 100644 --- a/.travis.yml +++ b/.travis.yml @@ -8,15 +8,17 @@ python: 2.7 env: global: - - OPENBLAS_ROOT=~/openblas - PROTOBUF_ROOT=~/protobuf - CAFFE_ROOT=~/caffe - TORCH_ROOT=~/torch + - OMP_NUM_THREADS=1 + - OPENBLAS_MAIN_FREE=1 - secure: "WSqrE+PQm76DdoRLRGKTK6fRWfXZjIb0BWCZm3IgHgFO7OE6fcK2tBnpDNNw4XQjmo27FFWlEhxN32g18P84n5PvErHaH65IuS9Nv6FkLlPXZlVqGNxbPmEA4oTkD/6Y6kZyZWZtLh2+/1ijuzQAPnIy/4BEuL8pdO+PsoJ9hYM=" matrix: - DIGITS_TEST_FRAMEWORK=caffe CAFFE_FORK=NVIDIA - DIGITS_TEST_FRAMEWORK=caffe CAFFE_FORK=BVLC - DIGITS_TEST_FRAMEWORK=torch + - DIGITS_TEST_FRAMEWORK=tensorflow - DIGITS_TEST_FRAMEWORK=none - DIGITS_TEST_FRAMEWORK=none WITH_PLUGINS=false @@ -73,7 +75,6 @@ matrix: cache: apt: true directories: - - $OPENBLAS_ROOT - $PROTOBUF_ROOT - $CAFFE_ROOT - $TORCH_ROOT @@ -85,6 +86,7 @@ addons: - cmake - cython - git + - gfortran - graphviz - libboost-filesystem-dev - libboost-python-dev @@ -95,6 +97,7 @@ addons: - libhdf5-serial-dev - libleveldb-dev - liblmdb-dev + - libopenblas-dev - libopencv-dev - libsnappy-dev - python-dev @@ -125,15 +128,14 @@ before_install: install: - mkdir -p ~/.config/matplotlib - echo "backend:agg" > ~/.config/matplotlib/matplotlibrc - - ./scripts/travis/install-openblas.sh $OPENBLAS_ROOT - ./scripts/travis/install-protobuf.sh $PROTOBUF_ROOT - ./scripts/travis/install-caffe.sh $CAFFE_ROOT - - if [ "$DIGITS_TEST_FRAMEWORK" == "torch" ]; then ./scripts/travis/install-torch.sh $TORCH_ROOT; else unset TORCH_ROOT; fi - - pip install -r ./requirements.txt + - if [ "$DIGITS_TEST_FRAMEWORK" == "torch" ]; then travis_wait ./scripts/travis/install-torch.sh $TORCH_ROOT; else unset TORCH_ROOT; fi + - pip install -r ./requirements.txt --force-reinstall + - if [ "$DIGITS_TEST_FRAMEWORK" == "tensorflow" ]; then travis_wait ./scripts/travis/install-tensorflow.sh; fi - pip install -r ./requirements_test.txt - pip install -e . - if [ "$WITH_PLUGINS" != "false" ]; then find ./plugins/*/* -maxdepth 0 -type d | xargs -n1 pip install -e; fi script: - ./digits-test -v - diff --git a/MANIFEST.in b/MANIFEST.in index f6b7bf24a..dd8ca92bd 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -3,6 +3,7 @@ recursive-include digits/templates * recursive-include digits/static * recursive-include digits/standard-networks * recursive-include digits/tools/torch * +recursive-include digits/tools/tensorflow * recursive-include digits/extensions *.css recursive-include digits/extensions *.html recursive-include digits/extensions *.js diff --git a/README.md b/README.md index 434ccae73..f1d60018e 100644 --- a/README.md +++ b/README.md @@ -3,6 +3,7 @@ [![Build Status](https://travis-ci.org/NVIDIA/DIGITS.svg?branch=master)](https://travis-ci.org/NVIDIA/DIGITS) DIGITS (the **D**eep Learning **G**PU **T**raining **S**ystem) is a webapp for training deep learning models. +The currently supported frameworks are: Caffe, Torch, and Tensorflow. 
# Installation @@ -18,6 +19,7 @@ Once you have installed DIGITS, visit [docs/GettingStarted.md](docs/GettingStart Then, take a look at some of the other documentation at [docs/](docs/) and [examples/](examples/): +* [Getting started with TensorFlow](docs/GettingStartedTensorflow.md) * [Getting started with Torch](docs/GettingStartedTorch.md) * [Fine-tune a pretrained model](examples/fine-tuning/README.md) * [Train an autoencoder network](examples/autoencoder/README.md) diff --git a/digits-lint b/digits-lint index b11b50021..70b25189c 100755 --- a/digits-lint +++ b/digits-lint @@ -5,9 +5,9 @@ set -e echo "=== Checking for Python lint ..." if which flake8 >/dev/null 2>&1; then - python2 `which flake8` . + python2 `which flake8` --exclude ./digits/jobs . else - python2 -m flake8 . + python2 -m flake8 --exclude ./digits/jobs . fi echo "=== Checking for JavaScript lint ..." diff --git a/digits/config/__init__.py b/digits/config/__init__.py index a903cec87..060d7b36c 100644 --- a/digits/config/__init__.py +++ b/digits/config/__init__.py @@ -12,6 +12,7 @@ torch, server_name, store_option, + tensorflow, ) diff --git a/digits/config/tensorflow.py b/digits/config/tensorflow.py new file mode 100644 index 000000000..0304bb689 --- /dev/null +++ b/digits/config/tensorflow.py @@ -0,0 +1,28 @@ +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. +from __future__ import absolute_import +from . import option_list + + +def test_tf_import(): + """ + Tests if tensorflow can be imported, returns if it went okay and optional error. + """ + try: + import tensorflow # noqa + return True + except ImportError: + return False + +tf_enabled = test_tf_import() + +if not tf_enabled: + print('Tensorflow support disabled.') + +if tf_enabled: + option_list['tensorflow'] = { + 'enabled': True + } +else: + option_list['tensorflow'] = { + 'enabled': False + } diff --git a/digits/dataset/images/classification/forms.py b/digits/dataset/images/classification/forms.py index 1d1edaca0..e10f72ce5 100644 --- a/digits/dataset/images/classification/forms.py +++ b/digits/dataset/images/classification/forms.py @@ -20,7 +20,7 @@ class ImageClassificationDatasetForm(ImageDatasetForm): backend = wtforms.SelectField('DB backend', choices=[ ('lmdb', 'LMDB'), - ('hdf5', 'HDF5'), + ('hdf5', 'HDF5') ], default='lmdb', ) @@ -28,6 +28,8 @@ class ImageClassificationDatasetForm(ImageDatasetForm): def validate_backend(form, field): if field.data == 'lmdb': form.compression.data = 'none' + elif field.data == 'tfrecords': + form.compression.data = 'none' elif field.data == 'hdf5': form.encoding.data = 'none' diff --git a/digits/dataset/tasks/create_db.py b/digits/dataset/tasks/create_db.py index 013757a4e..48538bee5 100644 --- a/digits/dataset/tasks/create_db.py +++ b/digits/dataset/tasks/create_db.py @@ -50,8 +50,9 @@ def __init__(self, input_file, db_name, backend, image_dims, **kwargs): self.input_file = input_file self.db_name = db_name self.backend = backend - if backend == 'hdf5': + if backend == 'hdf5' or backend == 'tfrecords': # the list of hdf5 files is stored in a textfile + # tfrecords can be sharded as well self.textfile = os.path.join(self.db_name, 'list.txt') self.image_dims = image_dims if image_dims[2] == 3: diff --git a/digits/dataset/views.py b/digits/dataset/views.py index 80c6f75f7..7a0751e9c 100644 --- a/digits/dataset/views.py +++ b/digits/dataset/views.py @@ -6,6 +6,7 @@ from . import images as dataset_images from . 
import generic +from digits import extensions from digits.utils.routing import job_from_request, request_wants_json from digits.webapp import scheduler @@ -54,3 +55,31 @@ def summary(): return generic.views.summary(job) else: raise werkzeug.exceptions.BadRequest('Invalid job type') + + +@blueprint.route('/inference-form//', methods=['GET']) +def inference_form(extension_id, job_id): + """ + Returns a rendering of an inference form + """ + inference_form_html = "" + + if extension_id != "all-default": + extension_class = extensions.data.get_extension(extension_id) + if not extension_class: + raise RuntimeError("Unable to find data extension with ID=%s" + % job_id.dataset.extension_id) + job = scheduler.get_job(job_id) + if hasattr(job, 'extension_userdata'): + extension_userdata = job.extension_userdata + else: + extension_userdata = {} + extension_userdata.update({'is_inference_db': True}) + extension = extension_class(**extension_userdata) + + form = extension.get_inference_form() + if form: + template, context = extension.get_inference_template(form) + inference_form_html = flask.render_template_string(template, **context) + + return inference_form_html diff --git a/digits/extensions/view/imageOutput/config_template.html b/digits/extensions/view/imageOutput/config_template.html index f83e0b585..918249a1e 100644 --- a/digits/extensions/view/imageOutput/config_template.html +++ b/digits/extensions/view/imageOutput/config_template.html @@ -12,8 +12,20 @@ {{ form.channel_order(class='form-control') }} +
+    {{ form.data_order.label }}
+    {{ form.data_order.tooltip }}
+    {{ form.data_order(class='form-control') }}
+
+
     {{ form.pixel_conversion.label }}
     {{ form.pixel_conversion.tooltip }}
     {{ form.pixel_conversion(class='form-control') }}
+
+
+    {{ form.show_input.label }}
+    {{ form.show_input.tooltip }}
+    {{ form.show_input(class='form-control') }}
+
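+{# data_order lets this view extension transpose HWC (NHWC) network output back
+   to CHW before display; show_input optionally shows the original input image
+   alongside the inference result. #}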
diff --git a/digits/extensions/view/imageOutput/forms.py b/digits/extensions/view/imageOutput/forms.py index 29c7816e4..bd050de73 100644 --- a/digits/extensions/view/imageOutput/forms.py +++ b/digits/extensions/view/imageOutput/forms.py @@ -23,6 +23,18 @@ class ConfigForm(Form): 'is ignored in the case of a grayscale image)' ) + data_order = utils.forms.SelectField( + 'Data order', + choices=[ + ('chw', 'CHW'), + ('hwc', 'HWC'), + ], + default='chw', + tooltip="Set the order of the data. For Caffe and Torch models this " + "is often NCHW, for Tensorflow it's NHWC." + "N=Batch Size, W=Width, H=Height, C=Channels" + ) + pixel_conversion = utils.forms.SelectField( 'Pixel conversion', choices=[ @@ -33,3 +45,13 @@ class ConfigForm(Form): tooltip='Select method to convert pixel values to the target bit ' 'range' ) + + show_input = utils.forms.SelectField( + 'Show input as image', + choices=[ + ('yes', 'Yes'), + ('no', 'No'), + ], + default='no', + tooltip='Show input as image' + ) diff --git a/digits/extensions/view/imageOutput/view.py b/digits/extensions/view/imageOutput/view.py index 4a8c6ab28..e53a5b674 100644 --- a/digits/extensions/view/imageOutput/view.py +++ b/digits/extensions/view/imageOutput/view.py @@ -29,7 +29,9 @@ def __init__(self, dataset, **kwargs): # view options self.channel_order = kwargs['channel_order'].upper() + self.data_order = kwargs['data_order'].upper() self.normalize = (kwargs['pixel_conversion'] == 'normalize') + self.show_input = (kwargs['show_input'] == 'yes') @staticmethod def get_config_form(): @@ -69,15 +71,31 @@ def get_view_template(self, data): - context is a dictionary of context variables to use for rendering the form """ - return self.view_template, {'image': digits.utils.image.embed_image_html(data)} + return self.view_template, {'image_input': digits.utils.image.embed_image_html(data[0]), + 'image_output': digits.utils.image.embed_image_html(data[1])} @override def process_data(self, input_id, input_data, output_data): """ Process one inference and return data to visualize """ - # assume the only output is a CHW image - data = output_data[output_data.keys()[0]].astype('float32') + + if self.show_input: + data_input = input_data.astype('float32') + image_input = self.process_image(self.data_order, data_input) + else: + image_input = None + + data_output = output_data[output_data.keys()[0]].astype('float32') + image_output = self.process_image(self.data_order, data_output) + + return [image_input, image_output] + + def process_image(self, data_order, data): + if data_order == 'HWC': + data = (data.transpose((2, 0, 1))) + + # assume CHW at this point channels = data.shape[0] if channels == 3 and self.channel_order == 'BGR': data = data[[2, 1, 0], ...] # BGR to RGB diff --git a/digits/extensions/view/imageOutput/view_template.html b/digits/extensions/view/imageOutput/view_template.html index 8f22fdfc7..0f02a4bd0 100644 --- a/digits/extensions/view/imageOutput/view_template.html +++ b/digits/extensions/view/imageOutput/view_template.html @@ -1,3 +1,6 @@ {# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. #} - +{% if image_input %} + +{% endif %} + diff --git a/digits/extensions/view/rawData/header_template.html b/digits/extensions/view/rawData/header_template.html new file mode 100644 index 000000000..fcd137d9d --- /dev/null +++ b/digits/extensions/view/rawData/header_template.html @@ -0,0 +1,7 @@ +{# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 
#} + +{% from "helper.html" import print_flashes %} +{% from "helper.html" import print_errors %} +{% from "helper.html" import mark_errors %} + +{{data}} diff --git a/digits/frameworks/__init__.py b/digits/frameworks/__init__.py index 707cbfa2d..a6eb057f4 100644 --- a/digits/frameworks/__init__.py +++ b/digits/frameworks/__init__.py @@ -12,6 +12,10 @@ 'TorchFramework', ] +if config_value('tensorflow')['enabled']: + from .tensorflow_framework import TensorflowFramework + __all__.append('TensorflowFramework') + # # create framework instances # @@ -19,6 +23,9 @@ # torch is optional torch = TorchFramework() if config_value('torch')['enabled'] else None +# tensorflow is optional +tensorflow = TensorflowFramework() if config_value('tensorflow')['enabled'] else None + # caffe is mandatory caffe = CaffeFramework() @@ -35,6 +42,8 @@ def get_frameworks(): frameworks = [caffe] if torch: frameworks.append(torch) + if tensorflow: + frameworks.append(tensorflow) return frameworks diff --git a/digits/frameworks/caffe_framework.py b/digits/frameworks/caffe_framework.py index 8c33ee259..2a5753e91 100644 --- a/digits/frameworks/caffe_framework.py +++ b/digits/frameworks/caffe_framework.py @@ -34,6 +34,8 @@ class CaffeFramework(Framework): # whether this framework can shuffle data during training CAN_SHUFFLE_DATA = False + SUPPORTS_PYTHON_LAYERS_FILE = True + SUPPORTS_TIMELINE_TRACING = False if config_value('caffe')['flavor'] == 'NVIDIA': if parse_version(config_value('caffe')['version']) > parse_version('0.14.0-alpha'): @@ -132,10 +134,11 @@ def get_network_from_path(self, path): return network @override - def get_network_visualization(self, desc): + def get_network_visualization(self, **kwargs): """ return visualization of network """ + desc = kwargs['desc'] net = caffe_pb2.NetParameter() text_format.Merge(desc, net) # Throws an error if name is None @@ -150,7 +153,6 @@ def can_accumulate_gradients(self): if config_value('caffe')['flavor'] == 'BVLC': return True elif config_value('caffe')['flavor'] == 'NVIDIA': - return (parse_version(config_value('caffe')['version']) - > parse_version('0.14.0-alpha')) + return (parse_version(config_value('caffe')['version']) > parse_version('0.14.0-alpha')) else: raise ValueError('Unknown flavor. Support NVIDIA and BVLC flavors only.') diff --git a/digits/frameworks/framework.py b/digits/frameworks/framework.py index efd783531..ba6d469ee 100644 --- a/digits/frameworks/framework.py +++ b/digits/frameworks/framework.py @@ -26,6 +26,18 @@ def can_shuffle_data(self): """ return self.CAN_SHUFFLE_DATA + def supports_python_layers_file(self): + """ + return whether framework can shuffle input data during training + """ + return self.SUPPORTS_PYTHON_LAYERS_FILE + + def supports_timeline_traces(self): + """ + return whether framework supports creating timeline traces + """ + return self.SUPPORTS_TIMELINE_TRACING + def supports_solver_type(self, solver_type): """ return whether framework supports this solver_type @@ -77,7 +89,7 @@ def get_network_from_path(self, path): """ raise NotImplementedError('Please implement me') - def get_network_visualization(self, desc): + def get_network_visualization(self, **kwargs): """ return visualization of network """ diff --git a/digits/frameworks/tensorflow_framework.py b/digits/frameworks/tensorflow_framework.py new file mode 100644 index 000000000..38d41f242 --- /dev/null +++ b/digits/frameworks/tensorflow_framework.py @@ -0,0 +1,182 @@ +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 
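+# Note: this module is only imported when TensorFlow support is enabled,
+# i.e. when config_value('tensorflow')['enabled'] is True (see the change to
+# digits/frameworks/__init__.py in this patch).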
+from __future__ import absolute_import + +import os +import re +import subprocess +import tempfile +import sys + +from .errors import NetworkVisualizationError +from .framework import Framework +import digits +from digits import utils +from digits.model.tasks import TensorflowTrainTask +from digits.utils import subclass, override, constants + + +@subclass +class TensorflowFramework(Framework): + """ + Defines required methods to interact with the Tensorflow framework + """ + + # short descriptive name + NAME = 'Tensorflow' + + # identifier of framework class + CLASS = 'tensorflow' + + # whether this framework can shuffle data during training + CAN_SHUFFLE_DATA = True + SUPPORTS_PYTHON_LAYERS_FILE = False + SUPPORTS_TIMELINE_TRACING = True + + SUPPORTED_SOLVER_TYPES = ['SGD', 'ADADELTA', 'ADAGRAD', 'ADAGRADDA', 'MOMENTUM', 'ADAM', 'FTRL', 'RMSPROP'] + + SUPPORTED_DATA_TRANSFORMATION_TYPES = ['MEAN_SUBTRACTION', 'CROPPING'] + SUPPORTED_DATA_AUGMENTATION_TYPES = ['FLIPPING', 'NOISE', 'CONTRAST', 'WHITENING', 'HSV_SHIFTING'] + + def __init__(self): + super(TensorflowFramework, self).__init__() + # id must be unique + self.framework_id = self.CLASS + + @override + def create_train_task(self, **kwargs): + """ + create train task + """ + return TensorflowTrainTask(framework_id=self.framework_id, **kwargs) + + @override + def get_standard_network_desc(self, network): + """ + return description of standard network + """ + networks_dir = os.path.join(os.path.dirname(digits.__file__), 'standard-networks', self.CLASS) + + for filename in os.listdir(networks_dir): + path = os.path.join(networks_dir, filename) + if os.path.isfile(path): + match = None + match = re.match(r'%s.py' % network, filename) + if match: + with open(path) as infile: + return infile.read() + # return None if not found + return None + + @override + def get_network_from_desc(self, network_desc): + """ + return network object from a string representation + """ + # return the same string + return network_desc + + @override + def get_network_from_previous(self, previous_network, use_same_dataset): + """ + return new instance of network from previous network + """ + # note: use_same_dataset is ignored here because for Tensorflow, DIGITS + # does not change the number of outputs of the last linear layer + # to match the number of classes in the case of a classification + # network. In order to write a flexible network description that + # accounts for the number of classes, the `nClasses` external + # parameter must be used, see documentation. 
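+        # (illustrative example only: a network description can size its last
+        #  layer from that parameter, e.g.
+        #  `slim.fully_connected(net, nClasses, activation_fn=None)`,
+        #  so the same description adapts to the dataset's class count)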
+ + # return the same network description + return previous_network + + @override + def validate_network(self, data): + """ + validate a network + """ + return True + + @override + def get_network_visualization(self, **kwargs): + """ + return visualization of network + """ + desc = kwargs['desc'] + dataset = kwargs['dataset'] + solver_type = kwargs['solver_type'].lower() if kwargs['solver_type'] else None + use_mean = kwargs['use_mean'] + crop_size = kwargs['crop_size'] + num_gpus = kwargs['num_gpus'] + if dataset is None: + raise NetworkVisualizationError('Make sure a dataset is selected to visualize this network.') + + # save network description to temporary file + temp_network_handle, temp_network_path = tempfile.mkstemp(suffix='.py') + os.write(temp_network_handle, desc) + os.close(temp_network_handle) + + # Generate a temporaty file to put the graph definition in + _, temp_graphdef_path = tempfile.mkstemp(suffix='.pbtxt') + # Another for the HTML + _, temp_html_path = tempfile.mkstemp(suffix='.html') + + try: # do this in a try..finally clause to make sure we delete the temp file + # build command line + args = [sys.executable, + os.path.join(os.path.dirname(digits.__file__), 'tools', 'tensorflow', 'main.py'), + '--network=%s' % os.path.basename(temp_network_path), + '--networkDirectory=%s' % os.path.dirname(temp_network_path), + '--visualizeModelPath=%s' % temp_graphdef_path, + '--optimization=%s' % solver_type, + ] + + if crop_size: + args.append('--croplen=%s' % crop_size) + + if use_mean and use_mean != 'none': + mean_file = dataset.get_mean_file() + assert mean_file is not None, 'Failed to retrieve mean file.' + args.append('--subtractMean=%s' % use_mean) + args.append('--mean=%s' % dataset.path(mean_file)) + + if hasattr(dataset, 'labels_file'): + args.append('--labels_list=%s' % dataset.path(dataset.labels_file)) + + train_feature_db_path = dataset.get_feature_db_path(constants.TRAIN_DB) + train_label_db_path = dataset.get_label_db_path(constants.TRAIN_DB) + val_feature_db_path = dataset.get_feature_db_path(constants.VAL_DB) + val_label_db_path = dataset.get_label_db_path(constants.VAL_DB) + + args.append('--train_db=%s' % train_feature_db_path) + if train_label_db_path: + args.append('--train_labels=%s' % train_label_db_path) + if val_feature_db_path: + args.append('--validation_db=%s' % val_feature_db_path) + if val_label_db_path: + args.append('--validation_labels=%s' % val_label_db_path) + + env = os.environ.copy() + # make only a selected number of GPUs visible. The ID is not important for just the vis + env['CUDA_VISIBLE_DEVICES'] = ",".join([str(i) for i in range(0, int(num_gpus))]) + + # execute command + p = subprocess.Popen(args, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + close_fds=True, + env=env) + + stdout_log = '' + while p.poll() is None: + for line in utils.nonblocking_readlines(p.stdout): + timestamp, level, message = TensorflowTrainTask.preprocess_output_tensorflow(line.strip()) + if line is not None: + stdout_log += line + if p.returncode: + raise NetworkVisualizationError(stdout_log) + else: # Success! 
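+                    # read the serialized graph definition (.pbtxt) back in
+                    # and return it as an escaped string for display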
+ return repr(str(open(temp_graphdef_path).read())) + finally: + os.remove(temp_network_path) + os.remove(temp_graphdef_path) diff --git a/digits/frameworks/torch_framework.py b/digits/frameworks/torch_framework.py index 0a6e7775a..8ce2d4976 100644 --- a/digits/frameworks/torch_framework.py +++ b/digits/frameworks/torch_framework.py @@ -33,6 +33,8 @@ class TorchFramework(Framework): # whether this framework can shuffle data during training CAN_SHUFFLE_DATA = True + SUPPORTS_PYTHON_LAYERS_FILE = False + SUPPORTS_TIMELINE_TRACING = False SUPPORTED_SOLVER_TYPES = ['SGD', 'NESTEROV', 'ADAGRAD', 'RMSPROP', 'ADADELTA', 'ADAM'] @@ -115,10 +117,11 @@ def validate_network(self, data): return True @override - def get_network_visualization(self, desc): + def get_network_visualization(self, **kwargs): """ return visualization of network """ + desc = kwargs['desc'] # save network description to temporary file temp_network_handle, temp_network_path = tempfile.mkstemp(suffix='.lua') os.write(temp_network_handle, desc) @@ -176,7 +179,7 @@ def get_network_visualization(self, desc): # we did not find a network description raise NetworkVisualizationError(''.join(unrecognized_output)) else: - output = flask.Markup('
')
+                output = flask.Markup('
')
                 for line in desc:
                     output += flask.Markup.escape(line)
                 output += flask.Markup('
') diff --git a/digits/job.py b/digits/job.py index fe6267c1c..fabe5b30b 100644 --- a/digits/job.py +++ b/digits/job.py @@ -43,6 +43,7 @@ def load(cls, job_id): if isinstance(task, TrainTask): # can't call this until the job_dir is set task.detect_snapshots() + task.detect_timeline_traces() return job def __init__(self, name, username, group='', persistent=True): diff --git a/digits/model/forms.py b/digits/model/forms.py index 797e2e9a3..2c94b7957 100644 --- a/digits/model/forms.py +++ b/digits/model/forms.py @@ -121,6 +121,15 @@ def validate_py_ext(form, field): tooltip="How many epochs of training between running through one pass of the validation data?" ) + traces_interval = utils.forms.IntegerField( + 'Tracing Interval (in steps)', + validators=[ + validators.NumberRange(min=0) + ], + default=0, + tooltip="Generation of a timeline trace every few steps" + ) + random_seed = utils.forms.IntegerField( 'Random seed', validators=[ @@ -155,12 +164,15 @@ def validate_py_ext(form, field): solver_type = utils.forms.SelectField( 'Solver type', choices=[ - ('SGD', 'Stochastic gradient descent (SGD)'), - ('NESTEROV', "Nesterov's accelerated gradient (NAG)"), - ('ADAGRAD', 'Adaptive gradient (AdaGrad)'), - ('RMSPROP', 'RMSprop'), + ('SGD', 'SGD (Stochastic Gradient Descent)'), + ('MOMENTUM', 'Momentum'), + ('NESTEROV', "NAG (Nesterov's accelerated gradient)"), + ('ADAGRAD', 'AdaGrad (Adaptive Gradient)'), + ('ADAGRADDA', 'AdaGradDA (AdaGrad Dual Averaging)'), ('ADADELTA', 'AdaDelta'), - ('ADAM', 'Adam'), + ('ADAM', 'Adam (Adaptive Moment Estimation)'), + ('RMSPROP', 'RMSprop'), + ('FTRL', 'FTRL (Follow-The-Regularized-Leader)'), ], default='SGD', tooltip="What type of solver will be used?", @@ -300,10 +312,11 @@ def validate_lr_multistep_values(form, field): ) def validate_custom_network_snapshot(form, field): - if form.method.data == 'custom': - for filename in field.data.strip().split(os.path.pathsep): - if filename and not os.path.exists(filename): - raise validators.ValidationError('File "%s" does not exist' % filename) + pass +# if form.method.data == 'custom': +# for filename in field.data.strip().split(os.path.pathsep): +# if filename and not os.path.lexists(filename): +# raise validators.ValidationError('File "%s" does not exist' % filename) # Select one of several GPUs select_gpu = wtforms.RadioField( diff --git a/digits/model/images/classification/job.py b/digits/model/images/classification/job.py index ce82e9f6b..e807e7351 100644 --- a/digits/model/images/classification/job.py +++ b/digits/model/images/classification/job.py @@ -28,7 +28,7 @@ def job_type(self): def download_files(self, epoch=-1): task = self.train_task() - snapshot_filename = task.get_snapshot(epoch) + snapshot_filename = task.get_snapshot(epoch, download=True) # get model files model_files = task.get_model_files() diff --git a/digits/model/images/classification/test_views.py b/digits/model/images/classification/test_views.py index 8ba697e1a..fc037f852 100644 --- a/digits/model/images/classification/test_views.py +++ b/digits/model/images/classification/test_views.py @@ -3,12 +3,14 @@ import itertools import json -import math import os import shutil import tempfile import time import unittest +import caffe_pb2 +import math + # Find the best implementation available try: @@ -17,16 +19,14 @@ from StringIO import StringIO from bs4 import BeautifulSoup -from google.protobuf import text_format from digits.config import config_value import digits.dataset.images.classification.test_views -from digits.frameworks import 
CaffeFramework import digits.test_views from digits import test_utils import digits.webapp - -import caffe_pb2 +from digits.frameworks import CaffeFramework +from google.protobuf import text_format # May be too short on a slow system TIMEOUT_DATASET = 45 @@ -97,6 +97,25 @@ class BaseViewsTest(digits.test_views.BaseViewsTest): end """ + TENSORFLOW_NETWORK = \ + """ +class UserModel(Tower): + + @model_property + def inference(self): + ninputs = self.input_shape[0] * self.input_shape[1] * self.input_shape[2] + W = tf.get_variable('W', [ninputs, self.nclasses], initializer=tf.constant_initializer(0.0)) + b = tf.get_variable('b', [self.nclasses], initializer=tf.constant_initializer(0.0)), + model = tf.reshape(self.x, shape=[-1, ninputs]) + model = tf.add(tf.matmul(model, W), b) + return model + + @model_property + def loss(self): + loss = digits.classification_loss(self.inference, self.y) + return loss +""" + @classmethod def model_exists(cls, job_id): return cls.job_exists(job_id, 'models') @@ -126,7 +145,14 @@ def delete_model(cls, job_id): @classmethod def network(cls): - return cls.TORCH_NETWORK if cls.FRAMEWORK == 'torch' else cls.CAFFE_NETWORK + if cls.FRAMEWORK == 'torch': + return cls.TORCH_NETWORK + elif cls.FRAMEWORK == 'caffe': + return cls.CAFFE_NETWORK + elif cls.FRAMEWORK == 'tensorflow': + return cls.TENSORFLOW_NETWORK + else: + raise Exception('Unknown cls.FRAMEWORK "%s"' % cls.FRAMEWORK) class BaseViewsTestWithDataset(BaseViewsTest, @@ -147,10 +173,13 @@ class BaseViewsTestWithDataset(BaseViewsTest, AUG_ROT = None AUG_SCALE = None AUG_NOISE = None + AUG_CONTRAST = None + AUG_WHITENING = None AUG_HSV_USE = None AUG_HSV_H = None AUG_HSV_S = None AUG_HSV_V = None + OPTIMIZER = None @classmethod def setUpClass(cls): @@ -207,6 +236,10 @@ def create_model(cls, network=None, **kwargs): data['aug_scale'] = cls.AUG_SCALE if cls.AUG_NOISE is not None: data['aug_noise'] = cls.AUG_NOISE + if cls.AUG_CONTRAST is not None: + data['aug_contrast'] = cls.AUG_CONTRAST + if cls.AUG_WHITENING is not None: + data['aug_whitening'] = cls.AUG_WHITENING if cls.AUG_HSV_USE is not None: data['aug_hsv_use'] = cls.AUG_HSV_USE if cls.AUG_HSV_H is not None: @@ -215,6 +248,8 @@ def create_model(cls, network=None, **kwargs): data['aug_hsv_s'] = cls.AUG_HSV_S if cls.AUG_HSV_V is not None: data['aug_hsv_v'] = cls.AUG_HSV_V + if cls.OPTIMIZER is not None: + data['solver_type'] = cls.OPTIMIZER data.update(kwargs) @@ -491,6 +526,19 @@ def test_bad_network_definition(self): } end """ + elif self.FRAMEWORK == 'tensorflow': + bogus_net = """ +class UserModel(Tower): + + @model_property + def inference(self): + model = BogusCode(0) + return model + + @model_property + def loss(y): + return BogusCode(0) +""" job_id = self.create_model(json=True, network=bogus_net) assert self.model_wait_completion(job_id) == 'Error', 'job should have failed' job_info = self.job_info_html(job_id=job_id, job_type='models') @@ -799,6 +847,9 @@ def test_inference_while_training(self): # if no GPUs, just test inference during a normal training job # get number of GPUs + if self.FRAMEWORK == 'tensorflow': + raise unittest.SkipTest('Tensorflow CPU inference during training not supported') + gpu_count = 1 if (config_value('gpu_list') and config_value('caffe')['cuda_enabled'] and @@ -1004,6 +1055,10 @@ class BaseTestCreatedCropInNetwork(BaseTestCreated): croplen = croplen } end +""" + TENSORFLOW_NETWORK = \ + """ +@TODO(tzaman) """ ################################################################################ @@ -1069,6 +1124,36 @@ class 
TestCaffeCreatedCropInNetwork(BaseTestCreatedCropInNetwork, test_utils.Caf pass +@unittest.skipIf( + not CaffeFramework().can_accumulate_gradients(), + 'This version of Caffe cannot accumulate gradients') +class TestBatchAccumulationCaffe(BaseViewsTestWithDataset, test_utils.CaffeMixin): + TRAIN_EPOCHS = 1 + IMAGE_COUNT = 10 # per class + + def test_batch_accumulation_calculations(self): + batch_size = 10 + batch_accumulation = 2 + + job_id = self.create_model( + batch_size=batch_size, + batch_accumulation=batch_accumulation, + ) + assert self.model_wait_completion(job_id) == 'Done', 'create failed' + info = self.model_info(job_id) + solver = caffe_pb2.SolverParameter() + with open(os.path.join(info['directory'], info['solver file']), 'r') as infile: + text_format.Merge(infile.read(), solver) + assert solver.iter_size == batch_accumulation, \ + 'iter_size is %d instead of %d' % (solver.iter_size, batch_accumulation) + max_iter = int(math.ceil( + float(self.TRAIN_EPOCHS * self.IMAGE_COUNT * 3) / + (batch_size * batch_accumulation) + )) + assert solver.max_iter == max_iter,\ + 'max_iter is %d instead of %d' % (solver.max_iter, max_iter) + + class TestCaffeCreatedTallMultiStepLR(BaseTestCreatedTall, test_utils.CaffeMixin): LR_POLICY = 'multistep' LR_MULTISTEP_VALUES = '50,75,90' @@ -1111,6 +1196,10 @@ class TestCaffeLeNet(BaseTestCreated, test_utils.CaffeMixin): ).read() +class TestCaffeLeNetADAMOptimizer(TestCaffeLeNet): + OPTIMIZER = 'ADAM' + + class TestTorchCreatedCropInForm(BaseTestCreatedCropInForm, test_utils.TorchMixin): pass @@ -1149,12 +1238,16 @@ def test_inference_while_training(self): raise unittest.SkipTest('Torch CPU inference on CuDNN-trained model not supported') +class TestTorchLeNetADAMOptimizer(TestTorchLeNet): + OPTIMIZER = 'ADAM' + + class TestTorchLeNetHdf5Shuffle(TestTorchLeNet): BACKEND = 'hdf5' SHUFFLE = True -class TestPythonLayer(BaseViewsTestWithDataset, test_utils.CaffeMixin): +class TestCaffePythonLayer(BaseViewsTestWithDataset, test_utils.CaffeMixin): CAFFE_NETWORK = """\ layer { name: "hidden" @@ -1264,31 +1357,60 @@ def test_sweep(self): assert not self.model_exists(job_id), 'model exists after delete' -@unittest.skipIf( - not CaffeFramework().can_accumulate_gradients(), - 'This version of Caffe cannot accumulate gradients') -class TestBatchAccumulationCaffe(BaseViewsTestWithDataset, test_utils.CaffeMixin): - TRAIN_EPOCHS = 1 - IMAGE_COUNT = 10 # per class +# Tensorflow - def test_batch_accumulation_calculations(self): - batch_size = 10 - batch_accumulation = 2 - job_id = self.create_model( - batch_size=batch_size, - batch_accumulation=batch_accumulation, - ) - assert self.model_wait_completion(job_id) == 'Done', 'create failed' - info = self.model_info(job_id) - solver = caffe_pb2.SolverParameter() - with open(os.path.join(info['directory'], info['solver file']), 'r') as infile: - text_format.Merge(infile.read(), solver) - assert solver.iter_size == batch_accumulation, \ - 'iter_size is %d instead of %d' % (solver.iter_size, batch_accumulation) - max_iter = int(math.ceil( - float(self.TRAIN_EPOCHS * self.IMAGE_COUNT * 3) / - (batch_size * batch_accumulation) - )) - assert solver.max_iter == max_iter,\ - 'max_iter is %d instead of %d' % (solver.max_iter, max_iter) +class TestTensorflowCreation(BaseTestCreation, test_utils.TensorflowMixin): + pass + + +class TestTensorflowCreatedWideUnencodedShuffle(BaseTestCreatedWide, test_utils.TensorflowMixin): + ENCODING = 'none' + SHUFFLE = True + + +class TestTensorflowCreatedHdf5(BaseTestCreated, 
test_utils.TensorflowMixin): + BACKEND = 'hdf5' + + +class TestTensorflowCreatedTallHdf5Shuffle(BaseTestCreatedTall, test_utils.TensorflowMixin): + BACKEND = 'hdf5' + SHUFFLE = True + + +class TestTensorflowDatasetModelInteractions(BaseTestDatasetModelInteractions, test_utils.TensorflowMixin): + pass + + +class TestTensorflowCreatedDataAug(BaseTestCreatedDataAug, test_utils.TensorflowMixin): + AUG_FLIP = 'fliplrud' + AUG_NOISE = 0.03 + AUG_CONTRAST = 0.1 + AUG_WHITENING = True + AUG_HSV_USE = True + AUG_HSV_H = 0.02 + AUG_HSV_S = 0.04 + AUG_HSV_V = 0.06 + TRAIN_EPOCHS = 2 + + +class TestTensorflowCreatedWideMultiStepLR(BaseTestCreatedWide, test_utils.TensorflowMixin): + LR_POLICY = 'multistep' + LR_MULTISTEP_VALUES = '50,75,90' + + +class TestTensorflowLeNet(BaseTestCreated, test_utils.TensorflowMixin): + IMAGE_WIDTH = 28 + IMAGE_HEIGHT = 28 + TRAIN_EPOCHS = 20 + + # standard lenet model will adjust to color + # or grayscale images + TENSORFLOW_NETWORK = open(os.path.join(os.path.dirname(digits.__file__), + 'standard-networks', + 'tensorflow', + 'lenet.py')).read() + + +class TestTensorflowLeNetADAMOptimizer(TestTensorflowLeNet): + OPTIMIZER = 'ADAM' diff --git a/digits/model/images/classification/views.py b/digits/model/images/classification/views.py index 9d5d12012..0b7be6f0f 100644 --- a/digits/model/images/classification/views.py +++ b/digits/model/images/classification/views.py @@ -190,11 +190,8 @@ def create(): elif epoch == -1: pretrained_model = old_job.train_task().pretrained_model else: - for filename, e in old_job.train_task().snapshots: - if e == epoch: - pretrained_model = filename - break - + # verify snapshot exists + pretrained_model = old_job.train_task().get_snapshot(epoch, download=True) if pretrained_model is None: raise werkzeug.exceptions.BadRequest( "For the job %s, selected pretrained_model for epoch %d is invalid!" @@ -204,6 +201,8 @@ def create(): "Pretrained_model for the selected epoch doesn't exist. " "May be deleted by another user/process. 
" "Please restart the server to load the correct pretrained_model details.") + # get logical path + pretrained_model = old_job.train_task().get_snapshot(epoch) break elif form.method.data == 'pretrained': @@ -269,6 +268,8 @@ def create(): data_aug['rot'] = form.aug_rot.data data_aug['scale'] = form.aug_scale.data data_aug['noise'] = form.aug_noise.data + data_aug['contrast'] = form.aug_contrast.data + data_aug['whitening'] = form.aug_whitening.data data_aug['hsv_use'] = form.aug_hsv_use.data data_aug['hsv_h'] = form.aug_hsv_h.data data_aug['hsv_s'] = form.aug_hsv_s.data @@ -294,6 +295,7 @@ def create(): batch_size=form.batch_size.data[0], batch_accumulation=form.batch_accumulation.data, val_interval=form.val_interval.data, + traces_interval=form.traces_interval.data, pretrained_model=pretrained_model, crop_size=form.crop_size.data, use_mean=form.use_mean.data, @@ -344,6 +346,16 @@ def show(job, related_jobs=None): ) +@blueprint.route('/timeline_tracing', methods=['GET']) +def timeline_tracing(): + """ + Shows timeline trace of a model + """ + job = job_from_request() + + return flask.render_template('models/timeline_tracing.html', job=job) + + @blueprint.route('/large_graph', methods=['GET']) def large_graph(): """ @@ -351,7 +363,7 @@ def large_graph(): """ job = job_from_request() - return flask.render_template('models/images/classification/large_graph.html', job=job) + return flask.render_template('models/large_graph.html', job=job) @blueprint.route('/classify_one.json', methods=['POST']) @@ -694,8 +706,8 @@ def top_n(): def get_datasets(): return [(j.id(), j.name()) for j in sorted( - [j for j in scheduler.jobs.values() if isinstance(j, ImageClassificationDatasetJob) - and (j.status.is_running() or j.status == Status.DONE)], + [j for j in scheduler.jobs.values() if isinstance(j, ImageClassificationDatasetJob) and + (j.status.is_running() or j.status == Status.DONE)], cmp=lambda x, y: cmp(y.id(), x.id()) ) ] diff --git a/digits/model/images/forms.py b/digits/model/images/forms.py index 75bfdafec..da3407aee 100644 --- a/digits/model/images/forms.py +++ b/digits/model/images/forms.py @@ -86,6 +86,25 @@ class ImageModelForm(ModelForm): "preprocessing, assuming [0 1] pixel-value range. Suggested value is 0.03.") ) + aug_contrast = utils.forms.FloatField( + 'Contrast (factor)', + default=0, + validators=[ + validators.NumberRange(min=0, max=5) + ], + tooltip="Per channel, the mean of the channel is computed and then adjusts each component x " + "of each pixel to (x - mean) * contrast_factor + mean. The contrast_factor is picked " + "form a random uniform distribution to yield a value between [1-contrast_factor, " + "1+contrast_factor]. Suggested value is 0.8." + ) + + aug_whitening = utils.forms.BooleanField( + 'Whitening', + default=False, + validators=[], + tooltip="Per-image whitening by subtracting its own mean, and dividing by its own standard deviation." 
+ ) + aug_hsv_use = utils.forms.BooleanField( 'HSV Shifting', default=False, diff --git a/digits/model/images/generic/job.py b/digits/model/images/generic/job.py index 316bfcab9..7bacce97a 100644 --- a/digits/model/images/generic/job.py +++ b/digits/model/images/generic/job.py @@ -28,7 +28,7 @@ def job_type(self): def download_files(self, epoch=-1): task = self.train_task() - snapshot_filename = task.get_snapshot(epoch) + snapshot_filename = task.get_snapshot(epoch, download=True) # get model files model_files = task.get_model_files() diff --git a/digits/model/images/generic/test_views.py b/digits/model/images/generic/test_views.py index 0ddaf715f..e70c850a3 100644 --- a/digits/model/images/generic/test_views.py +++ b/digits/model/images/generic/test_views.py @@ -91,6 +91,25 @@ class BaseViewsTest(digits.test_views.BaseViewsTest): end """ + TENSORFLOW_NETWORK = \ + """ +class UserModel(Tower): + + @model_property + def inference(self): + ninputs = self.input_shape[0] * self.input_shape[1] * self.input_shape[2] + W = tf.get_variable('W', [ninputs, 2], initializer=tf.constant_initializer(0.0)) + b = tf.get_variable('b', [2], initializer=tf.constant_initializer(0.0)), + model = tf.reshape(self.x, shape=[-1, ninputs]) * 0.004 + model = tf.add(tf.matmul(model, W), b) + return model + + @model_property + def loss(self): + y = tf.reshape(self.y, shape=[-1, 2]) + return digits.mse_loss(self.inference, y) +""" + @classmethod def model_exists(cls, job_id): return cls.job_exists(job_id, 'models') @@ -116,7 +135,14 @@ def delete_model(cls, job_id): @classmethod def network(cls): - return cls.TORCH_NETWORK if cls.FRAMEWORK == 'torch' else cls.CAFFE_NETWORK + if cls.FRAMEWORK == 'torch': + return cls.TORCH_NETWORK + elif cls.FRAMEWORK == 'caffe': + return cls.CAFFE_NETWORK + elif cls.FRAMEWORK == 'tensorflow': + return cls.TENSORFLOW_NETWORK + else: + raise ValueError('Unknown framework %s' % cls.FRAMEWORK) class BaseViewsTestWithAnyDataset(BaseViewsTest): @@ -750,6 +776,24 @@ class BaseTestCreatedWithImageProcessingExtension( end """ + TENSORFLOW_NETWORK = \ + """ +class UserModel(Tower): + + @model_property + def inference(self): + scale = tf.get_variable('scale', [1], initializer=tf.constant_initializer(1.0)) + offset = tf.get_variable('offset', [1], initializer=tf.constant_initializer(0.)) + offset = tf.Print(offset,[scale, offset], message='scale offset') + model = self.x + offset + self.model = model + return tf.transpose(model, (0, 3, 1, 2)) # net output expected in NCHW format + + @model_property + def loss(self): + return digits.mse_loss(self.model, self.y) +""" + EXTENSION_ID = "image-processing" VARIABLE_SIZE_DATASET = False NUM_IMAGES = 100 @@ -787,9 +831,17 @@ def test_infer_one_json(self): data = json.loads(rv.data) data_shape = np.array(data['outputs']['output']).shape if not self.VARIABLE_SIZE_DATASET: - assert data_shape == (1, self.CHANNELS, self.IMAGE_WIDTH, self.IMAGE_HEIGHT) + if data_shape != (1, self.CHANNELS, self.IMAGE_WIDTH, self.IMAGE_HEIGHT): + raise ValueError("Shapes differ: got %s expected %s" % (repr(data_shape), + repr((1, + self.CHANNELS, + self.IMAGE_WIDTH, + self.IMAGE_HEIGHT)))) def test_infer_one_noresize_json(self): + if self.FRAMEWORK == 'tensorflow' and self.MEAN == 'image': + raise unittest.SkipTest('Mean image subtraction not supported on ' + 'variable-size input with Tensorflow') # create large random image shape = (self.CHANNELS, 10 * self.IMAGE_HEIGHT, 5 * self.IMAGE_WIDTH) x = np.random.randint( @@ -815,7 +867,8 @@ def test_infer_one_noresize_json(self): 
assert rv.status_code == 200, 'POST failed with %s' % rv.status_code data = json.loads(rv.data) data_shape = np.array(data['outputs']['output']).shape - assert data_shape == (1,) + shape + if data_shape != (1,) + shape: + raise ValueError("Shapes differ: got %s expected %s" % (repr(data_shape), repr((1,) + shape))) def test_infer_db(self): if self.VARIABLE_SIZE_DATASET: @@ -1255,3 +1308,55 @@ class TestAllInOneNetwork(BaseTestCreation, BaseTestCreated, test_utils.CaffeMix exclude { stage: "deploy" } } """ + + +class TestTensorflowCreation(BaseTestCreation, test_utils.TensorflowMixin): + pass + + +class TestTensorflowCreated(BaseTestCreated, test_utils.TensorflowMixin): + pass + + +class TestTensorflowCreatedWithGradientDataExtension(BaseTestCreatedWithGradientDataExtension, + test_utils.TensorflowMixin): + pass + + +class TestTensorflowCreatedWithGradientDataExtensionNoValSet(BaseTestCreatedWithGradientDataExtension, + test_utils.TensorflowMixin): + @classmethod + def setUpClass(cls): + super(TestTensorflowCreatedWithGradientDataExtensionNoValSet, cls).setUpClass(val_image_count=0) + + +class TestTensorflowCreatedWithImageProcessingExtensionMeanImage(BaseTestCreatedWithImageProcessingExtension, + test_utils.TensorflowMixin): + MEAN = 'image' + + +class TestTensorflowCreatedWithImageProcessingExtensionMeanPixel(BaseTestCreatedWithImageProcessingExtension, + test_utils.TensorflowMixin): + MEAN = 'pixel' + + +class TestTensorflowCreatedWithImageProcessingExtensionMeanNone(BaseTestCreatedWithImageProcessingExtension, + test_utils.TensorflowMixin): + MEAN = 'none' + + +class TestTensorflowCreatedVariableSizeDataset(BaseTestCreatedWithImageProcessingExtension, test_utils.TensorflowMixin): + MEAN = 'none' + VARIABLE_SIZE_DATASET = True + + @classmethod + def setUpClass(cls): + raise unittest.SkipTest('Variable-size dataset not supported in Tensorflow/DIGITS') + + +class TestTensorflowCreatedCropInForm(BaseTestCreatedCropInForm, test_utils.TensorflowMixin): + pass + + +class TestTensorflowDatasetModelInteractions(BaseTestDatasetModelInteractions, test_utils.TensorflowMixin): + pass diff --git a/digits/model/images/generic/views.py b/digits/model/images/generic/views.py index 01a9270d7..1d0d72bd3 100644 --- a/digits/model/images/generic/views.py +++ b/digits/model/images/generic/views.py @@ -153,11 +153,8 @@ def create(extension_id=None): elif epoch == -1: pretrained_model = old_job.train_task().pretrained_model else: - for filename, e in old_job.train_task().snapshots: - if e == epoch: - pretrained_model = filename - break - + # verify snapshot exists + pretrained_model = old_job.train_task().get_snapshot(epoch, download=True) if pretrained_model is None: raise werkzeug.exceptions.BadRequest( "For the job %s, selected pretrained_model for epoch %d is invalid!" @@ -167,6 +164,8 @@ def create(extension_id=None): "Pretrained_model for the selected epoch doesn't exist. " "May be deleted by another user/process. 
" "Please restart the server to load the correct pretrained_model details.") + # get logical path + pretrained_model = old_job.train_task().get_snapshot(epoch) break elif form.method.data == 'pretrained': pretrained_job = scheduler.get_job(form.pretrained_networks.data) @@ -228,6 +227,8 @@ def create(extension_id=None): data_aug['rot'] = form.aug_rot.data data_aug['scale'] = form.aug_scale.data data_aug['noise'] = form.aug_noise.data + data_aug['contrast'] = form.aug_contrast.data + data_aug['whitening'] = form.aug_whitening.data data_aug['hsv_use'] = form.aug_hsv_use.data data_aug['hsv_h'] = form.aug_hsv_h.data data_aug['hsv_s'] = form.aug_hsv_s.data @@ -253,6 +254,7 @@ def create(extension_id=None): batch_size=form.batch_size.data[0], batch_accumulation=form.batch_accumulation.data, val_interval=form.val_interval.data, + traces_interval=form.traces_interval.data, pretrained_model=pretrained_model, crop_size=form.crop_size.data, use_mean=form.use_mean.data, @@ -292,32 +294,28 @@ def show(job, related_jobs=None): """ Called from digits.model.views.models_show() """ + data_extensions = get_data_extensions() view_extensions = get_view_extensions() - inference_form_html = None - if isinstance(job.dataset, GenericDatasetJob): - extension_class = extensions.data.get_extension(job.dataset.extension_id) - if not extension_class: - raise RuntimeError("Unable to find data extension with ID=%s" - % job.dataset.extension_id) - extension_userdata = job.dataset.extension_userdata - extension_userdata.update({'is_inference_db': True}) - extension = extension_class(**extension_userdata) - - form = extension.get_inference_form() - if form: - template, context = extension.get_inference_template(form) - inference_form_html = flask.render_template_string(template, **context) - return flask.render_template( 'models/images/generic/show.html', job=job, + data_extensions=data_extensions, view_extensions=view_extensions, related_jobs=related_jobs, - inference_form_html=inference_form_html, ) +@blueprint.route('/timeline_tracing', methods=['GET']) +def timeline_tracing(): + """ + Shows timeline trace of a model + """ + job = job_from_request() + + return flask.render_template('models/timeline_tracing.html', job=job) + + @blueprint.route('/large_graph', methods=['GET']) def large_graph(): """ @@ -325,7 +323,7 @@ def large_graph(): """ job = job_from_request() - return flask.render_template('models/images/generic/large_graph.html', job=job) + return flask.render_template('models/large_graph.html', job=job) @blueprint.route('/infer_one.json', methods=['POST']) @@ -433,8 +431,13 @@ def infer_extension(): inference_db_job = None try: + if 'data_extension_id' in flask.request.form: + data_extension_id = flask.request.form['data_extension_id'] + else: + data_extension_id = model_job.dataset.extension_id + # create an inference database - inference_db_job = create_inference_db(model_job) + inference_db_job = create_inference_db(model_job, data_extension_id) db_path = inference_db_job.get_feature_db_path(constants.TEST_DB) # create database creation job @@ -715,10 +718,13 @@ def infer_many(): ), status_code -def create_inference_db(model_job): +def create_inference_db(model_job, data_extension_id): # create instance of extension class - extension_class = extensions.data.get_extension(model_job.dataset.extension_id) - extension_userdata = model_job.dataset.extension_userdata + extension_class = extensions.data.get_extension(data_extension_id) + if hasattr(model_job.dataset, 'extension_userdata'): + extension_userdata = 
model_job.dataset.extension_userdata + else: + extension_userdata = {} extension_userdata.update({'is_inference_db': True}) extension = extension_class(**extension_userdata) @@ -742,7 +748,7 @@ def create_inference_db(model_job): batch_size=1, num_threads=1, force_same_shape=0, - extension_id=model_job.dataset.extension_id, + extension_id=data_extension_id, extension_userdata=extension.get_user_data(), ) @@ -766,13 +772,12 @@ def create_inference_db(model_job): def get_datasets(extension_id): if extension_id: jobs = [j for j in scheduler.jobs.values() - if isinstance(j, GenericDatasetJob) - and j.extension_id == extension_id - and (j.status.is_running() or j.status == Status.DONE)] + if isinstance(j, GenericDatasetJob) and + j.extension_id == extension_id and (j.status.is_running() or j.status == Status.DONE)] else: jobs = [j for j in scheduler.jobs.values() - if (isinstance(j, GenericImageDatasetJob) or isinstance(j, GenericDatasetJob)) - and (j.status.is_running() or j.status == Status.DONE)] + if (isinstance(j, GenericImageDatasetJob) or isinstance(j, GenericDatasetJob)) and + (j.status.is_running() or j.status == Status.DONE)] return [(j.id(), j.name()) for j in sorted(jobs, cmp=lambda x, y: cmp(y.id(), x.id()))] @@ -862,6 +867,17 @@ def get_pretrained_networks_fulldetails(): ] +def get_data_extensions(): + """ + return all enabled data extensions + """ + data_extensions = {"all-default": "Default"} + all_extensions = extensions.data.get_extensions() + for extension in all_extensions: + data_extensions[extension.get_id()] = extension.get_title() + return data_extensions + + def get_view_extensions(): """ return all enabled view extensions diff --git a/digits/model/tasks/__init__.py b/digits/model/tasks/__init__.py index ffbd05bd0..1a4ac7a8e 100644 --- a/digits/model/tasks/__init__.py +++ b/digits/model/tasks/__init__.py @@ -10,3 +10,9 @@ 'TorchTrainTask', 'TrainTask', ] + +from digits.config import config_value # noqa + +if config_value('tensorflow')['enabled']: + from .tensorflow_train import TensorflowTrainTask # noqa + __all__.append('TensorflowTrainTask') diff --git a/digits/model/tasks/caffe_train.py b/digits/model/tasks/caffe_train.py index 36e0ac6a5..6c7ca0191 100644 --- a/digits/model/tasks/caffe_train.py +++ b/digits/model/tasks/caffe_train.py @@ -934,8 +934,7 @@ def task_arguments(self, resources, env): args.append('--gpu=%s' % identifiers[0]) elif len(identifiers) > 1: if config_value('caffe')['flavor'] == 'NVIDIA': - if (utils.parse_version(config_value('caffe')['version']) - < utils.parse_version('0.14.0-alpha')): + if (utils.parse_version(config_value('caffe')['version']) < utils.parse_version('0.14.0-alpha')): # Prior to version 0.14, NVcaffe used the --gpus switch args.append('--gpus=%s' % ','.join(identifiers)) else: diff --git a/digits/model/tasks/tensorflow_train.py b/digits/model/tasks/tensorflow_train.py new file mode 100644 index 000000000..c98205daa --- /dev/null +++ b/digits/model/tasks/tensorflow_train.py @@ -0,0 +1,978 @@ +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 
+from __future__ import absolute_import + +import operator +import os +import re +import shutil +import subprocess +import tempfile +import time +import sys + +import h5py +import numpy as np + +from .train import TrainTask +import digits +from digits import utils +from digits.utils import subclass, override, constants +import tensorflow as tf + +# NOTE: Increment this everytime the pickled object changes +PICKLE_VERSION = 1 + +# Constants +TENSORFLOW_MODEL_FILE = 'network.py' +TENSORFLOW_SNAPSHOT_PREFIX = 'snapshot' +TIMELINE_PREFIX = 'timeline' + + +def _int64_feature(value): + return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) + + +def _bytes_feature(value): + return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) + + +def _float_array_feature(value): + return tf.train.Feature(float_list=tf.train.FloatList(value=value)) + + +def subprocess_visible_devices(gpus): + """ + Calculates CUDA_VISIBLE_DEVICES for a subprocess + """ + if not isinstance(gpus, list): + raise ValueError('gpus should be a list') + gpus = [int(g) for g in gpus] + + old_cvd = os.environ.get('CUDA_VISIBLE_DEVICES', None) + if old_cvd is None: + real_gpus = gpus + else: + map_visible_to_real = {} + for visible, real in enumerate(old_cvd.split(',')): + map_visible_to_real[visible] = int(real) + real_gpus = [] + for visible_gpu in gpus: + real_gpus.append(map_visible_to_real[visible_gpu]) + return ','.join(str(g) for g in real_gpus) + + +@subclass +class TensorflowTrainTask(TrainTask): + """ + Trains a tensorflow model + """ + + TENSORFLOW_LOG = 'tensorflow_output.log' + + def __init__(self, **kwargs): + """ + Arguments: + network -- a NetParameter defining the network + """ + super(TensorflowTrainTask, self).__init__(**kwargs) + + # save network description to file + with open(os.path.join(self.job_dir, TENSORFLOW_MODEL_FILE), "w") as outfile: + outfile.write(self.network) + + self.pickver_task_tensorflow_train = PICKLE_VERSION + + self.current_epoch = 0 + + self.loaded_snapshot_file = None + self.loaded_snapshot_epoch = None + self.image_mean = None + self.classifier = None + self.solver = None + + self.model_file = TENSORFLOW_MODEL_FILE + self.train_file = constants.TRAIN_DB + self.val_file = constants.VAL_DB + self.snapshot_prefix = TENSORFLOW_SNAPSHOT_PREFIX + self.log_file = self.TENSORFLOW_LOG + + def __getstate__(self): + state = super(TensorflowTrainTask, self).__getstate__() + + # Don't pickle these things + if 'labels' in state: + del state['labels'] + if 'image_mean' in state: + del state['image_mean'] + if 'classifier' in state: + del state['classifier'] + if 'tensorflow_log' in state: + del state['tensorflow_log'] + + return state + + def __setstate__(self, state): + super(TensorflowTrainTask, self).__setstate__(state) + + # Make changes to self + self.loaded_snapshot_file = None + self.loaded_snapshot_epoch = None + + # These things don't get pickled + self.image_mean = None + self.classifier = None + + # Task overrides + @override + def name(self): + return 'Train Tensorflow Model' + + @override + def before_run(self): + super(TensorflowTrainTask, self).before_run() + self.tensorflow_log = open(self.path(self.TENSORFLOW_LOG), 'a') + self.saving_snapshot = False + self.receiving_train_output = False + self.receiving_val_output = False + self.last_train_update = None + self.displaying_network = False + self.temp_unrecognized_output = [] + return True + + @override + def get_snapshot(self, epoch=-1, download=False): + """ + return snapshot file for specified epoch + """ + 
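+        # a download request must point at the checkpoint's data file
+        # (".data-00000-of-00001") rather than just the snapshot prefix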
snapshot_filename = None + + if len(self.snapshots) == 0: + return "no snapshots" + + if epoch == -1 or not epoch: + epoch = self.snapshots[-1][1] + snapshot_filename = self.snapshots[-1][0] + else: + for f, e in self.snapshots: + if e == epoch: + snapshot_filename = f + break + if not snapshot_filename: + raise ValueError('Invalid epoch') + if download: + snapshot_filename = snapshot_filename + ".data-00000-of-00001" + + return snapshot_filename + + @override + def task_arguments(self, resources, env): + + args = [sys.executable, + os.path.join(os.path.dirname(os.path.abspath(digits.__file__)), 'tools', 'tensorflow', 'main.py'), + '--network=%s' % self.model_file, + '--epoch=%d' % int(self.train_epochs), + '--networkDirectory=%s' % self.job_dir, + '--save=%s' % self.job_dir, + '--snapshotPrefix=%s' % self.snapshot_prefix, + '--snapshotInterval=%s' % self.snapshot_interval, + '--lr_base_rate=%s' % self.learning_rate, + '--lr_policy=%s' % str(self.lr_policy['policy']) + ] + + if self.batch_size is not None: + args.append('--batch_size=%d' % self.batch_size) + + if self.use_mean != 'none': + mean_file = self.dataset.get_mean_file() + assert mean_file is not None, 'Failed to retrieve mean file.' + args.append('--mean=%s' % self.dataset.path(mean_file)) + + if hasattr(self.dataset, 'labels_file'): + args.append('--labels_list=%s' % self.dataset.path(self.dataset.labels_file)) + + train_feature_db_path = self.dataset.get_feature_db_path(constants.TRAIN_DB) + train_label_db_path = self.dataset.get_label_db_path(constants.TRAIN_DB) + val_feature_db_path = self.dataset.get_feature_db_path(constants.VAL_DB) + val_label_db_path = self.dataset.get_label_db_path(constants.VAL_DB) + + args.append('--train_db=%s' % train_feature_db_path) + if train_label_db_path: + args.append('--train_labels=%s' % train_label_db_path) + if val_feature_db_path: + args.append('--validation_db=%s' % val_feature_db_path) + if val_label_db_path: + args.append('--validation_labels=%s' % val_label_db_path) + + # learning rate policy input parameters + if self.lr_policy['policy'] == 'fixed': + pass + elif self.lr_policy['policy'] == 'step': + args.append('--lr_gamma=%s' % self.lr_policy['gamma']) + args.append('--lr_stepvalues=%s' % self.lr_policy['stepsize']) + elif self.lr_policy['policy'] == 'multistep': + args.append('--lr_stepvalues=%s' % self.lr_policy['stepvalue']) + args.append('--lr_gamma=%s' % self.lr_policy['gamma']) + elif self.lr_policy['policy'] == 'exp': + args.append('--lr_gamma=%s' % self.lr_policy['gamma']) + elif self.lr_policy['policy'] == 'inv': + args.append('--lr_gamma=%s' % self.lr_policy['gamma']) + args.append('--lr_power=%s' % self.lr_policy['power']) + elif self.lr_policy['policy'] == 'poly': + args.append('--lr_power=%s' % self.lr_policy['power']) + elif self.lr_policy['policy'] == 'sigmoid': + args.append('--lr_stepvalues=%s' % self.lr_policy['stepsize']) + args.append('--lr_gamma=%s' % self.lr_policy['gamma']) + + if self.shuffle: + args.append('--shuffle=1') + + if self.crop_size: + args.append('--croplen=%d' % self.crop_size) + + if self.use_mean == 'pixel': + args.append('--subtractMean=pixel') + elif self.use_mean == 'image': + args.append('--subtractMean=image') + else: + args.append('--subtractMean=none') + + if self.random_seed is not None: + args.append('--seed=%s' % self.random_seed) + + if self.solver_type == 'SGD': + args.append('--optimization=sgd') + elif self.solver_type == 'ADADELTA': + args.append('--optimization=adadelta') + elif self.solver_type == 'ADAGRAD': + 
args.append('--optimization=adagrad') + elif self.solver_type == 'ADAGRADDA': + args.append('--optimization=adagradda') + elif self.solver_type == 'MOMENTUM': + args.append('--optimization=momentum') + elif self.solver_type == 'ADAM': + args.append('--optimization=adam') + elif self.solver_type == 'FTRL': + args.append('--optimization=ftrl') + elif self.solver_type == 'RMSPROP': + args.append('--optimization=rmsprop') + else: + raise ValueError('Unknown solver_type %s' % self.solver_type) + + if self.val_interval is not None: + args.append('--validation_interval=%d' % self.val_interval) + + # if self.traces_interval is not None: + args.append('--log_runtime_stats_per_step=%d' % self.traces_interval) + + if 'gpus' in resources: + identifiers = [] + for identifier, value in resources['gpus']: + identifiers.append(identifier) + # make all selected GPUs visible to the process. + # don't make other GPUs visible though since the process will load + # CUDA libraries and allocate memory on all visible GPUs by + # default. + env['CUDA_VISIBLE_DEVICES'] = subprocess_visible_devices(identifiers) + + if self.pretrained_model: + args.append('--weights=%s' % self.path(self.pretrained_model)) + + # Augmentations + assert self.data_aug['flip'] in ['none', 'fliplr', 'flipud', 'fliplrud'], 'Bad or unknown flag "flip"' + args.append('--augFlip=%s' % self.data_aug['flip']) + + if self.data_aug['noise']: + args.append('--augNoise=%s' % self.data_aug['noise']) + + if self.data_aug['contrast']: + args.append('--augContrast=%s' % self.data_aug['contrast']) + + if self.data_aug['whitening']: + args.append('--augWhitening=1') + + if self.data_aug['hsv_use']: + args.append('--augHSVh=%s' % self.data_aug['hsv_h']) + args.append('--augHSVs=%s' % self.data_aug['hsv_s']) + args.append('--augHSVv=%s' % self.data_aug['hsv_v']) + else: + args.append('--augHSVh=0') + args.append('--augHSVs=0') + args.append('--augHSVv=0') + + return args + + @override + def process_output(self, line): + self.tensorflow_log.write('%s\n' % line) + self.tensorflow_log.flush() + + # parse tensorflow output + timestamp, level, message = self.preprocess_output_tensorflow(line) + + # return false when unrecognized output is encountered + if not level: + # network display in progress + if self.displaying_network: + self.temp_unrecognized_output.append(line) + return True + return False + + if not message: + return True + + # network display ends + if self.displaying_network: + if message.startswith('Network definition ends'): + self.temp_unrecognized_output = [] + self.displaying_network = False + return True + + # Distinguish between a Validation and Training stage epoch + pattern_stage_epoch = re.compile(r'(Validation|Training)\ \(\w+\ ([^\ ]+)\)\:\ (.*)') + for (stage, epoch, kvlist) in re.findall(pattern_stage_epoch, message): + epoch = float(epoch) + self.send_progress_update(epoch) + pattern_key_val = re.compile(r'([\w\-_]+)\ =\ ([^,^\ ]+)') + # Now iterate through the keys and values on this line dynamically + for (key, value) in re.findall(pattern_key_val, kvlist): + assert not('Inf' in value or 'NaN' in value), 'Network reported %s for %s.' 
% (value, key) + value = float(value) + if key == 'lr': + key = 'learning_rate' # Convert to special DIGITS key for learning rate + if stage == 'Training': + self.save_train_output(key, key, value) + elif stage == 'Validation': + self.save_val_output(key, key, value) + self.logger.debug('Network validation %s #%s: %s' % (key, epoch, value)) + else: + self.logger.error('Unknown stage found other than training or validation: %s' % (stage)) + self.logger.debug(message) + return True + + # timeline trace saved + if message.startswith('Timeline trace written to'): + self.logger.info(message) + self.detect_timeline_traces() + return True + + # snapshot saved + if self.saving_snapshot: + if message.startswith('Snapshot saved'): + self.logger.info(message) + self.detect_snapshots() + self.send_snapshot_update() + self.saving_snapshot = False + return True + + # snapshot starting + match = re.match(r'Snapshotting to (.*)\s*$', message) + if match: + self.saving_snapshot = True + return True + + # network display starting + if message.startswith('Network definition:'): + self.displaying_network = True + return True + + if level in ['error', 'critical']: + self.logger.error('%s: %s' % (self.name(), message)) + self.exception = message + return True + + # skip remaining info and warn messages + return True + + @staticmethod + def preprocess_output_tensorflow(line): + """ + Takes line of output and parses it according to tensorflow's output format + Returns (timestamp, level, message) or (None, None, None) + """ + # NOTE: This must change when the logging format changes + # LMMDD HH:MM:SS.MICROS pid file:lineno] message + match = re.match(r'(\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2})\s\[(\w+)\s*]\s+(\S.*)$', line) + if match: + timestamp = time.mktime(time.strptime(match.group(1), '%Y-%m-%d %H:%M:%S')) + level = match.group(2) + message = match.group(3) + if level == 'INFO': + level = 'info' + elif level == 'WARNING': + level = 'warning' + elif level == 'ERROR': + level = 'error' + elif level == 'FAIL': # FAIL + level = 'critical' + return (timestamp, level, message) + else: + # self.logger.warning('Unrecognized task output "%s"' % line) + return (None, None, None) + + def send_snapshot_update(self): + """ + Sends socketio message about the snapshot list + """ + # TODO: move to TrainTask + from digits.webapp import socketio + + socketio.emit('task update', {'task': self.html_id(), + 'update': 'snapshots', + 'data': self.snapshot_list()}, + namespace='/jobs', + room=self.job_id) + + # TrainTask overrides + @override + def after_run(self): + if self.temp_unrecognized_output: + if self.traceback: + self.traceback = self.traceback + ('\n'.join(self.temp_unrecognized_output)) + else: + self.traceback = '\n'.join(self.temp_unrecognized_output) + self.temp_unrecognized_output = [] + self.tensorflow_log.close() + + @override + def after_runtime_error(self): + if os.path.exists(self.path(self.TENSORFLOW_LOG)): + output = subprocess.check_output(['tail', '-n40', self.path(self.TENSORFLOW_LOG)]) + lines = [] + for line in output.split('\n'): + # parse tensorflow header + timestamp, level, message = self.preprocess_output_tensorflow(line) + + if message: + lines.append(message) + # return the last 20 lines + traceback = '\n\nLast output:\n' + '\n'.join(lines[len(lines)-20:]) if len(lines) > 0 else '' + if self.traceback: + self.traceback = self.traceback + traceback + else: + self.traceback = traceback + + if 'DIGITS_MODE_TEST' in os.environ: + print output + + @override + def detect_timeline_traces(self): + 
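# Scan the job directory for timeline_<step>.json trace files written by the tensorflow tool and keep them sorted by step. +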
timeline_traces = [] + for filename in os.listdir(self.job_dir): + # find timeline jsons + match = re.match(r'%s_(.*)\.json$' % TIMELINE_PREFIX, filename) + if match: + step = int(match.group(1)) + timeline_traces.append((os.path.join(self.job_dir, filename), step)) + self.timeline_traces = sorted(timeline_traces, key=lambda tup: tup[1]) + return len(self.timeline_traces) > 0 + + @override + def detect_snapshots(self): + self.snapshots = [] + snapshots = [] + for filename in os.listdir(self.job_dir): + # find models + match = re.match(r'%s_(\d+)\.?(\d*)\.ckpt\.index$' % self.snapshot_prefix, filename) + if match: + epoch = 0 + # remove '.index' suffix from filename + filename = filename[:-6] + if match.group(2) == '': + epoch = int(match.group(1)) + else: + epoch = float(match.group(1) + '.' + match.group(2)) + snapshots.append((os.path.join(self.job_dir, filename), epoch)) + self.snapshots = sorted(snapshots, key=lambda tup: tup[1]) + return len(self.snapshots) > 0 + + @override + def est_next_snapshot(self): + # TODO: Currently this function is not in use. Probably in future we may have to implement this + return None + + @override + def infer_one(self, + data, + snapshot_epoch=None, + layers=None, + gpu=None, + resize=True): + # resize parameter is unused + return self.infer_one_image(data, + snapshot_epoch=snapshot_epoch, + layers=layers, + gpu=gpu) + + def infer_one_image(self, image, snapshot_epoch=None, layers=None, gpu=None): + """ + Classify an image + Returns (predictions, visualizations) + predictions -- an array of [ (label, confidence), ...] for each label, sorted by confidence + visualizations -- an array of (layer_name, activations, weights) for the specified layers + Returns (None, None) if something goes wrong + + Arguments: + image -- a np.array + + Keyword arguments: + snapshot_epoch -- which snapshot to use + layers -- which layer activation[s] and weight[s] to visualize + """ + temp_image_handle, temp_image_path = tempfile.mkstemp(suffix='.tfrecords') + os.close(temp_image_handle) + if image.ndim < 3: + image = image[..., np.newaxis] + writer = tf.python_io.TFRecordWriter(temp_image_path) + + image = image.astype('float') + record = tf.train.Example(features=tf.train.Features(feature={ + 'height': _int64_feature(image.shape[0]), + 'width': _int64_feature(image.shape[1]), + 'depth': _int64_feature(image.shape[2]), + 'image_raw': _float_array_feature(image.flatten()), + 'label': _int64_feature(0), + 'encoding': _int64_feature(0)})) + writer.write(record.SerializeToString()) + writer.close() + + file_to_load = self.get_snapshot(snapshot_epoch) + + args = [sys.executable, + os.path.join(os.path.dirname(os.path.abspath(digits.__file__)), 'tools', 'tensorflow', 'main.py'), + '--inference_db=%s' % temp_image_path, + '--network=%s' % self.model_file, + '--networkDirectory=%s' % self.job_dir, + '--weights=%s' % file_to_load, + '--allPredictions=1', + '--batch_size=1', + ] + if hasattr(self.dataset, 'labels_file'): + args.append('--labels_list=%s' % self.dataset.path(self.dataset.labels_file)) + + if self.use_mean != 'none': + mean_file = self.dataset.get_mean_file() + assert mean_file is not None, 'Failed to retrieve mean file.' 
+ args.append('--mean=%s' % self.dataset.path(mean_file)) + + if self.use_mean == 'pixel': + args.append('--subtractMean=pixel') + elif self.use_mean == 'image': + args.append('--subtractMean=image') + else: + args.append('--subtractMean=none') + + if self.crop_size: + args.append('--croplen=%d' % self.crop_size) + + if layers == 'all': + args.append('--visualize_inf=1') + args.append('--save=%s' % self.job_dir) + + # Convert them all to strings + args = [str(x) for x in args] + + self.logger.info('%s classify one task started.' % self.get_framework_id()) + + unrecognized_output = [] + predictions = [] + self.visualization_file = None + + env = os.environ.copy() + + if gpu is not None: + # make only the selected GPU visible + env['CUDA_VISIBLE_DEVICES'] = subprocess_visible_devices([gpu]) + + p = subprocess.Popen(args, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + cwd=self.job_dir, + close_fds=True, + env=env) + + try: + while p.poll() is None: + for line in utils.nonblocking_readlines(p.stdout): + if self.aborted.is_set(): + p.terminate() + raise digits.inference.errors.InferenceError('%s classify one task got aborted. error code - %d' % (self.get_framework_id(), p.returncode)) # noqa + + if line is not None and len(line) > 1: + if not self.process_test_output(line, predictions, 'one'): + self.logger.warning('%s classify one task unrecognized input: %s' % ( + self.get_framework_id(), line.strip())) + unrecognized_output.append(line) + else: + time.sleep(0.05) + except Exception as e: + if p.poll() is None: + p.terminate() + error_message = '' + if type(e) == digits.inference.errors.InferenceError: + error_message = e.__str__() + else: + error_message = '%s classify one task failed with error code %d \n %s' % ( + self.get_framework_id(), p.returncode, str(e)) + self.logger.error(error_message) + if unrecognized_output: + unrecognized_output = '\n'.join(unrecognized_output) + error_message = error_message + unrecognized_output + raise digits.inference.errors.InferenceError(error_message) + + finally: + self.after_test_run(temp_image_path) + + if p.returncode != 0: + error_message = '%s classify one task failed with error code %d' % (self.get_framework_id(), p.returncode) + self.logger.error(error_message) + if unrecognized_output: + unrecognized_output = '\n'.join(unrecognized_output) + error_message = error_message + unrecognized_output + raise digits.inference.errors.InferenceError(error_message) + else: + self.logger.info('%s classify one task completed.' 
% self.get_framework_id()) + + predictions = {'output': np.array(predictions)} + + visualizations = [] + + if layers == 'all' and self.visualization_file: + vis_db = h5py.File(self.visualization_file, 'r') + # the HDF5 database is organized as follows: + # + # |- layers + # |- 1 + # | [attrs] - op + # | [attrs] - var + # | |- activations + # | |- weights + # |- 2 + for layer_id, layer in vis_db['layers'].items(): + op_name = layer.attrs['op'] + var_name = layer.attrs['var'] + layer_desc = "%s\n%s" % (op_name, var_name) + idx = int(layer_id) + # activations (tf: operation outputs) + if 'activations' in layer: + data = np.array(layer['activations'][...]) + if len(data.shape) > 1 and data.shape[0] == 1: + # skip batch dimension + data = data[0] + if len(data.shape) == 3: + data = data.transpose(2, 0, 1) + elif len(data.shape) == 4: + data = data.transpose(3, 2, 0, 1) + vis = utils.image.get_layer_vis_square(data) + mean, std, hist = self.get_layer_statistics(data) + visualizations.append( + { + 'id': idx, + 'name': layer_desc, + 'vis_type': 'Activations', + 'vis': vis, + 'data_stats': { + 'shape': data.shape, + 'mean': mean, + 'stddev': std, + 'histogram': hist, + } + } + ) + # weights (tf: variables) + if 'weights' in layer: + data = np.array(layer['weights'][...]) + if len(data.shape) == 3: + data = data.transpose(2, 0, 1) + elif len(data.shape) == 4: + data = data.transpose(3, 2, 0, 1) + if 'MatMul' in layer_desc: + vis = None # too many layers to display? + else: + vis = utils.image.get_layer_vis_square(data) + mean, std, hist = self.get_layer_statistics(data) + parameter_count = reduce(operator.mul, data.shape, 1) + visualizations.append( + { + 'id': idx, + 'name': layer_desc, + 'vis_type': 'Weights', + 'vis': vis, + 'param_count': parameter_count, + 'data_stats': { + 'shape': data.shape, + 'mean': mean, + 'stddev': std, + 'histogram': hist, + } + } + ) + # sort by layer ID + visualizations = sorted(visualizations, key=lambda x: x['id']) + return (predictions, visualizations) + + def get_layer_statistics(self, data): + """ + Returns statistics for the given layer data: + (mean, standard deviation, histogram) + histogram -- [y, x, ticks] + + Arguments: + data -- a np.ndarray + """ + # These calculations can be super slow + mean = np.mean(data) + std = np.std(data) + y, x = np.histogram(data, bins=20) + y = list(y) + ticks = x[[0, len(x)/2, -1]] + x = [(x[i]+x[i+1])/2.0 for i in xrange(len(x)-1)] + ticks = list(ticks) + return (mean, std, [y, x, ticks]) + + def after_test_run(self, temp_image_path): + try: + os.remove(temp_image_path) + except OSError: + pass + + def process_test_output(self, line, predictions, test_category): + # parse torch output + timestamp, level, message = self.preprocess_output_tensorflow(line) + + # return false when unrecognized output is encountered + if not (level or message): + return False + + if not message: + return True + + float_exp = '([-]?inf|nan|[-+]?[0-9]*\.?[0-9]+(e[-+]?[0-9]+)?)' + + # format of output while testing single image + match = re.match(r'For image \d+, predicted class \d+: \d+ \((.*?)\) %s' % (float_exp), message) + if match: + label = match.group(1) + confidence = match.group(2) + assert not('inf' in confidence or 'nan' in confidence), 'Network reported %s for confidence value. 
Please check image and network' % label # noqa + confidence = float(confidence) + predictions.append((label, confidence)) + return True + + # format of output while testing multiple images + match = re.match(r'Predictions for image \d+: (.*)', message) + if match: + values = match.group(1).strip() + # 'values' should contain a JSON representation of + # the prediction + predictions.append(eval(values)) + return True + + # path to visualization file + match = re.match(r'Saving visualization to (.*)', message) + if match: + self.visualization_file = match.group(1).strip() + return True + + # displaying info and warn messages as we aren't maintaining separate log file for model testing + if level == 'info': + self.logger.debug('%s classify %s task : %s' % (self.get_framework_id(), test_category, message)) + return True + if level == 'warning': + self.logger.warning('%s classify %s task : %s' % (self.get_framework_id(), test_category, message)) + return True + + if level in ['error', 'critical']: + raise digits.inference.errors.InferenceError('%s classify %s task failed with error message - %s' % ( + self.get_framework_id(), test_category, message)) + + return False # control should never reach this line. + + @override + def infer_many(self, data, snapshot_epoch=None, gpu=None, resize=True): + # resize parameter is unused + return self.infer_many_images(data, snapshot_epoch=snapshot_epoch, gpu=gpu) + + def infer_many_images(self, images, snapshot_epoch=None, gpu=None): + """ + Returns (labels, results): + labels -- an array of strings + results -- a 2D np array: + [ + [image0_label0_confidence, image0_label1_confidence, ...], + [image1_label0_confidence, image1_label1_confidence, ...], + ... + ] + + Arguments: + images -- a list of np.arrays + + Keyword arguments: + snapshot_epoch -- which snapshot to use + """ + + # create a temporary folder to store images and a temporary file + # to store a list of paths to the images + temp_dir_path = tempfile.mkdtemp(suffix='.tfrecords') + try: # this try...finally clause is used to clean up the temp directory in any case + with open(os.path.join(temp_dir_path, 'list.txt'), 'w') as imglist_file: + for image in images: + if image.ndim < 3: + image = image[..., np.newaxis] + image = image.astype('float') + temp_image_handle, temp_image_path = tempfile.mkstemp(dir=temp_dir_path, suffix='.tfrecords') + writer = tf.python_io.TFRecordWriter(temp_image_path) + record = tf.train.Example(features=tf.train.Features(feature={ + 'height': _int64_feature(image.shape[0]), + 'width': _int64_feature(image.shape[1]), + 'depth': _int64_feature(image.shape[2]), + 'image_raw': _float_array_feature(image.flatten()), + 'label': _int64_feature(0), + 'encoding': _int64_feature(0)})) + writer.write(record.SerializeToString()) + writer.close() + imglist_file.write("%s\n" % temp_image_path) + os.close(temp_image_handle) + + file_to_load = self.get_snapshot(snapshot_epoch) + + args = [sys.executable, + os.path.join(os.path.dirname(os.path.abspath(digits.__file__)), 'tools', 'tensorflow', 'main.py'), + '--testMany=1', + '--allPredictions=1', # all predictions are grabbed and formatted as required by DIGITS + '--inference_db=%s' % str(temp_dir_path), + '--network=%s' % self.model_file, + '--networkDirectory=%s' % self.job_dir, + '--weights=%s' % file_to_load, + ] + + if hasattr(self.dataset, 'labels_file'): + args.append('--labels_list=%s' % self.dataset.path(self.dataset.labels_file)) + + if self.use_mean != 'none': + mean_file = self.dataset.get_mean_file() + assert mean_file is not 
None, 'Failed to retrieve mean file.' + args.append('--mean=%s' % self.dataset.path(mean_file)) + + if self.use_mean == 'pixel': + args.append('--subtractMean=pixel') + elif self.use_mean == 'image': + args.append('--subtractMean=image') + else: + args.append('--subtractMean=none') + if self.crop_size: + args.append('--croplen=%d' % self.crop_size) + + # Convert them all to strings + args = [str(x) for x in args] + + self.logger.info('%s classify many task started.' % self.name()) + + env = os.environ.copy() + if gpu is not None: + # make only the selected GPU visible + env['CUDA_VISIBLE_DEVICES'] = subprocess_visible_devices([gpu]) + + unrecognized_output = [] + predictions = [] + p = subprocess.Popen(args, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + cwd=self.job_dir, + close_fds=True, + env=env) + + try: + while p.poll() is None: + for line in utils.nonblocking_readlines(p.stdout): + if self.aborted.is_set(): + p.terminate() + raise digits.inference.errors.InferenceError('%s classify many task got aborted.' + 'error code - %d' % (self.get_framework_id(), + p.returncode)) + + if line is not None and len(line) > 1: + if not self.process_test_output(line, predictions, 'many'): + self.logger.warning('%s classify many task unrecognized input: %s' % ( + self.get_framework_id(), line.strip())) + unrecognized_output.append(line) + else: + time.sleep(0.05) + except Exception as e: + if p.poll() is None: + p.terminate() + error_message = '' + if type(e) == digits.inference.errors.InferenceError: + error_message = e.__str__() + else: + error_message = '%s classify many task failed with error code %d \n %s' % ( + self.get_framework_id(), p.returncode, str(e)) + self.logger.error(error_message) + if unrecognized_output: + unrecognized_output = '\n'.join(unrecognized_output) + error_message = error_message + unrecognized_output + raise digits.inference.errors.InferenceError(error_message) + + if p.returncode != 0: + error_message = '%s classify many task failed with error code %d' % (self.get_framework_id(), + p.returncode) + self.logger.error(error_message) + if unrecognized_output: + unrecognized_output = '\n'.join(unrecognized_output) + error_message = error_message + unrecognized_output + raise digits.inference.errors.InferenceError(error_message) + else: + self.logger.info('%s classify many task completed.' 
% self.get_framework_id()) + finally: + shutil.rmtree(temp_dir_path) + + # task.infer_one() expects dictionary in return value + return {'output': np.array(predictions)} + + def has_model(self): + """ + Returns True if there is a model that can be used + """ + return len(self.snapshots) != 0 + + @override + def get_model_files(self): + """ + return paths to model files + """ + return {"Network": self.model_file} + + @override + def get_network_desc(self): + """ + return text description of network + """ + with open(os.path.join(self.job_dir, TENSORFLOW_MODEL_FILE), "r") as infile: + desc = infile.read() + return desc + + @override + def get_task_stats(self, epoch=-1): + """ + return a dictionary of task statistics + """ + + loc, mean_file = os.path.split(self.dataset.get_mean_file()) + + stats = { + "image dimensions": self.dataset.get_feature_dims(), + "mean file": mean_file, + "snapshot file": self.get_snapshot_filename(epoch), + "model file": self.model_file, + "framework": "tensorflow" + } + + if hasattr(self, "digits_version"): + stats.update({"digits version": self.digits_version}) + + if hasattr(self.dataset, "resize_mode"): + stats.update({"image resize mode": self.dataset.resize_mode}) + + if hasattr(self.dataset, "labels_file"): + stats.update({"labels file": self.dataset.labels_file}) + + return stats diff --git a/digits/model/tasks/train.py b/digits/model/tasks/train.py index 5c4201df9..b8d9eea00 100644 --- a/digits/model/tasks/train.py +++ b/digits/model/tasks/train.py @@ -42,6 +42,7 @@ def __init__(self, job, dataset, train_epochs, snapshot_interval, learning_rate, batch_size -- if set, override any network specific batch_size with this value batch_accumulation -- accumulate gradients over multiple batches val_interval -- how many epochs between validating the model with an epoch of validation data + traces_interval -- amount of steps in between timeline traces pretrained_model -- filename for a model to use for fine-tuning crop_size -- crop each image down to a square of this size use_mean -- subtract the dataset's mean file or mean pixel @@ -53,6 +54,7 @@ def __init__(self, job, dataset, train_epochs, snapshot_interval, learning_rate, self.batch_size = kwargs.pop('batch_size', None) self.batch_accumulation = kwargs.pop('batch_accumulation', None) self.val_interval = kwargs.pop('val_interval', None) + self.traces_interval = kwargs.pop('traces_interval', None) self.pretrained_model = kwargs.pop('pretrained_model', None) self.crop_size = kwargs.pop('crop_size', None) self.use_mean = kwargs.pop('use_mean', None) @@ -76,6 +78,7 @@ def __init__(self, job, dataset, train_epochs, snapshot_interval, learning_rate, self.current_epoch = 0 self.snapshots = [] + self.timeline_traces = [] # data gets stored as dicts of lists (for graphing) self.train_outputs = OrderedDict() @@ -121,6 +124,7 @@ def __setstate__(self, state): super(TrainTask, self).__setstate__(state) self.snapshots = [] + self.timeline_traces = [] self.dataset = None @override @@ -205,7 +209,7 @@ def hw_socketio_updater(self, gpus): data_cpu['cpu_pct'] = ps.get_cpu_percent(interval=1) data_cpu['mem_pct'] = ps.get_memory_percent() data_cpu['mem_used'] = ps.get_memory_info().rss - except psutil.NoSuchProcess: + except (psutil.NoSuchProcess, psutil.AccessDenied): # In rare case of instant process crash or PID went zombie (report nothing) pass @@ -431,7 +435,7 @@ def infer_many(self, data, model_epoch=None): """ return None - def get_snapshot(self, epoch=-1): + def get_snapshot(self, epoch=-1, download=False): """ return 
snapshot file for specified epoch """ @@ -511,6 +515,37 @@ def lr_graph_data(self): }, } + def detect_timeline_traces(self): + """ + Populate self.timeline_traces with snapshots that exist on disk + Returns True if at least one usable snapshot is found + """ + return False + + def has_timeline_traces(self): + """ + Evaluates if there are timeline traces to be viewed at all + """ + return len(self.timeline_traces) > 0 + + def timeline_trace(self, tid): + """ + Returns the data of a selected timeline trace + """ + for item in self.timeline_traces: + if item[1] == tid: + fn = item[0] + with open(fn, 'r') as file_data: + return file_data.read() + + raise ValueError('Requested timeline not found in timeline list') + + def timeline_trace_list(self): + """ + Returns an array of timeline trace id's for creating an HTML select field + """ + return [[s[1], 'Trace #%s' % s[1]] for s in reversed(self.timeline_traces)] + def combined_graph_data(self, cull=True): """ Returns all train/val outputs in data for one C3.js graph diff --git a/digits/model/views.py b/digits/model/views.py index 030d94dfd..28df10231 100644 --- a/digits/model/views.py +++ b/digits/model/views.py @@ -16,7 +16,7 @@ from digits.pretrained_model.job import PretrainedModelJob from digits import frameworks, extensions from digits.utils import auth -from digits.utils.routing import request_wants_json +from digits.utils.routing import request_wants_json, job_from_request, get_request_arg from digits.webapp import scheduler blueprint = flask.Blueprint(__name__, __name__) @@ -100,6 +100,18 @@ def customize(): }) +@blueprint.route('/timeline_trace_data', methods=['POST']) +def timeline_trace_data(): + """ + Shows timeline trace of a model + """ + job = job_from_request() + step = get_request_arg('step') + if step is None: + raise werkzeug.exceptions.BadRequest('step is a required field') + return job.train_task().timeline_trace(int(step)) + + @blueprint.route('/view-config/', methods=['GET']) def view_config(extension_id): """ @@ -122,9 +134,19 @@ def visualize_network(): if not framework: raise werkzeug.exceptions.BadRequest('framework not provided') - fw = frameworks.get_framework_by_id(framework) - ret = fw.get_network_visualization(flask.request.form['custom_network']) + dataset = None + if 'dataset_id' in flask.request.form: + dataset = scheduler.get_job(flask.request.form['dataset_id']) + fw = frameworks.get_framework_by_id(framework) + ret = fw.get_network_visualization( + desc=flask.request.form['custom_network'], + dataset=dataset, + solver_type=flask.request.form['solver_type'] if 'solver_type' in flask.request.form else None, + use_mean=flask.request.form['use_mean'] if 'use_mean' in flask.request.form else None, + crop_size=flask.request.form['crop_size'] if 'crop_size' in flask.request.form else None, + num_gpus=flask.request.form['num_gpus'] if 'num_gpus' in flask.request.form else None, + ) return ret @@ -273,13 +295,13 @@ def download(job_id, extension): mode = 'gz' elif extension in ['tar.bz2']: mode = 'bz2' - with tarfile.open(fileobj=b, mode='w:%s' % mode) as tf: + with tarfile.open(fileobj=b, mode='w:%s' % mode) as tar: for path, name in job.download_files(epoch): - tf.add(path, arcname=name) - tf_info = tarfile.TarInfo("info.json") - tf_info.size = len(info_io.getvalue()) + tar.add(path, arcname=name) + tar_info = tarfile.TarInfo("info.json") + tar_info.size = len(info_io.getvalue()) info_io.seek(0) - tf.addfile(tf_info, info_io) + tar.addfile(tar_info, info_io) elif extension in ['zip']: with zipfile.ZipFile(b, 'w') 
as zf: for path, name in job.download_files(epoch): diff --git a/digits/scheduler.py b/digits/scheduler.py index a706bb0bc..7a75eabce 100644 --- a/digits/scheduler.py +++ b/digits/scheduler.py @@ -107,7 +107,7 @@ def __init__(self, gpu_list=None, verbose=False): self.resources = { # TODO: break this into CPU cores, memory usage, IO usage, etc. 'parse_folder_task_pool': [Resource()], - 'create_db_task_pool': [Resource(max_value=2)], + 'create_db_task_pool': [Resource(max_value=4)], 'analyze_db_task_pool': [Resource(max_value=4)], 'inference_task_pool': [Resource(max_value=4)], 'gpus': [Resource(identifier=index) diff --git a/digits/standard-networks/tensorflow/alexnet.py b/digits/standard-networks/tensorflow/alexnet.py new file mode 100644 index 000000000..85e004246 --- /dev/null +++ b/digits/standard-networks/tensorflow/alexnet.py @@ -0,0 +1,42 @@ +# Preferred settings for this model are: +# Base Learning Rate = 0.001 +# Crop Size = 224 + +from model import Tower +from utils import model_property +import tensorflow as tf +import tensorflow.contrib.slim as slim +import utils as digits + + +class UserModel(Tower): + + @model_property + def inference(self): + x = tf.reshape(self.x, shape=[-1, self.input_shape[0], self.input_shape[1], self.input_shape[2]]) + with slim.arg_scope([slim.conv2d, slim.fully_connected], + weights_initializer=tf.contrib.layers.xavier_initializer(), + weights_regularizer=slim.l2_regularizer(1e-6)): + model = slim.conv2d(x, 96, [11, 11], 4, padding='VALID', scope='conv1') + model = slim.max_pool2d(model, [3, 3], 2, scope='pool1') + model = slim.conv2d(model, 256, [5, 5], 1, scope='conv2') + model = slim.max_pool2d(model, [3, 3], 2, scope='pool2') + model = slim.conv2d(model, 384, [3, 3], 1, scope='conv3') + model = slim.conv2d(model, 384, [3, 3], 1, scope='conv4') + model = slim.conv2d(model, 256, [3, 3], 1, scope='conv5') + model = slim.max_pool2d(model, [3, 3], 2, scope='pool5') + model = slim.flatten(model) + model = slim.fully_connected(model, 4096, activation_fn=None, scope='fc1') + model = slim.dropout(model, 0.5, is_training=self.is_training, scope='do1') + model = slim.fully_connected(model, 4096, activation_fn=None, scope='fc2') + model = slim.dropout(model, 0.5, is_training=self.is_training, scope='do2') + model = slim.fully_connected(model, self.nclasses, activation_fn=None, scope='fc3') + return model + + @model_property + def loss(self): + model = self.inference + loss = digits.classification_loss(model, self.y) + accuracy = digits.classification_accuracy(model, self.y) + self.summaries.append(tf.summary.scalar(accuracy.op.name, accuracy)) + return loss diff --git a/digits/standard-networks/tensorflow/googlenet.py b/digits/standard-networks/tensorflow/googlenet.py new file mode 100644 index 000000000..eafe7c7bb --- /dev/null +++ b/digits/standard-networks/tensorflow/googlenet.py @@ -0,0 +1,227 @@ +# Preferred settings for this model are: +# Training epochs = 80 +# Crop Size = 224 +# Under advanced learning rate options: +# Step Size = 10.0 +# Gamma = 0.96 + +# The auxiliary branches as specified in the original GoogLeNet V1 model do exist in this implementation of +# GoogLeNet, but they are not used. To use them, be sure to check self.is_training to ensure that they are only used +# during training.
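+# (To enable them, uncomment the aux_branch_1/aux_branch_2 calls in inference() below and return [aux_branch_1, aux_branch_2, model] when self.is_training.)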
+ +from model import Tower +from utils import model_property +import tensorflow as tf +import utils as digits + + +class UserModel(Tower): + + all_inception_settings = { + '3a': [[64], [96, 128], [16, 32], [32]], + '3b': [[128], [128, 192], [32, 96], [64]], + '4a': [[192], [96, 208], [16, 48], [64]], + '4b': [[160], [112, 224], [24, 64], [64]], + '4c': [[128], [128, 256], [24, 64], [64]], + '4d': [[112], [144, 288], [32, 64], [64]], + '4e': [[256], [160, 320], [32, 128], [128]], + '5a': [[256], [160, 320], [32, 128], [128]], + '5b': [[384], [192, 384], [48, 128], [128]] + } + + @model_property + def inference(self): + # rescale to proper form, really we expect 224 x 224 x 1 in HWC form + model = tf.reshape(self.x, shape=[-1, self.input_shape[0], self.input_shape[1], self.input_shape[2]]) + + conv_7x7_2s_weight, conv_7x7_2s_bias = self.create_conv_vars([7, 7, self.input_shape[2], 64], 'conv_7x7_2s') + model = self.conv_layer_with_relu(model, conv_7x7_2s_weight, conv_7x7_2s_bias, 2) + + model = self.max_pool(model, 3, 2) + + # model = tf.nn.local_response_normalization(model) + + conv_1x1_vs_weight, conv_1x1_vs_bias = self.create_conv_vars([1, 1, 64, 64], 'conv_1x1_vs') + model = self.conv_layer_with_relu(model, conv_1x1_vs_weight, conv_1x1_vs_bias, 1, 'VALID') + + conv_3x3_1s_weight, conv_3x3_1s_bias = self.create_conv_vars([3, 3, 64, 192], 'conv_3x3_1s') + model = self.conv_layer_with_relu(model, conv_3x3_1s_weight, conv_3x3_1s_bias, 1) + + # model = tf.nn.local_response_normalization(model) + + model = self.max_pool(model, 3, 2) + + inception_settings_3a = InceptionSettings(192, UserModel.all_inception_settings['3a']) + model = self.inception(model, inception_settings_3a, '3a') + + inception_settings_3b = InceptionSettings(256, UserModel.all_inception_settings['3b']) + model = self.inception(model, inception_settings_3b, '3b') + + model = self.max_pool(model, 3, 2) + + inception_settings_4a = InceptionSettings(480, UserModel.all_inception_settings['4a']) + model = self.inception(model, inception_settings_4a, '4a') + + # first auxiliary branch for making training faster + # aux_branch_1 = self.auxiliary_classifier(model, 512, "aux_1") + + inception_settings_4b = InceptionSettings(512, UserModel.all_inception_settings['4b']) + model = self.inception(model, inception_settings_4b, '4b') + + inception_settings_4c = InceptionSettings(512, UserModel.all_inception_settings['4c']) + model = self.inception(model, inception_settings_4c, '4c') + + inception_settings_4d = InceptionSettings(512, UserModel.all_inception_settings['4d']) + model = self.inception(model, inception_settings_4d, '4d') + + # second auxiliary branch for making training faster + # aux_branch_2 = self.auxiliary_classifier(model, 528, "aux_2") + + inception_settings_4e = InceptionSettings(528, UserModel.all_inception_settings['4e']) + model = self.inception(model, inception_settings_4e, '4e') + + model = self.max_pool(model, 3, 2) + + inception_settings_5a = InceptionSettings(832, UserModel.all_inception_settings['5a']) + model = self.inception(model, inception_settings_5a, '5a') + + inception_settings_5b = InceptionSettings(832, UserModel.all_inception_settings['5b']) + model = self.inception(model, inception_settings_5b, '5b') + + model = self.avg_pool(model, 7, 1, 'VALID') + + fc_weight, fc_bias = self.create_fc_vars([1024, self.nclasses], 'fc') + model = self.fully_connect(model, fc_weight, fc_bias) + + # if self.is_training: + # return [aux_branch_1, aux_branch_2, model] + + return model + + @model_property + def loss(self): 
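+ # Classification loss and accuracy come from the DIGITS TensorFlow utils; the accuracy scalar is attached as a summary so it shows up in the DIGITS training graphs.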
+ model = self.inference + loss = digits.classification_loss(model, self.y) + accuracy = digits.classification_accuracy(model, self.y) + self.summaries.append(tf.summary.scalar(accuracy.op.name, accuracy)) + return loss + + def inception(self, model, inception_setting, layer_name): + weights, biases = self.create_inception_variables(inception_setting, layer_name) + conv_1x1 = self.conv_layer_with_relu(model, weights['conv_1x1_1'], biases['conv_1x1_1'], 1) + + conv_3x3 = self.conv_layer_with_relu(model, weights['conv_1x1_2'], biases['conv_1x1_2'], 1) + conv_3x3 = self.conv_layer_with_relu(conv_3x3, weights['conv_3x3'], biases['conv_3x3'], 1) + + conv_5x5 = self.conv_layer_with_relu(model, weights['conv_1x1_3'], biases['conv_1x1_3'], 1) + conv_5x5 = self.conv_layer_with_relu(conv_5x5, weights['conv_5x5'], biases['conv_5x5'], 1) + + conv_pool = self.max_pool(model, 3, 1) + conv_pool = self.conv_layer_with_relu(conv_pool, weights['conv_pool'], biases['conv_pool'], 1) + + final_model = tf.concat([conv_1x1, conv_3x3, conv_5x5, conv_pool], 3) + + return final_model + + def create_inception_variables(self, inception_setting, layer_name): + model_dim = inception_setting.model_dim + conv_1x1_1_w, conv_1x1_1_b = self.create_conv_vars([1, 1, model_dim, inception_setting.conv_1x1_1_layers], + layer_name + '-conv_1x1_1') + conv_1x1_2_w, conv_1x1_2_b = self.create_conv_vars([1, 1, model_dim, inception_setting.conv_1x1_2_layers], + layer_name + '-conv_1x1_2') + conv_1x1_3_w, conv_1x1_3_b = self.create_conv_vars([1, 1, model_dim, inception_setting.conv_1x1_3_layers], + layer_name + '-conv_1x1_3') + conv_3x3_w, conv_3x3_b = self.create_conv_vars([3, 3, inception_setting.conv_1x1_2_layers, + inception_setting.conv_3x3_layers], + layer_name + '-conv_3x3') + conv_5x5_w, conv_5x5_b = self.create_conv_vars([5, 5, inception_setting.conv_1x1_3_layers, + inception_setting.conv_5x5_layers], + layer_name + '-conv_5x5') + conv_pool_w, conv_pool_b = self.create_conv_vars([1, 1, model_dim, inception_setting.conv_pool_layers], + layer_name + '-conv_pool') + + weights = { + 'conv_1x1_1': conv_1x1_1_w, + 'conv_1x1_2': conv_1x1_2_w, + 'conv_1x1_3': conv_1x1_3_w, + 'conv_3x3': conv_3x3_w, + 'conv_5x5': conv_5x5_w, + 'conv_pool': conv_pool_w + } + + biases = { + 'conv_1x1_1': conv_1x1_1_b, + 'conv_1x1_2': conv_1x1_2_b, + 'conv_1x1_3': conv_1x1_3_b, + 'conv_3x3': conv_3x3_b, + 'conv_5x5': conv_5x5_b, + 'conv_pool': conv_pool_b + } + + return weights, biases + + def auxiliary_classifier(self, model, input_size, name): + aux_classifier = self.avg_pool(model, 5, 3, 'VALID') + + conv_weight, conv_bias = self.create_conv_vars([1, 1, input_size, input_size], name + '-conv_1x1') + aux_classifier = self.conv_layer_with_relu(aux_classifier, conv_weight, conv_bias, 1) + + fc_weight, fc_bias = self.create_fc_vars([4*4*input_size, self.nclasses], name + '-fc') + aux_classifier = self.fully_connect(aux_classifier, fc_weight, fc_bias) + + aux_classifier = tf.nn.dropout(aux_classifier, 0.7) + + return aux_classifier + + def conv_layer_with_relu(self, model, weights, biases, stride_size, padding='SAME'): + new_model = tf.nn.conv2d(model, weights, strides=[1, stride_size, stride_size, 1], padding=padding) + new_model = tf.nn.bias_add(new_model, biases) + new_model = tf.nn.relu(new_model) + return new_model + + def max_pool(self, model, kernal_size, stride_size, padding='SAME'): + new_model = tf.nn.max_pool(model, ksize=[1, kernal_size, kernal_size, 1], + strides=[1, stride_size, stride_size, 1], padding=padding) + return new_model + + def 
avg_pool(self, model, kernal_size, stride_size, padding='SAME'): + new_model = tf.nn.avg_pool(model, ksize=[1, kernal_size, kernal_size, 1], + strides=[1, stride_size, stride_size, 1], padding=padding) + return new_model + + def fully_connect(self, model, weights, biases): + fc_model = tf.reshape(model, [-1, weights.get_shape().as_list()[0]]) + fc_model = tf.matmul(fc_model, weights) + fc_model = tf.add(fc_model, biases) + fc_model = tf.nn.relu(fc_model) + return fc_model + + def create_conv_vars(self, size, name): + weight = self.create_weight(size, name + '_W') + bias = self.create_bias(size[3], name + '_b') + return weight, bias + + def create_fc_vars(self, size, name): + weight = self.create_weight(size, name + '_W') + bias = self.create_bias(size[1], name + '_b') + return weight, bias + + def create_weight(self, size, name): + weight = tf.get_variable(name, size, initializer=tf.contrib.layers.xavier_initializer()) + return weight + + def create_bias(self, size, name): + bias = tf.get_variable(name, [size], initializer=tf.constant_initializer(0.2)) + return bias + + +class InceptionSettings(): + + def __init__(self, model_dim, inception_settings): + self.model_dim = model_dim + self.conv_1x1_1_layers = inception_settings[0][0] + self.conv_1x1_2_layers = inception_settings[1][0] + self.conv_1x1_3_layers = inception_settings[2][0] + self.conv_3x3_layers = inception_settings[1][1] + self.conv_5x5_layers = inception_settings[2][1] + self.conv_pool_layers = inception_settings[3][0] diff --git a/digits/standard-networks/tensorflow/lenet.py b/digits/standard-networks/tensorflow/lenet.py new file mode 100644 index 000000000..d36458231 --- /dev/null +++ b/digits/standard-networks/tensorflow/lenet.py @@ -0,0 +1,34 @@ +from model import Tower +from utils import model_property +import tensorflow as tf +import tensorflow.contrib.slim as slim +import utils as digits + + +class UserModel(Tower): + + @model_property + def inference(self): + x = tf.reshape(self.x, shape=[-1, self.input_shape[0], self.input_shape[1], self.input_shape[2]]) + # scale (divide by MNIST std) + x = x * 0.0125 + with slim.arg_scope([slim.conv2d, slim.fully_connected], + weights_initializer=tf.contrib.layers.xavier_initializer(), + weights_regularizer=slim.l2_regularizer(0.0005)): + model = slim.conv2d(x, 20, [5, 5], padding='VALID', scope='conv1') + model = slim.max_pool2d(model, [2, 2], padding='VALID', scope='pool1') + model = slim.conv2d(model, 50, [5, 5], padding='VALID', scope='conv2') + model = slim.max_pool2d(model, [2, 2], padding='VALID', scope='pool2') + model = slim.flatten(model) + model = slim.fully_connected(model, 500, scope='fc1') + model = slim.dropout(model, 0.5, is_training=self.is_training, scope='do1') + model = slim.fully_connected(model, self.nclasses, activation_fn=None, scope='fc2') + return model + + @model_property + def loss(self): + model = self.inference + loss = digits.classification_loss(model, self.y) + accuracy = digits.classification_accuracy(model, self.y) + self.summaries.append(tf.summary.scalar(accuracy.op.name, accuracy)) + return loss diff --git a/digits/standard-networks/tensorflow/vgg16.py b/digits/standard-networks/tensorflow/vgg16.py new file mode 100644 index 000000000..505cbed28 --- /dev/null +++ b/digits/standard-networks/tensorflow/vgg16.py @@ -0,0 +1,39 @@ +from model import Tower +from utils import model_property +import tensorflow as tf +import tensorflow.contrib.slim as slim +import utils as digits + + +class UserModel(Tower): + + @model_property + def inference(self): + x = 
tf.reshape(self.x, shape=[-1, self.input_shape[0], self.input_shape[1], self.input_shape[2]]) + with slim.arg_scope([slim.conv2d, slim.fully_connected], + weights_initializer=tf.contrib.layers.xavier_initializer(), + weights_regularizer=slim.l2_regularizer(0.0005)): + model = slim.repeat(x, 2, slim.conv2d, 64, [3, 3], scope='conv1') + model = slim.max_pool2d(model, [2, 2], scope='pool1') + model = slim.repeat(model, 2, slim.conv2d, 128, [3, 3], scope='conv2') + model = slim.max_pool2d(model, [2, 2], scope='pool2') + model = slim.repeat(model, 3, slim.conv2d, 256, [3, 3], scope='conv3') + model = slim.max_pool2d(model, [2, 2], scope='pool3') + model = slim.repeat(model, 3, slim.conv2d, 512, [3, 3], scope='conv4') + model = slim.max_pool2d(model, [2, 2], scope='pool4') + model = slim.repeat(model, 3, slim.conv2d, 512, [3, 3], scope='conv5') + model = slim.max_pool2d(model, [2, 2], scope='pool5') + model = slim.flatten(model, scope='flatten5') + model = slim.fully_connected(model, 4096, scope='fc6') + model = slim.dropout(model, 0.5, is_training=self.is_training, scope='do6') + model = slim.fully_connected(model, 4096, scope='fc7') + model = slim.dropout(model, 0.5, is_training=self.is_training, scope='do7') + model = slim.fully_connected(model, self.nclasses, activation_fn=None, scope='fcX8') + return model + + @model_property + def loss(self): + loss = digits.classification_loss(self.inference, self.y) + accuracy = digits.classification_accuracy(self.inference, self.y) + self.summaries.append(tf.summary.scalar(accuracy.op.name, accuracy)) + return loss diff --git a/digits/standard-networks/torch/ImageNet-Training/googlenet.lua b/digits/standard-networks/torch/ImageNet-Training/googlenet.lua index 92ef4b243..1c4e23e7f 100644 --- a/digits/standard-networks/torch/ImageNet-Training/googlenet.lua +++ b/digits/standard-networks/torch/ImageNet-Training/googlenet.lua @@ -1,4 +1,4 @@ --- source: https://github.com/soumith/imagenet-multiGPU.torch/blob/master/models/alexnet_cudnn.lua +-- source: https://github.com/soumith/imagenet-multiGPU.torch/blob/master/models/googlenet.lua require 'nn' if pcall(function() require('cudnn') end) then diff --git a/digits/static/css/style.css b/digits/static/css/style.css index 80a3bd760..094177ed7 100644 --- a/digits/static/css/style.css +++ b/digits/static/css/style.css @@ -18,6 +18,10 @@ div.progress div.progress-bar { width: 70%!important; } +.full-bootbox > .modal-dialog { + width: 95%!important; +} + /* Upload Button */ .file-upload { position: relative; diff --git a/digits/static/js/timeline-tracing.js b/digits/static/js/timeline-tracing.js new file mode 100644 index 000000000..bb75c4111 --- /dev/null +++ b/digits/static/js/timeline-tracing.js @@ -0,0 +1,31 @@ +//# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 
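+// Thin wrapper around the trace-viewer (tr.*) timeline component; DIGITS uses it to render TensorFlow timeline traces fetched from the model page's timeline_trace_data endpoint.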
+'use strict'; +var TimelineTrace = function(cont_id) { + var container = document.createElement('track-view-container'); + container.id = 'track_view_container'; + this.viewer = document.createElement('tr-ui-timeline-view'); + this.viewer.track_view_container = container; + Polymer.dom(this.viewer).appendChild(container); + this.viewer.id = 'trace-viewer'; + this.viewer.globalMode = false; + this.viewer.viewTitle = 'No Trace Selected'; + Polymer.dom(document.getElementById(cont_id)).appendChild(this.viewer); +}; + +TimelineTrace.prototype.onResult = function(step, result) { + var model = new tr.Model(); + var viewer = this.viewer; + var i = new tr.importer.Import(model); + var p = i.importTracesWithProgressDialog([result]); + function onModelLoaded() { + viewer.model = model; + viewer.viewTitle = 'Trace #' + step; + } + function onImportFail(err) { + var overlay = new tr.ui.b.Overlay(); + overlay.textContent = tr.b.normalizeException(err).message; + overlay.title = 'Import error'; + overlay.visible = true; + } + p.then(onModelLoaded, onImportFail); +}; diff --git a/digits/static/tb/summary-icon.svg b/digits/static/tb/summary-icon.svg new file mode 100644 index 000000000..f66c99580 --- /dev/null +++ b/digits/static/tb/summary-icon.svg @@ -0,0 +1,3 @@ + + + diff --git a/digits/static/tb/tf-graph-basic.build.html b/digits/static/tb/tf-graph-basic.build.html new file mode 100644 index 000000000..617b1d4b3 --- /dev/null +++ b/digits/static/tb/tf-graph-basic.build.html @@ -0,0 +1,1056 @@ + + + + + + + + + + + + + + + + + + + + + + diff --git a/digits/static/tb/tf-graph-basic.build.js b/digits/static/tb/tf-graph-basic.build.js new file mode 100644 index 000000000..37cadccf2 --- /dev/null +++ b/digits/static/tb/tf-graph-basic.build.js @@ -0,0 +1,23 @@ +//Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. +//Copyright 2017 The TensorFlow Authors. All rights reserved.
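+// Vendored, minified build of the TensorBoard graph visualizer (bundled with the webcomponents-lite polyfill); a generated file, not meant to be edited by hand.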
+ +!function(){window.WebComponents=window.WebComponents||{flags:{}};var a="webcomponents-lite.js",b=document.querySelector('script[src*="'+a+'"]'),c={};if(!c.noOpts){if(location.search.slice(1).split("&").forEach(function(a){var b,d=a.split("=");d[0]&&(b=d[0].match(/wc-(.+)/))&&(c[b[1]]=d[1]||!0)}),b)for(var d,e=0;d=b.attributes[e];e++)"src"!==d.name&&(c[d.name]=d.value||!0);if(c.log&&c.log.split){var f=c.log.split(",");c.log={},f.forEach(function(a){c.log[a]=!0})}else c.log={}}c.register&&(window.CustomElements=window.CustomElements||{flags:{}},window.CustomElements.flags.register=c.register),WebComponents.flags=c}(),function(a){"use strict";function b(a){return void 0!==l[a]}function c(){h.call(this),this._isInvalid=!0}function d(a){return""==a&&c.call(this),a.toLowerCase()}function e(a){var b=a.charCodeAt(0);return b>32&&127>b&&-1==[34,35,60,62,63,96].indexOf(b)?a:encodeURIComponent(a)}function f(a){var b=a.charCodeAt(0);return b>32&&127>b&&-1==[34,35,60,62,96].indexOf(b)?a:encodeURIComponent(a)}function g(a,g,h){function i(a){t.push(a)}var j=g||"scheme start",k=0,q="",r=!1,s=!1,t=[];a:for(;(a[k-1]!=n||0==k)&&!this._isInvalid;){var u=a[k];switch(j){case"scheme start":if(!u||!o.test(u)){if(g){i("Invalid scheme.");break a}q="",j="no scheme";continue}q+=u.toLowerCase(),j="scheme";break;case"scheme":if(u&&p.test(u))q+=u.toLowerCase();else{if(":"!=u){if(g){if(n==u)break a;i("Code point not allowed in scheme: "+u);break a}q="",k=0,j="no scheme";continue}if(this._scheme=q,q="",g)break a;b(this._scheme)&&(this._isRelative=!0),j="file"==this._scheme?"relative":this._isRelative&&h&&h._scheme==this._scheme?"relative or authority":this._isRelative?"authority first slash":"scheme data"}break;case"scheme data":"?"==u?(this._query="?",j="query"):"#"==u?(this._fragment="#",j="fragment"):n!=u&&"\t"!=u&&"\n"!=u&&"\r"!=u&&(this._schemeData+=e(u));break;case"no scheme":if(h&&b(h._scheme)){j="relative";continue}i("Missing scheme."),c.call(this);break;case"relative or authority":if("/"!=u||"/"!=a[k+1]){i("Expected /, got: "+u),j="relative";continue}j="authority ignore slashes";break;case"relative":if(this._isRelative=!0,"file"!=this._scheme&&(this._scheme=h._scheme),n==u){this._host=h._host,this._port=h._port,this._path=h._path.slice(),this._query=h._query,this._username=h._username,this._password=h._password;break a}if("/"==u||"\\"==u)"\\"==u&&i("\\ is an invalid code point."),j="relative slash";else if("?"==u)this._host=h._host,this._port=h._port,this._path=h._path.slice(),this._query="?",this._username=h._username,this._password=h._password,j="query";else{if("#"!=u){var v=a[k+1],w=a[k+2];("file"!=this._scheme||!o.test(u)||":"!=v&&"|"!=v||n!=w&&"/"!=w&&"\\"!=w&&"?"!=w&&"#"!=w)&&(this._host=h._host,this._port=h._port,this._username=h._username,this._password=h._password,this._path=h._path.slice(),this._path.pop()),j="relative path";continue}this._host=h._host,this._port=h._port,this._path=h._path.slice(),this._query=h._query,this._fragment="#",this._username=h._username,this._password=h._password,j="fragment"}break;case"relative slash":if("/"!=u&&"\\"!=u){"file"!=this._scheme&&(this._host=h._host,this._port=h._port,this._username=h._username,this._password=h._password),j="relative path";continue}"\\"==u&&i("\\ is an invalid code point."),j="file"==this._scheme?"file host":"authority ignore slashes";break;case"authority first slash":if("/"!=u){i("Expected '/', got: "+u),j="authority ignore slashes";continue}j="authority second slash";break;case"authority second slash":if(j="authority ignore 
slashes","/"!=u){i("Expected '/', got: "+u);continue}break;case"authority ignore slashes":if("/"!=u&&"\\"!=u){j="authority";continue}i("Expected authority, got: "+u);break;case"authority":if("@"==u){r&&(i("@ already seen."),q+="%40"),r=!0;for(var x=0;x>>0)+(b++ +"__")};c.prototype={set:function(b,c){var d=b[this.name];return d&&d[0]===b?d[1]=c:a(b,this.name,{value:[b,c],writable:!0}),this},get:function(a){var b;return(b=a[this.name])&&b[0]===a?b[1]:void 0},delete:function(a){var b=a[this.name];return!(!b||b[0]!==a)&&(b[0]=b[1]=void 0,!0)},has:function(a){var b=a[this.name];return!!b&&b[0]===a}},window.WeakMap=c}(),function(a){function b(a){u.push(a),t||(t=!0,p(d))}function c(a){return window.ShadowDOMPolyfill&&window.ShadowDOMPolyfill.wrapIfNeeded(a)||a}function d(){t=!1;var a=u;u=[],a.sort(function(a,b){return a.uid_-b.uid_});var b=!1;a.forEach(function(a){var c=a.takeRecords();e(a),c.length&&(a.callback_(c,a),b=!0)}),b&&d()}function e(a){a.nodes_.forEach(function(b){var c=q.get(b);c&&c.forEach(function(b){b.observer===a&&b.removeTransientObservers()})})}function f(a,b){for(var c=a;c;c=c.parentNode){var d=q.get(c);if(d)for(var e=0;e0){var e=c[d-1],f=n(e,a);if(f)return void(c[d-1]=f)}else b(this.observer);c[d]=a},addListeners:function(){this.addListeners_(this.target)},addListeners_:function(a){var b=this.options;b.attributes&&a.addEventListener("DOMAttrModified",this,!0),b.characterData&&a.addEventListener("DOMCharacterDataModified",this,!0),b.childList&&a.addEventListener("DOMNodeInserted",this,!0),(b.childList||b.subtree)&&a.addEventListener("DOMNodeRemoved",this,!0)},removeListeners:function(){this.removeListeners_(this.target)},removeListeners_:function(a){var b=this.options;b.attributes&&a.removeEventListener("DOMAttrModified",this,!0),b.characterData&&a.removeEventListener("DOMCharacterDataModified",this,!0),b.childList&&a.removeEventListener("DOMNodeInserted",this,!0),(b.childList||b.subtree)&&a.removeEventListener("DOMNodeRemoved",this,!0)},addTransientObserver:function(a){if(a!==this.target){this.addListeners_(a),this.transientObservedNodes.push(a);var b=q.get(a);b||q.set(a,b=[]),b.push(this)}},removeTransientObservers:function(){var a=this.transientObservedNodes;this.transientObservedNodes=[],a.forEach(function(a){this.removeListeners_(a);for(var b=q.get(a),c=0;c":return">";case" ":return" "}}function b(b){return b.replace(g,a)}var c="template",d=document.implementation.createHTMLDocument("template"),e=!0;HTMLTemplateElement=function(){},HTMLTemplateElement.prototype=Object.create(HTMLElement.prototype),HTMLTemplateElement.decorate=function(a){a.content||(a.content=d.createDocumentFragment());for(var c;c=a.firstChild;)a.content.appendChild(c);if(e)try{Object.defineProperty(a,"innerHTML",{get:function(){for(var a="",c=this.content.firstChild;c;c=c.nextSibling)a+=c.outerHTML||b(c.data);return a},set:function(a){for(d.body.innerHTML=a,HTMLTemplateElement.bootstrap(d);this.content.firstChild;)this.content.removeChild(this.content.firstChild);for(;d.body.firstChild;)this.content.appendChild(d.body.firstChild)},configurable:!0})}catch(a){e=!1}},HTMLTemplateElement.bootstrap=function(a){for(var b,d=a.querySelectorAll(c),e=0,f=d.length;f>e&&(b=d[e]);e++)HTMLTemplateElement.decorate(b)},document.addEventListener("DOMContentLoaded",function(){HTMLTemplateElement.bootstrap(document)});var f=document.createElement;document.createElement=function(){"use strict";var a=f.apply(document,arguments);return"template"==a.localName&&HTMLTemplateElement.decorate(a),a};var 
g=/[&\u00A0<>]/g}(),function(a){"use strict";if(!window.performance){var b=Date.now();window.performance={now:function(){return Date.now()-b}}}window.requestAnimationFrame||(window.requestAnimationFrame=function(){var a=window.webkitRequestAnimationFrame||window.mozRequestAnimationFrame;return a?function(b){return a(function(){b(performance.now())})}:function(a){return window.setTimeout(a,1e3/60)}}()),window.cancelAnimationFrame||(window.cancelAnimationFrame=function(){return window.webkitCancelAnimationFrame||window.mozCancelAnimationFrame||function(a){clearTimeout(a)}}());var c=function(){var a=document.createEvent("Event");return a.initEvent("foo",!0,!0),a.preventDefault(),a.defaultPrevented}();if(!c){var d=Event.prototype.preventDefault;Event.prototype.preventDefault=function(){this.cancelable&&(d.call(this),Object.defineProperty(this,"defaultPrevented",{get:function(){return!0}}))}}var e=/Trident/.test(navigator.userAgent);if((!window.CustomEvent||e&&"function"!=typeof window.CustomEvent)&&(window.CustomEvent=function(a,b){b=b||{};var c=document.createEvent("CustomEvent");return c.initCustomEvent(a,Boolean(b.bubbles),Boolean(b.cancelable),b.detail),c},window.CustomEvent.prototype=window.Event.prototype),!window.Event||e&&"function"!=typeof window.Event){var f=window.Event;window.Event=function(a,b){b=b||{};var c=document.createEvent("Event");return c.initEvent(a,Boolean(b.bubbles),Boolean(b.cancelable)),c},window.Event.prototype=f.prototype}}(window.WebComponents),window.HTMLImports=window.HTMLImports||{flags:{}},function(a){function b(a,b){b=b||o,d(function(){f(a,b)},b)}function c(a){return"complete"===a.readyState||a.readyState===r}function d(a,b){if(c(b))a&&a();else{var e=function(){("complete"===b.readyState||b.readyState===r)&&(b.removeEventListener(s,e),d(a,b))};b.addEventListener(s,e)}}function e(a){a.target.__loaded=!0}function f(a,b){function c(){i==j&&a&&a({allImports:h,loadedImports:k,errorImports:l})}function d(a){e(a),k.push(this),i++,c()}function f(a){l.push(this),i++,c()}var h=b.querySelectorAll("link[rel=import]"),i=0,j=h.length,k=[],l=[];if(j)for(var m,n=0;j>n&&(m=h[n]);n++)g(m)?(i++,c()):(m.addEventListener("load",d),m.addEventListener("error",f));else c()}function g(a){return l?a.__loaded||a.import&&"loading"!==a.import.readyState:a.__importParsed}function h(a){for(var b,c=0,d=a.length;d>c&&(b=a[c]);c++)i(b)&&j(b)}function i(a){return"link"===a.localName&&"import"===a.rel}function j(a){var b=a.import;b?e({target:a}):(a.addEventListener("load",e),a.addEventListener("error",e))}var k="import",l=Boolean(k in document.createElement("link")),m=Boolean(window.ShadowDOMPolyfill),n=function(a){return m?window.ShadowDOMPolyfill.wrapIfNeeded(a):a},o=n(document),p={get:function(){var a=window.HTMLImports.currentScript||document.currentScript||("complete"!==document.readyState?document.scripts[document.scripts.length-1]:null);return n(a)},configurable:!0};Object.defineProperty(document,"_currentScript",p),Object.defineProperty(o,"_currentScript",p);var q=/Trident/.test(navigator.userAgent),r=q?"complete":"interactive",s="readystatechange";l&&(new MutationObserver(function(a){for(var b,c=0,d=a.length;d>c&&(b=a[c]);c++)b.addedNodes&&h(b.addedNodes)}).observe(document.head,{childList:!0}),function(){if("loading"===document.readyState)for(var a,b=document.querySelectorAll("link[rel=import]"),c=0,d=b.length;d>c&&(a=b[c]);c++)j(a)}()),b(function(a){window.HTMLImports.ready=!0,window.HTMLImports.readyTime=(new Date).getTime();var 
b=o.createEvent("CustomEvent");b.initCustomEvent("HTMLImportsLoaded",!0,!0,a),o.dispatchEvent(b)}),a.IMPORT_LINK_TYPE=k,a.useNative=l,a.rootDocument=o,a.whenReady=b,a.isIE=q}(window.HTMLImports),function(a){var b=[],c=function(a){b.push(a)},d=function(){b.forEach(function(b){b(a)})};a.addModule=c,a.initializeModules=d}(window.HTMLImports),window.HTMLImports.addModule(function(a){var b=/(url\()([^)]*)(\))/g,c=/(@import[\s]+(?!url\())([^;]*)(;)/g,d={resolveUrlsInStyle:function(a,b){var c=a.ownerDocument,d=c.createElement("a");return a.textContent=this.resolveUrlsInCssText(a.textContent,b,d),a},resolveUrlsInCssText:function(a,d,e){var f=this.replaceUrls(a,e,d,b);return f=this.replaceUrls(f,e,d,c)},replaceUrls:function(a,b,c,d){return a.replace(d,function(a,d,e,f){var g=e.replace(/["']/g,"");return c&&(g=new URL(g,c).href),b.href=g,g=b.href,d+"'"+g+"'"+f})}};a.path=d}),window.HTMLImports.addModule(function(a){var b={async:!0,ok:function(a){return a.status>=200&&a.status<300||304===a.status||0===a.status},load:function(c,d,e){var f=new XMLHttpRequest;return(a.flags.debug||a.flags.bust)&&(c+="?"+Math.random()),f.open("GET",c,b.async),f.addEventListener("readystatechange",function(a){if(4===f.readyState){var c=null;try{var g=f.getResponseHeader("Location");g&&(c="/"===g.substr(0,1)?location.origin+g:g)}catch(a){console.error(a.message)}d.call(e,!b.ok(f)&&f,f.response||f.responseText,c)}}),f.send(),f},loadDocument:function(a,b,c){this.load(a,b,c).responseType="document"}};a.xhr=b}),window.HTMLImports.addModule(function(a){var b=a.xhr,c=a.flags,d=function(a,b){this.cache={},this.onload=a,this.oncomplete=b,this.inflight=0,this.pending={}};d.prototype={addNodes:function(a){this.inflight+=a.length;for(var b,c=0,d=a.length;d>c&&(b=a[c]);c++)this.require(b);this.checkDone()},addNode:function(a){this.inflight++,this.require(a),this.checkDone()},require:function(a){var b=a.src||a.href;a.__nodeUrl=b,this.dedupe(b,a)||this.fetch(b,a)},dedupe:function(a,b){return this.pending[a]?(this.pending[a].push(b),!0):this.cache[a]?(this.onload(a,b,this.cache[a]),this.tail(),!0):(this.pending[a]=[b],!1)},fetch:function(a,d){if(c.load&&console.log("fetch",a,d),a)if(a.match(/^data:/)){var e=a.split(","),f=e[0],g=e[1];g=f.indexOf(";base64")>-1?atob(g):decodeURIComponent(g),setTimeout(function(){this.receive(a,d,null,g)}.bind(this),0)}else{var h=function(b,c,e){this.receive(a,d,b,c,e)}.bind(this);b.load(a,h)}else setTimeout(function(){this.receive(a,d,{error:"href must be specified"},null)}.bind(this),0)},receive:function(a,b,c,d,e){this.cache[a]=d;for(var f,g=this.pending[a],h=0,i=g.length;i>h&&(f=g[h]);h++)this.onload(a,f,d,c,e),this.tail();this.pending[a]=null},tail:function(){--this.inflight,this.checkDone()},checkDone:function(){this.inflight||this.oncomplete()}},a.Loader=d}),window.HTMLImports.addModule(function(a){var b=function(a){this.addCallback=a,this.mo=new MutationObserver(this.handler.bind(this))};b.prototype={handler:function(a){for(var b,c=0,d=a.length;d>c&&(b=a[c]);c++)"childList"===b.type&&b.addedNodes.length&&this.addedNodes(b.addedNodes)},addedNodes:function(a){this.addCallback&&this.addCallback(a);for(var b,c=0,d=a.length;d>c&&(b=a[c]);c++)b.children&&b.children.length&&this.addedNodes(b.children)},observe:function(a){this.mo.observe(a,{childList:!0,subtree:!0})}},a.Observer=b}),window.HTMLImports.addModule(function(a){function b(a){return"link"===a.localName&&a.rel===k}function c(a){var b=d(a);return"data:text/javascript;charset=utf-8,"+encodeURIComponent(b)}function d(a){return 
a.textContent+e(a)}function e(a){var b=a.ownerDocument;b.__importedScripts=b.__importedScripts||0;var c=a.ownerDocument.baseURI,d=b.__importedScripts?"-"+b.__importedScripts:"";return b.__importedScripts++,"\n//# sourceURL="+c+d+".js\n"}function f(a){var b=a.ownerDocument.createElement("style");return b.textContent=a.textContent,g.resolveUrlsInStyle(b),b}var g=a.path,h=a.rootDocument,i=a.flags,j=a.isIE,k=a.IMPORT_LINK_TYPE,l="link[rel="+k+"]",m={documentSelectors:l,importsSelectors:[l,"link[rel=stylesheet]:not([type])","style:not([type])","script:not([type])",'script[type="application/javascript"]','script[type="text/javascript"]'].join(","),map:{link:"parseLink",script:"parseScript",style:"parseStyle"},dynamicElements:[],parseNext:function(){var a=this.nextToParse();a&&this.parse(a)},parse:function(a){if(this.isParsed(a))return void(i.parse&&console.log("[%s] is already parsed",a.localName));var b=this[this.map[a.localName]];b&&(this.markParsing(a),b.call(this,a))},parseDynamic:function(a,b){this.dynamicElements.push(a),b||this.parseNext()},markParsing:function(a){i.parse&&console.log("parsing",a),this.parsingElement=a},markParsingComplete:function(a){a.__importParsed=!0,this.markDynamicParsingComplete(a),a.__importElement&&(a.__importElement.__importParsed=!0,this.markDynamicParsingComplete(a.__importElement)),this.parsingElement=null,i.parse&&console.log("completed",a)},markDynamicParsingComplete:function(a){var b=this.dynamicElements.indexOf(a);b>=0&&this.dynamicElements.splice(b,1)},parseImport:function(a){if(a.import=a.__doc,window.HTMLImports.__importsParsingHook&&window.HTMLImports.__importsParsingHook(a),a.import&&(a.import.__importParsed=!0),this.markParsingComplete(a),a.__resource&&!a.__error?a.dispatchEvent(new CustomEvent("load",{bubbles:!1})):a.dispatchEvent(new CustomEvent("error",{bubbles:!1})),a.__pending)for(var b;a.__pending.length;)b=a.__pending.shift(),b&&b({target:a});this.parseNext()},parseLink:function(a){b(a)?this.parseImport(a):(a.href=a.href,this.parseGeneric(a))},parseStyle:function(a){var b=a;a=f(a),b.__appliedElement=a,a.__importElement=b,this.parseGeneric(a)},parseGeneric:function(a){this.trackElement(a),this.addElementToDocument(a)},rootImportForElement:function(a){for(var b=a;b.ownerDocument.__importLink;)b=b.ownerDocument.__importLink;return b},addElementToDocument:function(a){var b=this.rootImportForElement(a.__importElement||a);b.parentNode.insertBefore(a,b)},trackElement:function(a,b){var c=this,d=function(e){a.removeEventListener("load",d),a.removeEventListener("error",d),b&&b(e),c.markParsingComplete(a),c.parseNext()};if(a.addEventListener("load",d),a.addEventListener("error",d),j&&"style"===a.localName){var e=!1;if(-1==a.textContent.indexOf("@import"))e=!0;else if(a.sheet){e=!0;for(var f,g=a.sheet.cssRules,h=g?g.length:0,i=0;h>i&&(f=g[i]);i++)f.type===CSSRule.IMPORT_RULE&&(e=e&&Boolean(f.styleSheet))}e&&setTimeout(function(){a.dispatchEvent(new CustomEvent("load",{bubbles:!1}))})}},parseScript:function(b){var d=document.createElement("script");d.__importElement=b,d.src=b.src?b.src:c(b),a.currentScript=b,this.trackElement(d,function(b){d.parentNode&&d.parentNode.removeChild(d),a.currentScript=null}),this.addElementToDocument(d)},nextToParse:function(){return this._mayParse=[],!this.parsingElement&&(this.nextToParseInDoc(h)||this.nextToParseDynamic())},nextToParseInDoc:function(a,c){if(a&&this._mayParse.indexOf(a)<0){this._mayParse.push(a);for(var 
d,e=a.querySelectorAll(this.parseSelectorsForNode(a)),f=0,g=e.length;g>f&&(d=e[f]);f++)if(!this.isParsed(d))return this.hasResource(d)?b(d)?this.nextToParseInDoc(d.__doc,d):d:void 0}return c},nextToParseDynamic:function(){return this.dynamicElements[0]},parseSelectorsForNode:function(a){var b=a.ownerDocument||a;return b===h?this.documentSelectors:this.importsSelectors},isParsed:function(a){return a.__importParsed},needsDynamicParsing:function(a){return this.dynamicElements.indexOf(a)>=0},hasResource:function(a){return!b(a)||void 0!==a.__doc}};a.parser=m,a.IMPORT_SELECTOR=l}),window.HTMLImports.addModule(function(a){function b(a){return c(a,g)}function c(a,b){return"link"===a.localName&&a.getAttribute("rel")===b}function d(a){return!!Object.getOwnPropertyDescriptor(a,"baseURI")}function e(a,b){var c=document.implementation.createHTMLDocument(g);c._URL=b;var e=c.createElement("base");e.setAttribute("href",b),c.baseURI||d(c)||Object.defineProperty(c,"baseURI",{value:b});var f=c.createElement("meta");return f.setAttribute("charset","utf-8"),c.head.appendChild(f),c.head.appendChild(e),c.body.innerHTML=a,window.HTMLTemplateElement&&HTMLTemplateElement.bootstrap&&HTMLTemplateElement.bootstrap(c),c}var f=a.flags,g=a.IMPORT_LINK_TYPE,h=a.IMPORT_SELECTOR,i=a.rootDocument,j=a.Loader,k=a.Observer,l=a.parser,m={documents:{},documentPreloadSelectors:h,importsPreloadSelectors:[h].join(","),loadNode:function(a){n.addNode(a)},loadSubtree:function(a){var b=this.marshalNodes(a);n.addNodes(b)},marshalNodes:function(a){return a.querySelectorAll(this.loadSelectorsForNode(a))},loadSelectorsForNode:function(a){var b=a.ownerDocument||a;return b===i?this.documentPreloadSelectors:this.importsPreloadSelectors},loaded:function(a,c,d,g,h){if(f.load&&console.log("loaded",a,c),c.__resource=d,c.__error=g,b(c)){var i=this.documents[a];void 0===i&&(i=g?null:e(d,h||a),i&&(i.__importLink=c,this.bootDocument(i)),this.documents[a]=i),c.__doc=i}l.parseNext()},bootDocument:function(a){this.loadSubtree(a),this.observer.observe(a),l.parseNext()},loadedAll:function(){l.parseNext()}},n=new j(m.loaded.bind(m),m.loadedAll.bind(m));if(m.observer=new k,!document.baseURI){var o={get:function(){var a=document.querySelector("base");return a?a.href:window.location.href},configurable:!0};Object.defineProperty(document,"baseURI",o),Object.defineProperty(i,"baseURI",o)}a.importer=m,a.importLoader=n}),window.HTMLImports.addModule(function(a){var b=a.parser,c=a.importer,d={added:function(a){for(var d,e,f,g,h=0,i=a.length;i>h&&(g=a[h]);h++)d||(d=g.ownerDocument,e=b.isParsed(d)),f=this.shouldLoadNode(g),f&&c.loadNode(g),this.shouldParseNode(g)&&e&&b.parseDynamic(g,f)},shouldLoadNode:function(a){return 1===a.nodeType&&e.call(a,c.loadSelectorsForNode(a))},shouldParseNode:function(a){return 1===a.nodeType&&e.call(a,b.parseSelectorsForNode(a))}};c.observer.addCallback=d.added.bind(d);var e=HTMLElement.prototype.matches||HTMLElement.prototype.matchesSelector||HTMLElement.prototype.webkitMatchesSelector||HTMLElement.prototype.mozMatchesSelector||HTMLElement.prototype.msMatchesSelector}),function(a){function b(){window.HTMLImports.importer.bootDocument(d)}var c=a.initializeModules;if(a.isIE,!a.useNative){c();var d=a.rootDocument;"complete"===document.readyState||"interactive"===document.readyState&&!window.attachEvent?b():document.addEventListener("DOMContentLoaded",b)}}(window.HTMLImports),window.CustomElements=window.CustomElements||{flags:{}},function(a){var 
b=a.flags,c=[],d=function(a){c.push(a)},e=function(){c.forEach(function(b){b(a)})};a.addModule=d,a.initializeModules=e,a.hasNative=Boolean(document.registerElement),a.isIE=/Trident/.test(navigator.userAgent),a.useNative=!b.register&&a.hasNative&&!window.ShadowDOMPolyfill&&(!window.HTMLImports||window.HTMLImports.useNative)}(window.CustomElements),window.CustomElements.addModule(function(a){function b(a,b){c(a,function(a){return!!b(a)||void d(a,b)}),d(a,b)}function c(a,b,d){var e=a.firstElementChild;if(!e)for(e=a.firstChild;e&&e.nodeType!==Node.ELEMENT_NODE;)e=e.nextSibling;for(;e;)b(e,d)!==!0&&c(e,b,d),e=e.nextElementSibling;return null}function d(a,c){for(var d=a.shadowRoot;d;)b(d,c),d=d.olderShadowRoot}function e(a,b){f(a,b,[])}function f(a,b,c){if(a=window.wrap(a),!(c.indexOf(a)>=0)){c.push(a);for(var d,e=a.querySelectorAll("link[rel="+g+"]"),h=0,i=e.length;i>h&&(d=e[h]);h++)d.import&&f(d.import,b,c);b(a)}}var g=window.HTMLImports?window.HTMLImports.IMPORT_LINK_TYPE:"none";a.forDocumentTree=e,a.forSubtree=b}),window.CustomElements.addModule(function(a){function b(a,b){return c(a,b)||d(a,b)}function c(b,c){return!!a.upgrade(b,c)||void(c&&g(b))}function d(a,b){t(a,function(a){return!!c(a,b)||void 0})}function e(a){x.push(a),w||(w=!0,setTimeout(f))}function f(){w=!1;for(var a,b=x,c=0,d=b.length;d>c&&(a=b[c]);c++)a();x=[]}function g(a){v?e(function(){h(a)}):h(a)}function h(a){a.__upgraded__&&!a.__attached&&(a.__attached=!0,a.attachedCallback&&a.attachedCallback())}function i(a){j(a),t(a,function(a){j(a)})}function j(a){v?e(function(){k(a)}):k(a)}function k(a){a.__upgraded__&&a.__attached&&(a.__attached=!1,a.detachedCallback&&a.detachedCallback())}function l(a){for(var b=a,c=window.wrap(document);b;){if(b==c)return!0;b=b.parentNode||b.nodeType===Node.DOCUMENT_FRAGMENT_NODE&&b.host}}function m(a){if(a.shadowRoot&&!a.shadowRoot.__watched){s.dom&&console.log("watching shadow-root for: ",a.localName);for(var b=a.shadowRoot;b;)p(b),b=b.olderShadowRoot}}function n(a,c){if(s.dom){var d=c[0];if(d&&"childList"===d.type&&d.addedNodes&&d.addedNodes){for(var e=d.addedNodes[0];e&&e!==document&&!e.host;)e=e.parentNode;var f=e&&(e.URL||e._URL||e.host&&e.host.localName)||"";f=f.split("/?").shift().split("/").pop()}console.group("mutations (%d) [%s]",c.length,f||"")}var g=l(a);c.forEach(function(a){"childList"===a.type&&(y(a.addedNodes,function(a){a.localName&&b(a,g)}),y(a.removedNodes,function(a){a.localName&&i(a)}))}),s.dom&&console.groupEnd()}function o(a){for(a=window.wrap(a),a||(a=window.wrap(document));a.parentNode;)a=a.parentNode;var b=a.__observer;b&&(n(a,b.takeRecords()),f())}function p(a){if(!a.__observer){var b=new MutationObserver(n.bind(this,a));b.observe(a,{childList:!0,subtree:!0}),a.__observer=b}}function q(a){a=window.wrap(a),s.dom&&console.group("upgradeDocument: ",a.baseURI.split("/").pop());var c=a===window.wrap(document);b(a,c),p(a),s.dom&&console.groupEnd(); +}function r(a){u(a,q)}var s=a.flags,t=a.forSubtree,u=a.forDocumentTree,v=window.MutationObserver._isPolyfilled&&s["throttle-attached"];a.hasPolyfillMutations=v,a.hasThrottledAttached=v;var w=!1,x=[],y=Array.prototype.forEach.call.bind(Array.prototype.forEach),z=Element.prototype.createShadowRoot;z&&(Element.prototype.createShadowRoot=function(){var a=z.call(this);return window.CustomElements.watchShadow(this),a}),a.watchShadow=m,a.upgradeDocumentTree=r,a.upgradeDocument=q,a.upgradeSubtree=d,a.upgradeAll=b,a.attached=g,a.takeRecords=o}),window.CustomElements.addModule(function(a){function 
b(b,d){if(!b.__upgraded__&&b.nodeType===Node.ELEMENT_NODE){var e=b.getAttribute("is"),f=a.getRegisteredDefinition(b.localName)||a.getRegisteredDefinition(e);if(f&&(e&&f.tag==b.localName||!e&&!f.extends))return c(b,f,d)}}function c(b,c,e){return g.upgrade&&console.group("upgrade:",b.localName),c.is&&b.setAttribute("is",c.is),d(b,c),b.__upgraded__=!0,f(b),e&&a.attached(b),a.upgradeSubtree(b,e),g.upgrade&&console.groupEnd(),b}function d(a,b){Object.__proto__?a.__proto__=b.prototype:(e(a,b.prototype,b.native),a.__proto__=b.prototype)}function e(a,b,c){for(var d={},e=b;e!==c&&e!==HTMLElement.prototype;){for(var f,g=Object.getOwnPropertyNames(e),h=0;f=g[h];h++)d[f]||(Object.defineProperty(a,f,Object.getOwnPropertyDescriptor(e,f)),d[f]=1);e=Object.getPrototypeOf(e)}}function f(a){a.createdCallback&&a.createdCallback()}var g=a.flags;a.upgrade=b,a.upgradeWithDefinition=c,a.implementPrototype=d}),window.CustomElements.addModule(function(a){function b(b,d){var i=d||{};if(!b)throw new Error("document.registerElement: first argument `name` must not be empty");if(b.indexOf("-")<0)throw new Error("document.registerElement: first argument ('name') must contain a dash ('-'). Argument provided was '"+String(b)+"'.");if(e(b))throw new Error("Failed to execute 'registerElement' on 'Document': Registration failed for type '"+String(b)+"'. The type name is invalid.");if(j(b))throw new Error("DuplicateDefinitionError: a type with name '"+String(b)+"' is already registered");return i.prototype||(i.prototype=Object.create(HTMLElement.prototype)),i.__name=b.toLowerCase(),i.lifecycle=i.lifecycle||{},i.ancestry=f(i.extends),g(i),h(i),c(i.prototype),k(i.__name,i),i.ctor=l(i),i.ctor.prototype=i.prototype,i.prototype.constructor=i.ctor,a.ready&&r(document),i.ctor}function c(a){if(!a.setAttribute._polyfilled){var b=a.setAttribute;a.setAttribute=function(a,c){d.call(this,a,c,b)};var c=a.removeAttribute;a.removeAttribute=function(a){d.call(this,a,null,c)},a.setAttribute._polyfilled=!0}}function d(a,b,c){a=a.toLowerCase();var d=this.getAttribute(a);c.apply(this,arguments);var e=this.getAttribute(a);this.attributeChangedCallback&&e!==d&&this.attributeChangedCallback(a,d,e)}function e(a){for(var b=0;b=0&&u(d,HTMLElement),d)}function o(a,b){var c=a[b];a[b]=function(){var a=c.apply(this,arguments);return s(a),a}}var p,q=a.isIE,r=a.upgradeDocumentTree,s=a.upgradeAll,t=a.upgradeWithDefinition,u=a.implementPrototype,v=a.useNative,w=["annotation-xml","color-profile","font-face","font-face-src","font-face-uri","font-face-format","font-face-name","missing-glyph"],x={},y="http://www.w3.org/1999/xhtml",z=document.createElement.bind(document),A=document.createElementNS.bind(document);p=Object.__proto__||v?function(a,b){return a instanceof b}:function(a,b){if(a instanceof b)return!0;for(var c=a;c;){if(c===b.prototype)return!0;c=c.__proto__}return!1},o(Node.prototype,"cloneNode"),o(document,"importNode"),q&&!function(){var a=document.importNode;document.importNode=function(){var b=a.apply(document,arguments);if(b.nodeType==b.DOCUMENT_FRAGMENT_NODE){var c=document.createDocumentFragment();return c.appendChild(b),c}return b}}(),document.registerElement=b,document.createElement=n,document.createElementNS=m,a.registry=x,a.instanceof=p,a.reservedTagList=w,a.getRegisteredDefinition=j,document.register=document.registerElement}),function(a){function b(){f(window.wrap(document)),window.CustomElements.ready=!0;var 
a=window.requestAnimationFrame||function(a){setTimeout(a,16)};a(function(){setTimeout(function(){window.CustomElements.readyTime=Date.now(),window.HTMLImports&&(window.CustomElements.elapsed=window.CustomElements.readyTime-window.HTMLImports.readyTime),document.dispatchEvent(new CustomEvent("WebComponentsReady",{bubbles:!0}))})})}var c=a.useNative,d=a.initializeModules;if(a.isIE,c){var e=function(){};a.watchShadow=e,a.upgrade=e,a.upgradeAll=e,a.upgradeDocumentTree=e,a.upgradeSubtree=e,a.takeRecords=e,a.instanceof=function(a,b){return a instanceof b}}else d();var f=a.upgradeDocumentTree,g=a.upgradeDocument;if(window.wrap||(window.ShadowDOMPolyfill?(window.wrap=window.ShadowDOMPolyfill.wrapIfNeeded,window.unwrap=window.ShadowDOMPolyfill.unwrapIfNeeded):window.wrap=window.unwrap=function(a){return a}),window.HTMLImports&&(window.HTMLImports.__importsParsingHook=function(a){a.import&&g(wrap(a.import))}),"complete"===document.readyState||a.flags.eager)b();else if("interactive"!==document.readyState||window.attachEvent||window.HTMLImports&&!window.HTMLImports.ready){var h=window.HTMLImports&&!window.HTMLImports.ready?"HTMLImportsLoaded":"DOMContentLoaded";window.addEventListener(h,b)}else b()}(window.CustomElements),function(a){var b=document.createElement("style");b.textContent="body {transition: opacity ease-in 0.2s; } \nbody[unresolved] {opacity: 0; display: block; overflow: hidden; position: relative; } \n";var c=document.querySelector("head");c.insertBefore(b,c.firstChild)}(window.WebComponents),function(){function a(){document.body.removeAttribute("unresolved")}window.WebComponents?addEventListener("WebComponentsReady",a):"interactive"===document.readyState||"complete"===document.readyState?a():addEventListener("DOMContentLoaded",a)}(),window.Polymer={Settings:function(){var a=window.Polymer||{};location.search.slice(1).split("&").forEach(function(b){b=b.split("="),b[0]&&(a[b[0]]=b[1]||!0)});var b="shadow"===a.dom,c=Boolean(Element.prototype.createShadowRoot),d=c&&!window.ShadowDOMPolyfill,e=b&&c,f=Boolean("import"in document.createElement("link")),g=f,h=!window.CustomElements||window.CustomElements.useNative;return{wantShadow:b,hasShadow:c,nativeShadow:d,useShadow:e,useNativeShadow:e&&d,useNativeImports:g,useNativeCustomElements:h}}()},function(){var a=window.Polymer;window.Polymer=function(a){"function"==typeof a&&(a=a.prototype),a||(a={});var c=b(a);a=c.prototype;var d={prototype:a};return a.extends&&(d.extends=a.extends),Polymer.telemetry._registrate(a),document.registerElement(a.is,d),c};var b=function(a){var b=Polymer.Base;return a.extends&&(b=Polymer.Base._getExtendedPrototype(a.extends)),a=Polymer.Base.chainObject(a,b),a.registerCallback(),a.constructor};if(window.Polymer=Polymer,a)for(var c in a)Polymer[c]=a[c];Polymer.Class=b}(),Polymer.telemetry={registrations:[],_regLog:function(a){console.log("["+a.is+"]: 
registered")},_registrate:function(a){this.registrations.push(a),Polymer.log&&this._regLog(a)},dumpRegistrations:function(){this.registrations.forEach(this._regLog)}},Object.defineProperty(window,"currentImport",{enumerable:!0,configurable:!0,get:function(){return(document._currentScript||document.currentScript).ownerDocument}}),Polymer.RenderStatus={_ready:!1,_callbacks:[],whenReady:function(a){this._ready?a():this._callbacks.push(a)},_makeReady:function(){this._ready=!0,this._callbacks.forEach(function(a){a()}),this._callbacks=[]},_catchFirstRender:function(){requestAnimationFrame(function(){Polymer.RenderStatus._makeReady()})}},window.HTMLImports?HTMLImports.whenReady(function(){Polymer.RenderStatus._catchFirstRender()}):Polymer.RenderStatus._catchFirstRender(),Polymer.ImportStatus=Polymer.RenderStatus,Polymer.ImportStatus.whenLoaded=Polymer.ImportStatus.whenReady,Polymer.Base={__isPolymerInstance__:!0,_addFeature:function(a){this.extend(this,a)},registerCallback:function(){this._desugarBehaviors(),this._doBehavior("beforeRegister"),this._registerFeatures(),this._doBehavior("registered")},createdCallback:function(){Polymer.telemetry.instanceCount++,this.root=this,this._doBehavior("created"),this._initFeatures()},attachedCallback:function(){Polymer.RenderStatus.whenReady(function(){this.isAttached=!0,this._doBehavior("attached")}.bind(this))},detachedCallback:function(){this.isAttached=!1,this._doBehavior("detached")},attributeChangedCallback:function(a){this._attributeChangedImpl(a),this._doBehavior("attributeChanged",arguments)},_attributeChangedImpl:function(a){this._setAttributeToProperty(this,a)},extend:function(a,b){return a&&b&&Object.getOwnPropertyNames(b).forEach(function(c){this.copyOwnProperty(c,b,a)},this),a||b},mixin:function(a,b){for(var c in b)a[c]=b[c];return a},copyOwnProperty:function(a,b,c){var d=Object.getOwnPropertyDescriptor(b,a);d&&Object.defineProperty(c,a,d)},_log:console.log.apply.bind(console.log,console),_warn:console.warn.apply.bind(console.warn,console),_error:console.error.apply.bind(console.error,console),_logf:function(){return this._logPrefix.concat([this.is]).concat(Array.prototype.slice.call(arguments,0))}},Polymer.Base._logPrefix=function(){var a=window.chrome||/firefox/i.test(navigator.userAgent);return a?["%c[%s::%s]:","font-weight: bold; background-color:#EEEE00;"]:["[%s::%s]:"]}(),Polymer.Base.chainObject=function(a,b){return a&&b&&a!==b&&(Object.__proto__||(a=Polymer.Base.extend(Object.create(b),a)),a.__proto__=b),a},Polymer.Base=Polymer.Base.chainObject(Polymer.Base,HTMLElement.prototype),window.CustomElements?Polymer.instanceof=CustomElements.instanceof:Polymer.instanceof=function(a,b){return a instanceof b},Polymer.isInstance=function(a){return Boolean(a&&a.__isPolymerInstance__)},Polymer.telemetry.instanceCount=0,function(){function f(){if(e){var a=document._currentScript||document.currentScript,b=a&&a.ownerDocument||document;b&&CustomElements.upgradeAll(b)}}var a={},b={},c=function(c){return a[c]||b[c.toLowerCase()]},d=function(){return document.createElement("dom-module")};d.prototype=Object.create(HTMLElement.prototype),Polymer.Base.extend(d.prototype,{constructor:d,createdCallback:function(){this.register()},register:function(c){var c=c||this.id||this.getAttribute("name")||this.getAttribute("is");c&&(this.id=c,a[c]=this,b[c.toLowerCase()]=this)},import:function(a,b){if(a){var d=c(a);return d||(f(),d=c(a)),d&&b&&(d=d.querySelector(b)),d}}});var 
e=window.CustomElements&&!CustomElements.useNative;document.registerElement("dom-module",d)}(),Polymer.Base._addFeature({_prepIs:function(){if(!this.is){var a=(document._currentScript||document.currentScript).parentNode;if("dom-module"===a.localName){var b=a.id||a.getAttribute("name")||a.getAttribute("is");this.is=b}}this.is&&(this.is=this.is.toLowerCase())}}),Polymer.Base._addFeature({behaviors:[],_desugarBehaviors:function(){this.behaviors.length&&(this.behaviors=this._desugarSomeBehaviors(this.behaviors))},_desugarSomeBehaviors:function(a){a=this._flattenBehaviorsList(a);for(var b=a.length-1;b>=0;b--)this._mixinBehavior(a[b]);return a},_flattenBehaviorsList:function(a){var b=[];return a.forEach(function(a){a instanceof Array?b=b.concat(this._flattenBehaviorsList(a)):a?b.push(a):this._warn(this._logf("_flattenBehaviorsList","behavior is null, check for missing or 404 import"))},this),b},_mixinBehavior:function(a){Object.getOwnPropertyNames(a).forEach(function(b){switch(b){case"hostAttributes":case"registered":case"properties":case"observers":case"listeners":case"created":case"attached":case"detached":case"attributeChanged":case"configure":case"ready":break;default:this.hasOwnProperty(b)||this.copyOwnProperty(b,a,this)}},this)},_prepBehaviors:function(){this._prepFlattenedBehaviors(this.behaviors)},_prepFlattenedBehaviors:function(a){for(var b=0,c=a.length;b.")),this._template&&!this._template.content&&HTMLTemplateElement.bootstrap&&(HTMLTemplateElement.decorate(this._template),HTMLTemplateElement.bootstrap(this._template.content))},_stampTemplate:function(){this._template&&(this.root=this.instanceTemplate(this._template))},instanceTemplate:function(a){var b=document.importNode(a._content||a.content,!0);return b}}),function(){var a=Polymer.Base.attachedCallback;Polymer.Base._addFeature({_hostStack:[],ready:function(){},_pushHost:function(a){this.dataHost=a=a||Polymer.Base._hostStack[Polymer.Base._hostStack.length-1],a&&a._clients&&a._clients.push(this),this._beginHost()},_beginHost:function(){Polymer.Base._hostStack.push(this),this._clients||(this._clients=[])},_popHost:function(){Polymer.Base._hostStack.pop()},_tryReady:function(){this._canReady()&&this._ready()},_canReady:function(){return!this.dataHost||this.dataHost._clientsReadied},_ready:function(){this._beforeClientsReady(),this._setupRoot(),this._readyClients(),this._afterClientsReady(),this._readySelf()},_readyClients:function(){this._beginDistribute();for(var d,a=this._clients,b=0,c=a.length;b0||g>0;)if(0!=f)if(0!=g){var m,j=a[f-1][g-1],k=a[f-1][g],l=a[f][g-1];m=k":return">";case'"':return""";case" ":return" "}}function d(b){return b.replace(a,c)}function e(a){return a.replace(b,c)}function f(a){for(var b={},c=0;c";case Node.TEXT_NODE:var n=a.data;return b&&h[b.localName]?n:e(n);case Node.COMMENT_NODE:return"";default:throw console.error(a),new Error("not implemented")}}function j(a,b){a instanceof HTMLTemplateElement&&(a=a.content);var c="",d=Polymer.dom(a).childNodes;d=b?a._composedChildren:d;for(var g,e=0,f=d.length;e]/g,g=f(["area","base","br","col","command","embed","hr","img","input","keygen","link","meta","param","source","track","wbr"]),h=f(["style","script","xmp","iframe","noembed","noframes","plaintext","noscript"]);return{getInnerHTML:j}}(),Polymer.DomApi=function(){"use strict";function m(a,b){return a=a||document,a.__domApi||(a.__domApi=new h(a,b)),a.__domApi}function n(a){return Boolean(a.__domApi)}function o(a){var b=a._lightChildren;return b?b:a.childNodes}function p(a){return 
a._composedChildren||(a._composedChildren=Array.prototype.slice.call(a.childNodes)),a._composedChildren}function q(a,b,c){var d=p(a),e=c?d.indexOf(c):-1;if(b.nodeType===Node.DOCUMENT_FRAGMENT_NODE){for(var f=p(b),g=0;g=0?d:c.length,0,a)}function t(a,b){if(b._composedParent=null,a){var c=p(a),d=c.indexOf(b);d>=0&&c.splice(d,1)}}function u(a){if(!a._lightChildren){for(var e,b=Array.prototype.slice.call(a.childNodes),c=0,d=b.length;c=this._FLUSH_MAX&&console.warn("Polymer.dom.flush aborted. Flush may not be complete.")},_prepareFlush:function(){this._needsTakeRecords&&CustomElements.takeRecords();for(var a=0;a=0&&this._staticFlushList.splice(b,1)},addDebouncer:function(a){this._debouncers.push(a),this._finishDebouncer=Polymer.Debounce(this._finishDebouncer,this._finishFlush)},_finishFlush:function(){Polymer.dom._debouncers=[]}}),Polymer.EventApi=function(){"use strict";var a=Polymer.DomApi.ctor,b=Polymer.Settings;a.Event=function(a){this.event=a},b.useShadow?a.Event.prototype={get rootTarget(){return this.event.path[0]},get localTarget(){return this.event.target},get path(){return this.event.path}}:a.Event.prototype={get rootTarget(){return this.event.target},get localTarget(){for(var a=this.event.currentTarget,b=a&&Polymer.dom(a).getOwnerRoot(),c=this.path,d=0;d=0&&(this._listeners.splice(b,1),a._nodes=[]),this._hasListeners()||(this._cleanup(),this._isSetup=!1)},_setup:function(){this._observeContentElements(this.domApi.childNodes)},_cleanup:function(){this._unobserveContentElements(this.domApi.childNodes)},_hasListeners:function(){return Boolean(this._listeners.length)},_scheduleNotify:function(){this._debouncer&&this._debouncer.stop(),this._debouncer=Polymer.Debounce(this._debouncer,this._notify),this._debouncer.context=this,Polymer.dom.addDebouncer(this._debouncer)},notify:function(){this._hasListeners()&&this._scheduleNotify()},_notify:function(a){this._beforeCallListeners(),this._callListeners()},_beforeCallListeners:function(){this._updateContentElements()},_updateContentElements:function(){this._observeContentElements(this.domApi.childNodes)},_observeContentElements:function(a){for(var c,b=0;b0&&(j=g.substring(k+2),g=g.substring(0,k),i=!0),c.push({compoundIndex:c.length,value:g,mode:f,negate:h,event:j,customEvent:i}),e=b.lastIndex}if(e&&e=c.left&&d<=c.right&&e>=c.top&&e<=c.bottom)}return!1}function s(a){for(var f,b=Polymer.dom(a).path,c="auto",e=0;e-1&&m.reset&&m.reset();for(var m,l=0;lf:"pan-y"===d&&(e=f>g)),e?a.preventDefault():v.prevent("track")}},add:function(a,c,d){var e=this.gestures[c],f=e.deps,g=e.name,h=a[b];h||(a[b]=h={});for(var k,m,j=0;j-1||(m=h[k],m||(h[k]=m={_count:0}),0===m._count&&a.addEventListener(k,this.handleNative),m[g]=(m[g]||0)+1,m._count=(m._count||0)+1);a.addEventListener(c,d),e.touchAction&&this.setTouchAction(a,e.touchAction)},remove:function(a,c,d){var e=this.gestures[c],f=e.deps,g=e.name,h=a[b];if(h)for(var j,k,i=0;ig&&this.moves.shift(),this.moves.push(a)},movefn:function(){},upfn:function(){},prevent:!1},reset:function(){this.info.state="start",this.info.started=!1,this.info.moves=[],this.info.x=0,this.info.y=0,this.info.prevent=!1,u(this.info)},hasMovedEnough:function(a,b){if(this.info.prevent)return!1;if(this.info.started)return!0;var c=Math.abs(this.info.x-a),d=Math.abs(this.info.y-b);return c>=f||d>=f},mousedown:function(a){if(p(a)){var b=v.findOriginalTarget(a),c=this,d=function(d){var 
e=d.clientX,f=d.clientY;c.hasMovedEnough(e,f)&&(c.info.state=c.info.started?"mouseup"===d.type?"end":"track":"start",c.info.addMove({x:e,y:f}),p(d)||(c.info.state="end",u(c.info)),c.fire(b,d),c.info.started=!0)},e=function(b){c.info.started&&(v.prevent("tap"),d(b)),u(c.info)};t(this.info,d,e),this.info.x=a.clientX,this.info.y=a.clientY}},touchstart:function(a){var b=a.changedTouches[0];this.info.x=b.clientX,this.info.y=b.clientY},touchmove:function(a){var b=v.findOriginalTarget(a),c=a.changedTouches[0],d=c.clientX,e=c.clientY;this.hasMovedEnough(d,e)&&(this.info.addMove({x:d,y:e}),this.fire(b,c),this.info.state="track",this.info.started=!0)},touchend:function(a){var b=v.findOriginalTarget(a),c=a.changedTouches[0];this.info.started&&(v.prevent("tap"),this.info.state="end",this.info.addMove({x:c.clientX,y:c.clientY}),this.fire(b,c))},fire:function(a,b){var g,c=this.info.moves[this.info.moves.length-2],d=this.info.moves[this.info.moves.length-1],e=d.x-this.info.x,f=d.y-this.info.y,h=0;return c&&(g=d.x-c.x,h=d.y-c.y),v.fire(a,"track",{state:this.info.state,x:b.clientX,y:b.clientY,dx:e,dy:f,ddx:g,ddy:h,sourceEvent:b,hover:function(){return v.deepTargetFind(b.clientX,b.clientY)}})}}),v.register({name:"tap",deps:["mousedown","click","touchstart","touchend"],flow:{start:["mousedown","touchstart"],end:["click","touchend"]},emits:["tap"],info:{x:NaN,y:NaN,prevent:!1},reset:function(){this.info.x=NaN,this.info.y=NaN,this.info.prevent=!1},save:function(a){this.info.x=a.clientX,this.info.y=a.clientY},mousedown:function(a){p(a)&&this.save(a)},click:function(a){p(a)&&this.forward(a)},touchstart:function(a){this.save(a.changedTouches[0])},touchend:function(a){this.forward(a.changedTouches[0])},forward:function(a){var b=Math.abs(a.clientX-this.info.x),c=Math.abs(a.clientY-this.info.y),d=v.findOriginalTarget(a);(isNaN(b)||isNaN(c)||b<=e&&c<=e||q(a))&&(this.info.prevent||v.fire(d,"tap",{x:a.clientX,y:a.clientY,sourceEvent:a}))}});var w={x:"pan-x",y:"pan-y",none:"none",all:"auto"};Polymer.Base._addFeature({_listen:function(a,b,c){v.gestures[b]?v.add(a,b,c):a.addEventListener(b,c); +},_unlisten:function(a,b,c){v.gestures[b]?v.remove(a,b,c):a.removeEventListener(b,c)},setScrollDirection:function(a,b){b=b||this,v.setTouchAction(b,w[a]||"auto")}}),Polymer.Gestures=v}(),Polymer.Async={_currVal:0,_lastVal:0,_callbacks:[],_twiddleContent:0,_twiddle:document.createTextNode(""),run:function(a,b){return b>0?~setTimeout(a,b):(this._twiddle.textContent=this._twiddleContent++,this._callbacks.push(a),this._currVal++)},cancel:function(a){if(a<0)clearTimeout(~a);else{var b=a-this._lastVal;if(b>=0){if(!this._callbacks[b])throw"invalid async handle: "+a;this._callbacks[b]=null}}},_atEndOfMicrotask:function(){for(var a=this._callbacks.length,b=0;b=0)return a.splice(c,1)}else{var d=this._get(a);if(c=d.indexOf(b),c>=0)return this.splice(a,c,1)}},transform:function(a,b){b=b||this,b.style.webkitTransform=a,b.style.transform=a},translate3d:function(a,b,c,d){d=d||this,this.transform("translate3d("+a+","+b+","+c+")",d)},importHref:function(a,b,c){var d=document.createElement("link");return d.rel="import",d.href=a,b&&(d.onload=b.bind(this)),c&&(d.onerror=c.bind(this)),document.head.appendChild(d),d},create:function(a,b){var c=document.createElement(a);if(b)for(var d in b)c[d]=b[d];return c},isLightDescendant:function(a){return this!==a&&this.contains(a)&&Polymer.dom(this).getOwnerRoot()===Polymer.dom(a).getOwnerRoot()},isLocalDescendant:function(a){return 
this.root===Polymer.dom(a).getOwnerRoot()}}),Polymer.Bind={prepareModel:function(a){a._propertyEffects={},a._bindListeners=[],Polymer.Base.mixin(a,this._modelApi)},_modelApi:{_notifyChange:function(a){var b=Polymer.CaseMap.camelToDashCase(a)+"-changed";Polymer.Base.fire(b,{value:this[a]},{bubbles:!1,node:this})},_propertySetter:function(a,b,c,d){var e=this.__data__[a];return e===b||e!==e&&b!==b||(this.__data__[a]=b,"object"==typeof b&&this._clearPath(a),this._propertyChanged&&this._propertyChanged(a,b,e),c&&this._effectEffects(a,b,c,e,d)),e},__setProperty:function(a,b,c,d){d=d||this;var e=d._propertyEffects&&d._propertyEffects[a];e?d._propertySetter(a,b,e,c):d[a]=b},_effectEffects:function(a,b,c,d,e){c.forEach(function(c){var f=Polymer.Bind["_"+c.kind+"Effect"];f&&f.call(this,a,b,c.effect,d,e)},this)},_clearPath:function(a){for(var b in this.__data__)0===b.indexOf(a+".")&&(this.__data__[b]=void 0)}},ensurePropertyEffects:function(a,b){var c=a._propertyEffects[b];return c||(c=a._propertyEffects[b]=[]),c},addPropertyEffect:function(a,b,c,d){var e=this.ensurePropertyEffects(a,b);e.push({kind:c,effect:d})},createBindings:function(a){var b=a._propertyEffects;if(b)for(var c in b){var d=b[c];d.sort(this._sortPropertyEffects),this._createAccessors(a,c,d)}},_sortPropertyEffects:function(){var a={compute:0,annotation:1,computedAnnotation:2,reflect:3,notify:4,observer:5,complexObserver:6,function:7};return function(b,c){return a[b.kind]-a[c.kind]}}(),_createAccessors:function(a,b,c){var d={get:function(){return this.__data__[b]}},e=function(a){this._propertySetter(b,a,c)},f=a.getPropertyInfo&&a.getPropertyInfo(b);f&&f.readOnly?f.computed||(a["_set"+this.upper(b)]=e):d.set=e,Object.defineProperty(a,b,d)},upper:function(a){return a[0].toUpperCase()+a.substring(1)},_addAnnotatedListener:function(a,b,c,d,e){var f=this._notedListenerFactory(c,d,this._isStructured(d),this._isEventBogus),g=e||Polymer.CaseMap.camelToDashCase(c)+"-changed";a._bindListeners.push({index:b,property:c,path:d,changedFn:f,event:g})},_isStructured:function(a){return a.indexOf(".")>0},_isEventBogus:function(a,b){return a.path&&a.path[0]!==b},_notedListenerFactory:function(a,b,c,d){return function(e,f){if(!d(e,f))if(e.detail&&e.detail.path)this._notifyPath(this._fixPath(b,a,e.detail.path),e.detail.value);else{var g=f[a];c?this.__data__[b]!=g&&this.set(b,g):this[b]=f[a]}}},prepareInstance:function(a){a.__data__=Object.create(null)},setupBindListeners:function(a){a._bindListeners.forEach(function(b){var c=a._nodes[b.index];c.addEventListener(b.event,a._notifyListener.bind(a,b.changedFn))})}},Polymer.Base.extend(Polymer.Bind,{_shouldAddListener:function(a){return a.name&&"attribute"!=a.kind&&"text"!=a.kind&&!a.isCompound&&"{"===a.parts[0].mode&&!a.parts[0].negate},_annotationEffect:function(a,b,c){a!=c.value&&(b=this._get(c.value),this.__data__[c.value]=b);var d=c.negate?!b:b;if(!c.customEvent||this._nodes[c.index][c.name]!==d)return this._applyEffectValue(c,d)},_reflectEffect:function(a){this.reflectPropertyToAttribute(a)},_notifyEffect:function(a,b,c,d,e){e||this._notifyChange(a)},_functionEffect:function(a,b,c,d,e){c.call(this,a,b,d,e)},_observerEffect:function(a,b,c,d){var e=this[c.method];e?e.call(this,b,d):this._warn(this._logf("_observerEffect","observer method `"+c.method+"` not defined"))},_complexObserverEffect:function(a,b,c){var d=this[c.method];if(d){var e=Polymer.Bind._marshalArgs(this.__data__,c,a,b);e&&d.apply(this,e)}else this._warn(this._logf("_complexObserverEffect","observer method `"+c.method+"` not 
defined"))},_computeEffect:function(a,b,c){var d=Polymer.Bind._marshalArgs(this.__data__,c,a,b);if(d){var e=this[c.method];e?this.__setProperty(c.name,e.apply(this,d)):this._warn(this._logf("_computeEffect","compute method `"+c.method+"` not defined"))}},_annotatedComputationEffect:function(a,b,c){var d=this._rootDataHost||this,e=d[c.method];if(e){var f=Polymer.Bind._marshalArgs(this.__data__,c,a,b);if(f){var g=e.apply(d,f);c.negate&&(g=!g),this._applyEffectValue(c,g)}}else d._warn(d._logf("_annotatedComputationEffect","compute method `"+c.method+"` not defined"))},_marshalArgs:function(a,b,c,d){for(var e=[],f=b.args,g=0,h=f.length;g1&&void 0===k)return;if(i.wildcard){var l=0===j.indexOf(c+"."),m=0===b.trigger.name.indexOf(j)&&!l;e[g]={path:m?c:j,value:m?d:k,base:k}}else e[g]=k}return e}}),Polymer.Base._addFeature({_addPropertyEffect:function(a,b,c){Polymer.Bind.addPropertyEffect(this,a,b,c)},_prepEffects:function(){Polymer.Bind.prepareModel(this),this._addAnnotationEffects(this._notes)},_prepBindings:function(){Polymer.Bind.createBindings(this)},_addPropertyEffects:function(a){if(a)for(var b in a){var c=a[b];c.observer&&this._addObserverEffect(b,c.observer),c.computed&&(c.readOnly=!0,this._addComputedEffect(b,c.computed)),c.notify&&this._addPropertyEffect(b,"notify"),c.reflectToAttribute&&this._addPropertyEffect(b,"reflect"),c.readOnly&&Polymer.Bind.ensurePropertyEffects(this,b)}},_addComputedEffect:function(a,b){var c=this._parseMethod(b);c.args.forEach(function(b){this._addPropertyEffect(b.model,"compute",{method:c.method,args:c.args,trigger:b,name:a})},this)},_addObserverEffect:function(a,b){this._addPropertyEffect(a,"observer",{method:b,property:a})},_addComplexObserverEffects:function(a){a&&a.forEach(function(a){this._addComplexObserverEffect(a)},this)},_addComplexObserverEffect:function(a){var b=this._parseMethod(a);b.args.forEach(function(a){this._addPropertyEffect(a.model,"complexObserver",{method:b.method,args:b.args,trigger:a})},this)},_addAnnotationEffects:function(a){this._nodes=[],a.forEach(function(a){var b=this._nodes.push(a)-1;a.bindings.forEach(function(a){this._addAnnotationEffect(a,b)},this)},this)},_addAnnotationEffect:function(a,b){Polymer.Bind._shouldAddListener(a)&&Polymer.Bind._addAnnotatedListener(this,b,a.name,a.parts[0].value,a.parts[0].event);for(var c=0;c="0"&&d<="9"&&(d="#"),d){case"'":case'"':c.value=b.slice(1,-1),c.literal=!0;break;case"#":c.value=Number(b),c.literal=!0}return c.literal||(c.structured=b.indexOf(".")>0,c.structured&&(c.wildcard=".*"==b.slice(-2),c.wildcard&&(c.name=b.slice(0,-2)))),c},_marshalInstanceEffects:function(){Polymer.Bind.prepareInstance(this),Polymer.Bind.setupBindListeners(this)},_applyEffectValue:function(a,b){var c=this._nodes[a.index],d=a.name;if(a.isCompound){var e=c.__compoundStorage__[d];e[a.compoundIndex]=b,b=e.join("")}return"attribute"!=a.kind?("className"===d&&(b=this._scopeElementClass(c,b)),("textContent"===d||"input"==c.localName&&"value"==d)&&(b=void 0==b?"":b),c[d]=b):void this.serializeValueToAttribute(b,d,c)},_executeStaticEffects:function(){this._propertyEffects.__static__&&this._effectEffects("__static__",null,this._propertyEffects.__static__)}}),Polymer.Base._addFeature({_setupConfigure:function(a){this._config={};for(var b in a)void 0!==a[b]&&(this._config[b]=a[b]);this._handlers=[]},_marshalAttributes:function(){this._takeAttributesToModel(this._config)},_attributeChangedImpl:function(a){var 
b=this._clientsReadied?this:this._config;this._setAttributeToProperty(b,a)},_configValue:function(a,b){this._config[a]=b},_beforeClientsReady:function(){this._configure()},_configure:function(){this._configureAnnotationReferences(),this._aboveConfig=this.mixin({},this._config);var a={};this.behaviors.forEach(function(b){this._configureProperties(b.properties,a)},this),this._configureProperties(this.properties,a),this._mixinConfigure(a,this._aboveConfig),this._config=a,this._distributeConfig(this._config)},_configureProperties:function(a,b){for(var c in a){var d=a[c];if(void 0!==d.value){var e=d.value;"function"==typeof e&&(e=e.call(this,this._config)),b[c]=e}}},_mixinConfigure:function(a,b){for(var c in b)this.getPropertyInfo(c).readOnly||(a[c]=b[c])},_distributeConfig:function(a){var b=this._propertyEffects;if(b)for(var c in a){var d=b[c];if(d)for(var g,e=0,f=d.length;e1){for(var h=0;h=0},removeCustomProps:function(a){return a=this.removeCustomPropAssignment(a),this.removeCustomPropApply(a)},removeCustomPropAssignment:function(a){return a.replace(this._rx.customProp,"").replace(this._rx.mixinProp,"")},removeCustomPropApply:function(a){return a.replace(this._rx.mixinApply,"").replace(this._rx.varApply,"")},types:{STYLE_RULE:1,KEYFRAMES_RULE:7,MEDIA_RULE:4,MIXIN_RULE:1e3},OPEN_BRACE:"{",CLOSE_BRACE:"}",_rx:{comments:/\/\*[^*]*\*+([^\/*][^*]*\*+)*\//gim,port:/@import[^;]*;/gim,customProp:/(?:^|[\s;])--[^;{]*?:[^{};]*?(?:[;\n]|$)/gim,mixinProp:/(?:^|[\s;])?--[^;{]*?:[^{;]*?{[^}]*?}(?:[;\n]|$)?/gim,mixinApply:/@apply[\s]*\([^)]*?\)[\s]*(?:[;\n]|$)?/gim,varApply:/[^;:]*?:[^;]*var[^;]*(?:[;\n]|$)?/gim,keyframesRule:/^@[^\s]*keyframes/},VAR_START:"--",MEDIA_START:"@media",AT_START:"@"};return a}(),Polymer.StyleUtil=function(){return{MODULE_STYLES_SELECTOR:"style, link[rel=import][type~=css], template",INCLUDE_ATTR:"include",toCssText:function(a,b,c){return"string"==typeof a&&(a=this.parser.parse(a)),b&&this.forEachStyleRule(a,b),this.parser.stringify(a,c)},forRulesInStyles:function(a,b){if(a)for(var e,c=0,d=a.length;c=0?f=!0:a.indexOf(h)>=0?(a=a.replace(j,function(a,b,c){return d+c}),a=a.replace(h,d)):0!==e&&(a=c?this._transformSimpleSelector(a,c):a),a.indexOf(m)>=0&&(b="");var g;return e>=0&&(a=a.replace(n," "),g=!0),{value:a,combinator:b,stop:g,hostContext:f}},_transformSimpleSelector:function(a,b){var c=a.split(r);return c[0]+=b,c.join(r)},documentRule:function(b){b.selector=b.parsedSelector,this.normalizeRootSelector(b),a||this._transformRule(b,this._transformDocumentSelector)},normalizeRootSelector:function(a){a.selector===i&&(a.selector="body")},_transformDocumentSelector:function(a){return a.match(n)?this._transformComplexSelector(a,e):this._transformSimpleSelector(a.trim(),e)},SCOPE_NAME:"style-scope"},d=c.SCOPE_NAME,e=":not(["+d+"]):not(."+d+")",f=",",g=/(^|[\s>+~]+)([^\s>+~]+)/g,h=":host",i=":root",j=/(\:host)(?:\(((?:\([^)(]*\)|[^)(]*)+?)\))/g,k=":host-context",l=/(.*)(?:\:host-context)(?:\(((?:\([^)(]*\)|[^)(]*)+?)\))(.*)/,m="::content",n=/\:\:content|\:\:shadow|\/deep\//,o=".",p="["+d+"~=",q="]",r=":",s="class";return c}(),Polymer.StyleExtends=function(){var a=Polymer.StyleUtil;return{hasExtends:function(a){return Boolean(a.match(this.rx.EXTEND))},transform:function(b){var c=a.rulesForStyle(b),d=this;return a.forEachStyleRule(c,function(a){d._mapRule(a);if(a.parent)for(var c;c=d.rx.EXTEND.exec(a.cssText);){var 
e=c[1],f=d._findExtendor(e,a);f&&d._extendRule(a,f)}a.cssText=a.cssText.replace(d.rx.EXTEND,"")}),a.toCssText(c,function(a){a.selector.match(d.rx.STRIP)&&(a.cssText="")},!0)},_mapRule:function(a){if(a.parent){for(var e,b=a.parent.map||(a.parent.map={}),c=a.selector.split(","),d=0;d=0)a=this.valueForProperties(a,b);else{var c=this,d=function(a,d,e,f){var g=c.valueForProperty(b[e],b)||(b[f]?c.valueForProperty(b[f],b):f);return d+(g||"")};a=a.replace(this.rx.VAR_MATCH,d)}return a&&a.trim()||""},valueForProperties:function(a,b){for(var e,f,c=a.split(";"),d=0;d+~])"},HOST_SELECTORS:[":host"],SCOPE_SELECTORS:[":root"],XSCOPE_NAME:"x-scope"}}(),function(){Polymer.StyleCache=function(){this.cache={}},Polymer.StyleCache.prototype={MAX:100,store:function(a,b,c,d){b.keyValues=c,b.styles=d;var e=this.cache[a]=this.cache[a]||[];e.push(b),e.length>this.MAX&&e.shift()},retrieve:function(a,b,c){var d=this.cache[a];if(d)for(var f,e=d.length-1;e>=0;e--)if(f=d[e],c===f.styles&&this._objectsEqual(b,f.keyValues))return f},clear:function(){this.cache={}},_objectsEqual:function(a,b){var c,d;for(var e in a)if(c=a[e],d=b[e],!("object"==typeof c&&c?this._objectsStrictlyEqual(c,d):c===d))return!1;return!Array.isArray(a)||a.length===b.length},_objectsStrictlyEqual:function(a,b){return this._objectsEqual(a,b)&&this._objectsEqual(b,a)}}}(),Polymer.StyleDefaults=function(){var a=Polymer.StyleProperties,c=(Polymer.StyleUtil,Polymer.StyleCache),d={_styles:[],_properties:null,customStyle:{},_styleCache:new c,addStyle:function(a){this._styles.push(a),this._properties=null},get _styleProperties(){return this._properties||(a.decorateStyles(this._styles),this._styles._scopeStyleProperties=null,this._properties=a.scopePropertiesFromStyles(this._styles),a.mixinCustomStyle(this._properties,this.customStyle),a.reify(this._properties)),this._properties},_needsStyleProperties:function(){},_computeStyleProperties:function(){return this._styleProperties},updateStyles:function(a){this._properties=null,a&&Polymer.Base.mixin(this.customStyle,a),this._styleCache.clear();for(var c,b=0;b0&&f.push(c);return[{removed:e,added:f}]}},Polymer.Collection.get=function(a){return Polymer._collections.get(a)||new Polymer.Collection(a)},Polymer.Collection.applySplices=function(a,b){var c=Polymer._collections.get(a);return c?c._applySplices(b):null},Polymer({is:"dom-repeat",extends:"template",properties:{items:{type:Array},as:{type:String,value:"item"},indexAs:{type:String,value:"index"},sort:{type:Function,observer:"_sortChanged"},filter:{type:Function,observer:"_filterChanged"},observe:{type:String,observer:"_observeChanged"},delay:Number},behaviors:[Polymer.Templatizer],observers:["_itemsChanged(items.*)"],created:function(){this._instances=[]},detached:function(){for(var a=0;a=0;j--){var k=g[j];void 0!==k&&(e.push(this._detachRow(k)),c.splice(k,1))}}if(h.length){this._filterFn&&(h=h.filter(function(a){return this._filterFn(b.getItem(a))},this)),h.sort(function(a,c){return this._sortFn(b.getItem(a),b.getItem(c))}.bind(this));for(var l=0,j=0;j>1,j=this._instances[i].__key__,k=h(d.getItem(j),e);if(k<0)a=i+1;else{if(!(k>0)){g=i;break}f=i-1}}return g<0&&(g=f+1),this._instances.splice(g,0,this._insertRow(g,b,c)),g},_applySplicesArrayOrder:function(a){var b=[];this.collection;a.forEach(function(a){for(var c=0;c=0;d--){var e=this._instances[d];e.isPlaceholder&&(this._instances[d]=this._insertRow(d,e.key,b,!0))}},_detachRow:function(a){var b=this._instances[a];if(!b.isPlaceholder)for(var 
d=(Polymer.dom(this).parentNode,0);d=0?(a=this.as+"."+a.substring(c+1),f._notifyPath(a,b,!0)):f.__setProperty(this.as,b,!0))}},itemForElement:function(a){var b=this.modelForElement(a);return b&&b[this.as]},keyForElement:function(a){var b=this.modelForElement(a);return b&&b.__key__},indexForElement:function(a){var b=this.modelForElement(a);return b&&b[this.indexAs]}}),Polymer({is:"array-selector",properties:{items:{type:Array,observer:"clearSelection"},multi:{type:Boolean,value:!1,observer:"clearSelection"},selected:{type:Object,notify:!0},selectedItem:{type:Object,notify:!0},toggle:{type:Boolean,value:!1}},clearSelection:function(){if(Array.isArray(this.selected))for(var a=0;a=65&&a<=90?String.fromCharCode(32+a):a>=112&&a<=123?"f"+(a-112):a>=48&&a<=57?String(48-a):a>=96&&a<=105?String(96-a):b[a]),c}function k(a){return h(a.key)||i(a.keyIdentifier)||j(a.keyCode)||h(a.detail.key)||""}function l(a,b){return k(b)===a.key&&!!b.shiftKey==!!a.shiftKey&&!!b.ctrlKey==!!a.ctrlKey&&!!b.altKey==!!a.altKey&&!!b.metaKey==!!a.metaKey}function m(a){return a.split("+").reduce(function(a,b){var d=b.split(":"),e=d[0],f=d[1];return e in c?a[c[e]]=!0:(a.key=e,a.event=f||"keydown"),a},{combo:a.split(":").shift()})}function n(a){return a.split(" ").map(function(a){return m(a)})}var a={"U+0009":"tab","U+001B":"esc","U+0020":"space","U+002A":"*","U+0030":"0","U+0031":"1","U+0032":"2","U+0033":"3","U+0034":"4","U+0035":"5","U+0036":"6","U+0037":"7","U+0038":"8","U+0039":"9","U+0041":"a","U+0042":"b","U+0043":"c","U+0044":"d","U+0045":"e","U+0046":"f","U+0047":"g","U+0048":"h","U+0049":"i","U+004A":"j","U+004B":"k","U+004C":"l","U+004D":"m","U+004E":"n","U+004F":"o","U+0050":"p","U+0051":"q", +"U+0052":"r","U+0053":"s","U+0054":"t","U+0055":"u","U+0056":"v","U+0057":"w","U+0058":"x","U+0059":"y","U+005A":"z","U+007F":"del"},b={9:"tab",13:"enter",27:"esc",33:"pageup",34:"pagedown",35:"end",36:"home",32:"space",37:"left",38:"up",39:"right",40:"down",46:"del",106:"*"},c={shift:"shiftKey",ctrl:"ctrlKey",alt:"altKey",meta:"metaKey"},d=/[a-z0-9*]/,e=/U\+/,f=/^arrow/,g=/^space(bar)?/;Polymer.IronA11yKeysBehavior={properties:{keyEventTarget:{type:Object,value:function(){return this}},stopKeyboardEventPropagation:{type:Boolean,value:!1},_boundKeyHandlers:{type:Array,value:function(){return[]}},_imperativeKeyBindings:{type:Object,value:function(){return{}}}},observers:["_resetKeyEventListeners(keyEventTarget, _boundKeyHandlers)"],keyBindings:{},registered:function(){this._prepKeyBindings()},attached:function(){this._listenKeyEventListeners()},detached:function(){this._unlistenKeyEventListeners()},addOwnKeyBinding:function(a,b){this._imperativeKeyBindings[a]=b,this._prepKeyBindings(),this._resetKeyEventListeners()},removeOwnKeyBindings:function(){this._imperativeKeyBindings={},this._prepKeyBindings(),this._resetKeyEventListeners()},keyboardEventMatchesKeys:function(a,b){var d,c=n(b);for(d=0;d32&&a.keyCode<41||a.keyCode>111&&a.keyCode<124;return!(b||0==a.charCode&&c)},_onKeypress:function(a){if(this.preventInvalidInput||"number"===this.type){var b=this._patternRegExp;if(b&&!(a.metaKey||a.ctrlKey||a.altKey)){this._patternAlreadyChecked=!0;var c=String.fromCharCode(a.charCode);this._isPrintable(a)&&!b.test(c)&&a.preventDefault()}}},_checkPatternValidity:function(){var a=this._patternRegExp;if(!a)return!0;for(var b=0;bb?1:a>=b?0:NaN}function m(a){return null===a?NaN:+a}function n(a){return!isNaN(a)}function 
o(a){return{left:function(b,c,d,e){for(arguments.length<3&&(d=0),arguments.length<4&&(e=b.length);d>>1;a(b[f],c)<0?d=f+1:e=f}return d},right:function(b,c,d,e){for(arguments.length<3&&(d=0),arguments.length<4&&(e=b.length);d>>1;a(b[f],c)>0?e=f:d=f+1}return d}}}function q(a){return a.length}function s(a){for(var b=1;a*b%1;)b*=10;return b}function t(a,b){for(var c in b)Object.defineProperty(a.prototype,c,{value:b[c],enumerable:!1})}function u(){this._=Object.create(null)}function x(a){return(a+="")===v||a[0]===w?w+a:a}function y(a){return(a+="")[0]===w?a.slice(1):a}function z(a){return x(a)in this._}function A(a){return(a=x(a))in this._&&delete this._[a]}function B(){var a=[];for(var b in this._)a.push(y(b));return a}function C(){var a=0;for(var b in this._)++a;return a}function D(){for(var a in this._)return!1;return!0}function E(){this._=Object.create(null)}function F(a){return a}function G(a,b,c){return function(){var d=c.apply(b,arguments);return d===b?a:d}}function H(a,b){if(b in a)return b;b=b.charAt(0).toUpperCase()+b.slice(1);for(var c=0,d=I.length;c=b&&(b=e+1);!(i=g[b])&&++b0&&(b=b.slice(0,g));var i=pa.get(b);return i&&(b=i,h=ra),g?d?k:j:d?J:l}function qa(b,c){return function(d){var e=a.event;a.event=d,c[0]=this.__data__;try{b.apply(this,c)}finally{a.event=e}}}function ra(a,b){var c=qa(a,b);return function(a){var b=this,d=a.relatedTarget;d&&(d===b||8&d.compareDocumentPosition(b))||c.call(b,a)}}function ua(b){var c=".dragsuppress-"+ ++ta,d="click"+c,g=a.select(f(b)).on("touchmove"+c,M).on("dragstart"+c,M).on("selectstart"+c,M);if(null==sa&&(sa=!("onselectstart"in b)&&H(b.style,"userSelect")),sa){var h=e(b).style,i=h[sa];h[sa]="none"}return function(a){if(g.on(c,null),sa&&(h[sa]=i),a){var b=function(){g.on(d,null)};g.on(d,function(){M(),b()},!0),setTimeout(b,0)}}}function wa(b,c){c.changedTouches&&(c=c.changedTouches[0]);var d=b.ownerSVGElement||b;if(d.createSVGPoint){var e=d.createSVGPoint();if(va<0){var g=f(b);if(g.scrollX||g.scrollY){d=a.select("body").append("svg").style({position:"absolute",top:0,left:0,margin:0,padding:0,border:"none"},"important");var h=d[0][0].getScreenCTM();va=!(h.f||h.e),d.remove()}}return va?(e.x=c.pageX,e.y=c.pageY):(e.x=c.clientX,e.y=c.clientY),e=e.matrixTransform(b.getScreenCTM().inverse()),[e.x,e.y]}var i=b.getBoundingClientRect();return[c.clientX-i.left-b.clientLeft,c.clientY-i.top-b.clientTop]}function xa(){return a.event.changedTouches[0].identifier}function Ga(a){return a>0?1:a<0?-1:0}function Ha(a,b,c){return(b[0]-a[0])*(c[1]-a[1])-(b[1]-a[1])*(c[0]-a[0])}function Ia(a){return a>1?0:a<-1?Aa:Math.acos(a)}function Ja(a){return a>1?Da:a<-1?-Da:Math.asin(a)}function Ka(a){return((a=Math.exp(a))-1/a)/2}function La(a){return((a=Math.exp(a))+1/a)/2}function Ma(a){return((a=Math.exp(2*a))-1)/(a+1)}function Na(a){return(a=Math.sin(a/2))*a}function Ua(){}function Va(a,b,c){return this instanceof Va?(this.h=+a,this.s=+b,void(this.l=+c)):arguments.length<2?a instanceof Va?new Va(a.h,a.s,a.l):pb(""+a,qb,Va):new Va(a,b,c)}function Xa(a,b,c){function f(a){return a>360?a-=360:a<0&&(a+=360),a<60?d+(e-d)*a/60:a<180?e:a<240?d+(e-d)*(240-a)/60:d}function g(a){return Math.round(255*f(a))}var d,e;return a=isNaN(a)?0:(a%=360)<0?a+360:a,b=isNaN(b)?0:b<0?0:b>1?1:b,c=c<0?0:c>1?1:c,e=c<=.5?c*(1+b):c+b-c*b,d=2*c-e,new kb(g(a+120),g(a),g(a-120))}function Ya(b,c,d){return this instanceof Ya?(this.h=+b,this.c=+c,void(this.l=+d)):arguments.length<2?b instanceof Ya?new Ya(b.h,b.c,b.l):b instanceof _a?gb(b.l,b.a,b.b):gb((b=rb((b=a.rgb(b)).r,b.g,b.b)).l,b.a,b.b):new 
Ya(b,c,d)}function $a(a,b,c){return isNaN(a)&&(a=0),isNaN(b)&&(b=0),new _a(c,Math.cos(a*=Ea)*b,Math.sin(a)*b)}function _a(a,b,c){return this instanceof _a?(this.l=+a,this.a=+b,void(this.b=+c)):arguments.length<2?a instanceof _a?new _a(a.l,a.a,a.b):a instanceof Ya?$a(a.h,a.c,a.l):rb((a=kb(a)).r,a.g,a.b):new _a(a,b,c)}function fb(a,b,c){var d=(a+16)/116,e=d+b/500,f=d-c/200;return e=hb(e)*bb,d=hb(d)*cb,f=hb(f)*db,new kb(jb(3.2404542*e-1.5371385*d-.4985314*f),jb(-.969266*e+1.8760108*d+.041556*f),jb(.0556434*e-.2040259*d+1.0572252*f))}function gb(a,b,c){return a>0?new Ya(Math.atan2(c,b)*Fa,Math.sqrt(b*b+c*c),a):new Ya(NaN,NaN,a)}function hb(a){return a>.206893034?a*a*a:(a-4/29)/7.787037}function ib(a){return a>.008856?Math.pow(a,1/3):7.787037*a+4/29}function jb(a){return Math.round(255*(a<=.00304?12.92*a:1.055*Math.pow(a,1/2.4)-.055))}function kb(a,b,c){return this instanceof kb?(this.r=~~a,this.g=~~b,void(this.b=~~c)):arguments.length<2?a instanceof kb?new kb(a.r,a.g,a.b):pb(""+a,kb,Xa):new kb(a,b,c)}function lb(a){return new kb(a>>16,a>>8&255,255&a)}function mb(a){return lb(a)+""}function ob(a){return a<16?"0"+Math.max(0,a).toString(16):Math.min(255,a).toString(16)}function pb(a,b,c){a=a.toLowerCase();var g,h,i,d=0,e=0,f=0;if(g=/([a-z]+)\((.*)\)/.exec(a))switch(h=g[2].split(","),g[1]){case"hsl":return c(parseFloat(h[0]),parseFloat(h[1])/100,parseFloat(h[2])/100);case"rgb":return b(tb(h[0]),tb(h[1]),tb(h[2]))}return(i=ub.get(a))?b(i.r,i.g,i.b):(null==a||"#"!==a.charAt(0)||isNaN(i=parseInt(a.slice(1),16))||(4===a.length?(d=(3840&i)>>4,d|=d>>4,e=240&i,e|=e>>4,f=15&i,f|=f<<4):7===a.length&&(d=(16711680&i)>>16,e=(65280&i)>>8,f=255&i)),b(d,e,f))}function qb(a,b,c){var g,h,d=Math.min(a/=255,b/=255,c/=255),e=Math.max(a,b,c),f=e-d,i=(e+d)/2;return f?(h=i<.5?f/(e+d):f/(2-e-d),g=a==e?(b-c)/f+(b0&&i<1?0:g),new Va(g,h,i)}function rb(a,b,c){a=sb(a),b=sb(b),c=sb(c);var d=ib((.4124564*a+.3575761*b+.1804375*c)/bb),e=ib((.2126729*a+.7151522*b+.072175*c)/cb),f=ib((.0193339*a+.119192*b+.9503041*c)/db);return _a(116*e-16,500*(d-e),200*(e-f))}function sb(a){return(a/=255)<=.04045?a/12.92:Math.pow((a+.055)/1.055,2.4)}function tb(a){var b=parseFloat(a);return"%"===a.charAt(a.length-1)?Math.round(2.55*b):b}function vb(a){return"function"==typeof a?a:function(){return a}}function wb(a){return function(b,c,d){return 2===arguments.length&&"function"==typeof c&&(d=c,c=null),xb(b,c,a,d)}}function xb(b,d,e,f){function l(){var b,a=j.status;if(!a&&zb(j)||a>=200&&a<300||304===a){try{b=e.call(g,j)}catch(a){return void h.error.call(g,a)}h.load.call(g,b)}else h.error.call(g,j)}var g={},h=a.dispatch("beforesend","progress","load","error"),i={},j=new XMLHttpRequest,k=null;return!this.XDomainRequest||"withCredentials"in j||!/^(http(s)?:)?\/\//.test(b)||(j=new XDomainRequest),"onload"in j?j.onload=j.onerror=l:j.onreadystatechange=function(){j.readyState>3&&l()},j.onprogress=function(b){var c=a.event;a.event=b;try{h.progress.call(g,j)}finally{a.event=c}},g.header=function(a,b){return a=(a+"").toLowerCase(),arguments.length<2?i[a]:(null==b?delete i[a]:i[a]=b+"",g)},g.mimeType=function(a){return arguments.length?(d=null==a?null:a+"",g):d},g.responseType=function(a){return arguments.length?(k=a,g):k},g.response=function(a){return e=a,g},["get","post"].forEach(function(a){g[a]=function(){return g.send.apply(g,[a].concat(c(arguments)))}}),g.send=function(a,c,e){if(2===arguments.length&&"function"==typeof c&&(e=c,c=null),j.open(a,b,!0),null==d||"accept"in i||(i.accept=d+",*/*"),j.setRequestHeader)for(var f in 
i)j.setRequestHeader(f,i[f]);return null!=d&&j.overrideMimeType&&j.overrideMimeType(d),null!=k&&(j.responseType=k),null!=e&&g.on("error",e).on("load",function(a){e(null,a)}),h.beforesend.call(g,j),j.send(null==c?null:c),g},g.abort=function(){return j.abort(),g},a.rebind(g,h,"on"),null==f?g:g.get(yb(f))}function yb(a){return 1===a.length?function(b,c){a(null==b?c:null)}:a}function zb(a){var b=a.responseType;return b&&"text"!==b?a.response:a.responseText}function Gb(){var a=Hb(),b=Ib()-a;b>24?(isFinite(b)&&(clearTimeout(Db),Db=setTimeout(Gb,b)),Cb=0):(Cb=1,Fb(Gb))}function Hb(){var a=Date.now();for(Eb=Ab;Eb;)a>=Eb.t&&(Eb.f=Eb.c(a-Eb.t)),Eb=Eb.n;return a}function Ib(){for(var a,b=Ab,c=1/0;b;)b.f?b=a?a.n=b.n:Ab=b.n:(b.t8?function(a){return a/c}:function(a){return a*c},symbol:a}}function Mb(b){var c=b.decimal,d=b.thousands,e=b.grouping,f=b.currency,g=e&&d?function(a,b){for(var c=a.length,f=[],g=0,h=e[0],i=0;c>0&&h>0&&(i+h+1>b&&(h=Math.max(1,b-i)),f.push(a.substring(c-=h,c+h)),!((i+=h+1)>b));)h=e[g=(g+1)%e.length];return f.reverse().join(d)}:F;return function(b){var d=Nb.exec(b),e=d[1]||" ",h=d[2]||">",i=d[3]||"-",j=d[4]||"",k=d[5],l=+d[6],m=d[7],n=d[8],o=d[9],p=1,q="",r="",s=!1,t=!0;switch(n&&(n=+n.substring(1)),(k||"0"===e&&"="===h)&&(k=e="0",h="="),o){case"n":m=!0,o="g";break;case"%":p=100,r="%",o="f";break;case"p":p=100,r="%",o="r";break;case"b":case"o":case"x":case"X":"#"===j&&(q="0"+o.toLowerCase());case"c":t=!1;case"d":s=!0,n=0;break;case"s":p=-1,o="r"}"$"===j&&(q=f[0],r=f[1]),"r"!=o||n||(o="g"),null!=n&&("g"==o?n=Math.max(1,Math.min(21,n)):"e"!=o&&"f"!=o||(n=Math.max(0,Math.min(20,n)))),o=Ob.get(o)||Pb;var u=k&&m;return function(b){var d=r;if(s&&b%1)return"";var f=b<0||0===b&&1/b<0?(b=-b,"-"):"-"===i?"":i;if(p<0){var j=a.formatPrefix(b,n);b=j.scale(b),d=j.symbol+r}else b*=p;b=o(b,n);var w,x,v=b.lastIndexOf(".");if(v<0){var y=t?b.lastIndexOf("e"):-1;y<0?(w=b,x=""):(w=b.substring(0,y),x=b.substring(y))}else w=b.substring(0,v),x=c+b.substring(v+1);!k&&m&&(w=g(w,1/0));var z=q.length+w.length+x.length+(u?0:f.length),A=z"===h?A+f+b:"^"===h?A.substring(0,z>>=1)+f+b+A.substring(z):f+(u?b:A+b))+d}}}function Pb(a){return a+""}function Sb(){this._=new Date(arguments.length>1?Date.UTC.apply(this,arguments):arguments[0])}function Ub(a,b,c){function d(b){var c=a(b),d=f(c,1);return b-c1)for(;g=j)return-1;if(e=b.charCodeAt(h++),37===e){if(g=b.charAt(h++),f=w[g in Xb?b.charAt(h++):g],!f||(d=f(a,c,d))<0)return-1}else if(e!=c.charCodeAt(d++))return-1}return d}function x(a,b,c){p.lastIndex=0;var d=p.exec(b.slice(c));return d?(a.w=q.get(d[0].toLowerCase()),c+d[0].length):-1}function y(a,b,c){n.lastIndex=0;var d=n.exec(b.slice(c));return d?(a.w=o.get(d[0].toLowerCase()),c+d[0].length):-1}function z(a,b,c){t.lastIndex=0;var d=t.exec(b.slice(c));return d?(a.m=u.get(d[0].toLowerCase()),c+d[0].length):-1}function A(a,b,c){r.lastIndex=0;var d=r.exec(b.slice(c));return d?(a.m=s.get(d[0].toLowerCase()),c+d[0].length):-1}function B(a,b,c){return l(a,v.c.toString(),b,c)}function C(a,b,c){return l(a,v.x.toString(),b,c)}function D(a,b,c){return l(a,v.X.toString(),b,c)}function E(a,b,c){var d=m.get(b.slice(c,c+=2).toLowerCase());return null==d?-1:(a.p=d,c)}var c=b.dateTime,d=b.date,e=b.time,f=b.periods,g=b.days,h=b.shortDays,i=b.months,j=b.shortMonths;k.utc=function(a){function c(a){try{Rb=Sb;var c=new Rb;return c._=a,b(c)}finally{Rb=Date}}var b=k(a);return c.parse=function(a){try{Rb=Sb;var c=b.parse(a);return c&&c._}finally{Rb=Date}},c.toString=b.toString,c},k.multi=k.utc.multi=rc;var 
m=a.map(),n=_b(g),o=ac(g),p=_b(h),q=ac(h),r=_b(i),s=ac(i),t=_b(j),u=ac(j);f.forEach(function(a,b){m.set(a.toLowerCase(),b)});var v={a:function(a){return h[a.getDay()]; +},A:function(a){return g[a.getDay()]},b:function(a){return j[a.getMonth()]},B:function(a){return i[a.getMonth()]},c:k(c),d:function(a,b){return $b(a.getDate(),b,2)},e:function(a,b){return $b(a.getDate(),b,2)},H:function(a,b){return $b(a.getHours(),b,2)},I:function(a,b){return $b(a.getHours()%12||12,b,2)},j:function(a,b){return $b(1+Qb.dayOfYear(a),b,3)},L:function(a,b){return $b(a.getMilliseconds(),b,3)},m:function(a,b){return $b(a.getMonth()+1,b,2)},M:function(a,b){return $b(a.getMinutes(),b,2)},p:function(a){return f[+(a.getHours()>=12)]},S:function(a,b){return $b(a.getSeconds(),b,2)},U:function(a,b){return $b(Qb.sundayOfYear(a),b,2)},w:function(a){return a.getDay()},W:function(a,b){return $b(Qb.mondayOfYear(a),b,2)},x:k(d),X:k(e),y:function(a,b){return $b(a.getFullYear()%100,b,2)},Y:function(a,b){return $b(a.getFullYear()%1e4,b,4)},Z:pc,"%":function(){return"%"}},w={a:x,A:y,b:z,B:A,c:B,d:jc,e:jc,H:lc,I:lc,j:kc,L:oc,m:ic,M:mc,p:E,S:nc,U:cc,w:bc,W:dc,x:C,X:D,y:fc,Y:ec,Z:gc,"%":qc};return k}function $b(a,b,c){var d=a<0?"-":"",e=(d?-a:a)+"",f=e.length;return d+(f68?1900:2e3)}function ic(a,b,c){Yb.lastIndex=0;var d=Yb.exec(b.slice(c,c+2));return d?(a.m=d[0]-1,c+d[0].length):-1}function jc(a,b,c){Yb.lastIndex=0;var d=Yb.exec(b.slice(c,c+2));return d?(a.d=+d[0],c+d[0].length):-1}function kc(a,b,c){Yb.lastIndex=0;var d=Yb.exec(b.slice(c,c+3));return d?(a.j=+d[0],c+d[0].length):-1}function lc(a,b,c){Yb.lastIndex=0;var d=Yb.exec(b.slice(c,c+2));return d?(a.H=+d[0],c+d[0].length):-1}function mc(a,b,c){Yb.lastIndex=0;var d=Yb.exec(b.slice(c,c+2));return d?(a.M=+d[0],c+d[0].length):-1}function nc(a,b,c){Yb.lastIndex=0;var d=Yb.exec(b.slice(c,c+2));return d?(a.S=+d[0],c+d[0].length):-1}function oc(a,b,c){Yb.lastIndex=0;var d=Yb.exec(b.slice(c,c+3));return d?(a.L=+d[0],c+d[0].length):-1}function pc(a){var b=a.getTimezoneOffset(),c=b>0?"-":"+",d=r(b)/60|0,e=r(b)%60;return c+$b(d,"0",2)+$b(e,"0",2)}function qc(a,b,c){Zb.lastIndex=0;var d=Zb.exec(b.slice(c,c+1));return d?c+d[0].length:-1}function rc(a){for(var b=a.length,c=-1;++c=0?1:-1,h=g*f,i=Math.cos(b),j=Math.sin(b),k=e*j,l=d*i+k*Math.cos(h),m=k*g*Math.sin(h);Cc.add(Math.atan2(m,l)),c=a,d=i,e=j}var a,b,c,d,e;Dc.point=function(g,h){Dc.point=f,c=(a=g)*Ea,d=Math.cos(h=(b=h)*Ea/2+Aa/4),e=Math.sin(h)},Dc.lineEnd=function(){f(a,b)}}function Fc(a){var b=a[0],c=a[1],d=Math.cos(c);return[d*Math.cos(b),d*Math.sin(b),Math.sin(c)]}function Gc(a,b){return a[0]*b[0]+a[1]*b[1]+a[2]*b[2]}function Hc(a,b){return[a[1]*b[2]-a[2]*b[1],a[2]*b[0]-a[0]*b[2],a[0]*b[1]-a[1]*b[0]]}function Ic(a,b){a[0]+=b[0],a[1]+=b[1],a[2]+=b[2]}function Jc(a,b){return[a[0]*b,a[1]*b,a[2]*b]}function Kc(a){var b=Math.sqrt(a[0]*a[0]+a[1]*a[1]+a[2]*a[2]);a[0]/=b,a[1]/=b,a[2]/=b}function Lc(a){return[Math.atan2(a[1],a[0]),Ja(a[2])]}function Mc(a,b){return r(a[0]-b[0])=0;--h)e.point((m=l[h])[0],m[1])}else d(n.x,n.p.x,-1,e);n=n.p}n=n.o,l=n.z,o=!o}while(!n.v);e.lineEnd()}}}function fd(a){if(b=a.length){for(var b,e,c=0,d=a[0];++c0){for(r||(g.polygonStart(),r=!0),g.lineStart();++e1&&2&a&&b.push(b.pop().concat(b.shift())),o.push(b.filter(id))}var o,s,t,h=c(g),i=f.invert(e[0],e[1]),j={point:k,lineStart:m,lineEnd:n,polygonStart:function(){j.point=u,j.lineStart=v,j.lineEnd=w,o=[],s=[]},polygonEnd:function(){j.point=k,j.lineStart=m,j.lineEnd=n,o=a.merge(o);var 
b=pd(i,s);o.length?(r||(g.polygonStart(),r=!0),ed(o,kd,b,d,g)):b&&(r||(g.polygonStart(),r=!0),g.lineStart(),d(null,null,1,g),g.lineEnd()),r&&(g.polygonEnd(),r=!1),o=s=null},sphere:function(){g.polygonStart(),g.lineStart(),d(null,null,1,g),g.lineEnd(),g.polygonEnd()}},p=jd(),q=c(p),r=!1;return j}}function id(a){return a.length>1}function jd(){var b,a=[];return{lineStart:function(){a.push(b=[])},point:function(a,c){b.push([a,c])},lineEnd:J,buffer:function(){var c=a;return a=[],b=null,c},rejoin:function(){a.length>1&&a.push(a.pop().concat(a.shift()))}}}function kd(a,b){return((a=a.x)[0]<0?a[1]-Da-ya:Da-a[1])-((b=b.x)[0]<0?b[1]-Da-ya:Da-b[1])}function md(a){var e,b=NaN,c=NaN,d=NaN;return{lineStart:function(){a.lineStart(),e=1},point:function(f,g){var h=f>0?Aa:-Aa,i=r(f-b);r(i-Aa)0?Da:-Da),a.point(d,c),a.lineEnd(),a.lineStart(),a.point(h,c),a.point(f,c),e=0):d!==h&&i>=Aa&&(r(b-d)ya?Math.atan((Math.sin(b)*(f=Math.cos(d))*Math.sin(c)-Math.sin(d)*(e=Math.cos(b))*Math.sin(a))/(e*f*g)):(b+d)/2}function od(a,b,c,d){var e;if(null==a)e=c*Da,d.point(-Aa,e),d.point(0,e),d.point(Aa,e),d.point(Aa,0),d.point(Aa,-e),d.point(0,-e),d.point(-Aa,-e),d.point(-Aa,0),d.point(-Aa,e);else if(r(a[0]-b[0])>ya){var f=a[0]=0?1:-1,x=w*v,y=x>Aa,z=o*t;if(Cc.add(Math.atan2(z*w*Math.sin(x),p*u+z*Math.cos(x))),f+=y?v+w*Ba:v,y^m>=c^r>=c){var A=Hc(Fc(l),Fc(a));Kc(A);var B=Hc(e,A);Kc(B);var C=(y^v>=0?-1:1)*Ja(B[2]);(d>C||d===C&&(A[0]||A[1]))&&(g+=y^v>=0?1:-1)}if(!q++)break;m=r,o=t,p=u,l=a}}return(f<-ya||fb}function g(a){var b,e,g,j,k;return{lineStart:function(){j=g=!1,k=1},point:function(l,m){var o,n=[l,m],p=f(l,m),q=c?p?0:i(l,m):p?i(l+(l<0?Aa:-Aa),m):0;if(!b&&(j=g=p)&&a.lineStart(),p!==g&&(o=h(b,n),(Mc(b,o)||Mc(n,o))&&(n[0]+=ya,n[1]+=ya,p=f(n[0],n[1]))),p!==g)k=0,p?(a.lineStart(),o=h(n,b),a.point(o[0],o[1])):(o=h(b,n),a.point(o[0],o[1]),a.lineEnd()),b=o;else if(d&&b&&c^p){var r;q&e||!(r=h(n,b,!0))||(k=0,c?(a.lineStart(),a.point(r[0][0],r[0][1]),a.point(r[1][0],r[1][1]),a.lineEnd()):(a.point(r[1][0],r[1][1]),a.lineEnd(),a.lineStart(),a.point(r[0][0],r[0][1])))}!p||b&&Mc(b,n)||a.point(n[0],n[1]),b=n,g=p,e=q},lineEnd:function(){g&&a.lineEnd(),b=null},clean:function(){return k|(j&&g)<<1}}}function h(a,c,d){var e=Fc(a),f=Fc(c),g=[1,0,0],h=Hc(e,f),i=Gc(h,h),j=h[0],k=i-j*j;if(!k)return!d&&a;var l=b*i/k,m=-b*j/k,n=Hc(g,h),o=Jc(g,l),p=Jc(h,m);Ic(o,p);var q=n,s=Gc(o,q),t=Gc(q,q),u=s*s-t*(Gc(o,o)-1);if(!(u<0)){var v=Math.sqrt(u),w=Jc(q,(-s-v)/t);if(Ic(w,o),w=Lc(w),!d)return w;var B,x=a[0],y=c[0],z=a[1],A=c[1];y0^w[1]<(r(w[0]-x)Aa^(x<=w[0]&&w[0]<=y)){var F=Jc(q,(-s+v)/t);return Ic(F,o),[w,Lc(F)]}}}function i(b,d){var e=c?a:Aa-a,f=0;return b<-e?f|=1:b>e&&(f|=2),d<-e?f|=4:d>e&&(f|=8),f}var b=Math.cos(a),c=b>0,d=r(b)>ya,e=_d(a,6*Ea);return hd(f,g,e,c?[0,-a]:[-Aa,a-Aa])}function rd(a,b,c,d){return function(e){var p,f=e.a,g=e.b,h=f.x,i=f.y,j=g.x,k=g.y,l=0,m=1,n=j-h,o=k-i;if(p=a-h,n||!(p>0)){if(p/=n,n<0){if(p0){if(p>m)return;p>l&&(l=p)}if(p=c-h,n||!(p<0)){if(p/=n,n<0){if(p>m)return;p>l&&(l=p)}else if(n>0){if(p0)){if(p/=o,o<0){if(p0){if(p>m)return;p>l&&(l=p)}if(p=d-i,o||!(p<0)){if(p/=o,o<0){if(p>m)return;p>l&&(l=p)}else if(o>0){if(p0&&(e.a={x:h+l*n,y:i+l*o}),m<1&&(e.b={x:h+m*n,y:i+m*o}),e}}}}}}function td(b,c,d,e){function f(a,e){return r(a[0]-b)0?0:3:r(a[0]-d)0?2:1:r(a[1]-c)0?1:0:e>0?3:2}function g(a,b){return h(a.x,b.x)}function h(a,b){var c=f(a,1),d=f(b,1);return c!==d?c-d:0===c?b[1]-a[1]:1===c?a[0]-b[0]:2===c?a[1]-b[1]:b[0]-a[0]}return function(i){function q(a){for(var 
b=0,c=n.length,d=a[1],e=0;ed&&Ha(i,j,a)>0&&++b:j[1]<=d&&Ha(i,j,a)<0&&--b,i=j;return 0!==b}function r(a,g,i,j){var k=0,l=0;if(null==a||(k=f(a,i))!==(l=f(g,i))||h(a,g)<0^i>0){do j.point(0===k||3===k?b:d,k>1?e:c);while((k=(k+i+4)%4)!==l)}else j.point(g[0],g[1])}function s(a,f){return b<=a&&a<=d&&c<=f&&f<=e}function t(a,b){s(a,b)&&i.point(a,b)}function C(){p.point=E,n&&n.push(o=[]),A=!0,z=!1,x=y=NaN}function D(){m&&(E(u,v),w&&z&&k.rejoin(),m.push(k.buffer())),p.point=t,z&&i.lineEnd()}function E(a,b){a=Math.max(-sd,Math.min(sd,a)),b=Math.max(-sd,Math.min(sd,b));var c=s(a,b);if(n&&o.push([a,b]),A)u=a,v=b,w=c,A=!1,c&&(i.lineStart(),i.point(a,b));else if(c&&z)i.point(a,b);else{var d={a:{x:x,y:y},b:{x:a,y:b}};l(d)?(z||(i.lineStart(),i.point(d.a.x,d.a.y)),i.point(d.b.x,d.b.y),c||i.lineEnd(),B=!1):c&&(i.lineStart(),i.point(a,b),B=!1)}x=a,y=b,z=c}var m,n,o,u,v,w,x,y,z,A,B,j=i,k=jd(),l=rd(b,c,d,e),p={point:t,lineStart:C,lineEnd:D,polygonStart:function(){i=k,m=[],n=[],B=!0},polygonEnd:function(){i=j,m=a.merge(m);var c=q([b,e]),d=B&&c,f=m.length;(d||f)&&(i.polygonStart(),d&&(i.lineStart(),r(null,null,1,i),i.lineEnd()),f&&ed(m,g,c,r,i),i.polygonEnd()),m=n=o=null}};return p}}function ud(a){var b=0,c=Aa/3,d=Td(a),e=d(b,c);return e.parallels=function(a){return arguments.length?d(b=a[0]*Aa/180,c=a[1]*Aa/180):[b/Aa*180,c/Aa*180]},e}function vd(a,b){function g(a,b){var c=Math.sqrt(e-2*d*Math.sin(b))/d;return[c*Math.sin(a*=d),f-c*Math.cos(a)]}var c=Math.sin(a),d=(c+Math.sin(b))/2,e=1+c*(2*d-c),f=Math.sqrt(e)/d;return g.invert=function(a,b){var c=f-b;return[Math.atan2(a,c)/d,Ja((e-(a*a+c*c)*d*d)/(2*d))]},g}function zd(){function e(a,b){xd+=d*a-c*b,c=a,d=b}var a,b,c,d;yd.point=function(f,g){yd.point=e,a=c=f,b=d=g},yd.lineEnd=function(){e(a,b)}}function Fd(a,b){aCd&&(Cd=a),bDd&&(Dd=b)}function Gd(){function d(c,d){b.push("M",c,",",d,a)}function e(a,d){b.push("M",a,",",d),c.point=f}function f(a,c){b.push("L",a,",",c)}function g(){c.point=d}function h(){b.push("Z")}var a=Hd(4.5),b=[],c={point:d,lineStart:function(){c.point=e},lineEnd:g,polygonStart:function(){c.lineEnd=h},polygonEnd:function(){c.lineEnd=g,c.point=d},pointRadius:function(b){return a=Hd(b),c},result:function(){if(b.length){var a=b.join("");return b=[],a}}};return c}function Hd(a){return"m0,"+a+"a"+a+","+a+" 0 1,1 0,"+-2*a+"a"+a+","+a+" 0 1,1 0,"+2*a+"z"}function Jd(a,b){Pc+=a,Qc+=b,++Rc}function Kd(){function c(c,d){var e=c-a,f=d-b,g=Math.sqrt(e*e+f*f);Sc+=g*(a+c)/2,Tc+=g*(b+d)/2,Uc+=g,Jd(a=c,b=d)}var a,b;Id.point=function(d,e){Id.point=c,Jd(a=d,b=e)}}function Ld(){Id.point=Jd}function Md(){function e(a,b){var e=a-c,f=b-d,g=Math.sqrt(e*e+f*f);Sc+=g*(c+a)/2,Tc+=g*(d+b)/2,Uc+=g,g=d*a-c*b,Vc+=g*(c+a),Wc+=g*(d+b),Xc+=3*g,Jd(c=a,d=b)}var a,b,c,d;Id.point=function(f,g){Id.point=e,Jd(a=c=f,b=d=g)},Id.lineEnd=function(){e(a,b)}}function Nd(a){function d(c,d){a.moveTo(c+b,d),a.arc(c,d,b,0,Ba)}function e(b,d){a.moveTo(b,d),c.point=f}function f(b,c){a.lineTo(b,c)}function g(){c.point=d}function h(){a.closePath()}var b=4.5,c={point:d,lineStart:function(){c.point=e},lineEnd:g,polygonStart:function(){c.lineEnd=h},polygonEnd:function(){c.lineEnd=g,c.point=d},pointRadius:function(a){return b=a,c},result:J};return c}function Od(a){function e(a){return(d?g:f)(a)}function f(b){return Rd(b,function(c,d){c=a(c,d),b.point(c[0],c[1])})}function g(b){function s(c,d){c=a(c,d),b.point(c[0],c[1])}function t(){m=NaN,r.point=u,b.lineStart()}function u(c,e){var f=Fc([c,e]),g=a(c,e);h(m,n,l,o,p,q,m=g[0],n=g[1],l=c,o=f[0],p=f[1],q=f[2],d,b),b.point(m,n)}function 
v(){r.point=s,b.lineEnd()}function w(){t(),r.point=x,r.lineEnd=y}function x(a,b){u(c=a,e=b),f=m,g=n,i=o,j=p,k=q,r.point=u}function y(){h(m,n,l,o,p,q,f,g,c,i,j,k,d,b),r.lineEnd=v,v()}var c,e,f,g,i,j,k,l,m,n,o,p,q,r={point:s,lineStart:t,lineEnd:v,polygonStart:function(){b.polygonStart(),r.lineStart=w},polygonEnd:function(){b.polygonEnd(),r.lineStart=t}};return r}function h(d,e,f,g,i,j,k,l,m,n,o,p,q,s){var t=k-d,u=l-e,v=t*t+u*u;if(v>4*b&&q--){var w=g+n,x=i+o,y=j+p,z=Math.sqrt(w*w+x*x+y*y),A=Math.asin(y/=z),B=r(r(y)-1)b||r((t*F+u*G)/v-.5)>.3||g*n+i*o+j*p0&&16,e):Math.sqrt(b)},e}function Pd(a){var b=Od(function(b,c){return a([b*Fa,c*Fa])});return function(a){return Ud(b(a))}}function Qd(a){this.stream=a}function Rd(a,b){return{point:b,sphere:function(){a.sphere()},lineStart:function(){a.lineStart()},lineEnd:function(){a.lineEnd()},polygonStart:function(){a.polygonStart()},polygonEnd:function(){a.polygonEnd()}}}function Sd(a){return Td(function(){return a})()}function Td(b){function v(a){return a=e(a[0]*Ea,a[1]*Ea),[a[0]*g+o,p-a[1]*g]}function w(a){return a=e.invert((a[0]-o)/g,(p-a[1])/g),a&&[a[0]*Fa,a[1]*Fa]}function x(){e=cd(d=Xd(l,m,n),c);var a=c(j,k);return o=h-a[0]*g,p=i+a[1]*g,y()}function y(){return u&&(u.valid=!1,u=null),v}var c,d,e,o,p,u,f=Od(function(a,b){return a=c(a,b),[a[0]*g+o,p-a[1]*g]}),g=150,h=480,i=250,j=0,k=0,l=0,m=0,n=0,q=ld,r=F,s=null,t=null;return v.stream=function(a){return u&&(u.valid=!1),u=Ud(q(d,f(r(a)))),u.valid=!0,u},v.clipAngle=function(a){return arguments.length?(q=null==a?(s=a,ld):qd((s=+a)*Ea),y()):s},v.clipExtent=function(a){return arguments.length?(t=a,r=a?td(a[0][0],a[0][1],a[1][0],a[1][1]):F,y()):t},v.scale=function(a){return arguments.length?(g=+a,x()):g},v.translate=function(a){return arguments.length?(h=+a[0],i=+a[1],x()):[h,i]},v.center=function(a){return arguments.length?(j=a[0]%360*Ea,k=a[1]%360*Ea,x()):[j*Fa,k*Fa]},v.rotate=function(a){return arguments.length?(l=a[0]%360*Ea,m=a[1]%360*Ea,n=a.length>2?a[2]%360*Ea:0,x()):[l*Fa,m*Fa,n*Fa]},a.rebind(v,f,"precision"),function(){return c=b.apply(this,arguments),v.invert=c.invert&&w,x()}}function Ud(a){return Rd(a,function(b,c){a.point(b*Ea,c*Ea)})}function Vd(a,b){return[a,b]}function Wd(a,b){return[a>Aa?a-Ba:a<-Aa?a+Ba:a,b]}function Xd(a,b,c){return a?b||c?cd(Zd(a),$d(b,c)):Zd(a):b||c?$d(b,c):Wd}function Yd(a){return function(b,c){return b+=a,[b>Aa?b-Ba:b<-Aa?b+Ba:b,c]}}function Zd(a){var b=Yd(a);return b.invert=Yd(-a),b}function $d(a,b){function g(a,b){var g=Math.cos(b),h=Math.cos(a)*g,i=Math.sin(a)*g,j=Math.sin(b),k=j*c+h*d;return[Math.atan2(i*e-k*f,h*c-j*d),Ja(k*e+i*f)]}var c=Math.cos(a),d=Math.sin(a),e=Math.cos(b),f=Math.sin(b);return g.invert=function(a,b){var g=Math.cos(b),h=Math.cos(a)*g,i=Math.sin(a)*g,j=Math.sin(b),k=j*e-i*f;return[Math.atan2(i*e+j*f,h*c+k*d),Ja(k*c-h*d)]},g}function _d(a,b){var c=Math.cos(a),d=Math.sin(a);return function(e,f,g,h){var i=g*b;null!=e?(e=ae(c,e),f=ae(c,f),(g>0?ef)&&(e+=g*Ba)):(e=a+g*Ba,f=a-.5*i);for(var j,k=e;g>0?k>f:k0?b<-Da+ya&&(b=-Da+ya):b>Da-ya&&(b=Da-ya);var c=f/Math.pow(d(b),e);return[c*Math.sin(e*a),f-c*Math.cos(e*a)]}var c=Math.cos(a),d=function(a){return Math.tan(Aa/4+a/2)},e=a===b?Math.sin(a):Math.log(c/Math.cos(b))/Math.log(d(b)/d(a)),f=c*Math.pow(d(a),e)/e;return e?(g.invert=function(a,b){var c=f-b,d=Ga(e)*Math.sqrt(a*a+c*c);return[Math.atan2(a,c)/e,2*Math.atan(Math.pow(f/d,1/e))-Da]},g):pe}function ne(a,b){function f(a,b){var c=e-b;return[c*Math.sin(d*a),e-c*Math.cos(d*a)]}var c=Math.cos(a),d=a===b?Math.sin(a):(c-Math.cos(b))/(b-a),e=c/d+a;return 
r(d)1&&Ha(a[c[d-2]],a[c[d-1]],a[e])<=0;)--d;c[d++]=e}return c.slice(0,d)}function xe(a,b){return a[0]-b[0]||a[1]-b[1]}function ze(a,b,c){return(c[0]-b[0])*(a[1]-b[1])<(c[1]-b[1])*(a[0]-b[0])}function Ae(a,b,c,d){var e=a[0],f=c[0],g=b[0]-e,h=d[0]-f,i=a[1],j=c[1],k=b[1]-i,l=d[1]-j,m=(h*(i-j)-l*(e-f))/(l*g-h*k);return[e+m*g,i+m*k]}function Be(a){var b=a[0],c=a[a.length-1];return!(b[0]-c[0]||b[1]-c[1])}function Je(){cf(this),this.edge=this.site=this.circle=null}function Ke(a){var b=Fe.pop()||new Je;return b.site=a,b}function Le(a){Ve(a),Ee.remove(a),Fe.push(a),cf(a)}function Me(a){var b=a.circle,c=b.x,d=b.cy,e={x:c,y:d},f=a.P,g=a.N,h=[a];Le(a);for(var i=f;i.circle&&r(c-i.circle.x)ya)h=h.L;else{if(g=b-Pe(h,c),!(g>ya)){f>-ya?(d=h.P,e=h):g>-ya?(d=h,e=h.N):d=e=h;break}if(!h.R){d=h;break}h=h.R}var i=Ke(a);if(Ee.insert(d,i),d||e){if(d===e)return Ve(d),e=Ke(d.site),Ee.insert(i,e),i.edge=e.edge=Ze(d.site,i.site),Ue(d),void Ue(e);if(!e)return void(i.edge=Ze(d.site,i.site));Ve(d),Ve(e);var j=d.site,k=j.x,l=j.y,m=a.x-k,n=a.y-l,o=e.site,p=o.x-k,q=o.y-l,r=2*(m*q-n*p),s=m*m+n*n,t=p*p+q*q,u={x:(q*s-n*t)/r+k,y:(m*t-p*s)/r+l};_e(e.edge,j,o,u),i.edge=Ze(j,a,null,u),e.edge=Ze(a,o,null,u),Ue(d),Ue(e)}}function Oe(a,b){var c=a.site,d=c.x,e=c.y,f=e-b;if(!f)return d;var g=a.P;if(!g)return-(1/0);c=g.site;var h=c.x,i=c.y,j=i-b;if(!j)return h;var k=h-d,l=1/f-1/j,m=k/j;return l?(-m+Math.sqrt(m*m-2*l*(k*k/(-2*j)-i+j/2+e-f/2)))/l+d:(d+h)/2}function Pe(a,b){var c=a.N;if(c)return Oe(c,b);var d=a.site;return d.y===b?d.x:1/0}function Qe(a){this.site=a,this.edges=[]}function Re(a){for(var f,g,h,i,l,m,n,o,p,q,b=a[0][0],c=a[1][0],d=a[0][1],e=a[1][1],j=De,k=j.length;k--;)if(l=j[k],l&&l.prepare())for(n=l.edges,o=n.length,m=0;mya||r(i-g)>ya)&&(n.splice(m,0,new af($e(l.site,q,r(h-b)ya?{x:b,y:r(f-b)ya?{x:r(g-e)ya?{x:c,y:r(f-c)ya?{x:r(g-d)=-za)){var n=i*i+j*j,o=k*k+l*l,p=(l*n-j*o)/m,q=(i*o-k*n)/m,l=q+h,r=Ie.pop()||new Te;r.arc=a,r.site=e,r.x=p+g,r.y=l+Math.sqrt(p*p+q*q),r.cy=l,a.circle=r;for(var s=null,t=He._;t;)if(r.y=f)return;if(k>m){if(d){if(d.y>=h)return}else d={x:o,y:g};c={x:o,y:h}}else{if(d){if(d.y1)if(k>m){if(d){if(d.y>=h)return}else d={x:(g-r)/q,y:g};c={x:(h-r)/q,y:h}}else{if(d){if(d.y=f)return}else d={x:e,y:q*e+r};c={x:f,y:q*f+r}}else{if(d){if(d.xf||l>g||m=u,x=c>=v,y=x<<1|w,z=y+4;yc&&(f=b.slice(c,f),h[g]?h[g]+=f:h[++g]=f),(d=d[0])===(e=e[0])?h[g]?h[g]+=e:h[++g]=e:(h[++g]=null,i.push({i:g,x:sf(d,e)})),c=vf.lastIndex;return c=0&&!(e=a.interpolators[d](b,c)););return e}function xf(a,b){var h,c=[],d=[],e=a.length,f=b.length,g=Math.min(a.length,b.length);for(h=0;h=1?1:a(b)}}function Cf(a){return function(b){return 1-a(1-b)}}function Df(a){return function(b){return.5*(b<.5?a(2*b):2-a(2-2*b))}}function Ef(a){return a*a}function Ff(a){return a*a*a}function Gf(a){if(a<=0)return 0;if(a>=1)return 1;var b=a*a,c=b*a;return 4*(a<.5?c:3*(a-b)+c-.75)}function Hf(a){return function(b){return Math.pow(b,a)}}function If(a){return 1-Math.cos(a*Da)}function Jf(a){return Math.pow(2,10*(a-1))}function Kf(a){return 1-Math.sqrt(1-a*a)}function Lf(a,b){var c;return arguments.length<2&&(b=.45),arguments.length?c=b/Ba*Math.asin(1/a):(a=1,c=b/4),function(d){return 1+a*Math.pow(2,-10*d)*Math.sin((d-c)*Ba/b)}}function Mf(a){return a||(a=1.70158),function(b){return b*b*((a+1)*b-a)}}function Nf(a){return a<1/2.75?7.5625*a*a:a<2/2.75?7.5625*(a-=1.5/2.75)*a+.75:a<2.5/2.75?7.5625*(a-=2.25/2.75)*a+.9375:7.5625*(a-=2.625/2.75)*a+.984375}function Of(b,c){b=a.hcl(b),c=a.hcl(c);var d=b.h,e=b.c,f=b.l,g=c.h-d,h=c.c-e,i=c.l-f;return 
isNaN(h)&&(h=0,e=isNaN(e)?c.c:e),isNaN(g)?(g=0,d=isNaN(d)?c.h:d):g>180?g-=360:g<-180&&(g+=360),function(a){return $a(d+g*a,e+h*a,f+i*a)+""}}function Pf(b,c){b=a.hsl(b),c=a.hsl(c);var d=b.h,e=b.s,f=b.l,g=c.h-d,h=c.s-e,i=c.l-f;return isNaN(h)&&(h=0,e=isNaN(e)?c.s:e),isNaN(g)?(g=0,d=isNaN(d)?c.h:d):g>180?g-=360:g<-180&&(g+=360),function(a){return Xa(d+g*a,e+h*a,f+i*a)+""}}function Qf(b,c){b=a.lab(b),c=a.lab(c);var d=b.l,e=b.a,f=b.b,g=c.l-d,h=c.a-e,i=c.b-f;return function(a){return fb(d+g*a,e+h*a,f+i*a)+""}}function Rf(a,b){return b-=a,function(c){return Math.round(a+b*c)}}function Sf(a){var b=[a.a,a.b],c=[a.c,a.d],d=Uf(b),e=Tf(b,c),f=Uf(Vf(c,b,-e))||0;b[0]*c[1]180?l+=360:l-k>180&&(k+=360),e.push({i:d.push(d.pop()+"rotate(",null,")")-2,x:sf(k,l)})):l&&d.push(d.pop()+"rotate("+l+")"),m!=n?e.push({i:d.push(d.pop()+"skewX(",null,")")-2,x:sf(m,n)}):n&&d.push(d.pop()+"skewX("+n+")"),o[0]!=p[0]||o[1]!=p[1]?(f=d.push(d.pop()+"scale(",null,",",null,")"),e.push({i:f-4,x:sf(o[0],p[0])},{i:f-2,x:sf(o[1],p[1])})):1==p[0]&&1==p[1]||d.push(d.pop()+"scale("+p+")"),f=e.length,function(a){for(var c,b=-1;++b=0;)c.push(e[d])}function lg(a,b){for(var c=[a],d=[];null!=(a=c.pop());)if(d.push(a),(g=a.children)&&(f=g.length))for(var f,g,e=-1;++ed&&(c=b,d=e);return c}function zg(a){return a.reduce(Ag,0)}function Ag(a,b){return a+b[1]}function Bg(a,b){return Cg(a,Math.ceil(Math.log(b.length)/Math.LN2+1))}function Cg(a,b){for(var c=-1,d=+a[0],e=(a[1]-d)/b,f=[];++c<=b;)f[c]=e*c+d;return f}function Dg(b){return[a.min(b),a.max(b)]}function Eg(a,b){return a.value-b.value}function Fg(a,b){var c=a._pack_next;a._pack_next=b,b._pack_prev=a,b._pack_next=c,c._pack_prev=b}function Gg(a,b){a._pack_next=b,b._pack_prev=a}function Hg(a,b){var c=b.x-a.x,d=b.y-a.y,e=a.r+b.r;return.999*e*e>c*c+d*d}function Ig(a){function n(a){c=Math.min(a.x-a.r,c),d=Math.max(a.x+a.r,d),e=Math.min(a.y-a.r,e),f=Math.max(a.y+a.r,f)}if((b=a.children)&&(m=b.length)){var b,g,h,i,j,k,l,m,c=1/0,d=-(1/0),e=1/0,f=-(1/0);if(b.forEach(Jg),g=b[0],g.x=-g.r,g.y=0,n(g),m>1&&(h=b[1],h.x=h.r,h.y=0,n(h),m>2))for(i=b[2],Mg(g,h,i),n(i),Fg(g,i),g._pack_prev=i,Fg(i,h),h=g._pack_next,j=3;j=0;)f=d[e],f.z+=b,f.m+=b,b+=f.s+(c+=f.c)}function Sg(a,b,c){return a.a.parent===b.parent?a.a:c}function Tg(b){return 1+a.max(b,function(a){return a.y})}function Ug(a){return a.reduce(function(a,b){return a+b.x},0)/a.length}function Vg(a){var b=a.children;return b&&b.length?Vg(b[0]):a}function Wg(a){var c,b=a.children;return b&&(c=b.length)?Wg(b[c-1]):a}function Xg(a){return{x:a.x,y:a.y,dx:a.dx,dy:a.dy}}function Yg(a,b){var c=a.x+b[3],d=a.y+b[0],e=a.dx-b[1]-b[3],f=a.dy-b[0]-b[2];return e<0&&(c+=e/2,e=0),f<0&&(d+=f/2,f=0),{x:c,y:d,dx:e,dy:f}}function Zg(a){var b=a[0],c=a[a.length-1];return b2?dh:_g,i=d?Zf:Yf;return e=g(a,b,i,c),f=g(b,a,i,wf),h}function h(a){return e(a)}var e,f;return h.invert=function(a){return f(a)},h.domain=function(b){return arguments.length?(a=b.map(Number),g()):a},h.range=function(a){return arguments.length?(b=a,g()):b},h.rangeRound=function(a){return h.range(a).interpolate(Rf)},h.clamp=function(a){return arguments.length?(d=a,g()):d},h.interpolate=function(a){return arguments.length?(c=a,g()):c},h.ticks=function(b){return ih(a,b)},h.tickFormat=function(b,c){return jh(a,b,c)},h.nice=function(b){return gh(a,b),g()},h.copy=function(){return eh(a,b,c,d)},g()}function fh(b,c){return a.rebind(b,c,"range","rangeRound","interpolate","clamp")}function gh(a,b){return ah(a,bh(hh(a,b)[2]))}function hh(a,b){null==b&&(b=10);var 
c=Zg(a),d=c[1]-c[0],e=Math.pow(10,Math.floor(Math.log(d/b)/Math.LN10)),f=b/d*e;return f<=.15?e*=10:f<=.35?e*=5:f<=.75&&(e*=2),c[0]=Math.ceil(c[0]/e)*e,c[1]=Math.floor(c[1]/e)*e+.5*e,c[2]=e,c}function ih(b,c){return a.range.apply(a,hh(b,c))}function jh(b,c,d){var e=hh(b,c);if(d){var f=Nb.exec(d);if(f.shift(),"s"===f[8]){var g=a.formatPrefix(Math.max(r(e[0]),r(e[1])));return f[7]||(f[7]="."+lh(g.scale(e[2]))),f[8]="f",d=a.format(f.join("")),function(a){return d(g.scale(a))+g.symbol}}f[7]||(f[7]="."+mh(f[8],e)),d=f.join("")}else d=",."+lh(e[2])+"f";return a.format(d)}function lh(a){return-Math.floor(Math.log(a)/Math.LN10+.01)}function mh(a,b){var c=lh(b[2]);return a in kh?Math.abs(c-lh(Math.max(r(b[0]),r(b[1]))))+ +("e"!==a):c-2*("%"===a)}function nh(b,c,d,e){function f(a){return(d?Math.log(a<0?0:a):-Math.log(a>0?0:-a))/Math.log(c)}function g(a){return d?Math.pow(c,a):-Math.pow(c,-a)}function h(a){return b(f(a))}return h.invert=function(a){return g(b.invert(a))},h.domain=function(a){return arguments.length?(d=a[0]>=0,b.domain((e=a.map(Number)).map(f)),h):e},h.base=function(a){return arguments.length?(c=+a,b.domain(e.map(f)),h):c},h.nice=function(){var a=ah(e.map(f),d?Math:ph);return b.domain(a),e=a.map(g),h},h.ticks=function(){var a=Zg(e),b=[],h=a[0],i=a[1],j=Math.floor(f(h)),k=Math.ceil(f(i)),l=c%1?2:c;if(isFinite(k-j)){if(d){for(;j0;m--)b.push(g(j)*m);for(j=0;b[j]i;k--);b=b.slice(j,k)}return b},h.tickFormat=function(b,c){if(!arguments.length)return oh;arguments.length<2?c=oh:"function"!=typeof c&&(c=a.format(c));var j,e=Math.max(.1,b/h.ticks().length),i=d?(j=1e-12,Math.ceil):(j=-1e-12,Math.floor);return function(a){return a/g(i(f(a)+j))<=e?c(a):""}},h.copy=function(){return nh(b.copy(),c,d,e)},fh(h,b)}function qh(a,b,c){function f(b){return a(d(b))}var d=rh(b),e=rh(1/b);return f.invert=function(b){return e(a.invert(b))},f.domain=function(b){return arguments.length?(a.domain((c=b.map(Number)).map(d)),f):c},f.ticks=function(a){return ih(c,a)},f.tickFormat=function(a,b){return jh(c,a,b)},f.nice=function(a){return f.domain(gh(c,a))},f.exponent=function(g){return arguments.length?(d=rh(b=g),e=rh(1/b),a.domain(c.map(d)),f):b},f.copy=function(){return qh(a.copy(),b,c)},fh(f,a)}function rh(a){return function(b){return b<0?-Math.pow(-b,a):Math.pow(b,a)}}function sh(b,c){function g(a){return e[((d.get(a)||("range"===c.t?d.set(a,b.push(a)):NaN))-1)%e.length]}function h(c,d){return a.range(b.length).map(function(a){return c+d*a})}var d,e,f;return g.domain=function(a){if(!arguments.length)return b;b=[],d=new u;for(var h,e=-1,f=a.length;++e0?d[a-1]:b[0],a0?0:1}function Jh(a,b,c,d,e){var f=a[0]-b[0],g=a[1]-b[1],h=(e?d:-d)/Math.sqrt(f*f+g*g),i=h*g,j=-h*f,k=a[0]+i,l=a[1]+j,m=b[0]+i,n=b[1]+j,o=(k+m)/2,p=(l+n)/2,q=m-k,r=n-l,s=q*q+r*r,t=c-d,u=k*n-m*l,v=(r<0?-1:1)*Math.sqrt(t*t*s-u*u),w=(u*r-q*v)/s,x=(-u*q-r*v)/s,y=(u*r+q*v)/s,z=(-u*q+r*v)/s,A=w-o,B=x-p,C=y-o,D=z-p;return A*A+B*B>C*C+D*D&&(w=y,x=z),[[w-i,x-j],[w*c/t,x*c/t]]}function Kh(a){function h(f){function o(){h.push("M",e(a(i),g))}for(var l,h=[],i=[],j=-1,k=f.length,m=vb(b),n=vb(c);++j1&&e.push("H",d[0]),e.join("")}function Ph(a){for(var b=0,c=a.length,d=a[0],e=[d[0],",",d[1]];++b1){h=b[1],f=a[i],i++,d+="C"+(e[0]+g[0])+","+(e[1]+g[1])+","+(f[0]-h[0])+","+(f[1]-h[1])+","+f[0]+","+f[1];for(var j=2;j9&&(f=3*c/Math.sqrt(f),g[h]=f*d,g[h+1]=f*e));for(h=-1;++h<=i;)f=(a[Math.min(i,h+1)][0]-a[Math.max(0,h-1)][0])/(6*(1+g[h]*g[h])),b.push([f||0,g[h]*f||0]);return b}function gi(a){return a.length<3?Mh(a):a[0]+Uh(a,fi(a))}function hi(a){for(var 
b,e,f,c=-1,d=a.length;++ce)return q();var f=g[g.active];f&&(--g.count,delete g[g.active],f.event&&f.event.interrupt.call(b,b.__data__,f.index)),g.active=e,h.event&&h.event.start.call(b,b.__data__,c),h.tween.forEach(function(a,d){(d=d.call(b,b.__data__,c))&&n.push(d)}),l=h.ease,k=h.duration,a.timer(function(){return m.c=p(d||1)?dd:p,1},0,i)}function p(a){if(g.active!==e)return 1;for(var d=a/k,f=l(d),i=n.length;i>0;)n[--i].call(b,f);return d>=1?(h.event&&h.event.end.call(b,b.__data__,c),q()):void 0}function q(){return--g.count?delete g[e]:delete b[d],1}var k,l,j=h.delay,m=Eb,n=[];return m.t=j+i,j<=f?o(f-j):void(m.c=o)},0,i)}}function Fi(a,b,c){a.attr("transform",function(a){var d=b(a);return"translate("+(isFinite(d)?d:c(a))+",0)"})}function Gi(a,b,c){a.attr("transform",function(a){var d=b(a);return"translate(0,"+(isFinite(d)?d:c(a))+")"})}function Mi(a){return a.toISOString()}function Ni(b,c,d){function e(a){return b(a)}function f(b,d){var e=b[1]-b[0],f=e/d,g=a.bisect(Pi,f);return g==Pi.length?[c.year,hh(b.map(function(a){return a/31536e6}),d)[2]]:g?c[f/Pi[g-1]1?{floor:function(b){for(;h(b=a.floor(b));)b=Oi(b-1);return b},ceil:function(b){for(;h(b=a.ceil(b));)b=Oi(+b+1);return b}}:a))},e.ticks=function(a,b){var c=Zg(e.domain()),d=null==a?f(c,10):"number"==typeof a?f(c,a):!a.range&&[{range:a},b];return d&&(a=d[0],b=d[1]),a.range(c[0],Oi(+c[1]+1),b<1?1:b)},e.tickFormat=function(){return d},e.copy=function(){return Ni(b.copy(),c,d)},fh(e,b)}function Oi(a){return new Date(a)}function Vi(a){return JSON.parse(a.responseText)}function Wi(a){var b=d.createRange();return b.selectNode(d.body),b.createContextualFragment(a.responseText)}var a={version:"3.5.6"},b=[].slice,c=function(a){return b.call(a)},d=this.document;if(d)try{c(d.documentElement.childNodes)[0].nodeType}catch(a){c=function(a){for(var b=a.length,c=new Array(b);b--;)c[b]=a[b];return c}}if(Date.now||(Date.now=function(){return+new Date}),d)try{d.createElement("DIV").style.setProperty("opacity",0,"")}catch(a){var g=this.Element.prototype,h=g.setAttribute,i=g.setAttributeNS,j=this.CSSStyleDeclaration.prototype,k=j.setProperty;g.setAttribute=function(a,b){h.call(this,a,b+"")},g.setAttributeNS=function(a,b,c){i.call(this,a,b,c+"")},j.setProperty=function(a,b,c){k.call(this,a,b+"",c)}}a.ascending=l,a.descending=function(a,b){return ba?1:b>=a?0:NaN},a.min=function(a,b){var e,f,c=-1,d=a.length;if(1===arguments.length){for(;++c=f){e=f;break}for(;++cf&&(e=f)}else{for(;++c=f){e=f;break}for(;++cf&&(e=f)}return e},a.max=function(a,b){var e,f,c=-1,d=a.length;if(1===arguments.length){for(;++c=f){e=f;break}for(;++ce&&(e=f)}else{for(;++c=f){e=f;break}for(;++ce&&(e=f)}return e},a.extent=function(a,b){var e,f,g,c=-1,d=a.length;if(1===arguments.length){for(;++c=f){e=g=f;break}for(;++cf&&(e=f),g=f){e=g=f;break}for(;++cf&&(e=f),g1)return g/(i-1)},a.deviation=function(){var b=a.variance.apply(this,arguments);return b?Math.sqrt(b):b};var p=o(l);a.bisectLeft=p.left,a.bisect=a.bisectRight=p.right,a.bisector=function(a){return o(1===a.length?function(b,c){return l(a(b),c)}:a)},a.shuffle=function(a,b,c){(d=arguments.length)<3&&(c=a.length,d<2&&(b=0));for(var e,f,d=c-b;d;)f=Math.random()*d--|0,e=a[d+b],a[d+b]=a[f+b],a[f+b]=e;return a},a.permute=function(a,b){for(var c=b.length,d=new Array(c);c--;)d[c]=a[b[c]];return d},a.pairs=function(a){for(var d,b=0,c=a.length-1,e=a[0],f=new Array(c<0?0:c);b=0;)for(g=a[b],c=g.length;--c>=0;)f[--e]=g[c];return f};var r=Math.abs;a.range=function(a,b,c){if(arguments.length<3&&(c=1,arguments.length<2&&(b=a,a=0)),(b-a)/c===1/0)throw 
new Error("infinite range");var g,d=[],e=s(r(c)),f=-1;if(a*=e,b*=e,c*=e,c<0)for(;(g=a+c*++f)>b;)d.push(g/e);else for(;(g=a+c*++f)=c.length)return f?f.call(b,d):e?d.sort(e):d;for(var l,m,n,p,i=-1,j=d.length,k=c[h++],o=new u;++i=c.length)return a;var e=[],f=d[b++];return a.forEach(function(a,c){e.push({key:a,values:h(c,b)})}),f?e.sort(function(a,b){return f(a.key,b.key)}):e}var e,f,b={},c=[],d=[];return b.map=function(a,b){return g(b,a,0)},b.entries=function(b){return h(g(a.map,b,0),0)},b.key=function(a){return c.push(a),b},b.sortKeys=function(a){return d[c.length-1]=a,b},b.sortValues=function(a){return e=a,b},b.rollup=function(a){return f=a,b},b},a.set=function(a){var b=new E;if(a)for(var c=0,d=a.length;c=0&&(d=a.slice(c+1),a=a.slice(0,c)),a)return arguments.length<2?this[a].on(d):this[a].on(d,b);if(2===arguments.length){if(null==b)for(a in this)this.hasOwnProperty(a)&&this[a].on(d,null);return this}},a.event=null,a.requote=function(a){return a.replace(P,"\\$&")};var P=/[\\\^\$\*\+\?\|\[\]\(\)\.\{\}]/g,Q={}.__proto__?function(a,b){a.__proto__=b}:function(a,b){for(var c in b)a[c]=b[c]},S=function(a,b){return b.querySelector(a)},T=function(a,b){return b.querySelectorAll(a)},U=function(a,b){var c=a.matches||a[H(a,"matchesSelector")];return(U=function(a,b){return c.call(a,b)})(a,b)};"function"==typeof Sizzle&&(S=function(a,b){return Sizzle(a,b)[0]||null},T=Sizzle,U=Sizzle.matchesSelector),a.selection=function(){return a.select(d.documentElement)};var V=a.selection.prototype=[];V.select=function(a){var c,d,e,f,b=[];a=W(a);for(var g=-1,h=this.length;++g=0&&(c=a.slice(0,b),a=a.slice(b+1)),Y.hasOwnProperty(c)?{space:Y[c],local:a}:a}},V.attr=function(b,c){if(arguments.length<2){if("string"==typeof b){var d=this.node();return b=a.ns.qualify(b),b.local?d.getAttributeNS(b.space,b.local):d.getAttribute(b)}for(c in b)this.each(Z(c,b[c]));return this}return this.each(Z(b,c))},V.classed=function(a,b){if(arguments.length<2){if("string"==typeof a){var c=this.node(),d=(a=aa(a)).length,e=-1;if(b=c.classList){for(;++e=0;)(f=c[d])&&(e&&e!==f.nextSibling&&e.parentNode.insertBefore(f,e),e=f);return this},V.sort=function(a){a=ja.apply(this,arguments);for(var b=-1,c=this.length;++b0&&(c=c.transition().duration(j)),c.call(w.event)}function C(){t&&t.domain(s.range().map(function(a){return(a-b.x)/b.k}).map(s.invert)),v&&v.domain(u.range().map(function(a){return(a-b.y)/b.k}).map(u.invert))}function D(a){k++||a({type:"zoomstart"})}function E(a){C(),a({type:"zoom",scale:b.k,translate:[b.x,b.y]})}function F(a){--k||(a({type:"zoomend"}),e=null)}function G(){function j(){e=1,A(a.mouse(b),h),E(d)}function k(){g.on(m,null).on(n,null),i(e&&a.event.target===c),F(d)}var b=this,c=a.event.target,d=r.of(b,arguments),e=0,g=a.select(f(b)).on(m,j).on(n,k),h=x(a.mouse(b)),i=ua(b);si.call(b),D(d)}function H(){function o(){var d=a.touches(c);return g=b.k,d.forEach(function(a){a.identifier in e&&(e[a.identifier]=x(a))}),d}function s(){var d=a.event.target;a.select(d).on(i,t).on(j,u),k.push(d);for(var g=a.event.changedTouches,h=0,l=g.length;h1){var p=m[0],r=m[1],s=p[0]-r[0],v=p[1]-r[1];f=s*s+v*v}}function t(){var h,i,j,k,b=a.touches(c);si.call(c);for(var l=0,m=b.length;l=g)return e;if(k)return k=!1,c;var b=h;if(34===a.charCodeAt(b)){for(var f=b;f++=^]))?([+\- ])?([$#])?(0)?(\d+)?(,)?(\.-?\d+)?([a-z%])?/i,Ob=a.map({b:function(a){return a.toString(2)},c:function(a){return String.fromCharCode(a)},o:function(a){return a.toString(8)},x:function(a){return a.toString(16)},X:function(a){return a.toString(16).toUpperCase()},g:function(a,b){return 
a.toPrecision(b)},e:function(a,b){return a.toExponential(b)},f:function(a,b){return a.toFixed(b)},r:function(b,c){return(b=a.round(b,Jb(b,c))).toFixed(Math.max(0,Math.min(20,Jb(b*(1+1e-15),c))))}}),Qb=a.time={},Rb=Date;Sb.prototype={getDate:function(){return this._.getUTCDate()},getDay:function(){return this._.getUTCDay()},getFullYear:function(){return this._.getUTCFullYear()},getHours:function(){return this._.getUTCHours()},getMilliseconds:function(){return this._.getUTCMilliseconds()},getMinutes:function(){return this._.getUTCMinutes()},getMonth:function(){return this._.getUTCMonth()},getSeconds:function(){return this._.getUTCSeconds()},getTime:function(){return this._.getTime()},getTimezoneOffset:function(){return 0},valueOf:function(){return this._.valueOf()},setDate:function(){Tb.setUTCDate.apply(this._,arguments)},setDay:function(){Tb.setUTCDay.apply(this._,arguments)},setFullYear:function(){Tb.setUTCFullYear.apply(this._,arguments)},setHours:function(){Tb.setUTCHours.apply(this._,arguments)},setMilliseconds:function(){Tb.setUTCMilliseconds.apply(this._,arguments)},setMinutes:function(){Tb.setUTCMinutes.apply(this._,arguments)},setMonth:function(){Tb.setUTCMonth.apply(this._,arguments)},setSeconds:function(){Tb.setUTCSeconds.apply(this._,arguments)},setTime:function(){Tb.setTime.apply(this._,arguments)}};var Tb=Date.prototype;Qb.year=Ub(function(a){return a=Qb.day(a),a.setMonth(0,1),a},function(a,b){a.setFullYear(a.getFullYear()+b)},function(a){return a.getFullYear()}),Qb.years=Qb.year.range,Qb.years.utc=Qb.year.utc.range,Qb.day=Ub(function(a){var b=new Rb(2e3,0);return b.setFullYear(a.getFullYear(),a.getMonth(),a.getDate()),b},function(a,b){a.setDate(a.getDate()+b)},function(a){return a.getDate()-1}),Qb.days=Qb.day.range,Qb.days.utc=Qb.day.utc.range,Qb.dayOfYear=function(a){var b=Qb.year(a);return Math.floor((a-b-6e4*(a.getTimezoneOffset()-b.getTimezoneOffset()))/864e5)},["sunday","monday","tuesday","wednesday","thursday","friday","saturday"].forEach(function(a,b){b=7-b;var c=Qb[a]=Ub(function(a){return(a=Qb.day(a)).setDate(a.getDate()-(a.getDay()+b)%7),a},function(a,b){a.setDate(a.getDate()+7*Math.floor(b))},function(a){var c=Qb.year(a).getDay();return Math.floor((Qb.dayOfYear(a)+(c+b)%7)/7)-(c!==b)});Qb[a+"s"]=c.range,Qb[a+"s"].utc=c.utc.range,Qb[a+"OfYear"]=function(a){var c=Qb.year(a).getDay();return Math.floor((Qb.dayOfYear(a)+(c+b)%7)/7)}}),Qb.week=Qb.sunday,Qb.weeks=Qb.sunday.range,Qb.weeks.utc=Qb.sunday.utc.range,Qb.weekOfYear=Qb.sundayOfYear;var Xb={"-":"",_:" ",0:"0"},Yb=/^\s*\d+/,Zb=/^%/;a.locale=function(a){return{numberFormat:Mb(a),timeFormat:Wb(a)}};var sc=a.locale({decimal:".",thousands:",",grouping:[3],currency:["$",""],dateTime:"%a %b %e %X %Y",date:"%m/%d/%Y",time:"%H:%M:%S",periods:["AM","PM"],days:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],shortDays:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],months:["January","February","March","April","May","June","July","August","September","October","November","December"],shortMonths:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]});a.format=sc.numberFormat,a.geo={},tc.prototype={s:0,t:0,add:function(a){vc(a,this.t,uc),vc(uc.s,this.s,this),this.s?this.t+=uc.t:this.s=uc.t},reset:function(){this.s=this.t=0},valueOf:function(){return this.s}};var uc=new tc;a.geo.stream=function(a,b){a&&xc.hasOwnProperty(a.type)?xc[a.type](a,b):wc(a,b)};var xc={Feature:function(a,b){wc(a.geometry,b)},FeatureCollection:function(a,b){for(var c=a.features,d=-1,e=c.length;++de&&(e=f)}function 
o(a,g){var h=Fc([a*Ea,g*Ea]);if(i){var j=Hc(i,h),k=[j[1],-j[0],0],l=Hc(k,j);Kc(l),l=Lc(l);var m=a-f,o=m>0?1:-1,p=l[0]*Fa*o,q=r(m)>180;if(q^(o*fe&&(e=s)}else if(p=(p+360)%360-180,q^(o*fe&&(e=g);q?av(b,d)&&(d=a):v(a,d)>v(b,d)&&(b=a):d>=b?(ad&&(d=a)):a>f?v(b,a)>v(b,d)&&(d=a):v(a,d)>v(b,d)&&(b=a)}else n(a,g);i=h,f=a}function p(){m.point=o}function q(){l[0]=b,l[1]=d,m.point=n,i=null}function s(a,b){if(i){var c=a-f;j+=r(c)>180?c+(c>0?360:-360):c}else g=a,h=b;Dc.point(a,b),o(a,b)}function t(){Dc.lineStart()}function u(){s(g,h),Dc.lineEnd(),r(j)>ya&&(b=-(d=180)),l[0]=b,l[1]=d,i=null}function v(a,b){return(b-=a)<0?b+360:b}function w(a,b){return a[0]-b[0]}function x(a,b){return b[0]<=b[1]?b[0]<=a&&a<=b[1]:aya?e=90:j<-ya&&(c=-90),l[0]=b,l[1]=d}};return function(f){e=d=-(b=c=1/0),k=[],a.geo.stream(f,m);var g=k.length;if(g){k.sort(w);for(var j,h=1,i=k[0],n=[i];hv(i[0],i[1])&&(i[1]=j[1]),v(j[0],i[1])>v(i[0],i[1])&&(i[0]=j[0])):n.push(i=j);for(var p,j,o=-(1/0),g=n.length-1,h=0,i=n[g];h<=g;i=j,++h)j=n[h],(p=v(i[1],j[0]))>o&&(o=p,b=j[0],d=i[1])}return k=l=null,b===1/0||c===1/0?[[NaN,NaN],[NaN,NaN]]:[[b,c],[d,e]]}}(),a.geo.centroid=function(b){Nc=Oc=Pc=Qc=Rc=Sc=Tc=Uc=Vc=Wc=Xc=0,a.geo.stream(b,Yc);var c=Vc,d=Wc,e=Xc,f=c*c+d*d+e*e;return f=.12&&h<.234&&g>=-.425&&g<-.214?c:h>=.166&&h<.234&&g>=-.214&&g<-.115?d:b).invert(a)},j.stream=function(a){var e=b.stream(a),f=c.stream(a),g=d.stream(a);return{point:function(a,b){e.point(a,b),f.point(a,b),g.point(a,b)},sphere:function(){e.sphere(),f.sphere(),g.sphere()},lineStart:function(){e.lineStart(),f.lineStart(),g.lineStart()},lineEnd:function(){e.lineEnd(),f.lineEnd(),g.lineEnd()},polygonStart:function(){e.polygonStart(),f.polygonStart(),g.polygonStart()},polygonEnd:function(){e.polygonEnd(),f.polygonEnd(),g.polygonEnd()}}},j.precision=function(a){return arguments.length?(b.precision(a),c.precision(a),d.precision(a),j):b.precision()},j.scale=function(a){return arguments.length?(b.scale(a),c.scale(.35*a),d.scale(a),j.translate(b.translate())):b.scale()},j.translate=function(a){if(!arguments.length)return b.translate();var e=b.scale(),k=+a[0],l=+a[1];return g=b.translate(a).clipExtent([[k-.455*e,l-.238*e],[k+.455*e,l+.238*e]]).stream(f).point,h=c.translate([k-.307*e,l+.201*e]).clipExtent([[k-.425*e+ya,l+.12*e+ya],[k-.214*e-ya,l+.234*e-ya]]).stream(f).point,i=d.translate([k-.205*e,l+.212*e]).clipExtent([[k-.214*e+ya,l+.166*e+ya],[k-.115*e-ya,l+.234*e-ya]]).stream(f).point,j},j.scale(1070)};var wd,xd,Ad,Bd,Cd,Dd,yd={point:J,lineStart:J,lineEnd:J,polygonStart:function(){xd=0,yd.lineStart=zd},polygonEnd:function(){yd.lineStart=yd.lineEnd=yd.point=J,wd+=r(xd/2)}},Ed={point:Fd,lineStart:J,lineEnd:J,polygonStart:J,polygonEnd:J},Id={point:Jd,lineStart:Kd,lineEnd:Ld,polygonStart:function(){Id.lineStart=Md},polygonEnd:function(){Id.point=Jd,Id.lineStart=Kd,Id.lineEnd=Ld}};a.geo.path=function(){function h(c){return c&&("function"==typeof b&&f.pointRadius(+b.apply(this,arguments)),g&&g.valid||(g=e(f)),a.geo.stream(c,g)),f.result()}function i(){return g=null,h}var c,d,e,f,g,b=4.5;return h.area=function(b){return wd=0,a.geo.stream(b,e(yd)),wd},h.centroid=function(b){return Pc=Qc=Rc=Sc=Tc=Uc=Vc=Wc=Xc=0,a.geo.stream(b,e(Id)),Xc?[Vc/Xc,Wc/Xc]:Uc?[Sc/Uc,Tc/Uc]:Rc?[Pc/Rc,Qc/Rc]:[NaN,NaN]},h.bounds=function(b){return Cd=Dd=-(Ad=Bd=1/0),a.geo.stream(b,e(Ed)),[[Ad,Bd],[Cd,Dd]]},h.projection=function(a){return arguments.length?(e=(c=a)?a.stream||Pd(a):F,i()):c},h.context=function(a){return arguments.length?(f=null==(d=a)?new Gd:new Nd(a),"function"!=typeof 
b&&f.pointRadius(b),i()):d},h.pointRadius=function(a){return arguments.length?(b="function"==typeof a?a:(f.pointRadius(+a),+a),h):b},h.projection(a.geo.albersUsa()).context(null)},a.geo.transform=function(a){return{stream:function(b){var c=new Qd(b);for(var d in a)c[d]=a[d];return c}}},Qd.prototype={point:function(a,b){this.stream.point(a,b)},sphere:function(){this.stream.sphere()},lineStart:function(){this.stream.lineStart()},lineEnd:function(){this.stream.lineEnd()},polygonStart:function(){this.stream.polygonStart()},polygonEnd:function(){this.stream.polygonEnd()}},a.geo.projection=Sd,a.geo.projectionMutator=Td,(a.geo.equirectangular=function(){return Sd(Vd)}).raw=Vd.invert=Vd,a.geo.rotation=function(a){function b(b){return b=a(b[0]*Ea,b[1]*Ea),b[0]*=Fa,b[1]*=Fa,b}return a=Xd(a[0]%360*Ea,a[1]*Ea,a.length>2?a[2]*Ea:0),b.invert=function(b){return b=a.invert(b[0]*Ea,b[1]*Ea),b[0]*=Fa,b[1]*=Fa,b},b},Wd.invert=Vd,a.geo.circle=function(){function e(){var b="function"==typeof a?a.apply(this,arguments):a,c=Xd(-b[0]*Ea,-b[1]*Ea,0).invert,e=[];return d(null,null,1,{point:function(a,b){e.push(a=c(a,b)),a[0]*=Fa,a[1]*=Fa}}),{type:"Polygon",coordinates:[e]}}var b,d,a=[0,0],c=6;return e.origin=function(b){return arguments.length?(a=b,e):a},e.angle=function(a){return arguments.length?(d=_d((b=+a)*Ea,c*Ea),e):b},e.precision=function(a){return arguments.length?(d=_d(b*Ea,(c=+a)*Ea),e):c},e.angle(90)},a.geo.distance=function(a,b){var l,c=(b[0]-a[0])*Ea,d=a[1]*Ea,e=b[1]*Ea,f=Math.sin(c),g=Math.cos(c),h=Math.sin(d),i=Math.cos(d),j=Math.sin(e),k=Math.cos(e);return Math.atan2(Math.sqrt((l=k*f)*l+(l=i*j-h*k*g)*l),h*j+i*k*g)},a.geo.graticule=function(){function t(){return{type:"MultiLineString",coordinates:u()}}function u(){return a.range(Math.ceil(e/l)*l,d,l).map(p).concat(a.range(Math.ceil(i/m)*m,h,m).map(q)).concat(a.range(Math.ceil(c/j)*j,b,j).filter(function(a){return r(a%l)>ya}).map(n)).concat(a.range(Math.ceil(g/k)*k,f,k).filter(function(a){return r(a%m)>ya}).map(o))}var b,c,d,e,f,g,h,i,n,o,p,q,j=10,k=j,l=90,m=360,s=2.5;return t.lines=function(){return u().map(function(a){return{type:"LineString",coordinates:a}})},t.outline=function(){return{type:"Polygon",coordinates:[p(e).concat(q(h).slice(1),p(d).reverse().slice(1),q(i).reverse().slice(1))]}},t.extent=function(a){return arguments.length?t.majorExtent(a).minorExtent(a):t.minorExtent()},t.majorExtent=function(a){return arguments.length?(e=+a[0][0],d=+a[1][0],i=+a[0][1],h=+a[1][1],e>d&&(a=e,e=d,d=a),i>h&&(a=i,i=h,h=a),t.precision(s)):[[e,i],[d,h]]},t.minorExtent=function(a){return arguments.length?(c=+a[0][0],b=+a[1][0],g=+a[0][1],f=+a[1][1],c>b&&(a=c,c=b,b=a),g>f&&(a=g,g=f,f=a),t.precision(s)):[[c,g],[b,f]]},t.step=function(a){return arguments.length?t.majorStep(a).minorStep(a):t.minorStep()},t.majorStep=function(a){return arguments.length?(l=+a[0],m=+a[1],t):[l,m]},t.minorStep=function(a){return arguments.length?(j=+a[0],k=+a[1],t):[j,k]},t.precision=function(a){return arguments.length?(s=+a,n=be(g,f,90),o=ce(c,b,s),p=be(i,h,90),q=ce(e,d,s),t):s},t.majorExtent([[-180,-90+ya],[180,90-ya]]).minorExtent([[-180,-80-ya],[180,80+ya]])},a.geo.greatArc=function(){function f(){return{type:"LineString",coordinates:[c||b.apply(this,arguments),e||d.apply(this,arguments)]}}var c,e,b=de,d=ee;return f.distance=function(){return a.geo.distance(c||b.apply(this,arguments),e||d.apply(this,arguments))},f.source=function(a){return arguments.length?(b=a,c="function"==typeof a?null:a,f):b},f.target=function(a){return arguments.length?(d=a,e="function"==typeof 
a?null:a,f):d},f.precision=function(){return arguments.length?f:0},f},a.geo.interpolate=function(a,b){return fe(a[0]*Ea,a[1]*Ea,b[0]*Ea,b[1]*Ea)},a.geo.length=function(b){return ge=0,a.geo.stream(b,he),ge};var ge,he={sphere:J,point:J,lineStart:ie,lineEnd:J,polygonStart:J,polygonEnd:J},ke=je(function(a){return Math.sqrt(2/(1+a))},function(a){return 2*Math.asin(a/2)});(a.geo.azimuthalEqualArea=function(){return Sd(ke)}).raw=ke;var le=je(function(a){var b=Math.acos(a);return b&&b/Math.sin(b)},F);(a.geo.azimuthalEquidistant=function(){return Sd(le)}).raw=le,(a.geo.conicConformal=function(){return ud(me)}).raw=me,(a.geo.conicEquidistant=function(){return ud(ne)}).raw=ne;var oe=je(function(a){return 1/a},Math.atan);(a.geo.gnomonic=function(){return Sd(oe)}).raw=oe,pe.invert=function(a,b){return[a,2*Math.atan(Math.exp(b))-Da]},(a.geo.mercator=function(){return qe(pe)}).raw=pe;var re=je(function(){return 1},Math.asin);(a.geo.orthographic=function(){return Sd(re)}).raw=re;var se=je(function(a){return 1/(1+a)},function(a){return 2*Math.atan(a)});(a.geo.stereographic=function(){return Sd(se)}).raw=se,te.invert=function(a,b){return[-b,2*Math.atan(Math.exp(a))-Da]},(a.geo.transverseMercator=function(){var a=qe(te),b=a.center,c=a.rotate;return a.center=function(a){return a?b([-a[1],a[0]]):(a=b(),[a[1],-a[0]])},a.rotate=function(a){return a?c([a[0],a[1],a.length>2?a[2]+90:90]):(a=c(),[a[0],a[1],a[2]-90])},c([0,0,90])}).raw=te,a.geom={},a.geom.hull=function(a){function d(a){if(a.length<3)return[];var f,d=vb(b),e=vb(c),g=a.length,h=[],i=[];for(f=0;f=0;--f)n.push(a[h[j[f]][2]]);for(f=+l;f=c&&j.x<=e&&j.y>=d&&j.y<=g?[[c,g],[e,g],[e,d],[c,d]]:[];k.point=a[h]}),b}function h(a){return a.map(function(a,b){return{x:Math.round(d(a,b)/ya)*ya,y:Math.round(e(a,b)/ya)*ya,i:b}})}var b=ue,c=ve,d=b,e=c,f=jf;return a?g(a):(g.links=function(a){return gf(h(a)).edges.filter(function(a){return a.l&&a.r}).map(function(b){return{source:a[b.l.i],target:a[b.r.i]}})},g.triangles=function(a){var b=[];return gf(h(a)).cells.forEach(function(c,d){for(var i,j,e=c.site,f=c.edges.sort(Se),g=-1,h=f.length,k=f[h-1].edge,l=k.l===e?k.r:k.l;++g=i,l=d>=j,m=l<<1|k;a.leaf=!1,a=a.nodes[m]||(a.nodes[m]=nf()),k?e=i:g=i,l?f=j:h=j,y(a,b,c,d,e,f,g,h)}var i,l,m,n,o,p,q,s,t,j=vb(f),k=vb(g);if(null!=b)p=b,q=c,s=d,t=e;else if(s=t=-(p=q=1/0),l=[],m=[],o=a.length,h)for(n=0;ns&&(s=i.x),i.y>t&&(t=i.y),l.push(i.x),m.push(i.y);else for(n=0;ns&&(s=u),v>t&&(t=v),l.push(u),m.push(v)}var w=s-p,x=t-q;w>x?t=q+w:s=p+x;var A=nf();if(A.add=function(a){y(A,a,+j(a,++n),+k(a,n),p,q,s,t)},A.visit=function(a){of(a,A,p,q,s,t)},A.find=function(a){return pf(A,a[0],a[1],p,q,s,t)},n=-1,null==b){for(;++n=0?a.slice(0,c):a,e=c>=0?a.slice(c+1):"in";return d=zf.get(d)||yf,e=Af.get(e)||F,Bf(e(d.apply(null,b.call(arguments,1))))},a.interpolateHcl=Of,a.interpolateHsl=Pf,a.interpolateLab=Qf,a.interpolateRound=Rf,a.transform=function(b){var c=d.createElementNS(a.ns.prefix.svg,"g");return(a.transform=function(a){if(null!=a){c.setAttribute("transform",a);var b=c.transform.baseVal.consolidate()}return new Sf(b?b.matrix:Wf)})(b)},Sf.prototype.toString=function(){return"translate("+this.translate+")rotate("+this.rotate+")skewX("+this.skew+")scale("+this.scale+")"};var Wf={a:1,b:0,c:0,d:1,e:0,f:0};a.interpolateTransform=Xf,a.layout={},a.layout.bundle=function(){return function(a){for(var b=[],c=-1,d=a.length;++c0?d:0:d>0&&(c.start({type:"start",alpha:f=d}),a.timer(b.tick)),b):f},b.start=function(){function m(b,d){if(!k){for(k=new 
Array(c),h=0;h=0;)f.push(k=j[i]),k.parent=h,k.depth=h.depth+1;c&&(h.value=0),h.children=j}else c&&(h.value=+c.call(d,h,h.depth)||0),delete h.children;return lg(e,function(b){var d,e;a&&(d=b.children)&&d.sort(a),c&&(e=b.parent)&&(e.value+=b.value)}),g}var a=og,b=mg,c=ng;return d.sort=function(b){return arguments.length?(a=b,d):a},d.children=function(a){return arguments.length?(b=a,d):b},d.value=function(a){return arguments.length?(c=a,d):c},d.revalue=function(a){return c&&(kg(a,function(a){a.children&&(a.value=0)}),lg(a,function(a){var b;a.children||(a.value=+c.call(d,a,a.depth)||0),(b=a.parent)&&(b.value+=a.value)})),a},d},a.layout.partition=function(){function d(a,b,c,e){var f=a.children;if(a.x=b,a.y=a.depth*e,a.dx=c,a.dy=e,f&&(h=f.length)){var h,i,j,g=-1;for(c=a.value?c/a.value:0;++ge&&(e=h),d.push(h)}for(g=0;g0)for(g=-1;++g=j[0]&&p<=j[1]&&(l=h[a.bisect(k,p,1,n)-1],l.y+=o,l.push(f[g]));return h}var b=!0,c=Number,d=Dg,e=Bg;return f.value=function(a){return arguments.length?(c=a,f):c},f.range=function(a){return arguments.length?(d=vb(a),f):d},f.bins=function(a){return arguments.length?(e="number"==typeof a?function(b){return Cg(b,a)}:vb(a),f):e},f.frequency=function(a){return arguments.length?(b=!!a,f):b},f},a.layout.pack=function(){function f(a,f){var g=b.call(this,a,f),h=g[0],i=d[0],j=d[1],k=null==e?Math.sqrt:"function"==typeof e?e:function(){return e};if(h.x=h.y=0,lg(h,function(a){a.r=+k(a.value)}),lg(h,Ig),c){var l=c*(e?1:Math.max(2*h.r/i,2*h.r/j))/2;lg(h,function(a){a.r+=l}),lg(h,Ig),lg(h,function(a){a.r-=l})}return Lg(h,i/2,j/2,e?1:1/Math.max(2*h.r/i,2*h.r/j)),g}var e,b=a.layout.hierarchy().sort(Eg),c=0,d=[1,1];return f.size=function(a){return arguments.length?(d=a,f):d},f.radius=function(a){return arguments.length?(e=null==a||"function"==typeof a?a:+a,f):e},f.padding=function(a){return arguments.length?(c=+a,f):c},jg(f,b)},a.layout.tree=function(){function f(a,f){var j=b.call(this,a,f),l=j[0],m=g(l);if(lg(m,h),m.parent.m=-m.z,kg(m,i),e)kg(l,k);else{var n=l,o=l,p=l;kg(l,function(a){a.xo.x&&(o=a),a.depth>p.depth&&(p=a)});var q=c(n,o)/2-n.x,r=d[0]/(o.x+c(o,n)/2+q),s=d[1]/(p.depth||1);kg(l,function(a){a.x=(a.x+q)*r,a.y=a.depth*s})}return j}function g(a){for(var d,b={A:null,children:[a]},c=[b];null!=(d=c.pop());)for(var f,e=d.children,g=0,h=e.length;g0&&(Qg(Sg(g,a,d),a,m),i+=m,j+=m),k+=g.m,i+=e.m,l+=h.m,j+=f.m;g&&!Pg(f)&&(f.t=g,f.m+=k-j),e&&!Og(h)&&(h.t=e,h.m+=i-l,d=a)}return d}function k(a){a.x*=d[0],a.y=a.depth*d[1]}var b=a.layout.hierarchy().sort(null).value(null),c=Ng,d=[1,1],e=null;return f.separation=function(a){return arguments.length?(c=a,f):c},f.size=function(a){return arguments.length?(e=null==(d=a)?k:null,f):e?null:d},f.nodeSize=function(a){return arguments.length?(e=null==(d=a)?null:k,f):e?d:null},jg(f,b)},a.layout.cluster=function(){function f(a,f){var i,g=b.call(this,a,f),h=g[0],j=0;lg(h,function(a){var b=a.children;b&&b.length?(a.x=Ug(b),a.y=Tg(b)):(a.x=i?j+=c(a,i):0,a.y=0,i=a)});var k=Vg(h),l=Wg(h),m=k.x-c(k,l)/2,n=l.x+c(l,k)/2;return lg(h,e?function(a){a.x=(a.x-h.x)*d[0],a.y=(h.y-a.y)*d[1]}:function(a){a.x=(a.x-m)/(n-m)*d[0],a.y=(1-(h.y?a.y/h.y:1))*d[1]}),g}var b=a.layout.hierarchy().sort(null).value(null),c=Ng,d=[1,1],e=!1;return f.separation=function(a){return arguments.length?(c=a,f):c},f.size=function(a){return arguments.length?(e=null==(d=a),f):e?null:d},f.nodeSize=function(a){return arguments.length?(e=null!=(d=a),f):e?d:null},jg(f,b)},a.layout.treemap=function(){function k(a,b){for(var 
e,f,c=-1,d=a.length;++c0;)d.push(g=e[p-1]),d.area+=g.area,"squarify"!==i||(j=n(d,m))<=h?(e.pop(),h=j):(d.area-=d.pop().area,o(d,m,c,!1),m=Math.min(c.dx,c.dy),d.length=d.area=0,h=1/0);d.length&&(o(d,m,c,!0),d.length=d.area=0),b.forEach(l)}}function m(a){var b=a.children;if(b&&b.length){var e,c=f(a),d=b.slice(),g=[];for(k(d,c.dx*c.dy/a.value),g.area=0;e=d.pop();)g.push(e),g.area+=e.area,null!=e.z&&(o(g,e.z?c.dx:c.dy,c,!d.length),g.length=g.area=0);b.forEach(m)}}function n(a,b){for(var d,c=a.area,e=0,f=1/0,g=-1,h=a.length;++ge&&(e=d));return c*=c,b*=b,c?Math.max(b*e*j/c,c/(b*f*j)):1/0}function o(a,b,d,e){var k,f=-1,g=a.length,h=d.x,i=d.y,j=b?c(a.area/b):0;if(b==d.dx){for((e||j>d.dy)&&(j=d.dy);++fd.dx)&&(j=d.dx);++f1);return a+b*c*Math.sqrt(-2*Math.log(e)/e)}},logNormal:function(){var b=a.random.normal.apply(a,arguments);return function(){return Math.exp(b())}},bates:function(b){var c=a.random.irwinHall(b);return function(){return c()/b}},irwinHall:function(a){return function(){for(var b=0,c=0;cl?0:1;if(j=Ca)return i(j,n)+(h?i(h,1-n):"")+"Z";var o,p,q,r,u,v,w,x,y,z,A,B,s=0,t=0,C=[];if((r=(+g.apply(this,arguments)||0)/2)&&(q=d===Ch?Math.sqrt(h*h+j*j):+d.apply(this,arguments),n||(t*=-1),j&&(t=Ja(q/j*Math.sin(r))),h&&(s=Ja(q/h*Math.sin(r)))),j){u=j*Math.cos(k+t),v=j*Math.sin(k+t),w=j*Math.cos(l-t),x=j*Math.sin(l-t);var D=Math.abs(l-k-2*t)<=Aa?0:1;if(t&&Ih(u,v,w,x)===n^D){var E=(k+l)/2;u=j*Math.cos(E),v=j*Math.sin(E),w=x=null}}else u=v=0;if(h){y=h*Math.cos(l-s),z=h*Math.sin(l-s),A=h*Math.cos(k+s),B=h*Math.sin(k+s);var F=Math.abs(k-l+2*s)<=Aa?0:1;if(s&&Ih(y,z,A,B)===1-n^F){var G=(k+l)/2;y=h*Math.cos(G),z=h*Math.sin(G),A=B=null}}else y=z=0;if((o=Math.min(Math.abs(j-h)/2,+c.apply(this,arguments)))>.001){p=hAa)+",1 "+b}function j(a,b,c,d){return"Q 0,0 "+d}var a=de,b=ee,c=ji,d=Fh,e=Gh;return f.radius=function(a){return arguments.length?(c=vb(a),f):c},f.source=function(b){return arguments.length?(a=vb(b),f):a},f.target=function(a){return arguments.length?(b=vb(a),f):b},f.startAngle=function(a){return arguments.length?(d=vb(a),f):d},f.endAngle=function(a){return arguments.length?(e=vb(a),f):e},f},a.svg.diagonal=function(){function d(d,e){var f=a.call(this,d,e),g=b.call(this,d,e),h=(f.y+g.y)/2,i=[f,{x:f.x,y:h},{x:g.x,y:h},g];return i=i.map(c),"M"+i[0]+"C"+i[1]+" "+i[2]+" "+i[3]}var a=de,b=ee,c=ki;return d.source=function(b){return arguments.length?(a=vb(b),d):a},d.target=function(a){return arguments.length?(b=vb(a),d):b},d.projection=function(a){return arguments.length?(c=a,d):c},d},a.svg.diagonal.radial=function(){var b=a.svg.diagonal(),c=ki,d=b.projection;return b.projection=function(a){return arguments.length?d(li(c=a)):c},b},a.svg.symbol=function(){function c(c,d){return(pi.get(a.call(this,c,d))||oi)(b.call(this,c,d))}var a=ni,b=mi;return c.type=function(b){return arguments.length?(a=vb(b),c):a},c.size=function(a){return arguments.length?(b=vb(a),c):b},c};var pi=a.map({circle:oi,cross:function(a){var b=Math.sqrt(a/5)/2;return"M"+-3*b+","+-b+"H"+-b+"V"+-3*b+"H"+b+"V"+-b+"H"+3*b+"V"+b+"H"+b+"V"+3*b+"H"+-b+"V"+b+"H"+-3*b+"Z"},diamond:function(a){var b=Math.sqrt(a/(2*ri)),c=b*ri;return"M0,"+-b+"L"+c+",0 0,"+b+" "+-c+",0Z"},square:function(a){var b=Math.sqrt(a)/2;return"M"+-b+","+-b+"L"+b+","+-b+" "+b+","+b+" "+-b+","+b+"Z"},"triangle-down":function(a){var b=Math.sqrt(a/qi),c=b*qi/2;return"M0,"+c+"L"+b+","+-c+" "+-b+","+-c+"Z"},"triangle-up":function(a){var b=Math.sqrt(a/qi),c=b*qi/2;return"M0,"+-c+"L"+b+","+c+" "+-b+","+c+"Z"}});a.svg.symbolTypes=pi.keys();var 
qi=Math.sqrt(3),ri=Math.tan(30*Ea);V.transition=function(a){for(var e,f,b=xi||++wi,c=Bi(a),d=[],g=yi||{time:Date.now(),ease:Gf,delay:0,duration:250},h=-1,i=this.length;++hrect,.s>rect").attr("width",e[1]-e[0])}function p(a){a.select(".extent").attr("y",g[0]),a.selectAll(".extent,.e>rect,.w>rect").attr("height",g[1]-g[0])}function q(){function E(){32==a.event.keyCode&&(w||(y=null,z[0]-=e[1],z[1]-=g[1],w=2),M())}function F(){ +32==a.event.keyCode&&2==w&&(z[0]+=e[1],z[1]+=g[1],w=0,M())}function G(){var b=a.mouse(l),f=!1;A&&(b[0]+=A[0],b[1]+=A[1]),w||(a.event.altKey?(y||(y=[(e[0]+e[1])/2,(g[0]+g[1])/2]),z[0]=e[+(b[0]c&&!g||!f||d&&!h&&i||e&&i)return 1;if(b-1;);return c}function Ma(a,b){for(var c=a.length;c--&&b.indexOf(a.charAt(c))>-1;);return c}function Na(a,b){return Ga(a.criteria,b.criteria)||a.index-b.index}function Oa(a,b,c){for(var d=-1,e=a.criteria,f=b.criteria,g=e.length,h=c.length;++d=h)return i;var j=c[d];return i*("asc"===j||j===!0?1:-1)}}return a.index-b.index}function Pa(a){return ta[a]}function Qa(a){return ua[a]}function Ra(a,b,c){return b?a=xa[a]:c&&(a=ya[a]),"\\"+a}function Sa(a){return"\\"+ya[a]}function Ta(a,b,c){for(var d=a.length,e=b+(c?0:-1);c?e--:++e=9&&a<=13||32==a||160==a||5760==a||6158==a||a>=8192&&(a<=8202||8232==a||8233==a||8239==a||8287==a||12288==a||65279==a)}function Wa(a,b){for(var c=-1,d=a.length,e=-1,f=[];++c=p?pd(b):null,i=b.length;h&&(f=Wb,g=!1,b=h);a:for(;++ef?0:f+d),e=e===a||e>f?f:+e||0,e<0&&(e+=f),f=d>e?0:e>>>0,d>>>=0;df?0:f+c),d=d===a||d>f?f:+d||0,d<0&&(d+=f),f=c>d?0:d-c>>>0,c>>>=0;for(var g=E(f);++e=p,h=g?pd():null,i=[];h?(d=Wb,f=!1):(g=!1,h=b?[]:i);a:for(;++c>>1,g=a[f];(c?g<=b:g2?d[f-2]:a,h=f>2?d[2]:a,i=f>1?d[f-1]:a;for("function"==typeof g?(g=gd(g,i,5),f-=2):(g="function"==typeof i?i:a,f-=g?1:0),h&&be(d[0],d[1],h)&&(g=f<3?a:g,f=1);++e-1?d[g]:a}return Ac(d,e,b)}}function wd(a){return function(b,c,d){return b&&b.length?(c=Pd(c,d,3),Ha(b,c,a)):-1}}function xd(a){return function(b,c,d){return c=Pd(c,d,3),Ac(b,c,a,!0)}}function yd(b){return function(){for(var c,d=arguments.length,e=b?d:-1,g=0,i=E(d);b?e--:++e=p)return c.plant(b).value();for(var e=0,f=d?i[e].apply(this,a):b;++e=b||!tb(b))return"";var e=b-d;return c=null==c?" 
":c+"",zh(c,pb(e/c.length)).slice(0,e)}function Id(a,b,d,e){function h(){for(var b=-1,c=arguments.length,i=-1,j=e.length,k=E(j+c);++ij))return!1;for(;++i-1&&a%1==0&&a-1&&a%1==0&&a<=Fb}function fe(a){return a===a&&!yg(a)}function ge(a,b){var d=a[1],g=b[1],h=d|g,i=h-1;)mb.call(b,f,1);return b}function Le(a,b,c){var d=[];if(!a||!a.length)return d;var e=-1,f=[],g=a.length;for(b=Pd(b,c,3);++e-1:!!e&&Sd(a,b,c)>-1}function zf(a,b,c){var d=pg(a)?dc:Mc;return b=Pd(b,c,3),d(a,b)}function Bf(a,b){return zf(a,Vh(b))}function Ef(a,b,c){var d=pg(a)?cc:zc;return b=Pd(b,c,3),d(a,function(a,c,d){return!b(a,c,d)})}function Ff(b,c,d){if(d?be(b,c,d):null==c){b=ne(b);var e=b.length;return e>0?b[Uc(0,e-1)]:a}var f=-1,g=Lg(b),e=g.length,h=e-1;for(c=wb(c<0?0:+c||0,e);++f0&&(d=c.apply(this,arguments)),b<=1&&(c=a),d}}function Wf(b,c,d){function p(){j&&gb(j),f&&gb(f),l=0,f=j=k=a}function q(c,d){d&&gb(d),f=j=k=a,c&&(l=Nf(),g=b.apply(i,e),j||f||(e=i=a))}function r(){var a=c-(Nf()-h);a<=0||a>c?q(k,f):j=lb(r,a)}function t(){q(n,j)}function u(){if(e=arguments,h=Nf(),i=this,k=n&&(j||!o),m===!1)var d=o&&!j;else{f||o||(l=h);var p=m-(h-l),q=p<=0||p>m;q?(f&&(f=gb(f)),l=h,g=b.apply(i,e)):f||(f=lb(t,p))}return q&&j?j=gb(j):j||c===m||(j=lb(r,c)),d&&(q=!0,g=b.apply(i,e)),!q||j||f||(e=i=a),g}var e,f,g,h,i,j,k,l=0,m=!1,n=!0;if("function"!=typeof b)throw new Aa(s);if(c=c<0?0:+c||0,d===!0){var o=!0;n=!1}else yg(d)&&(o=!!d.leading,m="maxWait"in d&&vb(+d.maxWait||0,c),n="trailing"in d?!!d.trailing:n);return u.cancel=p,u}function _f(a,b){if("function"!=typeof a||b&&"function"!=typeof b)throw new Aa(s);var c=function(){var d=arguments,e=b?b.apply(this,d):d[0],f=c.cache;if(f.has(e))return f.get(e);var g=a.apply(this,d);return c.cache=f.set(e,g),g};return c.cache=new _f.Cache,c}function bg(a){if("function"!=typeof a)throw new Aa(s);return function(){return!a.apply(this,arguments)}}function cg(a){return Qf(2,a)}function gg(b,c){if("function"!=typeof b)throw new Aa(s);return c=vb(c===a?b.length-1:+c||0,0),function(){for(var a=arguments,d=-1,e=vb(a.length-c,0),f=E(e);++db}function ng(a,b){return a>=b}function og(a){return Ua(a)&&_d(a)&&Va.call(a,"callee")&&!jb.call(a,"callee")}function qg(a){return a===!0||a===!1||Ua(a)&&cb.call(a)==w}function rg(a){return Ua(a)&&cb.call(a)==x}function sg(a){return!!a&&1===a.nodeType&&Ua(a)&&!Eg(a)}function tg(a){return null==a||(_d(a)&&(pg(a)||Gg(a)||og(a)||Ua(a)&&xg(a.splice))?!a.length:!ah(a).length)}function ug(b,c,d,e){d="function"==typeof d?gd(d,e,3):a;var f=d?d(b,c):a;return f===a?Jc(b,c,d):!!f}function vg(a){return Ua(a)&&"string"==typeof a.message&&cb.call(a)==y}function wg(a){return"number"==typeof a&&tb(a)}function xg(a){return yg(a)&&cb.call(a)==z}function yg(a){var b=typeof a;return!!a&&("object"==b||"function"==b)}function zg(b,c,d,e){return d="function"==typeof d?gd(d,e,3):a,Lc(b,Ud(c),d)}function Ag(a){return Dg(a)&&a!=+a}function Bg(a){return null!=a&&(xg(a)?eb.test(Ea.call(a)):Ua(a)&&ja.test(a))}function Cg(a){return null===a}function Dg(a){return"number"==typeof a||Ua(a)&&cb.call(a)==B}function Eg(b){var c;if(!Ua(b)||cb.call(b)!=C||og(b)||!Va.call(b,"constructor")&&(c=b.constructor,"function"==typeof c&&!(c instanceof c)))return!1;var d;return Ec(b,function(a,b){d=b}),d===a||Va.call(b,d)}function Fg(a){return yg(a)&&cb.call(a)==D}function Gg(a){return"string"==typeof a||Ua(a)&&cb.call(a)==F}function Hg(a){return Ua(a)&&ee(a.length)&&!!ra[cb.call(a)]}function Ig(b){return b===a}function Jg(a,b){return a0;++d=wb(c,d)&&b=0&&b.indexOf(c,d)==d}function sh(a){return 
a=Ka(a),a&&X.test(a)?a.replace(V,Qa):a}function th(a){return a=Ka(a),a&&da.test(a)?a.replace(ca,Ra):a||"(?:)"}function vh(a,b,c){a=Ka(a),b=+b;var d=a.length;if(d>=b||!tb(b))return a;var e=(b-d)/2,f=rb(e),g=pb(e);return c=Hd("",g,c),c.slice(0,f)+a+c}function yh(a,b,c){return(c?be(a,b,c):null==b)?b=0:b&&(b=+b),a=Eh(a),yb(a,b||(ia.test(a)?16:10))}function zh(a,b){var c="";if(a=Ka(a),b=+b,b<1||!a||!tb(b))return c;do b%2&&(c+=a),b=rb(b/2),a+=a;while(b);return c}function Ch(a,b,c){return a=Ka(a),c=null==c?0:wb(c<0?0:+c||0,a.length),a.lastIndexOf(b,c)==c}function Dh(b,c,d){var e=Ib.templateSettings;d&&be(b,c,d)&&(c=d=a),b=Ka(b),c=lc(mc({},d||c),e,kc);var i,j,f=lc(mc({},c.imports),e.imports,kc),g=ah(f),h=bd(f,g),k=0,l=c.interpolate||ma,m="__p += '",n=ya((c.escape||ma).source+"|"+l.source+"|"+(l===$?ga:ma).source+"|"+(c.evaluate||ma).source+"|$","g"),o="//# sourceURL="+("sourceURL"in c?c.sourceURL:"lodash.templateSources["+ ++qa+"]")+"\n";b.replace(n,function(a,c,d,e,f,g){return d||(d=e),m+=b.slice(k,g).replace(na,Sa),c&&(i=!0,m+="' +\n__e("+c+") +\n'"),f&&(j=!0,m+="';\n"+f+";\n__p += '"),d&&(m+="' +\n((__t = ("+d+")) == null ? '' : __t) +\n'"),k=g+a.length,a}),m+="';\n";var p=c.variable;p||(m="with (obj) {\n"+m+"\n}\n"),m=(j?m.replace(R,""):m).replace(S,"$1").replace(T,"$1;"),m="function("+(p||"obj")+") {\n"+(p?"":"obj || (obj = {});\n")+"var __t, __p = ''"+(i?", __e = _.escape":"")+(j?", __j = Array.prototype.join;\nfunction print() { __p += __j.call(arguments, '') }\n":";\n")+m+"return __p\n}";var q=Kh(function(){return ua(g,o+"return "+m).apply(a,h)});if(q.source=m,vg(q))throw q;return q}function Eh(a,b,c){var d=a;return(a=Ka(a))?(c?be(d,b,c):null==b)?a.slice(Ya(a),Za(a)+1):(b+="",a.slice(La(a,b),Ma(a,b)+1)):a}function Fh(a,b,c){var d=a;return a=Ka(a),a?(c?be(d,b,c):null==b)?a.slice(Ya(a)):a.slice(La(a,b+"")):a}function Gh(a,b,c){var d=a;return a=Ka(a),a?(c?be(d,b,c):null==b)?a.slice(0,Za(a)+1):a.slice(0,Ma(a,b+"")+1):a}function Hh(b,c,d){d&&be(b,c,d)&&(c=a);var e=l,f=m;if(null!=c)if(yg(c)){var g="separator"in c?c.separator:g;e="length"in c?+c.length||0:e,f="omission"in c?Ka(c.omission):f}else e=+c||0;if(b=Ka(b),e>=b.length)return b;var h=e-f.length;if(h<1)return f;var i=b.slice(0,h);if(null==g)return i+f;if(Fg(g)){if(b.slice(h).search(g)){var j,k,n=b.slice(0,h);for(g.global||(g=ya(g.source,(ha.exec(g)||"")+"g")),g.lastIndex=0;j=g.exec(n);)k=j.index;i=i.slice(0,null==k?h:k)}}else if(b.indexOf(g,h)!=h){var o=i.lastIndexOf(g);o>-1&&(i=i.slice(0,o))}return i+f}function Ih(a){return a=Ka(a),a&&W.test(a)?a.replace(U,$a):a}function Jh(b,c,d){return d&&be(b,c,d)&&(c=a),b=Ka(b),b.match(c||oa)||[]}function Lh(b,c,d){return d&&be(b,c,d)&&(c=a),Ua(b)?Oh(b):pc(b,c)}function Mh(a){return function(){return a}}function Nh(a){return a}function Oh(a){return Nc(qc(a,!0))}function Ph(a,b){return Oc(a,qc(b,!0))}function Sh(b,c,d){if(null==d){var e=yg(c),f=e?ah(c):a,g=f&&f.length?Hc(c,f):a;(g?g.length:e)||(g=!1,d=c,c=b,b=this)}g||(g=Hc(c,ah(c)));var h=!0,i=-1,j=xg(b),k=g.length;d===!1?h=!1:yg(d)&&"chain"in d&&(h=d.chain);for(;++i>>1,Fb=9007199254740991,Gb=ob&&new ob,Hb={};Ib.support={};Ib.templateSettings={escape:Y,evaluate:Z,interpolate:$,variable:"",imports:{_:Ib}};var rc=function(){function b(){}return function(c){if(yg(c)){b.prototype=c;var d=new b;b.prototype=a}return d||{}}}(),uc=md(Fc),vc=md(Gc,!0),Cc=nd(),Dc=nd(!0),Wc=Gb?function(a,b){return Gb.set(a,b),a}:Nh,Qd=Gb?function(a){return Gb.get(a)}:Uh,Td=Rc("length"),le=function(){var a=0,b=0;return function(c,d){var 
e=Nf(),f=o-(e-b);if(b=e,f>0){if(++a>=n)return c}else a=0;return Wc(c,d)}}(),te=gg(function(a,b){return Ua(a)&&_d(a)?tc(a,Bc(b,!1,!0)):[]}),ze=wd(),Ae=wd(!0),Ge=gg(function(a){for(var b=a.length,c=b,d=E(k),e=Sd(),f=e===Ia,g=[];c--;){var h=a[c]=_d(h=a[c])?h:[];d[c]=f&&h.length>=120?pd(c&&h):null}var i=a[0],j=-1,k=i?i.length:0,l=d[0];a:for(;++j2?b[c-2]:a,e=c>1?b[c-1]:a;return c>2&&"function"==typeof d?c-=2:(d=c>1&&"function"==typeof e?(--c,e):a,e=a),b.length=c,Xe(b,d,e)}),gf=gg(function(a){return a=Bc(a),this.thru(function(b){return Yb(pg(b)?b:[oe(b)],a)})}),mf=gg(function(a,b){return nc(a,Bc(b))}),nf=kd(function(a,b,c){Va.call(a,c)?++a[c]:a[c]=1}),qf=vd(uc),rf=vd(vc,!0),tf=zd($b,uc),uf=zd(_b,vc),vf=kd(function(a,b,c){Va.call(a,c)?a[c].push(b):a[c]=[b]}),xf=kd(function(a,b,c){a[c]=b}),yf=gg(function(b,c,d){var e=-1,f="function"==typeof c,g=ce(c),h=_d(b)?E(b.length):[];return uc(b,function(b){var i=f?c:g&&null!=b?b[c]:a;h[++e]=i?i.apply(b,d):$d(b,c,d)}),h}),Af=kd(function(a,b,c){a[c?0:1].push(b)},function(){return[[],[]]}),Cf=Fd(fc,uc),Df=Fd(gc,vc),Kf=gg(function(a,b){if(null==a)return[];var c=b[2];return c&&be(b[0],b[1],c)&&(b.length=1),$c(a,Bc(b),[])}),Nf=xb||function(){return(new G).getTime()},Rf=gg(function(a,b,d){var e=c;if(d.length){var f=Wa(d,Rf.placeholder);e|=h}return Ld(a,e,b,d,f)}),Sf=gg(function(a,b){b=b.length?Bc(b):Yg(a);for(var d=-1,e=b.length;++d0||c<0)?new Mb(d):(b<0?d=d.takeRight(-b):b&&(d=d.drop(b)),c!==a&&(c=+c||0,d=c<0?d.dropRight(-c):d.take(c-b)),d)},Mb.prototype.takeRightWhile=function(a,b){return this.reverse().takeWhile(a,b).reverse()},Mb.prototype.toArray=function(){return this.take(Bb)},Fc(Mb.prototype,function(b,c){var d=/^(?:filter|map|reject)|While$/.test(c),e=/^(?:first|last)$/.test(c),f=Ib[e?"take"+("last"==c?"Right":""):c];f&&(Ib.prototype[c]=function(){var c=e?[1]:arguments,g=this.__chain__,h=this.__wrapped__,i=!!this.__actions__.length,j=h instanceof Mb,k=c[0],l=j||pg(h);l&&d&&"function"==typeof k&&1!=k.length&&(j=l=!1);var m=function(b){return e&&g?f(b,1)[0]:f.apply(a,ec([b],c))},n={func:df,args:[m],thisArg:a},o=j&&!i;if(e&&!g)return o?(h=h.clone(),h.__actions__.push(n),b.call(h)):f.call(a,this.value())[0];if(!e&&l){h=o?h:new Mb(this);var p=b.apply(h,c);return p.__actions__.push(n),new Kb(p,g)}return this.thru(m)})}),$b(["join","pop","push","replace","shift","sort","splice","split","unshift"],function(a){var b=(/^(?:replace|split)$/.test(a)?Da:Ba)[a],c=/^(?:push|sort|unshift)$/.test(a)?"tap":"thru",d=/^(?:join|pop|replace|shift)$/.test(a);Ib.prototype[a]=function(){var a=arguments;return d&&!this.__chain__?b.apply(this.value(),a):this[c](function(c){return b.apply(c,a)})}}),Fc(Mb.prototype,function(a,b){var c=Ib[b];if(c){var d=c.name+"",e=Hb[d]||(Hb[d]=[]);e.push({name:b,func:c})}}),Hb[Gd(a,d).name]=[{name:"wrapper",func:a}],Mb.prototype.clone=Nb,Mb.prototype.reverse=Ob,Mb.prototype.value=Pb,Ib.prototype.chain=ef,Ib.prototype.commit=ff,Ib.prototype.concat=gf,Ib.prototype.plant=hf,Ib.prototype.reverse=jf,Ib.prototype.toString=kf,Ib.prototype.run=Ib.prototype.toJSON=Ib.prototype.valueOf=Ib.prototype.value=lf,Ib.prototype.collect=Ib.prototype.map,Ib.prototype.head=Ib.prototype.first,Ib.prototype.select=Ib.prototype.filter,Ib.prototype.tail=Ib.prototype.rest,Ib}var a,b="3.10.1",c=1,d=2,e=4,f=8,g=16,h=32,i=64,j=128,k=256,l=30,m="...",n=150,o=16,p=200,q=1,r=2,s="Expected a function",t="__lodash_placeholder__",u="[object Arguments]",v="[object Array]",w="[object Boolean]",x="[object Date]",y="[object Error]",z="[object Function]",A="[object Map]",B="[object 
Number]",C="[object Object]",D="[object RegExp]",E="[object Set]",F="[object String]",G="[object WeakMap]",H="[object ArrayBuffer]",I="[object Float32Array]",J="[object Float64Array]",K="[object Int8Array]",L="[object Int16Array]",M="[object Int32Array]",N="[object Uint8Array]",O="[object Uint8ClampedArray]",P="[object Uint16Array]",Q="[object Uint32Array]",R=/\b__p \+= '';/g,S=/\b(__p \+=) '' \+/g,T=/(__e\(.*?\)|\b__t\)) \+\n'';/g,U=/&(?:amp|lt|gt|quot|#39|#96);/g,V=/[&<>"'`]/g,W=RegExp(U.source),X=RegExp(V.source),Y=/<%-([\s\S]+?)%>/g,Z=/<%([\s\S]+?)%>/g,$=/<%=([\s\S]+?)%>/g,_=/\.|\[(?:[^[\]]*|(["'])(?:(?!\1)[^\n\\]|\\.)*?\1)\]/,aa=/^\w*$/,ba=/[^.[\]]+|\[(?:(-?\d+(?:\.\d+)?)|(["'])((?:(?!\2)[^\n\\]|\\.)*?)\2)\]/g,ca=/^[:!,]|[\\^$.*+?()[\]{}|\/]|(^[0-9a-fA-Fnrtuvx])|([\n\r\u2028\u2029])/g,da=RegExp(ca.source),ea=/[\u0300-\u036f\ufe20-\ufe23]/g,fa=/\\(\\)?/g,ga=/\$\{([^\\}]*(?:\\.[^\\}]*)*)\}/g,ha=/\w*$/,ia=/^0[xX]/,ja=/^\[object .+?Constructor\]$/,ka=/^\d+$/,la=/[\xc0-\xd6\xd8-\xde\xdf-\xf6\xf8-\xff]/g,ma=/($^)/,na=/['\n\r\u2028\u2029\\]/g,oa=function(){var a="[A-Z\\xc0-\\xd6\\xd8-\\xde]",b="[a-z\\xdf-\\xf6\\xf8-\\xff]+";return RegExp(a+"+(?="+a+b+")|"+a+"?"+b+"|"+a+"+|[0-9]+","g")}(),pa=["Array","ArrayBuffer","Date","Error","Float32Array","Float64Array","Function","Int8Array","Int16Array","Int32Array","Math","Number","Object","RegExp","Set","String","_","clearTimeout","isFinite","parseFloat","parseInt","setTimeout","TypeError","Uint8Array","Uint8ClampedArray","Uint16Array","Uint32Array","WeakMap"],qa=-1,ra={};ra[I]=ra[J]=ra[K]=ra[L]=ra[M]=ra[N]=ra[O]=ra[P]=ra[Q]=!0,ra[u]=ra[v]=ra[H]=ra[w]=ra[x]=ra[y]=ra[z]=ra[A]=ra[B]=ra[C]=ra[D]=ra[E]=ra[F]=ra[G]=!1;var sa={};sa[u]=sa[v]=sa[H]=sa[w]=sa[x]=sa[I]=sa[J]=sa[K]=sa[L]=sa[M]=sa[B]=sa[C]=sa[D]=sa[F]=sa[N]=sa[O]=sa[P]=sa[Q]=!0,sa[y]=sa[z]=sa[A]=sa[E]=sa[G]=!1;var ta={"À":"A","Á":"A","Â":"A","Ã":"A","Ä":"A","Å":"A","à":"a","á":"a","â":"a","ã":"a","ä":"a","å":"a","Ç":"C","ç":"c","Ð":"D","ð":"d","È":"E","É":"E","Ê":"E","Ë":"E","è":"e","é":"e","ê":"e","ë":"e","Ì":"I","Í":"I","Î":"I","Ï":"I","ì":"i","í":"i","î":"i","ï":"i","Ñ":"N","ñ":"n","Ò":"O","Ó":"O","Ô":"O","Õ":"O","Ö":"O","Ø":"O","ò":"o","ó":"o","ô":"o","õ":"o","ö":"o","ø":"o","Ù":"U","Ú":"U","Û":"U","Ü":"U","ù":"u","ú":"u","û":"u","ü":"u","Ý":"Y","ý":"y","ÿ":"y","Æ":"Ae","æ":"ae","Þ":"Th","þ":"th","ß":"ss"},ua={"&":"&","<":"<",">":">",'"':""","'":"'","`":"`"},va={"&":"&","<":"<",">":">",""":'"',"'":"'","`":"`"},wa={function:!0,object:!0},xa={0:"x30",1:"x31",2:"x32",3:"x33",4:"x34",5:"x35",6:"x36",7:"x37",8:"x38",9:"x39",A:"x41",B:"x42",C:"x43",D:"x44",E:"x45",F:"x46",a:"x61",b:"x62",c:"x63",d:"x64",e:"x65",f:"x66",n:"x6e",r:"x72",t:"x74",u:"x75",v:"x76",x:"x78"},ya={"\\":"\\","'":"'","\n":"n","\r":"r","\u2028":"u2028","\u2029":"u2029"},za=wa[typeof exports]&&exports&&!exports.nodeType&&exports,Aa=wa[typeof module]&&module&&!module.nodeType&&module,Ba=za&&Aa&&"object"==typeof global&&global&&global.Object&&global,Ca=wa[typeof self]&&self&&self.Object&&self,Da=wa[typeof window]&&window&&window.Object&&window,Ea=Aa&&Aa.exports===za&&za,Fa=Ba||Da!==(this&&this.window)&&Da||Ca||this,ab=_a();"function"==typeof define&&"object"==typeof define.amd&&define.amd?(Fa._=ab,define(function(){return ab})):za&&Aa?Ea?(Aa.exports=ab)._=ab:za._=ab:Fa._=ab}.call(this),function a(b,c,d){function e(g,h){if(!c[g]){if(!b[g]){var i="function"==typeof require&&require;if(!h&&i)return i(g,!0);if(f)return f(g,!0);var j=new Error("Cannot find module '"+g+"'");throw j.code="MODULE_NOT_FOUND",j}var 
k=c[g]={exports:{}};b[g][0].call(k.exports,function(a){var c=b[g][1][a];return e(c?c:a)},k,k.exports,a,b,c,d)}return c[g].exports}for(var f="function"==typeof require&&require,g=0;g0&&(h=g.removeMin(),i=f[h],i.distance!==Number.POSITIVE_INFINITY);)d(h).forEach(j);return f}var d=a("../lodash"),e=a("../data/priority-queue");b.exports=g;var f=d.constant(1)},{"../data/priority-queue":16,"../lodash":20}],7:[function(a,b,c){function f(a){return d.filter(e(a),function(b){return b.length>1||1===b.length&&a.hasEdge(b[0],b[0])})}var d=a("../lodash"),e=a("./tarjan");b.exports=f},{"../lodash":20,"./tarjan":14}],8:[function(a,b,c){function f(a,b,c){return g(a,b||e,c||function(b){return a.outEdges(b)})}function g(a,b,c){var d={},e=a.nodes();return e.forEach(function(a){d[a]={},d[a][a]={distance:0},e.forEach(function(b){a!==b&&(d[a][b]={distance:Number.POSITIVE_INFINITY})}),c(a).forEach(function(c){var e=c.v===a?c.w:c.v,f=b(c);d[a][e]={distance:f,predecessor:a}})}),e.forEach(function(a){var b=d[a];e.forEach(function(c){var f=d[c];e.forEach(function(c){var d=f[a],e=b[c],g=f[c],h=d.distance+e.distance;h0;){if(i=h.removeMin(),d.has(g,i))c.setEdge(i,g[i]);else{if(k)throw new Error("Input graph is not connected: "+a);k=!0}a.nodeEdges(i).forEach(j)}return c}var d=a("../lodash"),e=a("../graph"),f=a("../data/priority-queue");b.exports=g},{"../data/priority-queue":16,"../graph":17,"../lodash":20}],14:[function(a,b,c){function e(a){function g(h){var i=e[h]={onStack:!0,lowlink:b,index:b++};if(c.push(h),a.successors(h).forEach(function(a){d.has(e,a)?e[a].onStack&&(i.lowlink=Math.min(i.lowlink,e[a].index)):(g(a),i.lowlink=Math.min(i.lowlink,e[a].lowlink))}),i.lowlink===i.index){var k,j=[];do k=c.pop(),e[k].onStack=!1,j.push(k);while(h!==k);f.push(j)}}var b=0,c=[],e={},f=[];return a.nodes().forEach(function(a){d.has(e,a)||g(a)}),f}var d=a("../lodash");b.exports=e},{"../lodash":20}],15:[function(a,b,c){function e(a){function g(h){if(d.has(c,h))throw new f;d.has(b,h)||(c[h]=!0,b[h]=!0,d.each(a.predecessors(h),g),delete c[h],e.push(h))}var b={},c={},e=[];if(d.each(a.sinks(),g),d.size(b)!==a.nodeCount())throw new f;return e}function f(){}var d=a("../lodash");b.exports=e,e.CycleException=f},{"../lodash":20}],16:[function(a,b,c){function e(){this._arr=[],this._keyIndices={}}var d=a("../lodash");b.exports=e,e.prototype.size=function(){return this._arr.length},e.prototype.keys=function(){return this._arr.map(function(a){return a.key})},e.prototype.has=function(a){return d.has(this._keyIndices,a)},e.prototype.priority=function(a){var b=this._keyIndices[a];if(void 0!==b)return this._arr[b].priority},e.prototype.min=function(){if(0===this.size())throw new Error("Queue underflow");return this._arr[0].key},e.prototype.add=function(a,b){var c=this._keyIndices;if(a=String(a),!d.has(c,a)){var e=this._arr,f=e.length;return c[a]=f,e.push({key:a,priority:b}),this._decrease(f),!0}return!1},e.prototype.removeMin=function(){this._swap(0,this._arr.length-1);var a=this._arr.pop();return delete this._keyIndices[a.key],this._heapify(0),a.key},e.prototype.decrease=function(a,b){var c=this._keyIndices[a];if(b>this._arr[c].priority)throw new Error("New priority is greater than current priority. 
Key: "+a+" Old: "+this._arr[c].priority+" New: "+b);this._arr[c].priority=b,this._decrease(c); +},e.prototype._heapify=function(a){var b=this._arr,c=2*a,d=c+1,e=a;c>1,!(b[d].priorityi){var j=h;h=i,i=j}return h+g+i+g+(d.isUndefined(f)?e:f)}function l(a,b,c,d){var e=""+b,f=""+c;if(!a&&e>f){var g=e;e=f,f=g}var h={v:e,w:f};return d&&(h.name=d),h}function m(a,b){return k(a,b.v,b.w,b.name)}var d=a("./lodash");b.exports=h;var e="\0",f="\0",g="";h.prototype._nodeCount=0,h.prototype._edgeCount=0,h.prototype.isDirected=function(){return this._isDirected},h.prototype.isMultigraph=function(){return this._isMultigraph},h.prototype.isCompound=function(){return this._isCompound},h.prototype.setGraph=function(a){return this._label=a,this},h.prototype.graph=function(){return this._label},h.prototype.setDefaultNodeLabel=function(a){return d.isFunction(a)||(a=d.constant(a)),this._defaultNodeLabelFn=a,this},h.prototype.nodeCount=function(){return this._nodeCount},h.prototype.nodes=function(){return d.keys(this._nodes)},h.prototype.sources=function(){return d.filter(this.nodes(),function(a){return d.isEmpty(this._in[a])},this)},h.prototype.sinks=function(){return d.filter(this.nodes(),function(a){return d.isEmpty(this._out[a])},this)},h.prototype.setNodes=function(a,b){var c=arguments;return d.each(a,function(a){c.length>1?this.setNode(a,b):this.setNode(a)},this),this},h.prototype.setNode=function(a,b){return d.has(this._nodes,a)?(arguments.length>1&&(this._nodes[a]=b),this):(this._nodes[a]=arguments.length>1?b:this._defaultNodeLabelFn(a),this._isCompound&&(this._parent[a]=f,this._children[a]={},this._children[f][a]=!0),this._in[a]={},this._preds[a]={},this._out[a]={},this._sucs[a]={},++this._nodeCount,this)},h.prototype.node=function(a){return this._nodes[a]},h.prototype.hasNode=function(a){return d.has(this._nodes,a)},h.prototype.removeNode=function(a){var b=this;if(d.has(this._nodes,a)){var c=function(a){b.removeEdge(b._edgeObjs[a])};delete this._nodes[a],this._isCompound&&(this._removeFromParentsChildList(a),delete this._parent[a],d.each(this.children(a),function(a){this.setParent(a)},this),delete this._children[a]),d.each(d.keys(this._in[a]),c),delete this._in[a],delete this._preds[a],d.each(d.keys(this._out[a]),c),delete this._out[a],delete this._sucs[a],--this._nodeCount}return this},h.prototype.setParent=function(a,b){if(!this._isCompound)throw new Error("Cannot set parent in a non-compound graph");if(d.isUndefined(b))b=f;else{b+="";for(var c=b;!d.isUndefined(c);c=this.parent(c))if(c===a)throw new Error("Setting "+b+" as parent of "+a+" would create create a cycle");this.setNode(b)}return this.setNode(a),this._removeFromParentsChildList(a),this._parent[a]=b,this._children[b][a]=!0,this},h.prototype._removeFromParentsChildList=function(a){delete this._children[this._parent[a]][a]},h.prototype.parent=function(a){if(this._isCompound){var b=this._parent[a];if(b!==f)return b}},h.prototype.children=function(a){if(d.isUndefined(a)&&(a=f),this._isCompound){var b=this._children[a];if(b)return d.keys(b)}else{if(a===f)return this.nodes();if(this.hasNode(a))return[]}},h.prototype.predecessors=function(a){var b=this._preds[a];if(b)return d.keys(b)},h.prototype.successors=function(a){var b=this._sucs[a];if(b)return d.keys(b)},h.prototype.neighbors=function(a){var b=this.predecessors(a);if(b)return d.union(b,this.successors(a))},h.prototype.filterNodes=function(a){function f(a){var d=c.parent(a);return void 0===d||b.hasNode(d)?(e[a]=d,d):d in e?e[d]:f(d)}var b=new 
this.constructor({directed:this._isDirected,multigraph:this._isMultigraph,compound:this._isCompound});b.setGraph(this.graph()),d.each(this._nodes,function(c,d){a(d)&&b.setNode(d,c)},this),d.each(this._edgeObjs,function(a){b.hasNode(a.v)&&b.hasNode(a.w)&&b.setEdge(a,this.edge(a))},this);var c=this,e={};return this._isCompound&&d.each(b.nodes(),function(a){b.setParent(a,f(a))}),b},h.prototype.setDefaultEdgeLabel=function(a){return d.isFunction(a)||(a=d.constant(a)),this._defaultEdgeLabelFn=a,this},h.prototype.edgeCount=function(){return this._edgeCount},h.prototype.edges=function(){return d.values(this._edgeObjs)},h.prototype.setPath=function(a,b){var c=this,e=arguments;return d.reduce(a,function(a,d){return e.length>1?c.setEdge(a,d,b):c.setEdge(a,d),d}),this},h.prototype.setEdge=function(){var a,b,c,e,f=!1,g=arguments[0];"object"==typeof g&&null!==g&&"v"in g?(a=g.v,b=g.w,c=g.name,2===arguments.length&&(e=arguments[1],f=!0)):(a=g,b=arguments[1],c=arguments[3],arguments.length>2&&(e=arguments[2],f=!0)),a=""+a,b=""+b,d.isUndefined(c)||(c=""+c);var h=k(this._isDirected,a,b,c);if(d.has(this._edgeLabels,h))return f&&(this._edgeLabels[h]=e),this;if(!d.isUndefined(c)&&!this._isMultigraph)throw new Error("Cannot set a named edge when isMultigraph = false");this.setNode(a),this.setNode(b),this._edgeLabels[h]=f?e:this._defaultEdgeLabelFn(a,b,c);var j=l(this._isDirected,a,b,c);return a=j.v,b=j.w,Object.freeze(j),this._edgeObjs[h]=j,i(this._preds[b],a),i(this._sucs[a],b),this._in[b][h]=j,this._out[a][h]=j,this._edgeCount++,this},h.prototype.edge=function(a,b,c){var d=1===arguments.length?m(this._isDirected,arguments[0]):k(this._isDirected,a,b,c);return this._edgeLabels[d]},h.prototype.hasEdge=function(a,b,c){var e=1===arguments.length?m(this._isDirected,arguments[0]):k(this._isDirected,a,b,c);return d.has(this._edgeLabels,e)},h.prototype.removeEdge=function(a,b,c){var d=1===arguments.length?m(this._isDirected,arguments[0]):k(this._isDirected,a,b,c),e=this._edgeObjs[d];return e&&(a=e.v,b=e.w,delete this._edgeLabels[d],delete this._edgeObjs[d],j(this._preds[b],a),j(this._sucs[a],b),delete this._in[b][d],delete this._out[a][d],this._edgeCount--),this},h.prototype.inEdges=function(a,b){var c=this._in[a];if(c){var e=d.values(c);return b?d.filter(e,function(a){return a.v===b}):e}},h.prototype.outEdges=function(a,b){var c=this._out[a];if(c){var e=d.values(c);return b?d.filter(e,function(a){return a.w===b}):e}},h.prototype.nodeEdges=function(a,b){var c=this.inEdges(a,b);if(c)return c.concat(this.outEdges(a,b))}},{"./lodash":20}],18:[function(a,b,c){b.exports={Graph:a("./graph"),version:a("./version")}},{"./graph":17,"./version":21}],19:[function(a,b,c){function f(a){var b={options:{directed:a.isDirected(),multigraph:a.isMultigraph(),compound:a.isCompound()},nodes:g(a),edges:h(a)};return d.isUndefined(a.graph())||(b.value=d.clone(a.graph())),b}function g(a){return d.map(a.nodes(),function(b){var c=a.node(b),e=a.parent(b),f={v:b};return d.isUndefined(c)||(f.value=c),d.isUndefined(e)||(f.parent=e),f})}function h(a){return d.map(a.edges(),function(b){var c=a.edge(b),e={v:b.v,w:b.w};return d.isUndefined(b.name)||(e.name=b.name),d.isUndefined(c)||(e.value=c),e})}function i(a){var b=new e(a.options).setGraph(a.value);return d.each(a.nodes,function(a){b.setNode(a.v,a.value),a.parent&&b.setParent(a.v,a.parent)}),d.each(a.edges,function(a){b.setEdge({v:a.v,w:a.w,name:a.name},a.value)}),b}var d=a("./lodash"),e=a("./graph");b.exports={write:f,read:i}},{"./graph":17,"./lodash":20}],20:[function(a,b,c){var 
d;if("function"==typeof a)try{d=a("lodash")}catch(a){}d||(d=window._),b.exports=d},{lodash:void 0}],21:[function(a,b,c){b.exports="1.0.7"},{}]},{},[1]);var tf;!function(a){function c(a,b){var c=Date.now(),d=b();return console.log(a,":",Date.now()-c,"ms"),d}function d(a,b,c){return{setMessage:function(b){a.setMessage(c+" : "+b)},updateProgress:function(c){a.updateProgress(c*b/100)},reportError:function(b){a.reportError(c+" : "+b)}}}function e(c,d,e,f){return new Promise(function(g,h){f.setMessage(c),setTimeout(function(){try{var b=a.time(c,e);f.updateProgress(d),g(b)}catch(a){h(b)}},b)})}function f(a){return a.replace(/([:.\[\],\/\\\(\)])/g,"\\$1")}var b=20;a.time=c,a.getSubtaskTracker=d,a.runAsyncTask=e,a.escapeQuerySelector=f}(tf||(tf={}));var tf;!function(a){var b;!function(b){function i(a,b){return void 0===b&&(b={}),new l(a,b)}function j(a,c){_.each(c.devStats,function(c){_.each(c.nodeStats,function(c){var d=c.nodeName in a.nodes?c.nodeName:c.nodeName+b.NAMESPACE_DELIM+"("+c.nodeName+")";if(d in a.nodes){var e=0;c.memory&&_.each(c.memory,function(a){a.totalBytes&&(e+=Number(a.totalBytes))});var f=null;c.output&&(f=_.map(c.output,function(a){return _.map(a.tensorDescription.shape.dim,function(a){return Number(a.size)})})),a.nodes[d].stats=new k(e,Number(c.allEndRelMicros),f)}})})}function m(a,b){return new n(a,b)}function o(a,b,c,d,e){return new q(a,b,c,d,e)}function p(a,b,c,d,e){var f="undefined"!=typeof d&&"undefined"!=typeof e?"["+d+"-"+e+"]":"#",g=a+f+b;return(c?c+"/":"")+g}function r(a){return _.reduce(a,function(a,b){var c="^"===b[0],d=b.lastIndexOf(":"),e=d!==-1&&b.length-d>1&&!/\D/.test(b.substring(d+1))?d:b.length,f=b.substring(c?1:0,e);return 0!==a.length&&f===a[a.length-1].name||a.push({name:f,hasNumberPart:e!==b.length,isControlDependency:c}),a},[])}function s(b,c,d){var e={},g={},i=u(c.inEmbeddingTypes),j=u(c.outEmbeddingTypes),k=[],l=new Array(b.length);return a.runAsyncTask("Normalizing names",30,function(){var a=new Array(b.length),c=0;return _.each(b,function(b){var d=r(b.input),f=new h(b,d);return i(f)?(k.push(f.name),void(e[f.name]=f)):j(f)?(k.push(f.name),void _.each(f.inputs,function(a){var b=a.name;g[b]=g[b]||[],g[b].push(f)})):(a[c]=f,l[c]=f.name,void c++)}),a.splice(c),l.splice(c),a},d).then(function(b){return a.runAsyncTask("Building the data structure",70,function(){var a=w(l,k),d=new f;return _.each(b,function(b){var c=a[b.name]||b.name;d.nodes[c]=b,b.name in g&&(b.outEmbeddings=g[b.name],_.each(b.outEmbeddings,function(b){b.name=a[b.name]||b.name})),b.name=c}),_.each(b,function(b){_.each(b.inputs,function(f,g){var h=f.name;h in e?b.inEmbeddings.push(e[h]):d.edges.push({v:a[h]||h,w:b.name,isControlDependency:f.isControlDependency,isReferenceEdge:c.refEdges[b.op+" "+g]===!0})})}),_.each(e,function(b,c){b.name=a[b.name]||b.name}),d},d)}).catch(function(a){throw new Error("Failure creating graph")})}function t(a,b,c){void 0===c&&(c={});var d=new graphlib.Graph(c);return d.setGraph({name:a,rankdir:"BT",type:b}),d}function u(a){return function(b){for(var c=0;c=0;)d.push(a.substring(0,e)),e=a.indexOf(b.NAMESPACE_DELIM,e+1);if(c){var f=c[a];f&&d.push(f)}return d.push(a),d}b.NAMESPACE_DELIM="/";b.ROOT_NAME="__root__",b.EDGE_KEY_DELIM="--",function(a){a[a.FULL=0]="FULL",a[a.EMBEDDED=1]="EMBEDDED",a[a.META=2]="META",a[a.SERIES=3]="SERIES",a[a.CORE=4]="CORE",a[a.SHADOW=5]="SHADOW",a[a.BRIDGE=6]="BRIDGE",a[a.EDGE=7]="EDGE"}(b.GraphType||(b.GraphType={}));var 
d=b.GraphType;!function(a){a[a.META=0]="META",a[a.OP=1]="OP",a[a.SERIES=2]="SERIES",a[a.BRIDGE=3]="BRIDGE",a[a.ELLIPSIS=4]="ELLIPSIS"}(b.NodeType||(b.NodeType={}));var e=b.NodeType,f=function(){function a(){this.nodes={},this.edges=[]}return a}();b.SlimGraph=f;var g=function(){function a(a){this.type=e.ELLIPSIS,this.isGroupNode=!1,this.cardinality=1,this.parentNode=null,this.stats=null,this.setNumMoreNodes(a)}return a.prototype.setNumMoreNodes=function(a){this.numMoreNodes=a,this.name="... "+a+" more"},a}();b.EllipsisNodeImpl=g;var h=function(){function a(a,b){this.op=a.op,this.name=a.name,this.device=a.device,this.attr=a.attr,this.inputs=b,this.type=e.OP,this.isGroupNode=!1,this.cardinality=1,this.inEmbeddings=[],this.outEmbeddings=[],this.parentNode=null}return a}();b.createMetanode=i,b.joinStatsInfoWithGraph=j;var k=function(){function a(a,b,c){this.totalBytes=a,this.totalMicros=b,this.outputSize=c}return a.prototype.combine=function(a){null!=a.totalBytes&&(this.totalBytes+=a.totalBytes),null!=a.totalMicros&&(this.totalMicros+=a.totalMicros)},a}(),l=function(){function a(a,b){void 0===b&&(b={}),this.name=a,this.type=e.META,this.depth=1,this.isGroupNode=!0,this.cardinality=0,this.metagraph=t(a,d.META,b),this.bridgegraph=null,this.opHistogram={},this.deviceHistogram={},this.templateId=null,this.parentNode=null,this.stats=new k(0,0,null),this.hasNonControlEdges=!1}return a.prototype.getFirstChild=function(){return this.metagraph.node(this.metagraph.nodes()[0])},a.prototype.getRootOp=function(){var a=this.name.split("/"),b=this.name+"/("+a[a.length-1]+")";return this.metagraph.node(b)},a.prototype.leaves=function(){for(var c,a=[],b=[this];b.length;){var d=b.shift();d.isGroupNode?(c=d.metagraph,_.each(c.nodes(),function(a){return b.push(c.node(a))})):a.push(d.name)}return a},a}();b.createMetaedge=m;var n=function(){function a(a,b){this.v=a,this.w=b,this.baseEdgeList=[],this.inbound=null,this.numRegularEdges=0,this.numControlEdges=0,this.numRefEdges=0}return a.prototype.addBaseEdge=function(a){this.baseEdgeList.push(a),a.isControlDependency?this.numControlEdges+=1:this.numRegularEdges+=1,a.isReferenceEdge&&(this.numRefEdges+=1)},a}();b.createSeriesNode=o,b.getSeriesNodeName=p;var q=function(){function a(a,b,c,f,g){this.name=g||p(a,b,c),this.type=e.SERIES,this.hasLoop=!1,this.prefix=a,this.suffix=b,this.clusterId=f,this.ids=[],this.parent=c,this.isGroupNode=!0,this.cardinality=0,this.metagraph=t(g,d.SERIES),this.bridgegraph=null,this.parentNode=null,this.deviceHistogram={},this.hasNonControlEdges=!1,this.stats=new k(0,0,null)}return a}();b.build=s,b.createGraph=t,b.hasSimilarDegreeSequence=y,b.getHierarchicalPath=z}(b=a.graph||(a.graph={}))}(tf||(tf={}));var tf;!function(a){var b;!function(b){var c;!function(b){function c(a){if("true"===a)return!0;if("false"===a)return!1;var b=a[0];if('"'===b)return a.substring(1,a.length-1);var c=parseFloat(a);return isNaN(c)?a:c}function d(a){return new Promise(function(b,c){d3.text(a,function(a,d){return a?void c(a):void b(d)})})}function e(a){return new Promise(function(b,c){d3.json(a,function(a,d){return a?void c(a):void b(d)})})}function f(b,c,f){var h,i;return a.runAsyncTask("Reading graph.pbtxt",20,function(){return c||d(b.path)},f).then(function(c){return h=c,a.runAsyncTask("Reading stats.pbtxt",20,function(){return null!=b&&null!=b.statsPath?e(b.statsPath):null},f)}).then(function(b){return i=b,a.runAsyncTask("Parsing graph.pbtxt",60,function(){return g(h)},f)}).then(function(a){return{nodes:a,statsJson:i}}).catch(function(a){throw new Error("Failure 
parsing graph definition")})}function g(a){function g(a){var b=a.indexOf(":"),d=a.substring(0,b).trim(),e=c(a.substring(b+2).trim());return{name:d,value:e}}function i(a,b,c,d){var e=a[b];null==e?a[b]=d.join(".")in h?[c]:c:Array.isArray(e)?e.push(c):a[b]=[e,c]}for(var b={node:[]},d=[],e=[],f=b,h={node:!0,"node.input":!0,"node.attr":!0,"node.attr.value.list.type":!0,"node.attr.value.shape.dim":!0,"node.attr.value.tensor.string_val":!0,"node.attr.value.tensor.tensor_shape.dim":!0},j=0;j1){var f=b.getSeriesNodeName(a[0].prefix,a[0].suffix,a[0].parent,a[0].clusterId,a[a.length-1].clusterId),g=b.createSeriesNode(a[0].prefix,a[0].suffix,a[0].parent,d,f);_.each(a,function(a){g.ids.push(a.clusterId),g.metagraph.setNode(a.name,e.node(a.name))}),c[f]=g}}var e=function(){function a(){this.root=b.createMetanode(b.ROOT_NAME,{compound:!0}),this.templates=null,this.devices=null,this.index={},this.index[b.ROOT_NAME]=this.root,this.orderings={}}return a.prototype.getNodeMap=function(){return this.index},a.prototype.node=function(a){return this.index[a]},a.prototype.setNode=function(a,b){this.index[a]=b},a.prototype.getBridgegraph=function(a){var c=this,d=this.index[a];if(!d)throw Error("Could not find node in hierarchy: "+a);if(!("metagraph"in d))return null;var e=d;if(e.bridgegraph)return e.bridgegraph;var f=e.bridgegraph=b.createGraph("BRIDGEGRAPH",b.GraphType.BRIDGE);if(!(d.parentNode&&"metagraph"in d.parentNode))return f;var g=d.parentNode,h=g.metagraph,i=this.getBridgegraph(g.name);return _.each([h,i],function(d){_(d.edges()).filter(function(b){return b.v===a||b.w===a}).each(function(e){var g=e.w===a,h=d.edge(e);_.each(h.baseEdgeList,function(d){var h=g?[d.w,e.v]:[d.v,e.w],i=h[0],j=h[1],k=c.getChildName(a,i),l={v:g?j:k,w:g?k:j},m=f.edge(l);m||(m=b.createMetaedge(l.v,l.w),m.inbound=g,f.setEdge(l.v,l.w,m)),m.addBaseEdge(d)})}).value()}),f},a.prototype.getChildName=function(a,b){for(var c=this.index[b];c;){if(c.parentNode&&c.parentNode.name===a)return c.name;c=c.parentNode}throw Error("Could not find immediate child for descendant: "+b)},a.prototype.getPredecessors=function(a){var b=this.index[a];if(!b)throw Error("Could not find node with name: "+a);var c=this.getOneWayEdges(b,!0);return b.isGroupNode||_.each(b.inEmbeddings,function(a){c.regular.push(a.name)}),c},a.prototype.getSuccessors=function(a){var b=this.index[a];if(!b)throw Error("Could not find node with name: "+a);var c=this.getOneWayEdges(b,!1);return b.isGroupNode||_.each(b.outEmbeddings,function(a){c.regular.push(a.name)}),c},a.prototype.getOneWayEdges=function(a,b){var c={control:[],regular:[]};if(!a.parentNode)return c;if(a.parentNode.isGroupNode){var d=a.parentNode,e=d.metagraph,g=this.getBridgegraph(d.name);f(e,a,b,c),f(g,a,b,c)}return c},a.prototype.getTopologicalOrdering=function(a){var b=this.index[a];if(!b)throw Error("Could not find node with name: "+a);if(!b.isGroupNode)return null;if(a in this.orderings)return this.orderings[a];var c={},d={},e=b.metagraph;_.each(e.edges(),function(a){e.edge(a).numRegularEdges&&(a.v in c||(c[a.v]=[]),c[a.v].push(a.w),d[a.w]=!0)});for(var f=_.difference(_.keys(c),_.keys(d)),g=this.orderings[a]={},h=0;f.length;){var i=f.shift();g[i]=h++,_.each(c[i],function(a){return f.push(a)}),delete c[i]}return g},a}();c.build=g}(c=b.hierarchy||(b.hierarchy={}))}(b=a.graph||(a.graph={}))}(tf||(tf={}));var __extends=this&&this.__extends||function(a,b){function d(){this.constructor=a}for(var c in b)b.hasOwnProperty(c)&&(a[c]=b[c]);d.prototype=b.prototype,a.prototype=new d},tf;!function(a){var b;!function(b){var 
c;!function(c){function j(a,b,c,d,f,g){var h=new e(b,c,d,f,!0);a.inAnnotations.push(h,g)}function k(a,b,c,d,f,g){var h=new e(b,c,d,f,!1);a.outAnnotations.push(h,g)}function l(a,c){_.each(a.nodes(),function(d){var e=a.node(d);if(e.expanded=c>1,c>0)switch(e.node.type){case b.NodeType.META:case b.NodeType.SERIES:n(e,c-1)}})}function n(a,b){a.coreGraph&&l(a.coreGraph,b)}function o(a,b,c,d){var e=a.node(b),g=a.node(c),h=a.edge(b,c);k(e,g.node,g,h,f.SHORTCUT,d),j(g,e.node,e,h,f.SHORTCUT,d),a.removeEdge(b,c)}function p(a,b,c){var d=a.coreGraph;d.node(b).isOutExtract=!0,_.each(d.predecessors(b),function(a,e){o(d,a,b,c)}),c.detachAllEdgesForHighDegree&&_.each(d.successors(b),function(a,e){o(d,b,a,c)}),(c.detachAllEdgesForHighDegree||0===d.neighbors(b).length)&&(a.isolatedOutExtract.push(d.node(b)),d.removeNode(b))}function q(a,b,c){var d=a.coreGraph;d.node(b).isInExtract=!0,_.each(d.successors(b),function(a,e){o(d,b,a,c)}),c.detachAllEdgesForHighDegree&&_.each(d.predecessors(b),function(a,e){o(d,a,b,c)}),(c.detachAllEdgesForHighDegree||0===d.neighbors(b).length)&&(a.isolatedInExtract.push(d.node(b)),d.removeNode(b))}function r(a,c){if(a.type===b.NodeType.OP){for(var d=0;d0&&(b=c.predecessors(a).length),b>d});_.each(e,function(c){p(a,c,b)})}function v(a,b){var c=a.coreGraph,d=b.maxOutDegree,e=_.filter(c.nodes(),function(a){var b=_.reduce(c.successors(a),function(b,d){var e=c.edge(a,d).metaedge;return b+(e.numRegularEdges?1:0)},0);return 0===b&&c.successors(a).length>0&&(b=c.successors(a).length),b>d});_.each(e,function(c){q(a,c,b)})}function w(a,b){var c=a.coreGraph,d={};_.each(c.edges(),function(a){c.edge(a).metaedge.numRegularEdges||((d[a.v]=d[a.v]||[]).push(a),(d[a.w]=d[a.w]||[]).push(a))}),_.each(d,function(a,d){a.length>b.maxControlDegree&&_.each(a,function(a){return o(c,a.v,a.w,b)})})}function x(a){var b=1.61803398875,c=1,d=359,e=d-c;return c+e*b*a%e}function y(a,b){b.outExtractTypes&&s(a,b),b.inExtractTypes&&t(a,b),b.maxInDegree&&u(a,b),b.maxOutDegree&&v(a,b),b.maxControlDegree&&w(a,b);var c=a.coreGraph;_.each(c.nodes(),function(d){var e=c.node(d),f=c.neighbors(d).length;if(0===f){var g=e.outAnnotations.list.length>0,h=e.inAnnotations.list.length>0;e.isInExtract?(a.isolatedInExtract.push(e),c.removeNode(d)):e.isOutExtract?(a.isolatedOutExtract.push(e),c.removeNode(d)):b.extractIsolatedNodesWithAnnotationsOnOneSide&&(g&&!h?(e.isInExtract=!0,a.isolatedInExtract.push(e),c.removeNode(d)):h&&!g&&(e.isOutExtract=!0,a.isolatedOutExtract.push(e),c.removeNode(d)))}})}c.MetanodeColors={SATURATION:.6,LIGHTNESS:.85,EXPANDED_COLOR:"#f0f0f0",HUES:[220,100,180,40,20,340,260,300,140,60],STRUCTURE_PALETTE:function(a,b){var d=c.MetanodeColors.HUES,e=d.length,f=d[a%e],g=Math.sin(f*Math.PI/360),h=b?30:90-60*g,i=b?95:80;return d3.hsl(f,.01*h,.01*i).toString()},DEVICE_PALETTE:function(a){return c.MetanodeColors.STRUCTURE_PALETTE(a)},UNKNOWN:"#eee",GRADIENT_OUTLINE:"#888"};var d=function(){function a(a,b){this.hierarchy=a,this.index={},this.deviceColorMap=d3.scale.ordinal().domain(a.devices).range(_.map(d3.range(a.devices.length),c.MetanodeColors.DEVICE_PALETTE));var d=a.root.metagraph,e=d3.extent(d.nodes(),function(a,b){var c=d.node(a);if(null!=c.stats)return c.stats.totalBytes});this.memoryUsageScale=d3.scale.linear().domain(e).range(b.minMaxColors);var f=d3.extent(d.nodes(),function(a,b){var c=d.node(a);if(null!=c.stats)return c.stats.totalMicros});this.computeTimeScale=d3.scale.linear().domain(f).range(b.minMaxColors),this.hasSubhierarchy={},this.params=b,this.root=new 
m(a.root),this.index[a.root.name]=this.root,this.buildSubhierarchy(a.root.name),this.root.expanded=!0}return a.prototype.getRenderNodeByName=function(a){return this.index[a]},a.prototype.getNearestVisibleAncestor=function(a){for(var c=b.getHierarchicalPath(a),d=0;d0){var o=_.sum(g,_.last);d.deviceColors=_.map(g,function(a){return{color:c.deviceColorMap(a[0]),proportion:a[1]/o}})}}else{_.each(b.inEmbeddings,function(a){var b=new i(null);j(d,a,null,b,f.CONSTANT,c.params),c.index[a.name]=new h(a)}),_.each(b.outEmbeddings,function(a){var b=new i(null);k(d,a,null,b,f.SUMMARY,c.params),c.index[a.name]=new h(a)});var e=d.node.device;null!=e&&(d.deviceColors=[{color:c.deviceColorMap(e),proportion:1}])}}),_.each(l.edges(),function(a){var b=l.edge(a),c=new i(b);n.setEdge(a.v,a.w,c)}),this.params.enableExtraction&&g.node.type===b.NodeType.META&&y(g,this.params),this.hasSubhierarchy[a]=!0;var o=g.node.parentNode;if(o){var p=this.index[o.name],q=function(a){for(var b=[],c=1;cc.params.maxControlDegree,A=k?[d.inAnnotations,w.inAnnotations]:[d.outAnnotations,w.outAnnotations],C=(A[0],A[1]),D=k?s.out[v]>c.params.maxOutDegree:s.in[v]>c.params.maxInDegree,E=null,F=!1;if(c.params.enableBridgegraph&&!D&&!z&&w.isInCore()){var G=function(b){var c=k?{v:b,w:a}:{v:a,w:b};return p.coreGraph.edge(c)};E=G(v),E||(E=G(q(k,v,o.name))),F=!!E}var H=!1;if(E&&!j.numRegularEdges){for(var I=E,J=p.node;I.adjoiningMetaedge;)I=I.adjoiningMetaedge,J=J.parentNode;var K=c.hierarchy.getTopologicalOrdering(J.name),L=I.metaedge;H=K[L.v]>K[L.w]}if(F=F&&!H,!F)return void C.push(new e(y,x,new i(j),f.SHORTCUT,k),c.params);var M=q(k,a),N=q(k,v,a),O=n.node(N);if(!O){var P=n.node(M);if(!P){var Q={name:M,type:b.NodeType.BRIDGE,isGroupNode:!1,cardinality:0,parentNode:null,stats:null,inbound:k};P=new h(Q),c.index[M]=P,n.setNode(M,P)}var R={name:N,type:b.NodeType.BRIDGE,isGroupNode:!1,cardinality:1,parentNode:null,stats:null,inbound:k};O=new h(R),c.index[N]=O,n.setNode(N,O),n.setParent(N,M),P.node.cardinality++}var S=new i(j);S.adjoiningMetaedge=E,k?n.setEdge(N,u,S):n.setEdge(u,N,S)}),_.each([!0,!1],function(d){var e=q(d,a),f=n.node(e);f&&_.each(n.nodes(),function(g){var j=n.node(g);if(j.node.type!==b.NodeType.BRIDGE){var k=d?!n.predecessors(g).length:!n.successors(g).length;if(k){var l=q(d,a,"STRUCTURAL_TARGET"),m=n.node(l);if(!m){var o={name:l,type:b.NodeType.BRIDGE,isGroupNode:!1,cardinality:1,parentNode:null,stats:null,inbound:d};m=new h(o),m.structural=!0,c.index[l]=m,n.setNode(l,m),f.node.cardinality++,n.setParent(l,e)}var p=new i(null);p.structural=!0,p.weight--,d?n.setEdge(l,g,p):n.setEdge(g,l,p)}}})})}}}},a}();c.RenderGraphInformation=d;var e=function(){function a(a,b,c,d,e){this.node=a,this.renderNodeInfo=b,this.renderMetaedgeInfo=c,this.annotationType=d,this.dx=0,this.dy=0,this.width=0,this.height=0,this.isIn=e,this.points=[]}return a}();c.Annotation=e,function(a){a[a.SHORTCUT=0]="SHORTCUT",a[a.CONSTANT=1]="CONSTANT",a[a.SUMMARY=2]="SUMMARY",a[a.ELLIPSIS=3]="ELLIPSIS"}(c.AnnotationType||(c.AnnotationType={}));var f=c.AnnotationType,g=function(){function b(){this.list=[],this.nodeNames={}}return b.prototype.push=function(b,c){if(!(b.node.name in this.nodeNames)){if(this.nodeNames[b.node.name]=!0,this.list.lengthf&&(h.level=f),a},{});return _(c).pairs().filter(function(a){return a[1].nodes.length>1}).sortBy(function(a){return a[1].nodes[0].depth}).value()}function g(a,b){return _.reduce(a,function(a,c){var d=c[0],e=c[1].nodes,f=[];return e.forEach(function(a){for(var c=0;c0;){var 
p=i.pop(),q=j(b.node(p.n1),c.node(p.n2));if(!q)return!1;var r=b.successors(p.n1),s=c.successors(p.n2);if(r.length!==s.length)return console.log("# of successors mismatch",r,s),!1;r=h(r,b,d),s=h(s,c,e);for(var t=0;tc},m=c.getBoundingClientRect();if(l(j.x,k.x,m.width)||l(j.y,k.y,m.height)){var n=(j.x+k.x)/2,o=(j.y+k.y)/2,p=m.width/2-n,q=m.height/2-o,r=e.translate([g[0]+p,g[1]+q]).event;return d3.select(d).transition().duration(500).call(r),!0}return!1}function e(a,b,c,d){var e=f(a,b,c);if(!e.empty())return e;var g=document.createElementNS("http://www.w3.org/2000/svg",b);return c&&g.classList.add(c),d?a.node().insertBefore(g,d):a.node().appendChild(g),d3.select(g).datum(a.datum())}function f(a,b,c){for(var d=a.node().childNodes,e=0;e0){var n=e(k,"g",b.Class.Scene.INEXTRACT);b.node.buildGroup(n,d.isolatedInExtract,g)}else f(k,"g",b.Class.Scene.INEXTRACT).remove();if(d.isolatedOutExtract.length>0){var o=e(k,"g",b.Class.Scene.OUTEXTRACT);b.node.buildGroup(o,d.isolatedOutExtract,g)}else f(k,"g",b.Class.Scene.OUTEXTRACT).remove();return h(k,d),j&&k.attr("opacity",0).transition().attr("opacity",1),k}function h(c,d){var e=d.node.type===a.NodeType.SERIES?0:a.layout.PARAMS.subscene.meta.labelHeight;j(f(c,"g",b.Class.Scene.CORE),0,e);var g=0===d.coreBox.width?0:d.coreBox.width,h=d.isolatedInExtract.length>0;h&&j(f(c,"g",b.Class.Scene.INEXTRACT),g,e);var i=d.isolatedOutExtract.length>0;if(i){var k=g+d.inExtractBox.width+d.extractXOffset;j(f(c,"g",b.Class.Scene.OUTEXTRACT),k,e)}}function i(a,b){d3.select(a).on("click",function(){b.fire("graph-select")})}function j(a,b,c){a.attr("transform","translate("+b+","+c+")")}function k(a,b,c,d,e){a.transition().attr({x:b-d/2,y:c-e/2,width:d,height:e})}function l(b,c){var d=c.x+c.width/2-6,e=c.y-c.height/2+6;c.node.type!==a.NodeType.SERIES||c.expanded||(d+=10,e-=2);var f="translate("+d+","+e+")";b.selectAll("path").transition().attr("transform",f),b.select("circle").transition().attr({cx:d,cy:e,r:a.layout.PARAMS.nodeSize.meta.expandButtonRadius})}function m(a,b,c,d,e){a.transition().attr({cx:b,cy:c,rx:d/2,ry:e/2})}b.Class={Node:{CONTAINER:"nodes",GROUP:"node",SHAPE:"nodeshape",COLOR_TARGET:"nodecolortarget",LABEL:"nodelabel",BUTTON_CONTAINER:"buttoncontainer",BUTTON_CIRCLE:"buttoncircle",EXPAND_BUTTON:"expandbutton",COLLAPSE_BUTTON:"collapsebutton"},Edge:{CONTAINER:"edges",GROUP:"edge",LINE:"edgeline",REF_LINE:"refline",STRUCTURAL:"structural"},Annotation:{OUTBOX:"out-annotations",INBOX:"in-annotations",GROUP:"annotation",NODE:"annotation-node",EDGE:"annotation-edge",CONTROL_EDGE:"annotation-control-edge",LABEL:"annotation-label",ELLIPSIS:"annotation-ellipsis"},Scene:{GROUP:"scene",CORE:"core",INEXTRACT:"in-extract",OUTEXTRACT:"out-extract"},Subscene:{GROUP:"subscene"},OPNODE:"op",METANODE:"meta",SERIESNODE:"series",BRIDGENODE:"bridge",ELLIPSISNODE:"ellipsis"},b.fit=c,b.panToNode=d,b.selectOrCreateChild=e,b.selectChild=f,b.buildGroup=g,b.addGraphClickListener=i,b.translate=j,b.positionRect=k,b.positionButton=l,b.positionEllipse=m}(b=a.scene||(a.scene={}))}(b=a.graph||(a.graph={}))}(tf||(tf={}));var tf;!function(a){var b;!function(b){var c;!function(b){var c;!function(c){function d(c,d,k,l){var m=c.selectAll(function(){return this.childNodes}).data(d.list,function(a){return a.node.name});return m.enter().append("g").attr("data-name",function(a){return a.node.name}).each(function(c){var d=d3.select(this);l.addAnnotationGroup(c,k,d);var e=b.Class.Annotation.EDGE,i=c.renderMetaedgeInfo&&c.renderMetaedgeInfo.metaedge;i&&!i.numRegularEdges&&(e+=" 
"+b.Class.Annotation.CONTROL_EDGE),i&&i.numRefEdges&&(e+=" "+b.Class.Edge.REF_LINE),b.edge.appendEdge(d,c,l,e),c.annotationType!==a.graph.render.AnnotationType.ELLIPSIS?(g(d,c),f(d,c,l)):h(d,c.node.name,c,b.Class.Annotation.ELLIPSIS)}),m.attr("class",function(a){return b.Class.Annotation.GROUP+" "+e(a.annotationType)+" "+b.node.nodeClass(a)}).each(function(b){var c=d3.select(this);j(c,k,b,l),b.annotationType!==a.graph.render.AnnotationType.ELLIPSIS&&i(c,k,l)}),m.exit().each(function(a){var b=d3.select(this);l.removeAnnotationGroup(a,k,b)}).remove(),m}function e(b){return(a.graph.render.AnnotationType[b]||"").toLowerCase()||null}function f(c,d,e){if(d.annotationType===a.graph.render.AnnotationType.SUMMARY){var f=b.selectOrCreateChild(c,"image");f.attr({"xlink:href":"../../../static/tb/summary-icon.svg",height:"12px",width:"12px",cursor:"pointer"})}else{var g=b.node.buildShape(c,d,b.Class.Annotation.NODE);b.selectOrCreateChild(g,"title").text(d.node.name)}}function g(a,b){var c=b.node.name.split("/"),d=c[c.length-1],e=d.length>8?d.substring(0,8)+"...":d;return h(a,e,b,null,d)}function h(a,c,d,e,f){var g=b.Class.Annotation.LABEL;e&&(g+=" "+e);var h=f?f:c;return a.append("text").attr("class",g).attr("dy",".35em").attr("text-anchor",d.isIn?"end":"start").text(c).append("title").text(h)}function i(a,b,c){a.on("mouseover",function(a){c.fire("annotation-highlight",{name:a.node.name,hostName:b.node.name})}).on("mouseout",function(a){c.fire("annotation-unhighlight",{name:a.node.name,hostName:b.node.name})}).on("click",function(a){d3.event.stopPropagation(),c.fire("annotation-select",{name:a.node.name,hostName:b.node.name})})}function j(c,d,e,f){e.renderNodeInfo&&e.annotationType!==a.graph.render.AnnotationType.ELLIPSIS&&b.node.stylize(c,e.renderNodeInfo,f,b.Class.Annotation.NODE),e.annotationType===a.graph.render.AnnotationType.SUMMARY&&(e.width+=10),c.select("text."+b.Class.Annotation.LABEL).transition().attr({x:d.x+e.dx+(e.isIn?-1:1)*(e.width/2+e.labelOffset),y:d.y+e.dy}),c.select("image").transition().attr({x:d.x+e.dx-3,y:d.y+e.dy-6}),b.positionEllipse(c.select("."+b.Class.Annotation.NODE+" ellipse"),d.x+e.dx,d.y+e.dy,e.width,e.height),b.positionRect(c.select("."+b.Class.Annotation.NODE+" rect"),d.x+e.dx,d.y+e.dy,e.width,e.height),b.positionRect(c.select("."+b.Class.Annotation.NODE+" use"),d.x+e.dx,d.y+e.dy,e.width,e.height),c.select("path."+b.Class.Annotation.EDGE).transition().attr("d",function(a){var c=a.points.map(function(a){return{x:a.dx+d.x,y:a.dy+d.y}});return b.edge.interpolate(c)})}c.buildGroup=d}(c=b.annotation||(b.annotation={}))}(c=b.scene||(b.scene={}))}(b=a.graph||(a.graph={}))}(tf||(tf={}));var tf;!function(a){var b;!function(b){var c;!function(b){var c;!function(c){function e(b){return b.v+a.graph.EDGE_KEY_DELIM+b.w}function f(a,c,d){var f=_.reduce(c.edges(),function(a,b){var d=c.edge(b);return a.push({v:b.v,w:b.w,label:d}),a},[]),h=b.selectOrCreateChild(a,"g",b.Class.Edge.CONTAINER),l=(h.node(),h.selectAll(function(){return this.childNodes}).data(f,e));return l.enter().append("g").attr("class",b.Class.Edge.GROUP).attr("data-edge",e).each(function(a){var c=d3.select(this);a.label.edgeGroup=c,d._edgeGroupIndex[e(a)]=c;var f=a.label.metaedge&&a.label.metaedge.numRefEdges?b.Class.Edge.REF_LINE+" "+b.Class.Edge.LINE:void 0;g(c,a,b,f)}),l.each(i),l.each(function(a){j(d3.select(this),a,d)}),l.exit().each(function(a){delete d._edgeGroupIndex[e(a)]}).remove(),l}function g(a,c,d,e){e=e||b.Class.Edge.LINE,c.label&&c.label.structural&&(e+=" 
"+b.Class.Edge.STRUCTURAL),a.append("path").attr("class",e)}function h(a,b,d){var e=a.label,f=e.adjoiningMetaedge;if(!f)return d3.interpolate(d,c.interpolate(e.points));var g=this,h=f.edgeGroup.node().firstChild,i=e.metaedge.inbound;return function(a){var b=h.getPointAtLength(i?h.getTotalLength():0).matrixTransform(h.getCTM()).matrixTransform(g.getCTM().inverse()),d=e.points,f=i?0:d.length-1;d[f].x=b.x,d[f].y=b.y;var j=c.interpolate(d);return j}}function i(a){d3.select(this).select("path."+b.Class.Edge.LINE).each(function(a){var b=d3.select(this);b.transition().attrTween("d",h)})}function j(a,c,d){var f=c.label.metaedge;a.select("path."+b.Class.Edge.LINE).classed("control-dep",f&&!f.numRegularEdges)}a.graph.scene;c.getEdgeKey=e,c.buildGroup=f,c.appendEdge=g,c.interpolate=d3.svg.line().interpolate("basis").x(function(a){return a.x}).y(function(a){return a.y})}(c=b.edge||(b.edge={}))}(c=b.scene||(b.scene={}))}(b=a.graph||(a.graph={}))}(tf||(tf={}));var tf;!function(a){var b;!function(b){var c;!function(c){var d;!function(d){function e(a,d,e){var g=c.selectOrCreateChild(a,"g",c.Class.Node.CONTAINER),k=g.selectAll(function(){return this.childNodes}).data(d,function(a){return a.node.name+":"+a.node.type});return k.enter().append("g").attr("data-name",function(a){return a.node.name}).each(function(a){var b=d3.select(this);e.addNodeGroup(a.node.name,b)}),k.attr("class",function(a){return c.Class.Node.GROUP+" "+o(a)}).each(function(a){var d=d3.select(this),g=c.selectOrCreateChild(d,"g",c.Class.Annotation.INBOX);c.annotation.buildGroup(g,a.inAnnotations,a,e);var k=c.selectOrCreateChild(d,"g",c.Class.Annotation.OUTBOX);c.annotation.buildGroup(k,a.outAnnotations,a,e);var l=j(d,a,e);i(l,a,e,a.node.type===b.NodeType.META);var m=n(d,a,c.Class.Node.SHAPE,l.node());a.node.isGroupNode&&h(m,a,e),i(m,a,e),f(d,a,e),s(d,a,e),p(d,a,e)}),k.exit().each(function(a){e.removeNodeGroup(a.node.name);var b=d3.select(this);a.inAnnotations.list.length>0&&b.select("."+c.Class.Annotation.INBOX).selectAll("."+c.Class.Annotation.GROUP).each(function(b){e.removeAnnotationGroup(b,a)}),a.outAnnotations.list.length>0&&b.select("."+c.Class.Annotation.OUTBOX).selectAll("."+c.Class.Annotation.GROUP).each(function(b){e.removeAnnotationGroup(b,a)})}).remove(),k}function f(a,b,d){if(b.node.isGroupNode){if(b.expanded)return c.buildGroup(a,b,d,c.Class.Subscene.GROUP);c.selectChild(a,"g",c.Class.Subscene.GROUP).remove()}return null}function g(a,b){var d=b.x-b.width/2+b.paddingLeft,e=b.y-b.height/2+b.paddingTop,f=c.selectChild(a,"g",c.Class.Subscene.GROUP);c.translate(f,d,e)}function h(a,b,d){var e=c.selectOrCreateChild(a,"g",c.Class.Node.BUTTON_CONTAINER);c.selectOrCreateChild(e,"circle",c.Class.Node.BUTTON_CIRCLE),c.selectOrCreateChild(e,"path",c.Class.Node.EXPAND_BUTTON).attr("d","M0,-2.2 V2.2 M-2.2,0 H2.2"),c.selectOrCreateChild(e,"path",c.Class.Node.COLLAPSE_BUTTON).attr("d","M-2.2,0 H2.2"),e.on("click",function(a){d3.event.stopPropagation(),d.fire("node-toggle-expand",{name:a.node.name})}),c.positionButton(e,b)}function i(a,b,c,d){return d?void a.attr("pointer-events","none"):void a.on("dblclick",function(a){c.fire("node-toggle-expand",{name:a.node.name})}).on("mouseover",function(a){c.isNodeExpanded(a)||c.fire("node-highlight",{name:a.node.name})}).on("mouseout",function(a){c.isNodeExpanded(a)||c.fire("node-unhighlight",{name:a.node.name})}).on("click",function(a){d3.event.stopPropagation(),c.fire("node-select",{name:a.node.name})})}function j(a,d,e){var 
[Minified third-party JavaScript bundle for the TensorBoard-style graph visualization shipped with this change. This portion of the bundle covers, in order: the tf.graph.scene.node helpers for drawing and styling graph nodes (label sizing, node shapes per node type, fill colors by structure/device/compute time/memory, highlight and selection classes); the tf.graph.layout module with its dagre-based sub-scene layout and size/padding parameters; the node color palettes and op-group color assignments; the graph minimap; the dagre/graphlib layout engine (acyclic, nesting, rank, order, position and normalization passes) with its lodash shim; and the Polymer behaviors and elements used by the UI (iron-resizable, iron-range, iron-selection/iron-selectable, iron-overlay, neon-animation, iron-dropdown scroll manager, paper-ripple, paper-input) together with the web-animations polyfill.]
is-highlighted"),c}}),Polymer({is:"paper-input-error",behaviors:[Polymer.PaperInputAddonBehavior],properties:{invalid:{readOnly:!0,reflectToAttribute:!0,type:Boolean}},update:function(a){this._setInvalid(a.invalid)}}),Polymer({is:"paper-input-char-counter",behaviors:[Polymer.PaperInputAddonBehavior],properties:{_charCounterStr:{type:String,value:"0"}},update:function(a){if(a.inputElement){a.value=a.value||"";var b=a.value.replace(/(\r\n|\n|\r)/g,"--").length;a.inputElement.hasAttribute("maxlength")&&(b+="/"+a.inputElement.getAttribute("maxlength")),this._charCounterStr=b}}}),Polymer({is:"paper-input",behaviors:[Polymer.IronFormElementBehavior,Polymer.PaperInputBehavior,Polymer.IronControlState]}),Polymer({is:"paper-toggle-button",behaviors:[Polymer.PaperCheckedElementBehavior],hostAttributes:{role:"button","aria-pressed":"false",tabindex:0},properties:{},listeners:{track:"_ontrack"},_ontrack:function(a){var b=a.detail;"start"===b.state?this._trackStart(b):"track"===b.state?this._trackMove(b):"end"===b.state&&this._trackEnd(b)},_trackStart:function(a){this._width=this.$.toggleBar.offsetWidth/2,this._trackChecked=this.checked,this.$.toggleButton.classList.add("dragging")},_trackMove:function(a){var b=a.dx;this._x=Math.min(this._width,Math.max(0,this._trackChecked?this._width+b:b)),this.translate3d(this._x+"px",0,0,this.$.toggleButton),this._userActivate(this._x>this._width/2)},_trackEnd:function(a){this.$.toggleButton.classList.remove("dragging"),this.transform("",this.$.toggleButton)},_createRipple:function(){this._rippleContainer=this.$.toggleButton;var a=Polymer.PaperRippleBehavior._createRipple();return a.id="ink",a.setAttribute("recenters",""),a.classList.add("circle","toggle-ink"),a}}),Polymer({is:"tf-graph-minimap",init:function(a,b,c,d,e){return new tf.scene.Minimap(a,b,c,this,d,e)}}),Polymer({is:"tf-graph-scene",properties:{graphHierarchy:Object,name:String,colorBy:{type:String,observer:"_colorByChanged"},_zoom:Object,highlightedNode:{type:String,observer:"_highlightedNodeChanged"},selectedNode:{type:String,observer:"_selectedNodeChanged"},_zoomed:{type:Boolean,observer:"_onZoomChanged",value:!1},_zoomStartCoords:{type:Array,value:null},_zoomCoords:{type:Array,value:null},_maxZoomDistanceForClick:{type:Number,value:20},templateIndex:Object,minimap:Object,_nodeGroupIndex:{type:Object,value:function(){return{}}},_annotationGroupIndex:{type:Object,value:function(){return{}}},_edgeGroupIndex:{type:Object,value:function(){return{}}},maxMetanodeLabelLengthFontSize:{type:Number,value:9},minMetanodeLabelLengthFontSize:{type:Number,value:6},maxMetanodeLabelLengthLargeFont:{type:Number,value:11},maxMetanodeLabelLength:{type:Number,value:18},progress:Object},observers:["_buildAndFit(graphHierarchy)"],getNode:function(a){return this.graphHierarchy.getRenderNodeByName(a)},isNodeExpanded:function(a){return a.expanded},setNodeExpanded:function(a){this._build(this.graphHierarchy)},_resetState:function(){this._nodeGroupIndex={},this._annotationGroupIndex={},this._edgeGroupIndex={},this._updateLabels(!1),d3.select(this.$.svg).select("#root").selectAll("*").remove(),d3.select(this.$.svg).select("defs #linearGradients").selectAll("*").remove()},_build:function(a){if(a){var b=d3.keys(a.hierarchy.templates);this.templateIndex=d3.scale.ordinal().domain(b).range(d3.range(0,b.length)),tf.time("tf-graph-scene (layout):",function(){tf.graph.layout.scene(a.root,this)}.bind(this)),tf.time("tf-graph-scene (build 
scene):",function(){tf.graph.scene.buildGroup(d3.select(this.$.root),a.root,this),tf.graph.scene.addGraphClickListener(this.$.svg,this)}.bind(this)),setTimeout(function(){this.minimap.update()}.bind(this),tf.graph.layout.PARAMS.animation.duration)}},ready:function(){this._zoom=d3.behavior.zoom().on("zoomend",function(){if(this._zoomStartCoords){var a=Math.sqrt(Math.pow(this._zoomStartCoords[0]-this._zoomCoords[0],2)+Math.pow(this._zoomStartCoords[1]-this._zoomCoords[1],2));a=8,c=20,d=500;Polymer({is:"iron-list",properties:{items:{type:Array},as:{type:String,value:"item"},indexAs:{type:String,value:"index"},selectedAs:{type:String,value:"selected"},selectionEnabled:{type:Boolean,value:!1},selectedItem:{type:Object,notify:!0},selectedItems:{type:Object,notify:!0},multiSelection:{type:Boolean,value:!1}},observers:["_itemsChanged(items.*)","_selectionEnabledChanged(selectionEnabled)","_multiSelectionChanged(multiSelection)"],behaviors:[Polymer.Templatizer,Polymer.IronResizableBehavior],listeners:{"iron-resize":"_resizeHandler"},_ratio:.5,_scroller:null,_scrollerPaddingTop:0,_scrollPosition:0,_physicalCount:0,_physicalStart:0,_physicalEnd:0,_physicalSize:0,_physicalAverage:0,_physicalAverageCount:0,_physicalTop:0,_virtualCount:0,_virtualStartVal:0,_physicalIndexForKey:null,_estScrollHeight:0,_scrollHeight:0,_viewportSize:0,_physicalItems:null,_physicalSizes:null,_firstVisibleIndexVal:null,_collection:null,_itemsRendered:!1,get _physicalBottom(){return this._physicalTop+this._physicalSize},get _scrollBottom(){return this._scrollPosition+this._viewportSize},get _virtualEnd(){return this._virtualStartVal+this._physicalCount-1},_minVirtualStart:0,get _maxVirtualStart(){return Math.max(0,this._virtualCount-this._physicalCount)},get _hiddenContentSize(){return this._physicalSize-this._viewportSize},get _maxScrollTop(){return this._estScrollHeight-this._viewportSize},set _virtualStart(a){this._virtualStartVal=Math.min(this._maxVirtualStart,Math.max(this._minVirtualStart,a)),this._physicalStart=this._virtualStartVal%this._physicalCount,this._physicalEnd=(this._physicalStart+this._physicalCount-1)%this._physicalCount},get _virtualStart(){return this._virtualStartVal},get _optPhysicalSize(){return 3*this._viewportSize},get _isVisible(){return this._scroller&&Boolean(this._scroller.offsetWidth||this._scroller.offsetHeight)},get firstVisibleIndex(){var a;return null===this._firstVisibleIndexVal&&(a=this._physicalTop,this._firstVisibleIndexVal=this._iterateItems(function(b,c){if(a+=this._physicalSizes[b],a>this._scrollPosition)return c})||0),this._firstVisibleIndexVal},ready:function(){b?this._scrollListener=function(){requestAnimationFrame(this._scrollHandler.bind(this))}.bind(this):this._scrollListener=this._scrollHandler.bind(this)},attached:function(){var a=Polymer.dom(this),c=a.parentNode;c&&c.scroller?this._scroller=c.scroller:(this._scroller=this,this.classList.add("has-scroller")),b&&(this._scroller.style.webkitOverflowScrolling="touch"),this._scroller.addEventListener("scroll",this._scrollListener),this.updateViewportBoundaries(),this._render()},detached:function(){this._itemsRendered=!1,this._scroller&&this._scroller.removeEventListener("scroll",this._scrollListener)},updateViewportBoundaries:function(){var a=window.getComputedStyle(this._scroller);this._scrollerPaddingTop=parseInt(a["padding-top"],10),this._viewportSize=this._scroller.offsetHeight},_refresh:function(){var 
b,d,e,f,a=Math.max(0,Math.min(this._maxScrollTop,this._scroller.scrollTop)),g=this._ratio,h=a-this._scrollPosition,i=0,j=this._hiddenContentSize,k=g,l=[];if(this._scrollPosition=a,this._firstVisibleIndexVal=null,f=this._scrollBottom,Math.abs(h)>this._physicalSize)this._physicalTop+=h,i=Math.round(h/this._physicalAverage);else if(h<0){var m=a-this._physicalTop,n=this._virtualStart,o=this._physicalBottom;for(e=[],d=this._physicalEnd,k=m/j;k0&&o-this._physicalSizes[d]>f;)b=this._physicalSizes[d],k+=b/j,o-=b,e.push(d),i++,d=0===d?this._physicalCount-1:d-1;l=e,i=-i}else if(h>0){var p=this._physicalBottom-f,q=this._virtualEnd,r=this._virtualCount-1;for(e=[],d=this._physicalStart,k=p/j;kthis._scrollPosition?this._increasePool(1):this._physicalSize0||this._physicalCount>0;this.isAttached&&!this._itemsRendered&&this._isVisible&&a&&(this._update(),this._itemsRendered=!0)},_ensureTemplatized:function(){if(!this.ctor){var a={};a.__key__=!0,a[this.as]=!0,a[this.indexAs]=!0,a[this.selectedAs]=!0,this._instanceProps=a,this._userTemplate=Polymer.dom(this).querySelector("template"),this._userTemplate?this.templatize(this._userTemplate):console.warn("iron-list requires a template to be provided in light-dom")}},_getStampedChildren:function(){return this._physicalItems},_forwardInstancePath:function(a,b,c){0===b.indexOf(this.as+".")&&this.notifyPath("items."+a.__key__+"."+b.slice(this.as.length+1),c)},_forwardParentProp:function(a,b){this._physicalItems&&this._physicalItems.forEach(function(c){c._templateInstance[a]=b},this)},_forwardParentPath:function(a,b){this._physicalItems&&this._physicalItems.forEach(function(c){c._templateInstance.notifyPath(a,b,!0)},this)},_forwardItemPath:function(a,b){if(this._physicalIndexForKey){var c=a.indexOf("."),d=a.substring(0,c<0?a.length:c),e=this._physicalIndexForKey[d],f=this._physicalItems[e];if(f){var g=f._templateInstance;c>=0?(a=this.as+"."+a.substring(c+1),g.notifyPath(a,b,!0)):g[this.as]=b}}},_itemsChanged:function(a){"items"===a.path?(this._itemsRendered=!1,this._virtualStartVal=0,this._physicalTop=0,this._virtualCount=this.items?this.items.length:0,this._collection=this.items?Polymer.Collection.get(this.items):null,this._physicalIndexForKey={},this._resetScrollPosition(0),this._physicalItems||(this._physicalCount=Math.max(1,Math.min(c,this._virtualCount)),this._physicalItems=this._createPool(this._physicalCount),this._physicalSizes=new Array(this._physicalCount)),this.debounce("refresh",this._render)):"items.splices"===a.path?(this._itemsRendered=!1,this._adjustVirtualIndex(a.value.indexSplices),this._virtualCount=this.items?this.items.length:0,this.debounce("refresh",this._render)):this._forwardItemPath(a.path.split(".").slice(1).join("."),a.value)},_adjustVirtualIndex:function(a){var b,c,d;for(b=0;b=this._virtualStartVal));b++)this._virtualStart=this._virtualStart+Math.max(c.addedCount-c.removed.length,d-this._virtualStartVal)},_scrollHandler:function(){this._refresh()},_iterateItems:function(a,b){var c,d,e,f;if(2===arguments.length&&b){for(f=0;f=this._physicalStart?this._virtualStartVal+(c-this._physicalStart):this._virtualStartVal+(this._physicalCount-this._physicalStart)+c,null!=(e=a.call(this,c,d)))return e}else{for(c=this._physicalStart,d=this._virtualStartVal;c=this._estScrollHeight-this._physicalSize,(a||Math.abs(this._estScrollHeight-this._scrollHeight)>=this._optPhysicalSize)&&(this.$.items.style.height=this._estScrollHeight+"px",this._scrollHeight=this._estScrollHeight)},scrollToIndex:function(a){if("number"==typeof 
a){this.firstVisibleIndex;a=Math.min(Math.max(a,0),this._virtualCount-1),this._virtualStart=a-1,this._assignModels(),this._updateMetrics(),this._physicalTop=this._virtualStart*this._physicalAverage;for(var c=this._physicalStart,d=this._virtualStart,e=0,f=this._hiddenContentSize;d!==a&&e not found")}else if(void 0===this._collection.getKey(a))throw new TypeError(" should be a valid item");return a},selectItem:function(a){a=this._getNormalizedItem(a);var b=this._getModelFromItem(a);!this.multiSelection&&this.selectedItem&&this.deselectItem(this.selectedItem),b&&(b[this.selectedAs]=!0),this.$.selector.select(a)},deselectItem:function(a){a=this._getNormalizedItem(a);var b=this._getModelFromItem(a);b&&(b[this.selectedAs]=!1),this.$.selector.deselect(a)},toggleSelectionForItem:function(a){a=this._getNormalizedItem(a),this.$.selector.isSelected(a)?this.deselectItem(a):this.selectItem(a)},clearSelection:function(){function a(a){var b=this._getModelFromItem(a);b&&(b[this.selectedAs]=!1)}Array.isArray(this.selectedItems)?this.selectedItems.forEach(a,this):this.selectedItem&&a.call(this,this.selectedItem),this.$.selector.clearSelection()},_selectionEnabledChanged:function(a){a?(this.listen(this,"tap","_selectionHandler"),this.listen(this,"keypress","_selectionHandler")):(this.unlisten(this,"tap","_selectionHandler"),this.unlisten(this,"keypress","_selectionHandler"))},_selectionHandler:function(a){if("keypress"!==a.type||13===a.keyCode){var b=this.modelForElement(a.target);b&&this.toggleSelectionForItem(b[this.as])}},_multiSelectionChanged:function(a){this.clearSelection(),this.$.selector.multi=a},updateSizeForItem:function(a){a=this._getNormalizedItem(a);var b=this._collection.getKey(a),c=this._physicalIndexForKey[b];void 0!==c&&(this._updateMetrics([c]),this._positionItems())}})}(),Polymer({is:"paper-icon-button",hostAttributes:{role:"button",tabindex:"0"},behaviors:[Polymer.PaperInkyFocusBehavior],properties:{src:{type:String},icon:{type:String},alt:{type:String,observer:"_altChanged"}},_altChanged:function(a,b){var c=this.getAttribute("aria-label");c&&b!=c||this.setAttribute("aria-label",a)}}),Polymer({is:"paper-item",hostAttributes:{role:"option",tabindex:"0"},behaviors:[Polymer.IronControlState,Polymer.IronButtonState]}),Polymer({is:"paper-item-body"}),Polymer({is:"paper-icon-item",hostAttributes:{role:"option",tabindex:"0"},behaviors:[Polymer.IronControlState,Polymer.IronButtonState]}),function(){Polymer({is:"tf-graph-icon",properties:{node:{type:Object,value:null},type:{type:String,value:null},vertical:{type:Boolean,value:!1},const:{type:Boolean,value:!1},summary:{type:Boolean,value:!1},height:{type:Number,value:20}},_isType:function(a,b,c){return a?tf.graph.NodeType[a.type]===c:b===c},_isVertical:function(a,b){return a?a.hasNonControlEdges:!!b},_isConst:function(a,b){return a?"Const"===a.op:!!b},_isSummary:function(a,b){return 
a?this._isType(a,null,"OP")&&"Summary"===a.op.substr(-7):!!b},_isRegularOp:function(a,b,c){return!this._isConst(a,b)&&!this._isSummary(a,c)}})}(),function(){Polymer({is:"tf-node-list-item",properties:{cardNode:Object,itemNode:Object,name:String,itemType:{type:String,observer:"_itemTypeChanged"}},_itemTypeChanged:function(){"subnode"!==this.itemType?this.$["list-item"].classList.add("clickable"):this.$["list-item"].classList.remove("clickable")},_nodeListener:function(a){this.fire("node-list-item-"+a.type,{cardNode:this.cardNode.name,nodeName:this.name,type:this.itemType})}})}(),function(){Polymer({is:"tf-node-info",properties:{nodeName:String,graphHierarchy:Object,_node:{type:Object,computed:"_getNode(nodeName, graphHierarchy)",observer:"_resetState"},_attributes:{type:Array,computed:"_getAttributes(_node)"},_device:{type:String,computed:"_getDevice(_node)"},_successors:{type:Object,computed:"_getSuccessors(_node, graphHierarchy)"},_predecessors:{type:Object,computed:"_getPredecessors(_node, graphHierarchy)"},_subnodes:{type:Array,computed:"_getSubnodes(_node)"},_expanded:{type:Boolean,value:!0},_totalPredecessors:{type:Number,computed:"_getTotalPred(_predecessors)"},_totalSuccessors:{type:Number,computed:"_getTotalSucc(_successors)"},_openedControlPred:{type:Boolean,value:!1},_openedControlSucc:{type:Boolean,value:!1}},expandNode:function(){this.fire("_node.expand",this.node)},_getNode:function(a,b){return b.node(a)},_getNodeName:function(a){return(a||"").replace(/\//g,"​/")},_getAttributes:function(a){return this.async(this._resizeList.bind(this,"#attributesList")),a&&a.attr?a.attr.map(function(a){return{key:a.key,value:JSON.stringify(a.value)}}):[]},_getDevice:function(a){return a?a.device:null},_getSuccessors:function(a,b){return this.async(this._resizeList.bind(this,"#inputsList")),a?b.getSuccessors(a.name):[[],[]]},_getPredecessors:function(a,b){return this.async(this._resizeList.bind(this,"#outputsList")),a?b.getPredecessors(a.name):[[],[]]},_getSubnodes:function(a){return a&&a.metagraph?a.metagraph.nodes():null},_getTotalPred:function(a){return a.regular.length+a.control.length},_getTotalSucc:function(a){return a.regular.length+a.control.length},_toggleControlPred:function(){this._openedControlPred=!this._openedControlPred},_toggleControlSucc:function(){this._openedControlSucc=!this._openedControlSucc},_toggleExpanded:function(){this._expanded=!this._expanded},_getToggleIcon:function(a){return a?"expand-less":"expand-more"},_resetState:function(){this._openedControlPred=!1,this._openedControlSucc=!1},_resizeList:function(a){var 
b=document.querySelector(a);b&&b.fire("iron-resize")}})}(),function(){Polymer({is:"tf-graph-info",properties:{title:String,graphHierarchy:Object,graph:Object,selectedNode:{type:String,notify:!0},highlightedNode:{type:String,notify:!0}},listeners:{"node-list-item-click":"_nodeListItemClicked","node-list-item-mouseover":"_nodeListItemMouseover","node-list-item-mouseout":"_nodeListItemMouseout"},_nodeListItemClicked:function(a){this.selectedNode=a.detail.nodeName},_nodeListItemMouseover:function(a){this.highlightedNode=a.detail.nodeName},_nodeListItemMouseout:function(){this.highlightedNode=null}})}(),Polymer({is:"paper-progress",behaviors:[Polymer.IronRangeBehavior],properties:{secondaryProgress:{type:Number,value:0},secondaryRatio:{type:Number,value:0,readOnly:!0},indeterminate:{type:Boolean,value:!1,observer:"_toggleIndeterminate"},disabled:{type:Boolean,value:!1,reflectToAttribute:!0,observer:"_disabledChanged"}},observers:["_progressChanged(secondaryProgress, value, min, max)"],hostAttributes:{role:"progressbar"},_toggleIndeterminate:function(a){this.toggleClass("indeterminate",a,this.$.primaryProgress)},_transformProgress:function(a,b){var c="scaleX("+b/100+")";a.style.transform=a.style.webkitTransform=c},_mainRatioChanged:function(a){this._transformProgress(this.$.primaryProgress,a)},_progressChanged:function(a,b,c,d){a=this._clampValue(a),b=this._clampValue(b);var e=100*this._calcRatio(a),f=100*this._calcRatio(b);this._setSecondaryRatio(e),this._transformProgress(this.$.secondaryProgress,e),this._transformProgress(this.$.primaryProgress,f),this.secondaryProgress=a,this.setAttribute("aria-valuenow",b),this.setAttribute("aria-valuemin",c),this.setAttribute("aria-valuemax",d)},_disabledChanged:function(a){this.setAttribute("aria-disabled",a?"true":"false")},_hideSecondaryProgress:function(a){return 0===a}}),Polymer({is:"tf-graph-board",properties:{graphHierarchy:Object,graph:Object,graphName:String,hasStats:Boolean,progress:Object,colorByParams:{type:Object,notify:!0},_selectedNode:String,_highlightedNode:String},_isNotComplete:function(a){return a.value<100},_getContainerClass:function(a){var b="container";return a.error&&(b+=" error"),this._isNotComplete(a)&&(b+=" loading"),b}}),Polymer({is:"tf-graph-loader",properties:{progress:{type:Object,notify:!0,readOnly:!0},datasets:Array,hasStats:{type:Boolean,readOnly:!0,notify:!0},selectedDataset:Number,selectedFile:{type:Object,observer:"_selectedFileChanged"},outGraphHierarchy:{type:Object,readOnly:!0,notify:!0},outGraph:{type:Object,readOnly:!0,notify:!0},outGraphName:{type:String,readOnly:!0,notify:!0}},observers:["_selectedDatasetChanged(selectedDataset, datasets)"],_parseAndConstructHierarchicalGraph:function(a,b){var c=this;c._setProgress({value:0,msg:""});var e,d={setMessage:function(a){c._setProgress({value:c.progress.value,msg:a})},updateProgress:function(a){c._setProgress({value:c.progress.value+a,msg:c.progress.msg})},reportError:function(a){c._setProgress({value:c.progress.value,msg:a,error:!0})}},f=tf.getSubtaskTracker(d,30,"Data");tf.graph.parser.readAndParseData(a,b,f).then(function(a){var b=a.nodes;e=a.statsJson;var c={};c["Assign 0"]=!0,c["AssignAdd 0"]=!0,c["AssignSub 0"]=!0,c["assign 0"]=!0,c["assign_add 0"]=!0,c["assign_sub 0"]=!0,c["count_up_to 0"]=!0,c["ScatterAdd 0"]=!0,c["ScatterSub 0"]=!0,c["ScatterUpdate 0"]=!0,c["scatter_add 0"]=!0,c["scatter_sub 0"]=!0,c["scatter_update 0"]=!0;var 
f={enableEmbedding:!0,inEmbeddingTypes:["Const"],outEmbeddingTypes:["^[a-zA-Z]+Summary$"],refEdges:c},g=tf.getSubtaskTracker(d,20,"Graph");return tf.graph.build(b,f,g)}).then(function(a){this._setOutGraph(a),e&&tf.time("Joining stats info with graph...",function(){tf.graph.joinStatsInfoWithGraph(a,e)});var b={verifyTemplate:!0,groupSeries:!0},c=tf.getSubtaskTracker(d,50,"Namespace hierarchy");return tf.graph.hierarchy.build(a,b,c)}.bind(this)).then(function(a){this._setHasStats(null!=e),this._setOutGraphHierarchy(a)}.bind(this)).catch(function(a){d.reportError("Graph visualization failed: "+a)})},_selectedDatasetChanged:function(a,b){var c=b[a];this._parseAndConstructHierarchicalGraph(c),this._setOutGraphName(c.name)},_selectedFileChanged:function(a){if(a){var b=a.target.files[0];if(b){a.target.value="";var c=new FileReader;c.onload=function(a){this._parseAndConstructHierarchicalGraph(null,a.target.result)}.bind(this),c.readAsText(b)}}}}),function(){Polymer({is:"paper-menu",behaviors:[Polymer.IronMenuBehavior]})}(),function(){Polymer({is:"iron-overlay-backdrop",properties:{opened:{readOnly:!0,reflectToAttribute:!0,type:Boolean,value:!1},_manager:{type:Object,value:Polymer.IronOverlayManager}},prepare:function(){this.parentNode||(Polymer.dom(document.body).appendChild(this),this.style.zIndex=this._manager.currentOverlayZ()-1)},open:function(){this._manager.getBackdrops().length<2&&this._setOpened(!0)},close:function(){this._manager.getBackdrops().length<2&&this._setOpened(!1)},complete:function(){0===this._manager.getBackdrops().length&&this.parentNode&&Polymer.dom(this.parentNode).removeChild(this)}})}(),function(){"use strict";Polymer({is:"iron-dropdown",behaviors:[Polymer.IronControlState,Polymer.IronA11yKeysBehavior,Polymer.IronOverlayBehavior,Polymer.NeonAnimationRunnerBehavior],properties:{horizontalAlign:{type:String,value:"left",reflectToAttribute:!0},verticalAlign:{type:String,value:"top",reflectToAttribute:!0},horizontalOffset:{type:Number,value:0,notify:!0},verticalOffset:{type:Number,value:0,notify:!0},positionTarget:{type:Object,observer:"_positionTargetChanged"},openAnimationConfig:{type:Object},closeAnimationConfig:{type:Object},focusTarget:{type:Object},noAnimations:{type:Boolean,value:!1},allowOutsideScroll:{type:Boolean,value:!1},_positionRectMemo:{type:Object}},listeners:{"neon-animation-finish":"_onNeonAnimationFinish"},observers:["_updateOverlayPosition(verticalAlign, horizontalAlign, verticalOffset, horizontalOffset)"],attached:function(){void 0===this.positionTarget&&(this.positionTarget=this._defaultPositionTarget)},get containedElement(){return Polymer.dom(this.$.content).getDistributedNodes()[0]},get _focusTarget(){return this.focusTarget||this.containedElement},get _defaultPositionTarget(){var a=Polymer.dom(this).parentNode;return a.nodeType===Node.DOCUMENT_FRAGMENT_NODE&&(a=a.host),a},get _positionRect(){return!this._positionRectMemo&&this.positionTarget&&(this._positionRectMemo=this.positionTarget.getBoundingClientRect()),this._positionRectMemo},get _horizontalAlignTargetValue(){var a;return a="right"===this.horizontalAlign?document.documentElement.clientWidth-this._positionRect.right:this._positionRect.left,a+=this.horizontalOffset,Math.max(a,0)},get _verticalAlignTargetValue(){var a;return 
a="bottom"===this.verticalAlign?document.documentElement.clientHeight-this._positionRect.bottom:this._positionRect.top,a+=this.verticalOffset,Math.max(a,0)},_openedChanged:function(a){a&&this.disabled?this.cancel():(this.cancelAnimation(),this._prepareDropdown(),Polymer.IronOverlayBehaviorImpl._openedChanged.apply(this,arguments)),this.opened&&this._focusContent()},_renderOpened:function(){this.allowOutsideScroll||Polymer.IronDropdownScrollManager.pushScrollLock(this),!this.noAnimations&&this.animationConfig&&this.animationConfig.open?(this.$.contentWrapper.classList.add("animating"),this.playAnimation("open")):Polymer.IronOverlayBehaviorImpl._renderOpened.apply(this,arguments)},_renderClosed:function(){Polymer.IronDropdownScrollManager.removeScrollLock(this),!this.noAnimations&&this.animationConfig&&this.animationConfig.close?(this.$.contentWrapper.classList.add("animating"),this.playAnimation("close")):Polymer.IronOverlayBehaviorImpl._renderClosed.apply(this,arguments)},_onNeonAnimationFinish:function(){this.$.contentWrapper.classList.remove("animating"),this.opened?Polymer.IronOverlayBehaviorImpl._renderOpened.apply(this):Polymer.IronOverlayBehaviorImpl._renderClosed.apply(this)},_onIronResize:function(){var b,c,a=this.containedElement;a&&(b=a.scrollTop,c=a.scrollLeft),this.opened&&this._updateOverlayPosition(),Polymer.IronOverlayBehaviorImpl._onIronResize.apply(this,arguments),a&&(a.scrollTop=b,a.scrollLeft=c)},_positionTargetChanged:function(){this._updateOverlayPosition()},_updateAnimationConfig:function(){var a={},b=[];this.openAnimationConfig&&(a.open=[{name:"opaque-animation"}].concat(this.openAnimationConfig),b=b.concat(a.open)),this.closeAnimationConfig&&(a.close=this.closeAnimationConfig,b=b.concat(a.close)),b.forEach(function(a){a.node=this.containedElement},this),this.animationConfig=a},_prepareDropdown:function(){this.sizingTarget=this.containedElement||this.sizingTarget,this._updateAnimationConfig(),this._updateOverlayPosition()},_updateOverlayPosition:function(){this._positionRectMemo=null,this.positionTarget&&(this.style[this.horizontalAlign]=this._horizontalAlignTargetValue+"px",this.style[this.verticalAlign]=this._verticalAlignTargetValue+"px",this._fitInfo&&(this._fitInfo.inlineStyle[this.horizontalAlign]=this.style[this.horizontalAlign],this._fitInfo.inlineStyle[this.verticalAlign]=this.style[this.verticalAlign]))},_focusContent:function(){this.async(function(){this._focusTarget&&this._focusTarget.focus()})}})}(),function(){"use strict";var 
a=Polymer({is:"paper-menu-button",behaviors:[Polymer.IronA11yKeysBehavior,Polymer.IronControlState],properties:{opened:{type:Boolean,value:!1,notify:!0,observer:"_openedChanged"},horizontalAlign:{type:String,value:"left",reflectToAttribute:!0},verticalAlign:{type:String,value:"top",reflectToAttribute:!0},horizontalOffset:{type:Number,value:0,notify:!0},verticalOffset:{type:Number,value:0,notify:!0},noAnimations:{type:Boolean,value:!1},ignoreSelect:{type:Boolean,value:!1},openAnimationConfig:{type:Object,value:function(){return[{name:"fade-in-animation",timing:{delay:100,duration:200}},{name:"paper-menu-grow-width-animation",timing:{delay:100,duration:150,easing:a.ANIMATION_CUBIC_BEZIER}},{name:"paper-menu-grow-height-animation",timing:{delay:100,duration:275,easing:a.ANIMATION_CUBIC_BEZIER}}]}},closeAnimationConfig:{type:Object,value:function(){return[{name:"fade-out-animation",timing:{duration:150}},{name:"paper-menu-shrink-width-animation",timing:{delay:100,duration:50,easing:a.ANIMATION_CUBIC_BEZIER}},{name:"paper-menu-shrink-height-animation",timing:{duration:200,easing:"ease-in"}}]}},_dropdownContent:{type:Object}},hostAttributes:{role:"group","aria-haspopup":"true"},listeners:{"iron-select":"_onIronSelect"},get contentElement(){return Polymer.dom(this.$.content).getDistributedNodes()[0]},open:function(){this.disabled||this.$.dropdown.open()},close:function(){ +this.$.dropdown.close()},_onIronSelect:function(a){this.ignoreSelect||this.close()},_openedChanged:function(a,b){a?(this._dropdownContent=this.contentElement,this.fire("paper-dropdown-open")):null!=b&&this.fire("paper-dropdown-close")},_disabledChanged:function(a){Polymer.IronControlState._disabledChanged.apply(this,arguments),a&&this.opened&&this.close()}});a.ANIMATION_CUBIC_BEZIER="cubic-bezier(.3,.95,.5,1)",a.MAX_ANIMATION_TIME_MS=400,Polymer.PaperMenuButton=a}(),function(){"use strict";Polymer({is:"paper-dropdown-menu",behaviors:[Polymer.IronControlState,Polymer.IronButtonState,Polymer.IronFormElementBehavior,Polymer.IronValidatableBehavior],properties:{selectedItemLabel:{type:String,notify:!0,readOnly:!0},selectedItem:{type:Object,notify:!0,readOnly:!0},value:{type:String,notify:!0,readOnly:!0},label:{type:String},placeholder:{type:String},opened:{type:Boolean,notify:!0,value:!1},noLabelFloat:{type:Boolean,value:!1,reflectToAttribute:!0},alwaysFloatLabel:{type:Boolean,value:!1},noAnimations:{type:Boolean,value:!1}},listeners:{tap:"_onTap"},keyBindings:{"up down":"open",esc:"close"},hostAttributes:{role:"group","aria-haspopup":"true"},observers:["_selectedItemChanged(selectedItem)"],attached:function(){var a=this.contentElement;a&&a.selectedItem&&this._setSelectedItem(a.selectedItem)},get contentElement(){return Polymer.dom(this.$.content).getDistributedNodes()[0]},open:function(){this.$.menuButton.open()},close:function(){this.$.menuButton.close()},_onIronSelect:function(a){this._setSelectedItem(a.detail.item)},_onIronDeselect:function(a){this._setSelectedItem(null)},_onTap:function(a){Polymer.Gestures.findOriginalTarget(a)===this&&this.open()},_selectedItemChanged:function(a){var b="";b=a?a.label||a.textContent.trim():"",this._setValue(b),this._setSelectedItemLabel(b)},_computeMenuVerticalOffset:function(a){return a?-4:8},_getValidity:function(){return this.disabled||!this.required||this.required&&this.value}})}(),function(){function c(a,b,d){return d=null==d?0:d,d+1=b[d+1].numUnits?c(a/b[d+1].numUnits,b,d+1):a.toPrecision(3)-0+" 
"+b[d].symbol}Polymer({is:"tf-graph-controls",ready:function(){d3.select(this.$["summary-icon"]).attr("xlink:href","../../../static/tb/summary-icon.svg")},properties:{hasStats:{type:Boolean},colorBy:{type:String,notify:!0,computed:"_getColorBy(_colorByIndex)"},colorByParams:Object,datasets:{type:Array,observer:"_datasetsChanged"},selectedDataset:{type:Number,notify:!0,value:0},selectedFile:{type:Object,notify:!0},_colorByIndex:{type:Number,value:0},_currentGradientParams:{type:Object,computed:"_getCurrentGradientParams(colorByParams, colorBy)"}},_getColorBy:function(a){return["structure","device","compute_time","memory"][a]},_getBackgroundColor:function(a){return"background-color:"+a},fit:function(){document.querySelector("#scene").fit()},_isGradientColoring:function(a){return["compute_time","memory"].indexOf(a)!==-1},_equals:function(a,b){return a===b},_getCurrentGradientParams:function(d,e){if(this._isGradientColoring(e)){var f=d[e],g=f.minValue,h=f.maxValue;return"memory"===e?(g=c(g,a),h=c(h,a)):"compute_time"===e&&(g=c(g,b),h=c(h,b)),{minValue:g,maxValue:h,startColor:f.startColor,endColor:f.endColor}}},_updateFileInput:function(a){this.set("selectedFile",a)},_datasetsChanged:function(a,b){null==b&&null!=this.selected||this.set("selectedDataset",0)},_getFile:function(){this.$.file.click()}});var a=[{symbol:"B"},{symbol:"KB",numUnits:1024},{symbol:"MB",numUnits:1024},{symbol:"GB",numUnits:1024},{symbol:"TB",numUnits:1024},{symbol:"PB",numUnits:1024}],b=[{symbol:"µs"},{symbol:"ms",numUnits:1e3},{symbol:"s",numUnits:1e3},{symbol:"min",numUnits:60},{symbol:"hr",numUnits:60},{symbol:"days",numUnits:24}]}(),Polymer({is:"tf-graph-basic",properties:{hasStats:Boolean,pbtxt:{type:String,observer:"_updateGraph"},_progress:Object},_updateGraph:function(){this.$.loader._parseAndConstructHierarchicalGraph(null,this.pbtxt)}}); \ No newline at end of file diff --git a/digits/static/tb/trace_viewer_full.html b/digits/static/tb/trace_viewer_full.html new file mode 100644 index 000000000..f042913f6 --- /dev/null +++ b/digits/static/tb/trace_viewer_full.html @@ -0,0 +1,9946 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/digits/templates/datasets/images/classification/new.html b/digits/templates/datasets/images/classification/new.html index bbea816b7..5a8836970 100644 --- a/digits/templates/datasets/images/classification/new.html +++ b/digits/templates/datasets/images/classification/new.html @@ -382,12 +382,16 @@

New Image Classification Dataset

{{ form.backend(class='form-control') }} +
{{form.compression.label}} @@ -410,10 +414,17 @@

New Image Classification Dataset

$("#compression").parent().hide(); $("#encoding").parent().show(); $("#backend-hdf5-warning").hide(); + $("#backend-tfrecords-warning").hide(); } else if (val == 'hdf5') { $("#encoding").parent().hide(); $("#compression").parent().show(); $("#backend-hdf5-warning").show(); + $("#backend-tfrecords-warning").hide(); + } else if (val == 'tfrecords') { + $("#compression").parent().hide(); + $("#encoding").parent().show(); + $("#backend-hdf5-warning").hide(); + $("#backend-tfrecords-warning").show(); } } $("#backend").change(backendChanged); diff --git a/digits/templates/error.html b/digits/templates/error.html index 036796937..bc34a190e 100644 --- a/digits/templates/error.html +++ b/digits/templates/error.html @@ -10,7 +10,9 @@

{{title}}

{% if message %} +{% autoescape false %}

{{message}}

+{% endautoescape %} {% endif %} {% if description %}

{{description}}

diff --git a/digits/templates/models/data_augmentation.html b/digits/templates/models/data_augmentation.html index 5c9fed807..bf8bbc429 100644 --- a/digits/templates/models/data_augmentation.html +++ b/digits/templates/models/data_augmentation.html @@ -13,21 +13,33 @@
- {{ form.aug_rot.label }} - {{ form.aug_rot.tooltip}} - {{ form.aug_rot(class='form-control')}} + {{form.aug_rot.label}} + {{form.aug_rot.tooltip}} + {{form.aug_rot(class='form-control')}}
- {{ form.aug_scale.label }} - {{ form.aug_scale.tooltip}} - {{ form.aug_scale(class='form-control')}} + {{form.aug_scale.label}} + {{form.aug_scale.tooltip}} + {{form.aug_scale(class='form-control')}}
- {{ form.aug_noise.label }} - {{ form.aug_noise.tooltip}} - {{ form.aug_noise(class='form-control')}} + {{form.aug_noise.label}} + {{form.aug_noise.tooltip}} + {{form.aug_noise(class='form-control')}} +
+ +
+ {{form.aug_contrast.label}} + {{form.aug_contrast.tooltip}} + {{form.aug_contrast(class='form-control')}} +
+ +
+ {{form.aug_whitening}} + {{form.aug_whitening.label}} + {{form.aug_whitening.tooltip}}
@@ -36,17 +48,17 @@ {{form.aug_hsv_use.tooltip}}
- {{form.aug_hsv_h.label }} + {{form.aug_hsv_h.label}} {{form.aug_hsv_h(class='form-control')}}
- {{form.aug_hsv_s.label }} + {{form.aug_hsv_s.label}} {{form.aug_hsv_s(class='form-control')}}
- {{form.aug_hsv_v.label }} + {{form.aug_hsv_v.label}} {{form.aug_hsv_v(class='form-control')}}
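The hunk above adds two new augmentation controls to the form: contrast (aug_contrast) and whitening (aug_whitening). As a rough sketch only — the function and argument names below, and the way DIGITS actually wires these options into its input pipeline, are assumptions rather than part of this patch — the two options map naturally onto standard TensorFlow 1.x image ops:

import tensorflow as tf

def augment_image(image, contrast_factor=0.8, use_whitening=True):
    # Hypothetical helper; `image` is a single float32 HWC tensor.
    if contrast_factor > 0:
        # Jitter contrast by a random factor drawn from [1/(1+f), 1+f].
        image = tf.image.random_contrast(image,
                                         lower=1.0 / (1.0 + contrast_factor),
                                         upper=1.0 + contrast_factor)
    if use_whitening:
        # Per-image whitening: rescale to zero mean and unit variance
        # (named per_image_whitening in very old TensorFlow releases).
        image = tf.image.per_image_standardization(image)
    return image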
diff --git a/digits/templates/models/images/classification/custom_network_explanation.html b/digits/templates/models/images/classification/custom_network_explanation.html index c18686a89..ac7413f86 100644 --- a/digits/templates/models/images/classification/custom_network_explanation.html +++ b/digits/templates/models/images/classification/custom_network_explanation.html @@ -105,3 +105,10 @@

Specifying a custom Torch network

Use this field to enter a Torch network using Lua code. Refer to the documentation for more information.

+ +

Specifying a custom TensorFlow network

+ +

+ Use this field to enter a TensorFlow network using Python. + Refer to the documentation for more information. +
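For orientation, the snippet below illustrates the kind of Python that defines such a network — a small convolutional classifier written against the plain TensorFlow 1.x layers API. It is a generic sketch, not the exact entry-point signature DIGITS expects for this field (that interface is described in the linked documentation); build_network and num_classes are placeholder names.

import tensorflow as tf

def build_network(x, num_classes):
    # x: an NHWC batch of images, e.g. shaped [None, 28, 28, 1].
    net = tf.layers.conv2d(x, filters=20, kernel_size=5, activation=tf.nn.relu)
    net = tf.layers.max_pooling2d(net, pool_size=2, strides=2)
    net = tf.layers.conv2d(net, filters=50, kernel_size=5, activation=tf.nn.relu)
    net = tf.layers.max_pooling2d(net, pool_size=2, strides=2)
    net = tf.reduce_mean(net, axis=[1, 2])           # global average pooling
    net = tf.layers.dense(net, 500, activation=tf.nn.relu)
    return tf.layers.dense(net, num_classes)         # unscaled logits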

diff --git a/digits/templates/models/images/classification/new.html b/digits/templates/models/images/classification/new.html index bc3302241..86a4684ce 100644 --- a/digits/templates/models/images/classification/new.html +++ b/digits/templates/models/images/classification/new.html @@ -41,8 +41,7 @@

New Image Classification Model

- -
+

Python Layers

{{form.python_layer_from_client.explanation(file='/models/python_layer_explanation.html')}} @@ -66,23 +65,23 @@

Python Layers

diff --git a/digits/templates/models/images/classification/show.html b/digits/templates/models/images/classification/show.html index da2f3f971..cfdfd5a34 100644 --- a/digits/templates/models/images/classification/show.html +++ b/digits/templates/models/images/classification/show.html @@ -72,6 +72,12 @@

Dataset

href="{{url_for('digits.model.images.classification.views.large_graph', job_id=job.id())}}"> View Large + {% if job.train_task().has_timeline_traces() %} + + View Timeline Traces + + {% endif %}


diff --git a/digits/templates/models/images/generic/custom_network_explanation.html b/digits/templates/models/images/generic/custom_network_explanation.html index 7100a1724..32dde4a33 100644 --- a/digits/templates/models/images/generic/custom_network_explanation.html +++ b/digits/templates/models/images/generic/custom_network_explanation.html @@ -89,3 +89,10 @@

Specifying a custom Torch network

Use this field to enter a Torch network using Lua code. Refer to the documentation for more information.

+ +

Specifying a custom TensorFlow network

+ +

+ Use this field to enter a TensorFlow network using Python. + Refer to the documentation for more information. +

diff --git a/digits/templates/models/images/generic/new.html b/digits/templates/models/images/generic/new.html index 8a99ec1c4..e517aee01 100644 --- a/digits/templates/models/images/generic/new.html +++ b/digits/templates/models/images/generic/new.html @@ -41,7 +41,7 @@

New {% if extension_title %}{{ extension_title }}{% else %}Image{% endif %}

-
+

Python Layers

{{form.python_layer_from_client.explanation(file='/models/python_layer_explanation.html')}} @@ -65,23 +65,23 @@

Python Layers

diff --git a/digits/templates/models/images/generic/show.html b/digits/templates/models/images/generic/show.html index 3e7e9b077..e40d42e5a 100644 --- a/digits/templates/models/images/generic/show.html +++ b/digits/templates/models/images/generic/show.html @@ -29,6 +29,8 @@
Pretrained Model
{{task.pretrained_model}}
{% endif %} +
Visualizations
+
TensorBoard
@@ -66,6 +68,12 @@

Dataset

href="{{url_for('digits.model.images.generic.views.large_graph', job_id=job.id())}}"> View Large + {% if job.train_task().has_timeline_traces() %} + + View Timeline Traces + + {% endif %}

@@ -208,30 +216,54 @@

Inference Options

-{% if inference_form_html %} -{{ inference_form_html|safe }} -
- - -
+
+
+
+

Select Inference form

+
+ +
+
+
+
+ +
+
+
+
- -{% else %}
+
+
+ + +
+ + + + +
+
+ +

Test a single image

@@ -338,11 +370,40 @@

Test a list of images

>
-{% endif %}
-{% endblock %} + + +{% endblock %} diff --git a/digits/templates/models/images/classification/large_graph.html b/digits/templates/models/large_graph.html similarity index 100% rename from digits/templates/models/images/classification/large_graph.html rename to digits/templates/models/large_graph.html diff --git a/digits/templates/models/timeline_tracing.html b/digits/templates/models/timeline_tracing.html new file mode 100644 index 000000000..f96722ddb --- /dev/null +++ b/digits/templates/models/timeline_tracing.html @@ -0,0 +1,76 @@ +{# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. #} + +{% extends "layout.html" %} + +{% block title %} +{{job.name()}} - Timeline Trace +{% endblock %} + +{% block head %} + + +{% endblock %} + +{% block nav %} +
  • {{job.job_type()}}
  • +{% endblock %} + +{% block content %} + +{% set task = job.train_task() %} + + + +
    +
    +
    + +
    +
    + +
    +
    +
    + + +
    + + + +{% endblock %} diff --git a/digits/test_utils.py b/digits/test_utils.py index 25e95c0ce..111a3ab55 100644 --- a/digits/test_utils.py +++ b/digits/test_utils.py @@ -68,3 +68,21 @@ def setUpClass(cls): supercls = super(TorchMixin, cls) if hasattr(supercls, 'setUpClass'): supercls.setUpClass() + + +class TensorflowMixin(object): + """ + Mixin for tensorflow tests + """ + FRAMEWORK = 'tensorflow' + + @classmethod + def setUpClass(cls): + skipIfNotFramework('tensorflow') + if cls.FRAMEWORK == 'tensorflow' and not config_value('tensorflow')['enabled']: + raise unittest.SkipTest('Tensorflow not found') + + # Call super.setUpClass() unless we're the last in the class hierarchy + supercls = super(TensorflowMixin, cls) + if hasattr(supercls, 'setUpClass'): + supercls.setUpClass() diff --git a/digits/tools/create_db.py b/digits/tools/create_db.py index b3bb35441..4dcfeddcf 100755 --- a/digits/tools/create_db.py +++ b/digits/tools/create_db.py @@ -34,6 +34,11 @@ import caffe.io # noqa import caffe_pb2 # noqa +if digits.config.config_value('tensorflow')['enabled']: + import tensorflow as tf +else: + tf = None + logger = logging.getLogger('digits.tools.create_db') @@ -303,6 +308,91 @@ def create_db(input_file, output_dir, logger.info('Database created after %d seconds.' % (time.time() - start)) +def _create_tfrecords(image_count, write_queue, batch_size, output_dir, + summary_queue, num_threads, + mean_files=None, + encoding=None, + lmdb_map_size=None, + **kwargs): + """ + Creates the TFRecords database(s) + """ + LIST_FILENAME = 'list.txt' + + if not tf: + raise ValueError("Can't create TFRecords as support for Tensorflow " + "is not enabled.") + + wait_time = time.time() + threads_done = 0 + images_loaded = 0 + images_written = 0 + image_sum = None + compute_mean = bool(mean_files) + + os.makedirs(output_dir) + + # We need shards to achieve good mixing properties because TFRecords + # is a sequential/streaming reader, and has no random access. 
+ + num_shards = 16 # @TODO(tzaman) put some logic behind this + + writers = [] + with open(os.path.join(output_dir, LIST_FILENAME), 'w') as outfile: + for shard_id in xrange(num_shards): + shard_name = 'SHARD_%03d.tfrecords' % (shard_id) + filename = os.path.join(output_dir, shard_name) + writers.append(tf.python_io.TFRecordWriter(filename)) + outfile.write('%s\n' % (filename)) + + shard_id = 0 + while (threads_done < num_threads) or not write_queue.empty(): + + # Send update every 2 seconds + if time.time() - wait_time > 2: + logger.debug('Processed %d/%d' % (images_written, image_count)) + wait_time = time.time() + + processed_something = False + + if not summary_queue.empty(): + result_count, result_sum = summary_queue.get() + images_loaded += result_count + # Update total_image_sum + if compute_mean and result_count > 0 and result_sum is not None: + if image_sum is None: + image_sum = result_sum + else: + image_sum += result_sum + threads_done += 1 + processed_something = True + + if not write_queue.empty(): + writers[shard_id].write(write_queue.get()) + shard_id += 1 + if shard_id >= num_shards: + shard_id = 0 + images_written += 1 + processed_something = True + + if not processed_something: + time.sleep(0.2) + + if images_loaded == 0: + raise LoadError('no images loaded from input file') + logger.debug('%s images loaded' % images_loaded) + + if images_written == 0: + raise WriteError('no images written to database') + logger.info('%s images written to database' % images_written) + + if compute_mean: + _save_means(image_sum, images_written, mean_files) + + for writer in writers: + writer.close() + + def _create_lmdb(image_count, write_queue, batch_size, output_dir, summary_queue, num_threads, mean_files=None, @@ -595,6 +685,9 @@ def _load_thread(load_queue, write_queue, summary_queue, if backend == 'lmdb': datum = _array_to_datum(image, label, encoding) write_queue.put(datum) + elif backend == 'tfrecords': + tf_example = _array_to_tf_feature(image, label, encoding) + write_queue.put(tf_example) else: write_queue.put((image, label)) @@ -613,6 +706,50 @@ def _initial_image_sum(width, height, channels): return np.zeros((height, width, channels), np.float64) +def _int64_feature(value): + return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) + + +def _bytes_feature(value): + return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) + + +def _array_to_tf_feature(image, label, encoding): + """ + Creates a tensorflow Example from a numpy.ndarray + """ + if not encoding: + image_raw = image.tostring() + encoding_id = 0 + else: + s = StringIO() + if encoding == 'png': + PIL.Image.fromarray(image).save(s, format='PNG') + encoding_id = 1 + elif encoding == 'jpg': + PIL.Image.fromarray(image).save(s, format='JPEG', quality=90) + encoding_id = 2 + else: + raise ValueError('Invalid encoding type') + image_raw = s.getvalue() + + depth = image.shape[2] if len(image.shape) > 2 else 1 + + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'height': _int64_feature(image.shape[0]), + 'width': _int64_feature(image.shape[1]), + 'depth': _int64_feature(depth), + 'label': _int64_feature(label), + 'image_raw': _bytes_feature(image_raw), + 'encoding': _int64_feature(encoding_id), + # @TODO(tzaman) - add bitdepth flag? 
+ } + )) + return example.SerializeToString() + + def _array_to_datum(image, label, encoding): """ Create a caffe Datum from a numpy.ndarray @@ -763,7 +900,7 @@ def _save_means(image_sum, image_count, mean_files): ) parser.add_argument('-b', '--backend', default='lmdb', - help='The database backend - lmdb[default] or hdf5') + help='The database backend - lmdb[default], hdf5 or tfrecords') parser.add_argument('--lmdb_map_size', type=int, help='The initial map size for LMDB (in MB)') diff --git a/digits/tools/tensorflow/caffe_tf.proto b/digits/tools/tensorflow/caffe_tf.proto new file mode 100644 index 000000000..1b6186ddd --- /dev/null +++ b/digits/tools/tensorflow/caffe_tf.proto @@ -0,0 +1,43 @@ +// Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved. +// This file is a subset of the caffe.proto file from the Caffe project. +// It is meant to reduce the dependency on Caffe from the Tensorflow wrapper +// For NVIDIA DIGITS + +syntax = "proto2"; + +// Specifies the shape (dimensions) of a Blob. +message BlobShape { + repeated int64 dim = 1 [packed = true]; +} + +message BlobProto { + optional BlobShape shape = 7; + repeated float data = 5 [packed = true]; + repeated float diff = 6 [packed = true]; + repeated double double_data = 8 [packed = true]; + repeated double double_diff = 9 [packed = true]; + + // 4D dimensions -- deprecated. Use "shape" instead. + optional int32 num = 1 [default = 0]; + optional int32 channels = 2 [default = 0]; + optional int32 height = 3 [default = 0]; + optional int32 width = 4 [default = 0]; +} + +// The BlobProtoVector is simply a way to pass multiple blobproto instances +// around. +message BlobProtoVector { + repeated BlobProto blobs = 1; +} + +message Datum { + optional int32 channels = 1; + optional int32 height = 2; + optional int32 width = 3; + // the actual image data, in bytes + optional bytes data = 4; + optional int32 label = 5; + // Optionally, the datum could also hold float data. + repeated float float_data = 6; + optional bool encoded = 7 [default = false]; +} diff --git a/digits/tools/tensorflow/caffe_tf_pb2.py b/digits/tools/tensorflow/caffe_tf_pb2.py new file mode 100644 index 000000000..fe99857ee --- /dev/null +++ b/digits/tools/tensorflow/caffe_tf_pb2.py @@ -0,0 +1,293 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: caffe_tf.proto + +import sys +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 # noqa +# @@protoc_insertion_point(imports) + +_b = (sys.version_info[0] < 3 and (lambda x: x)) or (lambda x: x.encode('latin1')) + +_sym_db = _symbol_database.Default() + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='caffe_tf.proto', + package='', + serialized_pb=_b('\n\x0e\x63\x61\x66\x66\x65_tf.proto\"\x1c\n\tBlobShape\x12\x0f\n\x03\x64im\x18\x01 \x03(\x03\x42\x02\x10\x01\"\xc6\x01\n\tBlobProto\x12\x19\n\x05shape\x18\x07 \x01(\x0b\x32\n.BlobShape\x12\x10\n\x04\x64\x61ta\x18\x05 \x03(\x02\x42\x02\x10\x01\x12\x10\n\x04\x64iff\x18\x06 \x03(\x02\x42\x02\x10\x01\x12\x17\n\x0b\x64ouble_data\x18\x08 \x03(\x01\x42\x02\x10\x01\x12\x17\n\x0b\x64ouble_diff\x18\t \x03(\x01\x42\x02\x10\x01\x12\x0e\n\x03num\x18\x01 \x01(\x05:\x01\x30\x12\x13\n\x08\x63hannels\x18\x02 \x01(\x05:\x01\x30\x12\x11\n\x06height\x18\x03 \x01(\x05:\x01\x30\x12\x10\n\x05width\x18\x04 \x01(\x05:\x01\x30\",\n\x0f\x42lobProtoVector\x12\x19\n\x05\x62lobs\x18\x01 \x03(\x0b\x32\n.BlobProto\"\x81\x01\n\x05\x44\x61tum\x12\x10\n\x08\x63hannels\x18\x01 \x01(\x05\x12\x0e\n\x06height\x18\x02 \x01(\x05\x12\r\n\x05width\x18\x03 \x01(\x05\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\x12\r\n\x05label\x18\x05 \x01(\x05\x12\x12\n\nfloat_data\x18\x06 \x03(\x02\x12\x16\n\x07\x65ncoded\x18\x07 \x01(\x08:\x05\x66\x61lse') # noqa +) +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + + +_BLOBSHAPE = _descriptor.Descriptor( + name='BlobShape', + full_name='BlobShape', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='dim', full_name='BlobShape.dim', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + oneofs=[ + ], + serialized_start=18, + serialized_end=46, +) + + +_BLOBPROTO = _descriptor.Descriptor( + name='BlobProto', + full_name='BlobProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='shape', full_name='BlobProto.shape', index=0, + number=7, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='data', full_name='BlobProto.data', index=1, + number=5, type=2, cpp_type=6, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))), + _descriptor.FieldDescriptor( + name='diff', full_name='BlobProto.diff', index=2, + number=6, type=2, cpp_type=6, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))), + _descriptor.FieldDescriptor( + 
name='double_data', full_name='BlobProto.double_data', index=3, + number=8, type=1, cpp_type=5, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))), + _descriptor.FieldDescriptor( + name='double_diff', full_name='BlobProto.double_diff', index=4, + number=9, type=1, cpp_type=5, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))), + _descriptor.FieldDescriptor( + name='num', full_name='BlobProto.num', index=5, + number=1, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='channels', full_name='BlobProto.channels', index=6, + number=2, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='height', full_name='BlobProto.height', index=7, + number=3, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='width', full_name='BlobProto.width', index=8, + number=4, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + oneofs=[ + ], + serialized_start=49, + serialized_end=247, +) + + +_BLOBPROTOVECTOR = _descriptor.Descriptor( + name='BlobProtoVector', + full_name='BlobProtoVector', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='blobs', full_name='BlobProtoVector.blobs', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + oneofs=[ + ], + serialized_start=249, + serialized_end=293, +) + + +_DATUM = _descriptor.Descriptor( + name='Datum', + full_name='Datum', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='channels', full_name='Datum.channels', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='height', full_name='Datum.height', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='width', full_name='Datum.width', index=2, + number=3, type=5, 
cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='data', full_name='Datum.data', index=3, + number=4, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='label', full_name='Datum.label', index=4, + number=5, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='float_data', full_name='Datum.float_data', index=5, + number=6, type=2, cpp_type=6, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='encoded', full_name='Datum.encoded', index=6, + number=7, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + oneofs=[ + ], + serialized_start=296, + serialized_end=425, +) + +_BLOBPROTO.fields_by_name['shape'].message_type = _BLOBSHAPE +_BLOBPROTOVECTOR.fields_by_name['blobs'].message_type = _BLOBPROTO +DESCRIPTOR.message_types_by_name['BlobShape'] = _BLOBSHAPE +DESCRIPTOR.message_types_by_name['BlobProto'] = _BLOBPROTO +DESCRIPTOR.message_types_by_name['BlobProtoVector'] = _BLOBPROTOVECTOR +DESCRIPTOR.message_types_by_name['Datum'] = _DATUM + +BlobShape = _reflection.GeneratedProtocolMessageType('BlobShape', (_message.Message,), dict( + DESCRIPTOR=_BLOBSHAPE, + __module__='caffe_tf_pb2' + # @@protoc_insertion_point(class_scope:BlobShape) + )) +_sym_db.RegisterMessage(BlobShape) + +BlobProto = _reflection.GeneratedProtocolMessageType('BlobProto', (_message.Message,), dict( + DESCRIPTOR=_BLOBPROTO, + __module__='caffe_tf_pb2' + # @@protoc_insertion_point(class_scope:BlobProto) + )) +_sym_db.RegisterMessage(BlobProto) + +BlobProtoVector = _reflection.GeneratedProtocolMessageType('BlobProtoVector', (_message.Message,), dict( + DESCRIPTOR=_BLOBPROTOVECTOR, + __module__='caffe_tf_pb2' + # @@protoc_insertion_point(class_scope:BlobProtoVector) + )) +_sym_db.RegisterMessage(BlobProtoVector) + +Datum = _reflection.GeneratedProtocolMessageType('Datum', (_message.Message,), dict( + DESCRIPTOR=_DATUM, + __module__='caffe_tf_pb2' + # @@protoc_insertion_point(class_scope:Datum) + )) +_sym_db.RegisterMessage(Datum) + + +_BLOBSHAPE.fields_by_name['dim'].has_options = True +_BLOBSHAPE.fields_by_name['dim']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), + _b('\020\001')) +_BLOBPROTO.fields_by_name['data'].has_options = True +_BLOBPROTO.fields_by_name['data']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), + _b('\020\001')) +_BLOBPROTO.fields_by_name['diff'].has_options = True +_BLOBPROTO.fields_by_name['diff']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), + _b('\020\001')) +_BLOBPROTO.fields_by_name['double_data'].has_options = True +_BLOBPROTO.fields_by_name['double_data']._options = 
_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), + _b('\020\001')) +_BLOBPROTO.fields_by_name['double_diff'].has_options = True +_BLOBPROTO.fields_by_name['double_diff']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), + _b('\020\001')) +# @@protoc_insertion_point(module_scope) diff --git a/digits/tools/tensorflow/gan_grid.py b/digits/tools/tensorflow/gan_grid.py new file mode 100644 index 000000000..4b9a906e6 --- /dev/null +++ b/digits/tools/tensorflow/gan_grid.py @@ -0,0 +1,798 @@ +#!/usr/bin/env python2 +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. +# +# This document should comply with PEP-8 Style Guide +# Linter: pylint + +""" +TensorFlow training executable for DIGITS +Defines the training procedure + +Usage: +See the self-documenting flags below. + +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import threading +import time + +import datetime +import inspect +import logging +import math +import numpy as np +import os +import pickle + +from six.moves import xrange +import tensorflow as tf +from tensorflow.python.client import timeline +from tensorflow.python.lib.io import file_io +from tensorflow.core.framework import summary_pb2 + + +# Local imports +import utils as digits +import lr_policy +from model import Model + +import tf_data +import gandisplay + +# Constants +TF_INTRA_OP_THREADS = 0 +TF_INTER_OP_THREADS = 0 +MIN_LOGS_PER_TRAIN_EPOCH = 8 # torch default: 8 + +CELEBA_ALL_ATTRIBUTES = """ + 5_o_Clock_Shadow Arched_Eyebrows Attractive Bags_Under_Eyes Bald Bangs + Big_Lips Big_Nose Black_Hair Blond_Hair Blurry Brown_Hair Bushy_Eyebrows + Chubby Double_Chin Eyeglasses Goatee Gray_Hair Heavy_Makeup High_Cheekbones + Male Mouth_Slightly_Open Mustache Narrow_Eyes No_Beard Oval_Face Pale_Skin + Pointy_Nose Receding_Hairline Rosy_Cheeks Sideburns Smiling Straight_Hair + Wavy_Hair Wearing_Earrings Wearing_Hat Wearing_Lipstick Wearing_Necklace + Wearing_Necktie Young + """.split() + +CELEBA_EDITABLE_ATTRIBUTES = [ + 'Bald', 'Black_Hair', 'Blond_Hair', 'Eyeglasses', 'Male', 'Mustache', + 'Smiling', 'Young', 'Attractive', 'Pale_Skin', 'Big_Nose' +] + +CELEBA_EDITABLE_ATTRIBUTES_IDS = [CELEBA_ALL_ATTRIBUTES.index(attr) for attr in CELEBA_EDITABLE_ATTRIBUTES] + + +logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s', + datefmt='%Y-%m-%d %H:%M:%S', + level=logging.INFO) + +FLAGS = tf.app.flags.FLAGS + +# Basic model parameters. #float, integer, boolean, string +tf.app.flags.DEFINE_integer('batch_size', 16, """Number of images to process in a batch""") +tf.app.flags.DEFINE_integer( + 'croplen', 0, """Crop (x and y). 
A zero value means no cropping will be applied""") +tf.app.flags.DEFINE_integer('epoch', 1, """Number of epochs to train, -1 for unbounded""") +tf.app.flags.DEFINE_string('inference_db', '', """Directory with inference file source""") +tf.app.flags.DEFINE_integer( + 'validation_interval', 1, """Number of train epochs to complete, to perform one validation""") +tf.app.flags.DEFINE_string('labels_list', '', """Text file listing label definitions""") +tf.app.flags.DEFINE_string('mean', '', """Mean image file""") +tf.app.flags.DEFINE_float('momentum', '0.9', """Momentum""") # Not used by DIGITS front-end +tf.app.flags.DEFINE_string('network', '', """File containing network (model)""") +tf.app.flags.DEFINE_string('networkDirectory', '', """Directory in which network exists""") +tf.app.flags.DEFINE_string('optimization', 'sgd', """Optimization method""") +tf.app.flags.DEFINE_string('save', 'results', """Save directory""") +tf.app.flags.DEFINE_integer('seed', 0, """Fixed input seed for repeatable experiments""") +tf.app.flags.DEFINE_boolean('shuffle', False, """Shuffle records before training""") +tf.app.flags.DEFINE_float( + 'snapshotInterval', 1.0, + """Specifies the training epochs to be completed before taking a snapshot""") +tf.app.flags.DEFINE_string('snapshotPrefix', '', """Prefix of the weights/snapshots""") +tf.app.flags.DEFINE_string( + 'subtractMean', 'none', + """Select mean subtraction method. Possible values are 'image', 'pixel' or 'none'""") +tf.app.flags.DEFINE_string('train_db', '', """Directory with training file source""") +tf.app.flags.DEFINE_string( + 'train_labels', '', + """Directory with an optional and seperate labels file source for training""") +tf.app.flags.DEFINE_string('validation_db', '', """Directory with validation file source""") +tf.app.flags.DEFINE_string( + 'validation_labels', '', + """Directory with an optional and seperate labels file source for validation""") +tf.app.flags.DEFINE_string( + 'visualizeModelPath', '', """Constructs the current model for visualization""") +tf.app.flags.DEFINE_boolean( + 'visualize_inf', False, """Will output weights and activations for an inference job.""") +tf.app.flags.DEFINE_string( + 'weights', '', """Filename for weights of a model to use for fine-tuning""") + +# @TODO(tzaman): is the bitdepth in line with the DIGITS team? +tf.app.flags.DEFINE_integer('bitdepth', 8, """Specifies an image's bitdepth""") + +# @TODO(tzaman); remove torch mentions below +tf.app.flags.DEFINE_float('lr_base_rate', '0.01', """Learning rate""") +tf.app.flags.DEFINE_string( + 'lr_policy', 'fixed', + """Learning rate policy. (fixed, step, exp, inv, multistep, poly, sigmoid)""") +tf.app.flags.DEFINE_float( + 'lr_gamma', -1, + """Required to calculate learning rate. Applies to: (step, exp, inv, multistep, sigmoid)""") +tf.app.flags.DEFINE_float( + 'lr_power', float('Inf'), + """Required to calculate learning rate. Applies to: (inv, poly)""") +tf.app.flags.DEFINE_string( + 'lr_stepvalues', '', + """Required to calculate stepsize of the learning rate. Applies to: (step, multistep, sigmoid). 
+ For the 'multistep' lr_policy you can input multiple values seperated by commas""") + +# Tensorflow-unique arguments for DIGITS +tf.app.flags.DEFINE_string( + 'save_vars', 'all', + """Sets the collection of variables to be saved: 'all' or only 'trainable'.""") +tf.app.flags.DEFINE_string('summaries_dir', '', """Directory of Tensorboard Summaries (logdir)""") +tf.app.flags.DEFINE_boolean( + 'serving_export', False, """Flag for exporting an Tensorflow Serving model""") +tf.app.flags.DEFINE_boolean('log_device_placement', False, """Whether to log device placement.""") +tf.app.flags.DEFINE_integer( + 'log_runtime_stats_per_step', 0, + """Logs runtime statistics for Tensorboard every x steps, defaults to 0 (off).""") + +# Augmentation +tf.app.flags.DEFINE_string( + 'augFlip', 'none', + """The flip options {none, fliplr, flipud, fliplrud} as randompre-processing augmentation""") +tf.app.flags.DEFINE_float( + 'augNoise', 0., """The stddev of Noise in AWGN as pre-processing augmentation""") +tf.app.flags.DEFINE_float( + 'augContrast', 0., """The contrast factor's bounds as sampled from a random-uniform distribution + as pre-processing augmentation""") +tf.app.flags.DEFINE_bool( + 'augWhitening', False, """Performs per-image whitening by subtracting off its own mean and + dividing by its own standard deviation.""") +tf.app.flags.DEFINE_float( + 'augHSVh', 0., """The stddev of HSV's Hue shift as pre-processing augmentation""") +tf.app.flags.DEFINE_float( + 'augHSVs', 0., """The stddev of HSV's Saturation shift as pre-processing augmentation""") +tf.app.flags.DEFINE_float( + 'augHSVv', 0., """The stddev of HSV's Value shift as pre-processing augmentation""") + +# GAN Grid +tf.app.flags.DEFINE_string('zs_file', 'zs.pkl', """Pickle file containing z vectors to use""") +tf.app.flags.DEFINE_string('attributes_file', 'attributes.pkl', """Pickle file containing attribute vectors""") + + +def save_timeline_trace(run_metadata, save_dir, step): + tl = timeline.Timeline(run_metadata.step_stats) + ctf = tl.generate_chrome_trace_format(show_memory=True) + tl_fn = os.path.join(save_dir, 'timeline_%s.json' % step) + with open(tl_fn, 'w') as f: + f.write(ctf) + logging.info('Timeline trace written to %s', tl_fn) + + +def strip_data_from_graph_def(graph_def): + strip_def = tf.GraphDef() + for n0 in graph_def.node: + n = strip_def.node.add() + n.MergeFrom(n0) + if n.op == 'Const': + tensor = n.attr['value'].tensor + if (tensor.tensor_content): + tensor.tensor_content = '' + if (tensor.string_val): + del tensor.string_val[:] + return strip_def + + +def visualize_graph(graph_def, path): + graph_def = strip_data_from_graph_def(graph_def) + logging.info('Writing Graph Definition..') + file_io.write_string_to_file(path, str(graph_def)) + logging.info('Graph Definition Written.') + + +def average_head_keys(tags, vals): + """ Averages keys with same end (head) name. 
+ Example: foo1/bar=1 and foo2/bar=2 should collapse to bar=1.5 + """ + tail_tags = [w.split('/')[-1] for w in tags] + sums = {} + nums = {} + for a, b in zip(tail_tags, vals): + if a not in sums: + sums[a] = b + nums[a] = 1 + else: + sums[a] += b + nums[a] += 1 + tags_clean = sums.keys() + return tags_clean, np.asarray(sums.values())/np.asarray(nums.values()) + + +def summary_to_lists(summary_str): + """ Takes a Tensorflow stringified Summary object and returns only + the scalar values to a list of tags and a list of values + Args: + summary_str: string of a Tensorflow Summary object + Returns: + tags: list of tags + vals: list of values corresponding to the tag list + + """ + summ = summary_pb2.Summary() + summ.ParseFromString(summary_str) + tags = [] + vals = [] + for s in summ.value: + if s.HasField('simple_value'): # and s.simple_value: # Only parse scalar_summaries + if s.simple_value == float('Inf') or np.isnan(s.simple_value): + raise ValueError('Model diverged with %s = %s : Try decreasing your learning rate' % + (s.tag, s.simple_value)) + tags.append(s.tag) + vals.append(s.simple_value) + tags, vals = average_head_keys(tags, vals) + vals = np.asarray(vals) + return tags, vals + + +def print_summarylist(tags, vals): + """ Prints a nice one-line listing of tags and their values in a nice format + that corresponds to how the DIGITS regex reads it. + Args: + tags: an array of tags + vals: an array of values + Returns: + print_list: a string containing formatted tags and values + """ + print_list = '' + for i, key in enumerate(tags): + if vals[i] == float('Inf'): + raise ValueError('Infinite value %s = Inf' % key) + print_list = print_list + key + " = " + "{:.6f}".format(vals[i]) + if i < len(tags)-1: + print_list = print_list + ", " + return print_list + + +def dump(obj): + for attr in dir(obj): + print("obj.%s = %s" % (attr, getattr(obj, attr))) + + +def load_snapshot(sess, weight_path, var_candidates): + """ Loads a snapshot into a session from a weight path. Will only load the + weights that are both in the weight_path file and the passed var_candidates.""" + logging.info("Loading weights from pretrained model - %s ", weight_path) + reader = tf.train.NewCheckpointReader(weight_path) + var_map = reader.get_variable_to_shape_map() + + # Only obtain all the variables that are [in the current graph] AND [in the checkpoint] + vars_restore = [] + for vt in var_candidates: + for vm in var_map.keys(): + if vt.name.split(':')[0] == vm: + if ("global_step" not in vt.name) and not (vt.name.startswith("train/")): + vars_restore.append(vt) + logging.info('restoring %s -> %s' % (vm, vt.name)) + else: + logging.info('NOT restoring %s -> %s' % (vm, vt.name)) + + logging.info('Restoring %s variable ops.' % len(vars_restore)) + tf.train.Saver(vars_restore, max_to_keep=0, sharded=FLAGS.serving_export).restore(sess, weight_path) + logging.info('Variables restored.') + + +def save_snapshot(sess, saver, save_dir, snapshot_prefix, epoch, for_serving=False): + """ + Saves a snapshot of the current session, saving all variables previously defined + in the ctor of the saver. Also saves the flow of the graph itself (only once). + """ + number_dec = str(FLAGS.snapshotInterval-int(FLAGS.snapshotInterval))[2:] + if number_dec is '': + number_dec = '0' + epoch_fmt = "{:." 
+ number_dec + "f}" + + snapshot_file = os.path.join(save_dir, snapshot_prefix + '_' + epoch_fmt.format(epoch) + '.ckpt') + + logging.info('Snapshotting to %s', snapshot_file) + saver.save(sess, snapshot_file) + logging.info('Snapshot saved.') + + if for_serving: + # @TODO(tzaman) : we could further extend this by supporting tensorflow-serve + logging.error('NotImplementedError: Tensorflow-Serving support.') + exit(-1) + + # Past this point the graph shouldn't be changed, so saving it once is enough + filename_graph = os.path.join(save_dir, snapshot_prefix + '.graph_def') + if not os.path.isfile(filename_graph): + with open(filename_graph, 'wb') as f: + logging.info('Saving graph to %s', filename_graph) + f.write(sess.graph_def.SerializeToString()) + logging.info('Saved graph to %s', filename_graph) + # meta_graph_def = tf.train.export_meta_graph(filename='?') + + +def save_weight_visualization(w_names, a_names, w, a): + try: + import h5py + except ImportError: + logging.error("Attempt to create HDF5 Loader but h5py is not installed.") + exit(-1) + fn = os.path.join(FLAGS.save, 'vis.h5') + vis_db = h5py.File(fn, 'w') + db_layers = vis_db.create_group("layers") + + logging.info('Saving visualization to %s', fn) + for i in range(0, len(w)): + dset = db_layers.create_group(str(i)) + dset.attrs['var'] = w_names[i].name + dset.attrs['op'] = a_names[i] + if w[i].shape: + dset.create_dataset('weights', data=w[i]) + if a[i].shape: + dset.create_dataset('activations', data=a[i]) + vis_db.close() + + +def Inference(sess, model): + """ + Runs one inference (evaluation) epoch (all the files in the loader) + """ + + inference_op = model.towers[0].inference + if FLAGS.labels_list: # Classification -> assume softmax usage + # Append a softmax op + inference_op = tf.nn.softmax(inference_op) + + weight_vars = [] + activation_ops = [] + if FLAGS.visualize_inf: + trainable_weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) + # Retrace the origin op of each variable + for n in tf.get_default_graph().as_graph_def().node: + for tw in trainable_weights: + tw_name_reader = tw.name.split(':')[0] + '/read' + if tw_name_reader in n.input: + node_op_name = n.name + ':0' # @TODO(tzaman) this assumes exactly 1 output - allow to be dynamic! + weight_vars.append(tw) + activation_ops.append(node_op_name) + continue + + try: + t = 0 + + with open(FLAGS.attributes_file, 'rb') as f: + attribute_zs = pickle.load(f) + + while not False: + # model.queue_coord.should_stop(): + attributes = app.GetAttributes() + z = np.zeros(100) + + for idx, attr_scale in enumerate(attributes): + z += (attr_scale / 25) * attribute_zs[CELEBA_EDITABLE_ATTRIBUTES_IDS[idx]] + + feed_dict = {model.time_placeholder: float(t), + model.attribute_placeholder: z} + preds = sess.run(fetches=inference_op, feed_dict=feed_dict) + + app.DisplayCell(preds) + + t += 1e-5 * app.GetSpeed() * FLAGS.batch_size + + except tf.errors.OutOfRangeError: + print('Done: tf.errors.OutOfRangeError') + + +def Validation(sess, model, current_epoch): + """ + Runs one validation epoch. + """ + + # @TODO(tzaman): utilize the coordinator by resetting the queue after 1 epoch. 
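The GAN inference loop above turns the wx slider positions into a latent-vector offset: every editable CelebA attribute has a precomputed direction vector loaded from attributes.pkl, and each slider value scales how far to move along that direction. A minimal NumPy sketch of that combination, using random vectors and made-up slider values in place of the pickled data:

    import numpy as np

    # stand-ins for the pickled attribute direction vectors (one 100-dim vector per attribute)
    attribute_zs = [np.random.randn(100) for _ in range(40)]
    editable_ids = [4, 8, 9]        # indices of Bald, Black_Hair, Blond_Hair in the full attribute list
    slider_values = [0, -50, 100]   # the wx sliders range from -100 to 100

    z = np.zeros(100)
    for idx, attr_scale in enumerate(slider_values):
        # same scaling as the training script: slider units divided by 25
        z += (attr_scale / 25.0) * attribute_zs[editable_ids[idx]]
    # z is then fed to the generator through model.attribute_placeholder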
+ # see https://github.com/tensorflow/tensorflow/issues/4535#issuecomment-248990633 + + print_vals_sum = 0 + steps = 0 + while (steps * model.dataloader.batch_size) < model.dataloader.get_total(): + summary_str = sess.run(model.summary) + # Parse the summary + tags, print_vals = summary_to_lists(summary_str) + print_vals_sum = print_vals + print_vals_sum + steps += 1 + + print_list = print_summarylist(tags, print_vals_sum/steps) + + logging.info("Validation (epoch " + str(current_epoch) + "): " + print_list) + + +def loadLabels(filename): + with open(filename) as f: + return f.readlines() + + +def input_generator(zs_file, batch_size): + + time_placeholder = tf.placeholder(dtype=tf.float32, shape=()) + attribute_placeholder = tf.placeholder(dtype=tf.float32, shape=(100,)) + + def l2_norm(x): + euclidean_norm = tf.sqrt(tf.reduce_sum(tf.square(x))) + return euclidean_norm + + def dot_product(x, y): + return tf.reduce_sum(tf.mul(x, y)) + + def slerp(initial, final, progress): + omega = tf.acos(dot_product(initial / l2_norm(initial), final / l2_norm(final))) + so = tf.sin(omega) + return tf.sin((1.0-progress)*omega) / so * initial + tf.sin(progress*omega)/so * final + + with open(zs_file, 'rb') as f: + zs = pickle.load(f) + img_count = len(zs) + zs = tf.constant(zs, dtype=tf.float32) + + tensors = [] + + epoch = tf.to_int32(time_placeholder) + indices = tf.range(batch_size) + indices_init = (indices * batch_size + epoch) % img_count + indices_final = (indices_init + 1) % img_count + + for i in xrange(batch_size): + z_init = zs[indices_init[i]] + z_final = zs[indices_final[i]] + + progress = tf.mod(time_placeholder, 1) + + # progress = tf.Print(progress, [progress]) + + z = slerp(z_init, z_final, progress) + + tensors.append(z) + + batch = tf.pack(tensors) + attribute_placeholder + + return batch, time_placeholder, attribute_placeholder + + +def main(_): + + # Always keep the cpu as default + with tf.Graph().as_default(), tf.device('/cpu:0'): + + if FLAGS.validation_interval == 0: + FLAGS.validation_db = None + + # Set Tensorboard log directory + if FLAGS.summaries_dir: + # The following gives a nice but unrobust timestamp + FLAGS.summaries_dir = os.path.join(FLAGS.summaries_dir, datetime.datetime.now().strftime("%Y%m%d_%H%M%S")) + + if not FLAGS.train_db and not FLAGS.validation_db and not FLAGS.inference_db and not FLAGS.visualizeModelPath: + logging.error("At least one of the following file sources should be specified: " + "train_db, validation_db or inference_db") + exit(-1) + + if FLAGS.seed: + tf.set_random_seed(FLAGS.seed) + + batch_size_train = FLAGS.batch_size + batch_size_val = FLAGS.batch_size + logging.info("Train batch size is %s and validation batch size is %s", batch_size_train, batch_size_val) + + # This variable keeps track of next epoch, when to perform validation. + next_validation = FLAGS.validation_interval + logging.info("Training epochs to be completed for each validation : %s", next_validation) + + # This variable keeps track of next epoch, when to save model weights. 
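input_generator above animates the grid by spherical linear interpolation (slerp) between consecutive stored z vectors: the integer part of the time placeholder picks which pair of vectors to blend, and the fractional part is the interpolation progress. A plain NumPy version of the same formula, included only as an illustration:

    import numpy as np

    def slerp_np(z0, z1, t):
        """Spherically interpolate between two latent vectors; t runs from 0 to 1."""
        omega = np.arccos(np.dot(z0 / np.linalg.norm(z0), z1 / np.linalg.norm(z1)))
        so = np.sin(omega)
        return np.sin((1.0 - t) * omega) / so * z0 + np.sin(t * omega) / so * z1

    z0, z1 = np.random.randn(100), np.random.randn(100)
    halfway = slerp_np(z0, z1, 0.5)   # halfway along the great-circle path between z0 and z1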
+ next_snapshot_save = FLAGS.snapshotInterval + logging.info("Training epochs to be completed before taking a snapshot : %s", next_snapshot_save) + last_snapshot_save_epoch = 0 + + snapshot_prefix = FLAGS.snapshotPrefix if FLAGS.snapshotPrefix else FLAGS.network.split('.')[0] + logging.info("Model weights will be saved as %s__Model.ckpt", snapshot_prefix) + + if not os.path.exists(FLAGS.save): + os.makedirs(FLAGS.save) + logging.info("Created a directory %s to save all the snapshots", FLAGS.save) + + # Load mean variable + if FLAGS.subtractMean == 'none': + mean_loader = None + else: + if not FLAGS.mean: + logging.error("subtractMean parameter not set to 'none' yet mean image path is unset") + exit(-1) + logging.info("Loading mean tensor from %s file", FLAGS.mean) + mean_loader = tf_data.MeanLoader(FLAGS.mean, FLAGS.subtractMean, FLAGS.bitdepth) + + classes = 0 + nclasses = 0 + if FLAGS.labels_list: + logging.info("Loading label definitions from %s file", FLAGS.labels_list) + classes = loadLabels(FLAGS.labels_list) + nclasses = len(classes) + if not classes: + logging.error("Reading labels file %s failed.", FLAGS.labels_list) + exit(-1) + logging.info("Found %s classes", nclasses) + + # Create a data-augmentation dict + aug_dict = { + 'aug_flip': FLAGS.augFlip, + 'aug_noise': FLAGS.augNoise, + 'aug_contrast': FLAGS.augContrast, + 'aug_whitening': FLAGS.augWhitening, + 'aug_HSV': { + 'h': FLAGS.augHSVh, + 's': FLAGS.augHSVs, + 'v': FLAGS.augHSVv, + }, + } + + # hard-code GAN inference + FLAGS.inference_db = "grid.gan" + + # Import the network file + path_network = os.path.join(os.path.dirname(os.path.realpath(__file__)), FLAGS.networkDirectory, FLAGS.network) + exec(open(path_network).read(), globals()) + + try: + UserModel + except NameError: + logging.error("The user model class 'UserModel' is not defined.") + exit(-1) + if not inspect.isclass(UserModel): # noqa + logging.error("The user model class 'UserModel' is not a class.") + exit(-1) + # @TODO(tzaman) - add mode checks to UserModel + + if FLAGS.train_db: + with tf.name_scope(digits.STAGE_TRAIN) as stage_scope: + train_model = Model(digits.STAGE_TRAIN, FLAGS.croplen, nclasses, FLAGS.optimization, FLAGS.momentum) + train_model.create_dataloader(FLAGS.train_db) + train_model.dataloader.setup(FLAGS.train_labels, + FLAGS.shuffle, + FLAGS.bitdepth, + batch_size_train, + FLAGS.epoch, + FLAGS.seed) + train_model.dataloader.set_augmentation(mean_loader, aug_dict) + train_model.create_model(UserModel, stage_scope) # noqa + + if FLAGS.validation_db: + with tf.name_scope(digits.STAGE_VAL) as stage_scope: + val_model = Model(digits.STAGE_VAL, FLAGS.croplen, nclasses) + val_model.create_dataloader(FLAGS.validation_db) + val_model.dataloader.setup(FLAGS.validation_labels, + False, + FLAGS.bitdepth, + batch_size_val, + 1e9, + FLAGS.seed) # @TODO(tzaman): set numepochs to 1 + val_model.dataloader.set_augmentation(mean_loader) + val_model.create_model(UserModel, stage_scope) # noqa + + if FLAGS.inference_db: + with tf.name_scope(digits.STAGE_INF) as stage_scope: + inf_model = Model(digits.STAGE_INF, FLAGS.croplen, nclasses) + inf_model.create_dataloader(FLAGS.inference_db) + inf_model.dataloader.setup(None, False, FLAGS.bitdepth, FLAGS.batch_size, 1, FLAGS.seed) + inf_model.dataloader.set_augmentation(mean_loader) + + batch_x, time_placeholder, attribute_placeholder = input_generator(FLAGS.zs_file, FLAGS.batch_size) + + inf_model.create_model(UserModel, stage_scope, batch_x=batch_x) # noqa + + inf_model.time_placeholder = time_placeholder + 
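Note that the network description is not imported as a regular module: main() exec()s the user's network file into the script's globals and then verifies that it defined a class named UserModel. A stripped-down sketch of that loading pattern (the file name and its contents here are hypothetical):

    import inspect

    network_path = 'my_network.py'   # hypothetical user file expected to define `class UserModel: ...`
    exec(open(network_path).read(), globals())

    try:
        UserModel
    except NameError:
        raise RuntimeError("the network file did not define a 'UserModel' class")
    if not inspect.isclass(UserModel):   # noqa - name is injected by the exec() above
        raise RuntimeError("'UserModel' is not a class")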
inf_model.attribute_placeholder = attribute_placeholder + + # Start running operations on the Graph. allow_soft_placement must be set to + # True to build towers on GPU, as some of the ops do not have GPU + # implementations. + sess = tf.Session(config=tf.ConfigProto( + allow_soft_placement=True, # will automatically do non-gpu supported ops on cpu + inter_op_parallelism_threads=TF_INTER_OP_THREADS, + intra_op_parallelism_threads=TF_INTRA_OP_THREADS, + log_device_placement=FLAGS.log_device_placement)) + + if FLAGS.visualizeModelPath: + visualize_graph(sess.graph_def, FLAGS.visualizeModelPath) + exit(0) + + # Saver creation. + if FLAGS.save_vars == 'all': + vars_to_save = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) + elif FLAGS.save_vars == 'trainable': + vars_to_save = tf.all_variables() + else: + logging.error('Unknown save_var flag (%s)' % FLAGS.save_vars) + exit(-1) + saver = tf.train.Saver(vars_to_save, max_to_keep=0, sharded=FLAGS.serving_export) + + # Initialize variables + init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer()) + sess.run(init_op) + + # If weights option is set, preload weights from existing models appropriately + if FLAGS.weights: + load_snapshot(sess, FLAGS.weights, tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)) + + # Tensorboard: Merge all the summaries and write them out + writer = tf.train.SummaryWriter(os.path.join(FLAGS.summaries_dir, 'tb'), sess.graph) + + # If we are inferencing, only do that. + if FLAGS.inference_db: + inf_model.start_queue_runners(sess) + Inference(sess, inf_model) + + queue_size_op = [] + for n in tf.get_default_graph().as_graph_def().node: + if '_Size' in n.name: + queue_size_op.append(n.name+':0') + + start = time.time() # @TODO(tzaman) - removeme + + # Initial Forward Validation Pass + if FLAGS.validation_db: + val_model.start_queue_runners(sess) + Validation(sess, val_model, 0) + + if FLAGS.train_db: + # During training, a log output should occur at least X times per epoch or every X images, whichever lower + train_steps_per_epoch = train_model.dataloader.get_total() / batch_size_train + if math.ceil(train_steps_per_epoch/MIN_LOGS_PER_TRAIN_EPOCH) < math.ceil(5000/batch_size_train): + logging_interval_step = int(math.ceil(train_steps_per_epoch/MIN_LOGS_PER_TRAIN_EPOCH)) + else: + logging_interval_step = int(math.ceil(5000/batch_size_train)) + logging.info("During training. details will be logged after every %s steps (batches)", + logging_interval_step) + + # epoch value will be calculated for every batch size. To maintain unique epoch value between batches, + # it needs to be rounded to the required number of significant digits. + epoch_round = 0 # holds the required number of significant digits for round function. 
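The logging cadence computed above uses whichever interval is shorter: at least MIN_LOGS_PER_TRAIN_EPOCH (8) log lines per epoch, or one log line per 5000 images. A worked example with assumed numbers, 50,000 training images and the default batch size of 16:

    import math

    train_images = 50000                                    # assumed dataset size
    batch_size = 16
    steps_per_epoch = train_images / batch_size             # 3125 batches per epoch
    per_epoch_interval = math.ceil(steps_per_epoch / 8)     # 391 -> 8 logs per epoch
    per_images_interval = math.ceil(5000 / batch_size)      # 313 -> one log per 5000 images
    logging_interval_step = int(min(per_epoch_interval, per_images_interval))   # 313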
+ tmp_batchsize = batch_size_train*logging_interval_step + while tmp_batchsize <= train_model.dataloader.get_total(): + tmp_batchsize = tmp_batchsize * 10 + epoch_round += 1 + logging.info("While logging, epoch value will be rounded to %s significant digits", epoch_round) + + # Create the learning rate policy + total_training_steps = train_model.dataloader.num_epochs * train_model.dataloader.get_total() / \ + train_model.dataloader.batch_size + lrpolicy = lr_policy.LRPolicy(FLAGS.lr_policy, + FLAGS.lr_base_rate, + FLAGS.lr_gamma, + FLAGS.lr_power, + total_training_steps, + FLAGS.lr_stepvalues) + train_model.start_queue_runners(sess) + + # Training + logging.info('Started training the model') + + current_epoch = 0 + try: + step = 0 + step_last_log = 0 + print_vals_sum = 0 + while not train_model.queue_coord.should_stop(): + log_runtime = FLAGS.log_runtime_stats_per_step and (step % FLAGS.log_runtime_stats_per_step == 0) + + run_options = None + run_metadata = None + if log_runtime: + # For a HARDWARE_TRACE you need NVIDIA CUPTI, a 'CUDA-EXTRA' + # SOFTWARE_TRACE HARDWARE_TRACE FULL_TRACE + run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE) + run_metadata = tf.RunMetadata() + + feed_dict = {train_model.learning_rate: lrpolicy.get_learning_rate(step)} + + if False: + for op in train_model.train: + _, summary_str, step = sess.run([op, train_model.summary, train_model.global_step], + feed_dict=feed_dict, + options=run_options, + run_metadata=run_metadata) + else: + _, summary_str, step = sess.run([train_model.train, + train_model.summary, + train_model.global_step], + feed_dict=feed_dict, + options=run_options, + run_metadata=run_metadata) + + # HACK + step = step / len(train_model.train) + + # logging.info(sess.run(queue_size_op)) # DEVELOPMENT: for checking the queue size + + if log_runtime: + writer.add_run_metadata(run_metadata, str(step)) + save_timeline_trace(run_metadata, FLAGS.save, int(step)) + + writer.add_summary(summary_str, step) + + # Parse the summary + tags, print_vals = summary_to_lists(summary_str) + + print_vals_sum = print_vals + print_vals_sum + + # @TODO(tzaman): account for variable batch_size value on very last epoch + current_epoch = round((step * batch_size_train) / train_model.dataloader.get_total(), epoch_round) + + # Start with a forward pass + if ((step % logging_interval_step) == 0): + steps_since_log = step - step_last_log + print_list = print_summarylist(tags, print_vals_sum/steps_since_log) + logging.info("Training (epoch " + str(current_epoch) + "): " + print_list) + print_vals_sum = 0 + step_last_log = step + + # Potential Validation Pass + if FLAGS.validation_db and current_epoch >= next_validation: + Validation(sess, val_model, current_epoch) + # Find next nearest epoch value that exactly divisible by FLAGS.validation_interval: + next_validation = (round(float(current_epoch)/FLAGS.validation_interval) + 1) * \ + FLAGS.validation_interval + + # Saving Snapshot + if FLAGS.snapshotInterval > 0 and current_epoch >= next_snapshot_save: + save_snapshot(sess, saver, FLAGS.save, snapshot_prefix, current_epoch, FLAGS.serving_export) + + # To find next nearest epoch value that exactly divisible by FLAGS.snapshotInterval + next_snapshot_save = (round(float(current_epoch)/FLAGS.snapshotInterval) + 1) * \ + FLAGS.snapshotInterval + last_snapshot_save_epoch = current_epoch + writer.flush() + except tf.errors.OutOfRangeError: + logging.info('Done training for epochs: tf.errors.OutOfRangeError') + except ValueError as err: + logging.error(err.args[0]) + 
exit(-1) # DIGITS wants a dirty error. + except (KeyboardInterrupt): + logging.info('Interrupt signal received.') + + # If required, perform final snapshot save + if FLAGS.snapshotInterval > 0 and FLAGS.epoch > last_snapshot_save_epoch: + save_snapshot(sess, saver, FLAGS.save, snapshot_prefix, FLAGS.epoch, FLAGS.serving_export) + + print('Training wall-time:', time.time()-start) # @TODO(tzaman) - removeme + + # If required, perform final Validation pass + if FLAGS.validation_db and current_epoch >= next_validation: + Validation(sess, val_model, current_epoch) + + if FLAGS.train_db: + del train_model + if FLAGS.validation_db: + del val_model + if FLAGS.inference_db: + del inf_model + + # We need to call sess.close() because we've used a with block + sess.close() + + writer.close() + logging.info('END') + exit(0) + +if __name__ == '__main__': + + app = gandisplay.DemoApp(0, + grid_size=np.sqrt(FLAGS.batch_size)*64, + attributes=CELEBA_EDITABLE_ATTRIBUTES) + + t = threading.Thread(target=tf.app.run, args=()) + t.start() + + app.MainLoop() diff --git a/digits/tools/tensorflow/gandisplay.py b/digits/tools/tensorflow/gandisplay.py new file mode 100644 index 000000000..3479ffa19 --- /dev/null +++ b/digits/tools/tensorflow/gandisplay.py @@ -0,0 +1,263 @@ +import time +import numpy as np +import wx + +# This has been set up to optionally use the wx.BufferedDC if +# USE_BUFFERED_DC is True, it will be used. Otherwise, it uses the raw +# wx.Memory DC , etc. + +# USE_BUFFERED_DC = False +USE_BUFFERED_DC = True + +myEVT = wx.NewEventType() +DISPLAY_GRID_EVT = wx.PyEventBinder(myEVT, 1) + + +class MyEvent(wx.PyCommandEvent): + """Event to signal that a count value is ready""" + def __init__(self, etype, eid, value=None): + """Creates the event object""" + wx.PyCommandEvent.__init__(self, etype, eid) + self._value = value + + def GetValue(self): + """Returns the value from the event. + @return: the value of this event + + """ + return self._value + + +class BufferedWindow(wx.Window): + + """ + + A Buffered window class. + + To use it, subclass it and define a Draw(DC) method that takes a DC + to draw to. In that method, put the code needed to draw the picture + you want. The window will automatically be double buffered, and the + screen will be automatically updated when a Paint event is received. + + When the drawing needs to change, you app needs to call the + UpdateDrawing() method. Since the drawing is stored in a bitmap, you + can also save the drawing to file by calling the + SaveToFile(self, file_name, file_type) method. + + """ + def __init__(self, *args, **kwargs): + # make sure the NO_FULL_REPAINT_ON_RESIZE style flag is set. + kwargs['style'] = kwargs.setdefault('style', wx.NO_FULL_REPAINT_ON_RESIZE) | wx.NO_FULL_REPAINT_ON_RESIZE + wx.Window.__init__(self, *args, **kwargs) + + wx.EVT_PAINT(self, self.OnPaint) + wx.EVT_SIZE(self, self.OnSize) + + # OnSize called to make sure the buffer is initialized. + # This might result in OnSize getting called twice on some + # platforms at initialization, but little harm done. + self.OnSize(None) + self.paint_count = 0 + + def Draw(self, dc): + # just here as a place holder. 
+ # This method should be over-ridden when subclassed + pass + + def OnPaint(self, event): + # All that is needed here is to draw the buffer to screen + if USE_BUFFERED_DC: + dc = wx.BufferedPaintDC(self, self._Buffer) + else: + dc = wx.PaintDC(self) + dc.DrawBitmap(self._Buffer, 0, 0) + + def OnSize(self, event): + # The Buffer init is done here, to make sure the buffer is always + # the same size as the Window + # Size = self.GetClientSizeTuple() + Size = self.ClientSize + + # Make new offscreen bitmap: this bitmap will always have the + # current drawing in it, so it can be used to save the image to + # a file, or whatever. + self._Buffer = wx.EmptyBitmap(*Size) + self.UpdateDrawing() + + def SaveToFile(self, FileName, FileType=wx.BITMAP_TYPE_PNG): + # This will save the contents of the buffer + # to the specified file. See the wxWindows docs for + # wx.Bitmap::SaveFile for the details + self._Buffer.SaveFile(FileName, FileType) + + def UpdateDrawing(self): + """ + This would get called if the drawing needed to change, for whatever reason. + + The idea here is that the drawing is based on some data generated + elsewhere in the system. If that data changes, the drawing needs to + be updated. + + This code re-draws the buffer, then calls Update, which forces a paint event. + """ + dc = wx.MemoryDC() + dc.SelectObject(self._Buffer) + self.Draw(dc) + del dc # need to get rid of the MemoryDC before Update() is called. + self.Refresh() + self.Update() + + +class DrawWindow(BufferedWindow): + def __init__(self, *args, **kwargs): + # Any data the Draw() function needs must be initialized before + # calling BufferedWindow.__init__, as it will call the Draw function. + self.DrawData = {} + BufferedWindow.__init__(self, *args, **kwargs) + + def Draw(self, dc): + dc.SetBackground(wx.Brush("White")) + dc.Clear() # make sure you clear the bitmap! + + # Here's the actual drawing code. 
+ for key, data in self.DrawData.items(): + if key == "text": + dc.DrawText(data, 0, 0) + elif key == "np": + data = data.astype('uint8') + img_count = data.shape[0] + height = data.shape[1] + width = data.shape[2] + + grid_size = int(np.sqrt(img_count)) + + size = (grid_size * width, grid_size * height) + + if True: # self.size != size: + self.size = size + self.SetSize(size) + + image = wx.EmptyImage(width, height) + + for i in xrange(img_count): + x = width * (i // grid_size) + y = height * (i % grid_size) + s = data[i].tostring() + image.SetData(s) + + wxBitmap = image.ConvertToBitmap() + dc.DrawBitmap(wxBitmap, x=x, y=y) + + +class TestFrame(wx.Frame): + + SLIDER_WIDTH = 100 + SLIDER_BORDER = 50 + STATUS_HEIGHT = 20 + + def __init__(self, parent=None, grid_size=640, attributes=[]): + wx.Frame.__init__(self, parent, + size=(grid_size + self.SLIDER_WIDTH + self.SLIDER_BORDER, grid_size + self.STATUS_HEIGHT), + title="GAN Demo", + style=wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER) + + # Set up the MenuBar + MenuBar = wx.MenuBar() + + file_menu = wx.Menu() + + item = file_menu.Append(wx.ID_EXIT, text="&Exit") + self.Bind(wx.EVT_MENU, self.OnQuit, item) + MenuBar.Append(file_menu, "&File") + + self.SetMenuBar(MenuBar) + + self.statusbar = self.CreateStatusBar() + self.statusbar.SetStatusText('Initialising...') + + # Set up UI elements + panel = wx.Panel(self) + self.Window = DrawWindow(panel, size=(grid_size, grid_size)) + + hbox = wx.BoxSizer(wx.HORIZONTAL) + hbox.Add(self.Window, 1, wx.ALIGN_LEFT) + + # Sliders + vbox = wx.BoxSizer(wx.VERTICAL) + self.speed_slider = wx.Slider(panel, -1, value=5, minValue=0, maxValue=10, pos=wx.DefaultPosition, + size=(self.SLIDER_WIDTH, -1), + style=wx.SL_AUTOTICKS | wx.SL_HORIZONTAL | wx.SL_LABELS) + + slider_text = wx.StaticText(panel, label='Speed') + vbox.Add(slider_text, 0, wx.ALIGN_CENTRE) + vbox.Add(self.speed_slider, 0, wx.ALIGN_CENTRE) + + self.attribute_sliders = [] + for attribute in attributes: + slider_text = wx.StaticText(panel, label=attribute) + slider = wx.Slider(panel, -1, value=0, minValue=-100, maxValue=100, pos=wx.DefaultPosition, + size=(self.SLIDER_WIDTH, -1), + style=wx.SL_AUTOTICKS | wx.SL_HORIZONTAL | wx.SL_LABELS) + + vbox.Add(slider_text, 0, wx.ALIGN_CENTRE) + vbox.Add(slider, 0, wx.ALIGN_CENTRE) + self.attribute_sliders.append(slider) + + hbox.Add(vbox, 0, wx.ALIGN_RIGHT) + panel.SetSizer(hbox) + + self.Window.DrawData = {'text': u'Initialising...'} + self.Window.UpdateDrawing() + + # to measure frames per second + self.last_frame_timestamp = None + self.last_fps_update = None + + # add panel to frame + frameSizer = wx.BoxSizer(wx.VERTICAL) + frameSizer.Add(panel, 0, wx.EXPAND | wx.ALIGN_LEFT) + self.SetSizer(frameSizer) + + self.Show() + + self.Fit() + + self.Bind(DISPLAY_GRID_EVT, self.OnDisplayCell) + + def OnQuit(self, event): + self.Close(True) + + def OnDisplayCell(self, evt): + array = evt.GetValue() + self.Window.DrawData = {'np': array} + self.Window.UpdateDrawing() + + if self.last_frame_timestamp is not None: + fps = 1. 
/ (time.time() - self.last_frame_timestamp) + if (self.last_fps_update is None) or (time.time() - self.last_fps_update > 0.5): + self.statusbar.SetStatusText('%.1ffps' % fps) + self.last_fps_update = time.time() + self.last_frame_timestamp = time.time() + + +class DemoApp(wx.App): + + def __init__(self, arg, grid_size, attributes): + self.gan_grid_size = grid_size + self.attributes = attributes + super(DemoApp, self).__init__(arg) + + def OnInit(self): + self.frame = TestFrame(grid_size=self.gan_grid_size, attributes=self.attributes) + self.SetTopWindow(self.frame) + return True + + def DisplayCell(self, array): + evt = MyEvent(myEVT, -1, array) + wx.PostEvent(self.frame, evt) + + def GetSpeed(self): + return self.frame.speed_slider.GetValue() + + def GetAttributes(self): + return [s.GetValue() for s in self.frame.attribute_sliders] diff --git a/digits/tools/tensorflow/lr_policy.py b/digits/tools/tensorflow/lr_policy.py new file mode 100644 index 000000000..d8221c362 --- /dev/null +++ b/digits/tools/tensorflow/lr_policy.py @@ -0,0 +1,117 @@ +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. +# +# This document should comply with PEP-8 Style Guide +# Linter: pylint + +""" +Class for generating Caffe-style learning rates using different policies. + +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import logging +import math + +logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s', + datefmt='%Y-%m-%d %H:%M:%S', + level=logging.INFO) + + +class LRPolicy(object): + """This class contains details of learning rate policies that are used in caffe. + Calculates and returns the current learning rate. The currently implemented learning rate + policies are as follows: + - fixed: always return base_lr. + - step: return base_lr * gamma ^ (floor(iter / step)) + - exp: return base_lr * gamma ^ iter + - inv: return base_lr * (1 + gamma * iter) ^ (- power) + - multistep: similar to step but it allows non uniform steps defined by + stepvalue + - poly: the effective learning rate follows a polynomial decay, to be + zero by the max_steps. return base_lr (1 - iter/max_steps) ^ (power) + - sigmoid: the effective learning rate follows a sigmod decay + return base_lr ( 1/(1 + exp(-gamma * (iter - stepsize)))) + """ + + def __init__(self, policy, base_rate, gamma, power, max_steps, step_values): + """Initialize a learning rate policy + Args: + policy: Learning rate policy + base_rate: Base learning rate + gamma: parameter to compute learning rate + power: parameter to compute learning rate + max_steps: parameter to compute learning rate + step_values: parameter(s) to compute learning rate. 
should be a string, multiple values divided as csv + Returns: + - + """ + self.policy = policy + self.base_rate = base_rate + self.gamma = gamma + self.power = power + self.max_steps = max_steps + self.step_values = step_values + if self.step_values: + self.stepvalues_list = map(float, step_values.split(',')) + else: + self.stepvalues_list = [] + + if (self.max_steps < len(self.stepvalues_list)): + self.policy = 'step' + self.stepvalues_list[0] = 1 + logging.info("Maximum iterations (i.e., %s) is less than provided step values count " + "(i.e, %s), so learning rate policy is reset to (%s) policy with the " + "step value (%s).", + self.max_steps, len(self.stepvalues_list), + self.policy, + self.stepvalues_list[0]) + else: # Converting stepsize percentages into values + for i in range(len(self.stepvalues_list)): + self.stepvalues_list[i] = round(self.max_steps * self.stepvalues_list[i] / 100) + # Avoids 'nan' values during learning rate calculation + if self.stepvalues_list[i] == 0: + self.stepvalues_list[i] = 1 + + if (self.policy == 'step') or (self.policy == 'sigmoid'): + # If the policy is not multistep, then even though multiple step values + # are provided as input, we will consider only the first value. + self.step_size = self.stepvalues_list[0] + elif (self.policy == 'multistep'): + self.current_step = 0 # This counter is important to take arbitary steps + self.stepvalue_size = len(self.stepvalues_list) + + def get_learning_rate(self, step): + """Initialize a learning rate policy + Args: + step: the current step for which the learning rate should be computed + Returns: + rate: the learning rate for the requested step + """ + rate = 0 + progress = 100 * (step / self.max_steps) # expressed in percent units + + if self.policy == "fixed": + rate = self.base_rate + elif self.policy == "step": + current_step = math.floor(step/self.step_size) + rate = self.base_rate * math.pow(self.gamma, current_step) + elif self.policy == "exp": + rate = self.base_rate * math.pow(self.gamma, progress) + elif self.policy == "inv": + rate = self.base_rate * math.pow(1 + self.gamma * progress, - self.power) + elif self.policy == "multistep": + if ((self.current_step < self.stepvalue_size) and (step > self.stepvalues_list[self.current_step])): + self.current_step = self.current_step + 1 + rate = self.base_rate * math.pow(self.gamma, self.current_step) + elif self.policy == "poly": + rate = self.base_rate * math.pow(1.0 - (step / self.max_steps), self.power) + elif self.policy == "sigmoid": + rate = self.base_rate * \ + (1.0 / (1.0 + math.exp(self.gamma * (progress - 100 * self.step_size / self.max_steps)))) + else: + logging.error("Unknown learning rate policy: %s", self.policy) + exit(-1) + return rate diff --git a/digits/tools/tensorflow/main.py b/digits/tools/tensorflow/main.py new file mode 100644 index 000000000..5175efec1 --- /dev/null +++ b/digits/tools/tensorflow/main.py @@ -0,0 +1,707 @@ +#!/usr/bin/env python2 +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. +# +# This document should comply with PEP-8 Style Guide +# Linter: pylint + +""" +TensorFlow training executable for DIGITS +Defines the training procedure + +Usage: +See the self-documenting flags below. 
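To make the LRPolicy schedules above concrete, here is a small usage sketch with illustrative values (it assumes lr_policy.py is importable from digits/tools/tensorflow): an exponential decay over 1,000 steps, and a 'multistep' schedule whose step values are given as percentages of max_steps.

    from lr_policy import LRPolicy

    exp_policy = LRPolicy('exp', base_rate=0.01, gamma=0.98, power=0, max_steps=1000, step_values='')
    exp_policy.get_learning_rate(0)     # 0.01
    exp_policy.get_learning_rate(500)   # 0.01 * 0.98**50, since progress is expressed in percent

    multi = LRPolicy('multistep', 0.01, 0.1, 0, 1000, '30,60')  # drop the rate 10x at 30% and 60% of training
    rates = [multi.get_learning_rate(s) for s in range(1000)]
    # rates[0] == 0.01, rates[400] == 0.001, rates[900] == 0.0001 -- the policy is stateful, so query it every step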
+ +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import time + +import datetime +import inspect +import json +import logging +import math +import numpy as np +import os +from six.moves import xrange # noqa +import tensorflow as tf +import tensorflow.contrib.slim as slim # noqa +from tensorflow.python.client import timeline, device_lib # noqa +from tensorflow.python.ops import template # noqa +from tensorflow.python.lib.io import file_io +from tensorflow.core.framework import summary_pb2 + + +# Local imports +import utils as digits +import lr_policy +from model import Model, Tower # noqa +from utils import model_property # noqa + +import tf_data + +# Constants +TF_INTRA_OP_THREADS = 0 +TF_INTER_OP_THREADS = 0 +MIN_LOGS_PER_TRAIN_EPOCH = 8 # torch default: 8 + +logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s', + datefmt='%Y-%m-%d %H:%M:%S', + level=logging.INFO) + +FLAGS = tf.app.flags.FLAGS + +# Basic model parameters. #float, integer, boolean, string +tf.app.flags.DEFINE_integer('batch_size', 16, """Number of images to process in a batch""") +tf.app.flags.DEFINE_integer( + 'croplen', 0, """Crop (x and y). A zero value means no cropping will be applied""") +tf.app.flags.DEFINE_integer('epoch', 1, """Number of epochs to train, -1 for unbounded""") +tf.app.flags.DEFINE_string('inference_db', '', """Directory with inference file source""") +tf.app.flags.DEFINE_integer( + 'validation_interval', 1, """Number of train epochs to complete, to perform one validation""") +tf.app.flags.DEFINE_string('labels_list', '', """Text file listing label definitions""") +tf.app.flags.DEFINE_string('mean', '', """Mean image file""") +tf.app.flags.DEFINE_float('momentum', '0.9', """Momentum""") # Not used by DIGITS front-end +tf.app.flags.DEFINE_string('network', '', """File containing network (model)""") +tf.app.flags.DEFINE_string('networkDirectory', '', """Directory in which network exists""") +tf.app.flags.DEFINE_string('optimization', 'sgd', """Optimization method""") +tf.app.flags.DEFINE_string('save', 'results', """Save directory""") +tf.app.flags.DEFINE_integer('seed', 0, """Fixed input seed for repeatable experiments""") +tf.app.flags.DEFINE_boolean('shuffle', False, """Shuffle records before training""") +tf.app.flags.DEFINE_float( + 'snapshotInterval', 1.0, + """Specifies the training epochs to be completed before taking a snapshot""") +tf.app.flags.DEFINE_string('snapshotPrefix', '', """Prefix of the weights/snapshots""") +tf.app.flags.DEFINE_string( + 'subtractMean', 'none', + """Select mean subtraction method. Possible values are 'image', 'pixel' or 'none'""") +tf.app.flags.DEFINE_string('train_db', '', """Directory with training file source""") +tf.app.flags.DEFINE_string( + 'train_labels', '', + """Directory with an optional and seperate labels file source for training""") +tf.app.flags.DEFINE_string('validation_db', '', """Directory with validation file source""") +tf.app.flags.DEFINE_string( + 'validation_labels', '', + """Directory with an optional and seperate labels file source for validation""") +tf.app.flags.DEFINE_string( + 'visualizeModelPath', '', """Constructs the current model for visualization""") +tf.app.flags.DEFINE_boolean( + 'visualize_inf', False, """Will output weights and activations for an inference job.""") +tf.app.flags.DEFINE_string( + 'weights', '', """Filename for weights of a model to use for fine-tuning""") + +# @TODO(tzaman): is the bitdepth in line with the DIGITS team? 
+tf.app.flags.DEFINE_integer('bitdepth', 8, """Specifies an image's bitdepth""") + +# @TODO(tzaman); remove torch mentions below +tf.app.flags.DEFINE_float('lr_base_rate', '0.01', """Learning rate""") +tf.app.flags.DEFINE_string( + 'lr_policy', 'fixed', + """Learning rate policy. (fixed, step, exp, inv, multistep, poly, sigmoid)""") +tf.app.flags.DEFINE_float( + 'lr_gamma', -1, + """Required to calculate learning rate. Applies to: (step, exp, inv, multistep, sigmoid)""") +tf.app.flags.DEFINE_float( + 'lr_power', float('Inf'), + """Required to calculate learning rate. Applies to: (inv, poly)""") +tf.app.flags.DEFINE_string( + 'lr_stepvalues', '', + """Required to calculate stepsize of the learning rate. Applies to: (step, multistep, sigmoid). + For the 'multistep' lr_policy you can input multiple values seperated by commas""") + +# Tensorflow-unique arguments for DIGITS +tf.app.flags.DEFINE_string( + 'save_vars', 'all', + """Sets the collection of variables to be saved: 'all' or only 'trainable'.""") +tf.app.flags.DEFINE_string('summaries_dir', '', """Directory of Tensorboard Summaries (logdir)""") +tf.app.flags.DEFINE_boolean( + 'serving_export', False, """Flag for exporting an Tensorflow Serving model""") +tf.app.flags.DEFINE_boolean('log_device_placement', False, """Whether to log device placement.""") +tf.app.flags.DEFINE_integer( + 'log_runtime_stats_per_step', 0, + """Logs runtime statistics for Tensorboard every x steps, defaults to 0 (off).""") + +# Augmentation +tf.app.flags.DEFINE_string( + 'augFlip', 'none', + """The flip options {none, fliplr, flipud, fliplrud} as randompre-processing augmentation""") +tf.app.flags.DEFINE_float( + 'augNoise', 0., """The stddev of Noise in AWGN as pre-processing augmentation""") +tf.app.flags.DEFINE_float( + 'augContrast', 0., """The contrast factor's bounds as sampled from a random-uniform distribution + as pre-processing augmentation""") +tf.app.flags.DEFINE_bool( + 'augWhitening', False, """Performs per-image whitening by subtracting off its own mean and + dividing by its own standard deviation.""") +tf.app.flags.DEFINE_float( + 'augHSVh', 0., """The stddev of HSV's Hue shift as pre-processing augmentation""") +tf.app.flags.DEFINE_float( + 'augHSVs', 0., """The stddev of HSV's Saturation shift as pre-processing augmentation""") +tf.app.flags.DEFINE_float( + 'augHSVv', 0., """The stddev of HSV's Value shift as pre-processing augmentation""") + + +def save_timeline_trace(run_metadata, save_dir, step): + tl = timeline.Timeline(run_metadata.step_stats) + ctf = tl.generate_chrome_trace_format(show_memory=True) + tl_fn = os.path.join(save_dir, 'timeline_%s.json' % step) + with open(tl_fn, 'w') as f: + f.write(ctf) + logging.info('Timeline trace written to %s', tl_fn) + + +def strip_data_from_graph_def(graph_def): + strip_def = tf.GraphDef() + for n0 in graph_def.node: + n = strip_def.node.add() + n.MergeFrom(n0) + if n.op == 'Const': + tensor = n.attr['value'].tensor + if (tensor.tensor_content): + tensor.tensor_content = '' + if (tensor.string_val): + del tensor.string_val[:] + return strip_def + + +def visualize_graph(graph_def, path): + graph_def = strip_data_from_graph_def(graph_def) + logging.info('Writing Graph Definition..') + file_io.write_string_to_file(path, str(graph_def)) + logging.info('Graph Definition Written.') + + +def average_head_keys(tags, vals): + """ Averages keys with same end (head) name. 
+ Example: foo1/bar=1 and foo2/bar=2 should collapse to bar=1.5 + """ + tail_tags = [w.split('/')[-1] for w in tags] + sums = {} + nums = {} + for a, b in zip(tail_tags, vals): + if a not in sums: + sums[a] = b + nums[a] = 1 + else: + sums[a] += b + nums[a] += 1 + tags_clean = sums.keys() + return tags_clean, np.asarray(sums.values())/np.asarray(nums.values()) + + +def summary_to_lists(summary_str): + """ Takes a Tensorflow stringified Summary object and returns only + the scalar values to a list of tags and a list of values + Args: + summary_str: string of a Tensorflow Summary object + Returns: + tags: list of tags + vals: list of values corresponding to the tag list + + """ + summ = summary_pb2.Summary() + summ.ParseFromString(summary_str) + tags = [] + vals = [] + for s in summ.value: + if s.HasField('simple_value'): # and s.simple_value: # Only parse scalar_summaries + if s.simple_value == float('Inf') or np.isnan(s.simple_value): + raise ValueError('Model diverged with %s = %s : Try decreasing your learning rate' % + (s.tag, s.simple_value)) + tags.append(s.tag) + vals.append(s.simple_value) + tags, vals = average_head_keys(tags, vals) + vals = np.asarray(vals) + return tags, vals + + +def print_summarylist(tags, vals): + """ Prints a nice one-line listing of tags and their values in a nice format + that corresponds to how the DIGITS regex reads it. + Args: + tags: an array of tags + vals: an array of values + Returns: + print_list: a string containing formatted tags and values + """ + print_list = '' + for i, key in enumerate(tags): + if vals[i] == float('Inf'): + raise ValueError('Infinite value %s = Inf' % key) + print_list = print_list + key + " = " + "{:.6f}".format(vals[i]) + if i < len(tags)-1: + print_list = print_list + ", " + return print_list + + +def dump(obj): + for attr in dir(obj): + print("obj.%s = %s" % (attr, getattr(obj, attr))) + + +def load_snapshot(sess, weight_path, var_candidates): + """ Loads a snapshot into a session from a weight path. Will only load the + weights that are both in the weight_path file and the passed var_candidates.""" + logging.info("Loading weights from pretrained model - %s ", weight_path) + reader = tf.train.NewCheckpointReader(weight_path) + var_map = reader.get_variable_to_shape_map() + + # Only obtain all the variables that are [in the current graph] AND [in the checkpoint] + vars_restore = [] + for vt in var_candidates: + for vm in var_map.keys(): + if vt.name.split(':')[0] == vm: + if ("global_step" not in vt.name) and not (vt.name.startswith("train/")): + vars_restore.append(vt) + logging.info('restoring %s -> %s' % (vm, vt.name)) + else: + logging.info('NOT restoring %s -> %s' % (vm, vt.name)) + + logging.info('Restoring %s variable ops.' % len(vars_restore)) + tf.train.Saver(vars_restore, max_to_keep=0, sharded=FLAGS.serving_export).restore(sess, weight_path) + logging.info('Variables restored.') + + +def save_snapshot(sess, saver, save_dir, snapshot_prefix, epoch, for_serving=False): + """ + Saves a snapshot of the current session, saving all variables previously defined + in the ctor of the saver. Also saves the flow of the graph itself (only once). + """ + number_dec = str(FLAGS.snapshotInterval-int(FLAGS.snapshotInterval))[2:] + if number_dec is '': + number_dec = '0' + epoch_fmt = "{:." 
+ number_dec + "f}" + + snapshot_file = os.path.join(save_dir, snapshot_prefix + '_' + epoch_fmt.format(epoch) + '.ckpt') + + logging.info('Snapshotting to %s', snapshot_file) + saver.save(sess, snapshot_file) + logging.info('Snapshot saved.') + + if for_serving: + # @TODO(tzaman) : we could further extend this by supporting tensorflow-serve + logging.error('NotImplementedError: Tensorflow-Serving support.') + exit(-1) + + # Past this point the graph shouldn't be changed, so saving it once is enough + filename_graph = os.path.join(save_dir, snapshot_prefix + '.graph_def') + if not os.path.isfile(filename_graph): + with open(filename_graph, 'wb') as f: + logging.info('Saving graph to %s', filename_graph) + f.write(sess.graph_def.SerializeToString()) + logging.info('Saved graph to %s', filename_graph) + # meta_graph_def = tf.train.export_meta_graph(filename='?') + + +def save_weight_visualization(w_names, a_names, w, a): + try: + import h5py + except ImportError: + logging.error("Attempt to create HDF5 Loader but h5py is not installed.") + exit(-1) + fn = os.path.join(FLAGS.save, 'vis.h5') + vis_db = h5py.File(fn, 'w') + db_layers = vis_db.create_group("layers") + + logging.info('Saving visualization to %s', fn) + for i in range(0, len(w)): + dset = db_layers.create_group(str(i)) + dset.attrs['var'] = w_names[i].name + dset.attrs['op'] = a_names[i] + if w[i].shape: + dset.create_dataset('weights', data=w[i]) + if a[i].shape: + dset.create_dataset('activations', data=a[i]) + vis_db.close() + + +def Inference(sess, model): + """ + Runs one inference (evaluation) epoch (all the files in the loader) + """ + + inference_op = model.towers[0].inference + if FLAGS.labels_list: # Classification -> assume softmax usage + # Append a softmax op + inference_op = tf.nn.softmax(inference_op) + + weight_vars = [] + activation_ops = [] + if FLAGS.visualize_inf: + trainable_weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) + # Retrace the origin op of each variable + for n in tf.get_default_graph().as_graph_def().node: + for tw in trainable_weights: + tw_name_reader = tw.name.split(':')[0] + '/read' + if tw_name_reader in n.input: + node_op_name = n.name + ':0' # @TODO(tzaman) this assumes exactly 1 output - allow to be dynamic! + weight_vars.append(tw) + activation_ops.append(node_op_name) + continue + + try: + while not model.queue_coord.should_stop(): + keys, preds, [w], [a] = sess.run([model.dataloader.batch_k, + inference_op, + [weight_vars], + [activation_ops]]) + + if FLAGS.visualize_inf: + save_weight_visualization(weight_vars, activation_ops, w, a) + + # @TODO(tzaman): error on no output? + for i in range(len(keys)): + # for j in range(len(preds)): + # We're allowing multiple predictions per image here. DIGITS doesnt support that iirc + logging.info('Predictions for image ' + str(model.dataloader.get_key_index(keys[i])) + + ': ' + json.dumps(preds[i].tolist())) + except tf.errors.OutOfRangeError: + print('Done: tf.errors.OutOfRangeError') + + +def Validation(sess, model, current_epoch): + """ + Runs one validation epoch. + """ + + # @TODO(tzaman): utilize the coordinator by resetting the queue after 1 epoch. 
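When --visualize_inf is set, the weights and activations captured by save_weight_visualization above are written to vis.h5 with one group per layer. A short sketch of reading that file back with h5py (the path assumes the default --save directory, 'results'):

    import h5py

    with h5py.File('results/vis.h5', 'r') as vis_db:
        for idx, layer in vis_db['layers'].items():
            print(idx, layer.attrs['var'], layer.attrs['op'])
            if 'weights' in layer:
                print('  weights shape:', layer['weights'].shape)
            if 'activations' in layer:
                print('  activations shape:', layer['activations'].shape)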
+ # see https://github.com/tensorflow/tensorflow/issues/4535#issuecomment-248990633 + + print_vals_sum = 0 + steps = 0 + while (steps * model.dataloader.batch_size) < model.dataloader.get_total(): + summary_str = sess.run(model.summary) + # Parse the summary + tags, print_vals = summary_to_lists(summary_str) + print_vals_sum = print_vals + print_vals_sum + steps += 1 + + print_list = print_summarylist(tags, print_vals_sum/steps) + + logging.info("Validation (epoch " + str(current_epoch) + "): " + print_list) + + +def loadLabels(filename): + with open(filename) as f: + return f.readlines() + + +def main(_): + + # Always keep the cpu as default + with tf.Graph().as_default(), tf.device('/cpu:0'): + + if FLAGS.validation_interval == 0: + FLAGS.validation_db = None + + # Set Tensorboard log directory + if FLAGS.summaries_dir: + # The following gives a nice but unrobust timestamp + FLAGS.summaries_dir = os.path.join(FLAGS.summaries_dir, datetime.datetime.now().strftime("%Y%m%d_%H%M%S")) + + if not FLAGS.train_db and not FLAGS.validation_db and not FLAGS.inference_db and not FLAGS.visualizeModelPath: + logging.error("At least one of the following file sources should be specified: " + "train_db, validation_db or inference_db") + exit(-1) + + if FLAGS.seed: + tf.set_random_seed(FLAGS.seed) + + batch_size_train = FLAGS.batch_size + batch_size_val = FLAGS.batch_size + logging.info("Train batch size is %s and validation batch size is %s", batch_size_train, batch_size_val) + + # This variable keeps track of next epoch, when to perform validation. + next_validation = FLAGS.validation_interval + logging.info("Training epochs to be completed for each validation : %s", next_validation) + + # This variable keeps track of next epoch, when to save model weights. + next_snapshot_save = FLAGS.snapshotInterval + logging.info("Training epochs to be completed before taking a snapshot : %s", next_snapshot_save) + last_snapshot_save_epoch = 0 + + snapshot_prefix = FLAGS.snapshotPrefix if FLAGS.snapshotPrefix else FLAGS.network.split('.')[0] + logging.info("Model weights will be saved as %s__Model.ckpt", snapshot_prefix) + + if not os.path.exists(FLAGS.save): + os.makedirs(FLAGS.save) + logging.info("Created a directory %s to save all the snapshots", FLAGS.save) + + # Load mean variable + if FLAGS.subtractMean == 'none': + mean_loader = None + else: + if not FLAGS.mean: + logging.error("subtractMean parameter not set to 'none' yet mean image path is unset") + exit(-1) + logging.info("Loading mean tensor from %s file", FLAGS.mean) + mean_loader = tf_data.MeanLoader(FLAGS.mean, FLAGS.subtractMean, FLAGS.bitdepth) + + classes = 0 + nclasses = 0 + if FLAGS.labels_list: + logging.info("Loading label definitions from %s file", FLAGS.labels_list) + classes = loadLabels(FLAGS.labels_list) + nclasses = len(classes) + if not classes: + logging.error("Reading labels file %s failed.", FLAGS.labels_list) + exit(-1) + logging.info("Found %s classes", nclasses) + + # Create a data-augmentation dict + aug_dict = { + 'aug_flip': FLAGS.augFlip, + 'aug_noise': FLAGS.augNoise, + 'aug_contrast': FLAGS.augContrast, + 'aug_whitening': FLAGS.augWhitening, + 'aug_HSV': { + 'h': FLAGS.augHSVh, + 's': FLAGS.augHSVs, + 'v': FLAGS.augHSVv, + }, + } + + # Import the network file + path_network = os.path.join(os.path.dirname(os.path.realpath(__file__)), FLAGS.networkDirectory, FLAGS.network) + exec(open(path_network).read(), globals()) + + try: + UserModel + except NameError: + logging.error("The user model class 'UserModel' is not defined.") + 
exit(-1) + if not inspect.isclass(UserModel): # noqa + logging.error("The user model class 'UserModel' is not a class.") + exit(-1) + # @TODO(tzaman) - add mode checks to UserModel + + if FLAGS.train_db: + with tf.name_scope(digits.STAGE_TRAIN) as stage_scope: + train_model = Model(digits.STAGE_TRAIN, FLAGS.croplen, nclasses, FLAGS.optimization, FLAGS.momentum) + train_model.create_dataloader(FLAGS.train_db) + train_model.dataloader.setup(FLAGS.train_labels, + FLAGS.shuffle, + FLAGS.bitdepth, + batch_size_train, + FLAGS.epoch, + FLAGS.seed) + train_model.dataloader.set_augmentation(mean_loader, aug_dict) + train_model.create_model(UserModel, stage_scope) # noqa + + if FLAGS.validation_db: + with tf.name_scope(digits.STAGE_VAL) as stage_scope: + val_model = Model(digits.STAGE_VAL, FLAGS.croplen, nclasses, reuse_variable=True) + val_model.create_dataloader(FLAGS.validation_db) + val_model.dataloader.setup(FLAGS.validation_labels, + False, + FLAGS.bitdepth, + batch_size_val, + 1e9, + FLAGS.seed) # @TODO(tzaman): set numepochs to 1 + val_model.dataloader.set_augmentation(mean_loader) + val_model.create_model(UserModel, stage_scope) # noqa + + if FLAGS.inference_db: + with tf.name_scope(digits.STAGE_INF) as stage_scope: + inf_model = Model(digits.STAGE_INF, FLAGS.croplen, nclasses) + inf_model.create_dataloader(FLAGS.inference_db) + inf_model.dataloader.setup(None, False, FLAGS.bitdepth, FLAGS.batch_size, 1, FLAGS.seed) + inf_model.dataloader.set_augmentation(mean_loader) + inf_model.create_model(UserModel, stage_scope) # noqa + + # Start running operations on the Graph. allow_soft_placement must be set to + # True to build towers on GPU, as some of the ops do not have GPU + # implementations. + sess = tf.Session(config=tf.ConfigProto( + allow_soft_placement=True, # will automatically do non-gpu supported ops on cpu + inter_op_parallelism_threads=TF_INTER_OP_THREADS, + intra_op_parallelism_threads=TF_INTRA_OP_THREADS, + log_device_placement=FLAGS.log_device_placement)) + + if FLAGS.visualizeModelPath: + visualize_graph(sess.graph_def, FLAGS.visualizeModelPath) + exit(0) + + # Saver creation. + if FLAGS.save_vars == 'all': + vars_to_save = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) + elif FLAGS.save_vars == 'trainable': + vars_to_save = tf.all_variables() + else: + logging.error('Unknown save_var flag (%s)' % FLAGS.save_vars) + exit(-1) + saver = tf.train.Saver(vars_to_save, max_to_keep=0, sharded=FLAGS.serving_export) + + # Initialize variables + init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer()) + sess.run(init_op) + + # If weights option is set, preload weights from existing models appropriately + if FLAGS.weights: + load_snapshot(sess, FLAGS.weights, tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)) + + # Tensorboard: Merge all the summaries and write them out + writer = tf.summary.FileWriter(os.path.join(FLAGS.summaries_dir, 'tb'), sess.graph) + + # If we are inferencing, only do that. 
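+        # (Inference() makes a single pass over all records in inference_db and logs a JSON prediction per image.)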
+ if FLAGS.inference_db: + inf_model.start_queue_runners(sess) + Inference(sess, inf_model) + + queue_size_op = [] + for n in tf.get_default_graph().as_graph_def().node: + if '_Size' in n.name: + queue_size_op.append(n.name+':0') + + start = time.time() # @TODO(tzaman) - removeme + + # Initial Forward Validation Pass + if FLAGS.validation_db: + val_model.start_queue_runners(sess) + Validation(sess, val_model, 0) + + if FLAGS.train_db: + # During training, a log output should occur at least X times per epoch or every X images, whichever lower + train_steps_per_epoch = train_model.dataloader.get_total() / batch_size_train + if math.ceil(train_steps_per_epoch/MIN_LOGS_PER_TRAIN_EPOCH) < math.ceil(5000/batch_size_train): + logging_interval_step = int(math.ceil(train_steps_per_epoch/MIN_LOGS_PER_TRAIN_EPOCH)) + else: + logging_interval_step = int(math.ceil(5000/batch_size_train)) + logging.info("During training. details will be logged after every %s steps (batches)", + logging_interval_step) + + # epoch value will be calculated for every batch size. To maintain unique epoch value between batches, + # it needs to be rounded to the required number of significant digits. + epoch_round = 0 # holds the required number of significant digits for round function. + tmp_batchsize = batch_size_train*logging_interval_step + while tmp_batchsize <= train_model.dataloader.get_total(): + tmp_batchsize = tmp_batchsize * 10 + epoch_round += 1 + logging.info("While logging, epoch value will be rounded to %s significant digits", epoch_round) + + # Create the learning rate policy + total_training_steps = train_model.dataloader.num_epochs * train_model.dataloader.get_total() / \ + train_model.dataloader.batch_size + lrpolicy = lr_policy.LRPolicy(FLAGS.lr_policy, + FLAGS.lr_base_rate, + FLAGS.lr_gamma, + FLAGS.lr_power, + total_training_steps, + FLAGS.lr_stepvalues) + train_model.start_queue_runners(sess) + + # Training + logging.info('Started training the model') + + current_epoch = 0 + try: + step = 0 + step_last_log = 0 + print_vals_sum = 0 + while not train_model.queue_coord.should_stop(): + log_runtime = FLAGS.log_runtime_stats_per_step and (step % FLAGS.log_runtime_stats_per_step == 0) + + run_options = None + run_metadata = None + if log_runtime: + # For a HARDWARE_TRACE you need NVIDIA CUPTI, a 'CUDA-EXTRA' + # SOFTWARE_TRACE HARDWARE_TRACE FULL_TRACE + run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE) + run_metadata = tf.RunMetadata() + + feed_dict = {train_model.learning_rate: lrpolicy.get_learning_rate(step)} + + if False: + for op in train_model.train: + _, summary_str, step = sess.run([op, train_model.summary, train_model.global_step], + feed_dict=feed_dict, + options=run_options, + run_metadata=run_metadata) + else: + _, summary_str, step = sess.run([train_model.train, + train_model.summary, + train_model.global_step], + feed_dict=feed_dict, + options=run_options, + run_metadata=run_metadata) + + # HACK + step = step / len(train_model.train) + + # logging.info(sess.run(queue_size_op)) # DEVELOPMENT: for checking the queue size + + if log_runtime: + writer.add_run_metadata(run_metadata, str(step)) + save_timeline_trace(run_metadata, FLAGS.save, int(step)) + + writer.add_summary(summary_str, step) + + # Parse the summary + tags, print_vals = summary_to_lists(summary_str) + + print_vals_sum = print_vals + print_vals_sum + + # @TODO(tzaman): account for variable batch_size value on very last epoch + current_epoch = round((step * batch_size_train) / train_model.dataloader.get_total(), 
epoch_round) + + # Start with a forward pass + if ((step % logging_interval_step) == 0): + steps_since_log = step - step_last_log + print_list = print_summarylist(tags, print_vals_sum/steps_since_log) + logging.info("Training (epoch " + str(current_epoch) + "): " + print_list) + print_vals_sum = 0 + step_last_log = step + + # Potential Validation Pass + if FLAGS.validation_db and current_epoch >= next_validation: + Validation(sess, val_model, current_epoch) + # Find next nearest epoch value that exactly divisible by FLAGS.validation_interval: + next_validation = (round(float(current_epoch)/FLAGS.validation_interval) + 1) * \ + FLAGS.validation_interval + + # Saving Snapshot + if FLAGS.snapshotInterval > 0 and current_epoch >= next_snapshot_save: + save_snapshot(sess, saver, FLAGS.save, snapshot_prefix, current_epoch, FLAGS.serving_export) + + # To find next nearest epoch value that exactly divisible by FLAGS.snapshotInterval + next_snapshot_save = (round(float(current_epoch)/FLAGS.snapshotInterval) + 1) * \ + FLAGS.snapshotInterval + last_snapshot_save_epoch = current_epoch + writer.flush() + except tf.errors.OutOfRangeError: + logging.info('Done training for epochs: tf.errors.OutOfRangeError') + except ValueError as err: + logging.error(err.args[0]) + exit(-1) # DIGITS wants a dirty error. + except (KeyboardInterrupt): + logging.info('Interrupt signal received.') + + # If required, perform final snapshot save + if FLAGS.snapshotInterval > 0 and FLAGS.epoch > last_snapshot_save_epoch: + save_snapshot(sess, saver, FLAGS.save, snapshot_prefix, FLAGS.epoch, FLAGS.serving_export) + + print('Training wall-time:', time.time()-start) # @TODO(tzaman) - removeme + + # If required, perform final Validation pass + if FLAGS.validation_db and current_epoch >= next_validation: + Validation(sess, val_model, current_epoch) + + if FLAGS.train_db: + del train_model + if FLAGS.validation_db: + del val_model + if FLAGS.inference_db: + del inf_model + + # We need to call sess.close() because we've used a with block + sess.close() + + writer.close() + logging.info('END') + exit(0) + +if __name__ == '__main__': + tf.app.run() diff --git a/digits/tools/tensorflow/model.py b/digits/tools/tensorflow/model.py new file mode 100644 index 000000000..a19df6309 --- /dev/null +++ b/digits/tools/tensorflow/model.py @@ -0,0 +1,316 @@ +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. +# +# This document should comply with PEP-8 Style Guide +# Linter: pylint + +""" +Interface for setting up and creating a model in Tensorflow. + +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import logging +import tensorflow as tf +from tensorflow.python.framework import ops + +# Local imports +import tf_data +import utils as digits +from utils import model_property + +logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s', + datefmt='%Y-%m-%d %H:%M:%S', + level=logging.INFO) + +# Constants +SUMMARIZE_TOWER_STATS = False + + +# from +# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/models/image/cifar10/cifar10_multi_gpu_train.py +def average_gradients(tower_grads): + """Calculate the average gradient for each shared variable across all towers. + Note that this function provides a synchronization point across all towers. + Args: + tower_grads: List of lists of (gradient, variable) tuples. The outer list + is over individual gradients. The inner list is over the gradient + calculation for each tower. 
+ Returns: + List of pairs of (gradient, variable) where the gradient has been averaged + across all towers. + """ + with tf.name_scope('gradient_average'): + average_grads = [] + for grad_and_vars in zip(*tower_grads): + # Note that each grad_and_vars looks like the following: + # ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN)) + grads = [] + for g, _ in grad_and_vars: + # Add 0 dimension to the gradients to represent the tower. + expanded_g = tf.expand_dims(g, 0) + # Append on a 'tower' dimension which we will average over below. + grads.append(expanded_g) + # Average over the 'tower' dimension. + grads_transformed = tf.concat(grads, 0) + grads_transformed = tf.reduce_mean(grads_transformed, 0) + + # Keep in mind that the Variables are redundant because they are shared + # across towers. So .. we will just return the first tower's pointer to + # the Variable. + v = grad_and_vars[0][1] + grad_and_var = (grads_transformed, v) + average_grads.append(grad_and_var) + return average_grads + + +class Model(object): + """ + Wrapper around the actual tensorflow workflow process. + This is structured in a way that the user should only care about + creating the model while using the DIGITS UI to select the + optimizer and other options. + + This class is executed to start a tensorflow workflow. + """ + def __init__(self, stage, croplen, nclasses, optimization=None, momentum=None, reuse_variable=False): + self.stage = stage + self.croplen = croplen + self.nclasses = nclasses + self.dataloader = None + self.queue_coord = None + self.queue_threads = None + + self._optimization = optimization + self._momentum = momentum + self.summaries = [] + self.towers = [] + self._train = None + self._reuse = reuse_variable + + # Touch to initialize + # if optimization: + # self.learning_rate + # self.global_step + # self.optimizer + + def create_dataloader(self, db_path): + self.dataloader = tf_data.LoaderFactory.set_source(db_path, is_inference=(self.stage == digits.STAGE_INF)) + # @TODO(tzaman) communicate the dataloader summaries to our Model summary list + self.dataloader.stage = self.stage + self.dataloader.croplen = self.croplen + self.dataloader.nclasses = self.nclasses + + def init_dataloader(self): + with tf.device('/cpu:0'): + with tf.name_scope(digits.GraphKeys.LOADER): + self.dataloader.create_input_pipeline() + + def create_model(self, obj_UserModel, stage_scope, batch_x=None): + + if batch_x is None: + self.init_dataloader() + batch_x = self.dataloader.batch_x + if self.stage != digits.STAGE_INF: + batch_y = self.dataloader.batch_y + else: + assert self.stage == digits.STAGE_INF + batch_x = batch_x + + available_devices = digits.get_available_gpus() + if not available_devices: + available_devices.append('/cpu:0') + + # available_devices = ['/gpu:0', '/gpu:1'] # DEVELOPMENT : virtual multi-gpu + + # Split the batch over the batch dimension over the number of available gpu's + if len(available_devices) == 1: + batch_x_split = [batch_x] + if self.stage != digits.STAGE_INF: # Has no labels + batch_y_split = [batch_y] + else: + with tf.name_scope('parallelize'): + # Split them up + batch_x_split = tf.split(batch_x, len(available_devices), 0, name='split_batch') + if self.stage != digits.STAGE_INF: # Has no labels + batch_y_split = tf.split(batch_y, len(available_devices), 0, name='split_batch') + + # Run the user model through the build_model function that should be filled in + grad_towers = [] + for dev_i, dev_name in enumerate(available_devices): + with tf.device(dev_name): + current_scope = 
stage_scope if len(available_devices) == 1 else ('tower_%d' % dev_i) + with tf.name_scope(current_scope) as scope_tower: + + if self.stage != digits.STAGE_INF: + tower_model = self.add_tower(obj_tower=obj_UserModel, + x=batch_x_split[dev_i], + y=batch_y_split[dev_i]) + else: + tower_model = self.add_tower(obj_tower=obj_UserModel, + x=batch_x_split[dev_i], + y=None) + + with tf.variable_scope(digits.GraphKeys.MODEL, reuse=dev_i > 0 or self._reuse): + tower_model.inference # touch to initialize + + # Reuse the variables in this scope for the next tower/device + tf.get_variable_scope().reuse_variables() + + if self.stage == digits.STAGE_INF: + # For inferencing we will only use the inference part of the graph + continue + + with tf.name_scope(digits.GraphKeys.LOSS): + for loss in self.get_tower_losses(tower_model): + tf.add_to_collection(digits.GraphKeys.LOSSES, loss['loss']) + + # Assemble all made within this scope so far. The user can add custom + # losses to the digits.GraphKeys.LOSSES collection + losses = tf.get_collection(digits.GraphKeys.LOSSES, scope=scope_tower) + losses += ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES, scope=None) + tower_loss = tf.add_n(losses, name='loss') + + self.summaries.append(tf.summary.scalar(tower_loss.op.name, tower_loss)) + + if self.stage == digits.STAGE_TRAIN: + grad_tower_losses = [] + for loss in self.get_tower_losses(tower_model): + grad_tower_loss = self.optimizer.compute_gradients(loss['loss'], loss['vars']) + grad_tower_loss = tower_model.gradientUpdate(grad_tower_loss) + grad_tower_losses.append(grad_tower_loss) + grad_towers.append(grad_tower_losses) + + # Assemble and average the gradients from all towers + if self.stage == digits.STAGE_TRAIN: + n_gpus = len(available_devices) + if n_gpus == 1: + grad_averages = grad_towers[0] + else: + with tf.device(available_devices[0]): + n_losses = len(grad_towers[0]) + grad_averages = [] + for loss in xrange(n_losses): + grad_averages.append(average_gradients([grad_towers[gpu][loss] for gpu in xrange(n_gpus)])) + apply_gradient_ops = [] + for grad_avg in grad_averages: + apply_gradient_ops.append(self.optimizer.apply_gradients(grad_avg, global_step=self.global_step)) + self._train = apply_gradient_ops + + def start_queue_runners(self, sess): + logging.info('Starting queue runners (%s)', self.stage) + # Distinguish the queue runner collection (for easily obtaining them by collection key) + queue_runners = tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS, scope=self.stage+'.*') + for qr in queue_runners: + if self.stage in qr.name: + tf.add_to_collection(digits.GraphKeys.QUEUE_RUNNERS, qr) + + self.queue_coord = tf.train.Coordinator() + self.queue_threads = tf.train.start_queue_runners(sess=sess, coord=self.queue_coord, + collection=digits.GraphKeys.QUEUE_RUNNERS) + logging.info('Queue runners started (%s)', self.stage) + + def __del__(self): + # Destructor + if self.queue_coord: + # Close and terminate the queues + self.queue_coord.request_stop() + self.queue_coord.join(self.queue_threads) + + def add_tower(self, obj_tower, x, y): + is_training = self.stage == digits.STAGE_TRAIN + is_inference = self.stage == digits.STAGE_INF + input_shape = self.dataloader.get_shape() + tower = obj_tower(x, y, input_shape, self.nclasses, is_training, is_inference) + self.towers.append(tower) + return tower + + @model_property + def train(self): + return self._train + + @model_property + def summary(self): + """ + Merge train summaries + """ + for t in self.towers: + self.summaries += t.summaries + + if not 
len(self.summaries): + logging.error("No summaries defined. Please define at least one summary.") + exit(-1) + return tf.summary.merge(self.summaries) + + @model_property + def global_step(self): + # Force global_step on the CPU, becaues the GPU's first step will end at 0 instead of 1. + with tf.device('/cpu:0'): + return tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), + trainable=False) + + @model_property + def learning_rate(self): + # @TODO(tzaman): the learning rate is a function of the global step, so we could + # define it entirely in tf ops, instead of a placeholder and feeding. + with tf.device('/cpu:0'): + lr = tf.placeholder(tf.float32, shape=[], name='learning_rate') + self.summaries.append(tf.summary.scalar('lr', lr)) + return lr + + @model_property + def optimizer(self): + logging.info("Optimizer:%s", self._optimization) + if self._optimization == 'sgd': + return tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate) + elif self._optimization == 'adadelta': + return tf.train.AdadeltaOptimizer(learning_rate=self.learning_rate) + elif self._optimization == 'adagrad': + return tf.train.AdagradOptimizer(learning_rate=self.learning_rate) + elif self._optimization == 'adagradda': + return tf.train.AdagradDAOptimizer(learning_rate=self.learning_rate, + global_step=self.global_step) + elif self._optimization == 'momentum': + return tf.train.MomentumOptimizer(learning_rate=self.learning_rate, + momentum=self._momentum) + elif self._optimization == 'adam': + return tf.train.AdamOptimizer(learning_rate=self.learning_rate) + elif self._optimization == 'ftrl': + return tf.train.FtrlOptimizer(learning_rate=self.learning_rate) + elif self._optimization == 'rmsprop': + return tf.train.RMSPropOptimizer(learning_rate=self.learning_rate, + momentum=self._momentum) + else: + logging.error("Invalid optimization flag %s", self._optimization) + exit(-1) + + def get_tower_losses(self, tower): + """ + Return list of losses + + If user-defined model returns only one loss then this is encapsulated into + the expected list of dicts structure + """ + + if isinstance(tower.loss, list): + return tower.loss + else: + return [{'loss': tower.loss, 'vars': tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)}] + + +class Tower(object): + + def __init__(self, x, y, input_shape, nclasses, is_training, is_inference): + self.input_shape = input_shape + self.nclasses = nclasses + self.is_training = is_training + self.is_inference = is_inference + self.summaries = [] + self.x = x + self.y = y + self.train = None + + def gradientUpdate(self, grad): + return grad diff --git a/digits/tools/tensorflow/tf_data.py b/digits/tools/tensorflow/tf_data.py new file mode 100644 index 000000000..f0775ba58 --- /dev/null +++ b/digits/tools/tensorflow/tf_data.py @@ -0,0 +1,893 @@ +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. +# +# This document should comply with PEP-8 Style Guide +# Linter: pylint + +""" +Interface for data loading for Tensorflow. +Data loading is done through a data loading factory,that will setup +the correct functions for the respective backends. 
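+Supported backends are LMDB, HDF5, TFRecords, plain image files or file lists, and a GAN grid generator.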
+ +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from PIL import Image +import logging +import magic +import math +import numpy as np +import os +import tensorflow as tf + +# Local imports +import caffe_tf_pb2 +import utils as digits + +# Constants +MIN_FRACTION_OF_EXAMPLES_IN_QUEUE = 0.4 +MAX_ABSOLUTE_EXAMPLES_IN_QUEUE = 4096 # The queue size cannot exceed this number +NUM_THREADS_DATA_LOADER = 6 +LOG_MEAN_FILE = False # Logs the mean file as loaded in TF to TB + +# Supported extensions for Loaders +DB_EXTENSIONS = { + 'hdf5': ['.H5', '.HDF5'], + 'lmdb': ['.MDB', '.LMDB'], + 'tfrecords': ['.TFRECORDS'], + 'filelist': ['.TXT'], + 'file': ['.JPG', '.JPEG', '.PNG'], + 'gangrid': ['.GAN'], +} + +LIST_DELIMITER = ' ' # For the FILELIST format + +logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s', datefmt='%Y-%m-%d %H:%M:%S', + level=logging.INFO) + + +def get_backend_of_source(db_path): + """ + Takes a path as argument and infers the format of the data. + If a directory is provided, it looks for the existance of an extension + in the entire directory in an order of a priority of dbs (hdf5, lmdb, filelist, file) + Args: + db_path: path to a file or directory + Returns: + backend: the backend type + """ + + # If a directory is given, we include all its contents. Otherwise it's just the one file. + if os.path.isdir(db_path): + files_in_path = [fn for fn in os.listdir(db_path) if not fn.startswith('.')] + else: + files_in_path = [db_path] + + # Keep the below priority ordering + for db_fmt in ['hdf5', 'lmdb', 'tfrecords', 'filelist', 'file', 'gangrid']: + ext_list = DB_EXTENSIONS[db_fmt] + for ext in ext_list: + if any(ext in os.path.splitext(fn)[1].upper() for fn in files_in_path): + return db_fmt + + logging.error("Cannot infer backend from db_path (%s)." % (db_path)) + exit(-1) + + +class MeanLoader(object): + """ + Loads in a mean file for tensorflow. This is done through using a constant + variable. It needs to be loaded first, after which the constant tf op + can be retrieved through a function, and can be accounted for. + + """ + def __init__(self, mean_file_path, subtraction_type, bitdepth): + self._mean_file_path = mean_file_path + self._subtraction_type = subtraction_type + self._bitdepth = bitdepth + self.tf_mean_image = None + self.load_mean() + + def load_mean(self): + """ + The mean is loaded in the graph through a tf.constant for maximum efficiency. This is first + done only once through a numpy array that defines the value of the constant. + All pre-processing of the mean file is done before the definition of the tf.constant + to make sure these operations are not repeated in the graph + """ + + file_extension = os.path.splitext(self._mean_file_path)[1].upper() + + if file_extension == '.BINARYPROTO': + blob = caffe_tf_pb2.BlobProto() + with open(self._mean_file_path, 'rb') as infile: + blob.ParseFromString(infile.read()) + data = np.array(blob.data, dtype="float32").reshape(blob.channels, blob.height, blob.width) + if blob.channels == 3: + # converting from BGR to RGB + data = data[[2, 1, 0], ...] 
# channel swap + # convert to (height, width, channels) + data = data.transpose((1, 2, 0)) + elif blob.channels == 1: + # convert to (height, width) + data = data[0] + else: + logging.error('Unknown amount of channels (%d) in mean file (%s)' % + (blob.channels, self._mean_file_path)) + exit(-1) + # elif file_extension in IMG_FILE_EXT: + # img = Image.open(self._mean_file_path) + # img.load() + # data = np.asarray(img, dtype="float32") + else: + logging.error('Failed loading mean file: Unsupported extension (%s)' % (file_extension)) + exit(-1) + + if (self._subtraction_type == 'image') or (self._subtraction_type == 'pixel'): + if self._subtraction_type == 'pixel': + data = data.mean(axis=(0, 1)) + data = np.reshape(data, (1, 1, -1)) + elif len(data.shape) != 3: + # Explicitly add channel dim + data = data[:, :, None] + + # return data in original pixel scale + self.tf_mean_image = tf.constant(data, name='Const_Mean_Image') + + else: + logging.error('Unsupported mean subtraction type (%s)' % (self._subtraction_type)) + exit(-1) + + def subtract_mean_op(self, tf_graph): + """ + Places mean subtraction on top of the tensorflow graph supplied, returns the added op + Args: + tf_graph: the graph the subtraction of the mean should placed upon + Returns: + The graph with the mean subtraction placed on top of it + """ + return (tf_graph - self.tf_mean_image) + + +class LoaderFactory(object): + """ + A factory for data loading. It sets up a subclass with data loading + done with the respective backend. Its output is a tensorflow queue op + that is used to load in data, with optionally some minor postprocessing ops. + """ + def __init__(self): + self.croplen = None + self.nclasses = None + self.mean_loader = None + self.backend = None + self.db_path = None + self.batch_x = None + self.batch_y = None + self.batch_k = None + self.stage = None + self._seed = None + self.unencoded_data_format = 'hwc' + self.unencoded_channel_scheme = 'rgb' + self.summaries = None + self.aug_dict = {} + + # @TODO(tzaman) rewrite this factory again + pass + + @staticmethod + def set_source(db_path, is_inference=False): + """ + Returns the correct backend. 
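+        (One of LmdbLoader, Hdf5Loader, FileListLoader, TFRecordsLoader or GanGridLoader,
+        selected according to the backend inferred from db_path.)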
+ """ + backend = get_backend_of_source(db_path) + loader = None + if backend == 'lmdb': + loader = LmdbLoader() + elif backend == 'hdf5': + loader = Hdf5Loader() + elif backend == 'file' or backend == 'filelist': + loader = FileListLoader() + elif backend == 'tfrecords': + loader = TFRecordsLoader() + elif backend == 'gangrid': + loader = GanGridLoader() + else: + logging.error("Backend (%s) not implemented" % (backend)) + exit(-1) + loader.backend = backend + loader.db_path = db_path + loader.is_inference = is_inference + return loader + + def setup(self, labels_db_path, shuffle, bitdepth, batch_size, num_epochs=None, seed=None): + with tf.device('/cpu:0'): + self.labels_db_path = labels_db_path + + self.shuffle = shuffle + self.bitdepth = bitdepth + self.batch_size = batch_size + self.num_epochs = num_epochs + self._seed = seed + + if self.labels_db_path: + self.labels_db = LoaderFactory.set_source(self.labels_db_path) + self.labels_db.bitdepth = self.bitdepth + self.labels_db.stage = self.stage + self.labels_db.initialize() + + self.initialize() + logging.info("Found %s images in db %s ", self.get_total(), self.db_path) + + def get_key_index(self, key): + return self.keys.index(key) + + def set_augmentation(self, mean_loader, aug_dict={}): + with tf.device('/cpu:0'): + self.mean_loader = mean_loader + self.aug_dict = aug_dict + + def get_shape(self): + input_shape = [self.height, self.width, self.channels] + # update input_shape if crop length specified + # this is necessary as the input_shape is provided + # below to the user-defined function that defines the network + if self.croplen > 0: + input_shape[0] = self.croplen + input_shape[1] = self.croplen + return input_shape + + def get_total(self): + return self.total + + def reshape_decode(self, data, shape): + if self.float_data: # @TODO(tzaman): this is LMDB specific - Make generic! + data = tf.reshape(data, shape) + data = digits.chw_to_hwc(data) + else: + # Decode image of any time option might come: https://github.com/tensorflow/tensorflow/issues/4009 + # Distinguish between mime types + if self.data_encoded: + if self.data_mime == 'image/png': + data = tf.image.decode_png(data, dtype=self.image_dtype, name='image_decoder') + elif self.data_mime == 'image/jpeg': + data = tf.image.decode_jpeg(data, name='image_decoder') + else: + logging.error('Unsupported mime type (%s); cannot be decoded' % (self.data_mime)) + exit(-1) + else: + if self.backend == 'lmdb': + data = tf.decode_raw(data, self.image_dtype, name='raw_decoder') + + # if data is in CHW, set the shape and convert to HWC + if self.unencoded_data_format == 'chw': + data = tf.reshape(data, [shape[0], shape[1], shape[2]]) + data = digits.chw_to_hwc(data) + else: # 'hwc' + data = tf.reshape(data, shape) + + if (self.channels == 3) and self.unencoded_channel_scheme == 'bgr': + data = digits.bgr_to_rgb(data) + + # Convert to float + data = tf.to_float(data) + # data = tf.image.convert_image_dtype(data, tf.float32) # normalize to [0:1) range + return data + + def create_input_pipeline(self): + """ + This function returns part of the graph that does data loading, and + includes a queueing, optional data decoding and optional post-processing + like data augmentation or mean subtraction. + + Args: + None. + Produces: + batch_x: Input data batch + batch_y: Label data batch + batch_k: A list of keys (strings) from which the batch originated + Returns: + None. 
+ """ + + # @TODO(tzaman) the container can be used if the reset function is implemented: + # see https://github.com/tensorflow/tensorflow/issues/4535#issuecomment-248990633 + # + # with tf.container('queue-container'): + + key_queue = self.get_queue() + + single_label = None + single_label_shape = None + if self.stage == digits.STAGE_INF: + single_key, single_data, single_data_shape, _, _ = self.get_single_data(key_queue) + else: + single_key, single_data, single_data_shape, single_label, single_label_shape = \ + self.get_single_data(key_queue) + + single_data_shape = tf.reshape(single_data_shape, [3]) # Shape the shape to have three dimensions + single_data = self.reshape_decode(single_data, single_data_shape) + + if self.labels_db_path: # Using a seperate label db; label can be anything + single_label_shape = tf.reshape(single_label_shape, [3]) # Shape the shape + single_label = self.labels_db.reshape_decode(single_label, single_label_shape) + elif single_label is not None: # Not using a seperate label db; label is a scalar + single_label = tf.reshape(single_label, []) + + # Mean Subtraction + if self.mean_loader: + with tf.name_scope('mean_subtraction'): + single_data = self.mean_loader.subtract_mean_op(single_data) + if LOG_MEAN_FILE: + expanded_data = tf.expand_dims(self.mean_loader.tf_mean_image, 0) + self.summaries.append(tf.summary.image('mean_image', expanded_data, max_outputs=1)) + + # (Random) Cropping + if self.croplen: + with tf.name_scope('cropping'): + if self.stage == digits.STAGE_TRAIN: + single_data = tf.random_crop(single_data, + [self.croplen, self.croplen, self.channels], + seed=self._seed) + else: # Validation or Inference + single_data = tf.image.resize_image_with_crop_or_pad(single_data, self.croplen, self.croplen) + + # Data Augmentation + if self.aug_dict: + with tf.name_scope('augmentation'): + flipflag = self.aug_dict['aug_flip'] + if flipflag == 'fliplr' or flipflag == 'fliplrud': + single_data = tf.image.random_flip_left_right(single_data, seed=self._seed) + if flipflag == 'flipud' or flipflag == 'fliplrud': + single_data = tf.image.random_flip_up_down(single_data, seed=self._seed) + + noise_std = self.aug_dict['aug_noise'] + if noise_std > 0.: + # Note the tf.random_normal requires a static shape + single_data = tf.add(single_data, tf.random_normal(self.get_shape(), + mean=0.0, + stddev=noise_std, + dtype=tf.float32, + seed=self._seed, + name='AWGN')) + + contrast_fact = self.aug_dict['aug_contrast'] + if contrast_fact > 0: + single_data = tf.image.random_contrast(single_data, + lower=1.-contrast_fact, + upper=1.+contrast_fact, + seed=self._seed) + + # @TODO(tzaman): rewrite the below HSV stuff entirely in a TF PR to be done in one single operation + aug_hsv = self.aug_dict['aug_HSV'] + if aug_hsv['h'] > 0.: + single_data = tf.image.random_hue(single_data, aug_hsv['h'], seed=self._seed) + if aug_hsv['s'] > 0.: + single_data = tf.image.random_saturation(single_data, + 1 - aug_hsv['s'], + 1 + aug_hsv['s'], + seed=self._seed) + if aug_hsv['v'] > 0.: + # closely resembles V - temporary until rewritten + single_data = tf.image.random_brightness(single_data, aug_hsv['v'], seed=self._seed) + + # @TODO(tzaman) whitening is so invasive that we need a way to add it to the val/inf too in a + # portable manner, like the mean file : how? If we don't find a way, don't use whitening. + aug_whitening = self.aug_dict['aug_whitening'] + if aug_whitening: + # Subtract off its own mean and divide by the standard deviation of its own the pixels. 
+ with tf.name_scope('whitening'): + single_data = tf.image.per_image_standardization(single_data) # N.B. also converts to float + + max_queue_capacity = min(math.ceil(self.total * MIN_FRACTION_OF_EXAMPLES_IN_QUEUE), + MAX_ABSOLUTE_EXAMPLES_IN_QUEUE) + + single_batch = [single_key, single_data] + if single_label is not None: + single_batch.append(single_label) + + if self.backend == 'tfrecords' and self.shuffle: + batch = tf.train.shuffle_batch( + single_batch, + batch_size=self.batch_size, + num_threads=NUM_THREADS_DATA_LOADER, + capacity=10*self.batch_size, # Max amount that will be loaded and queued + shapes=[[0], self.get_shape(), []], # Only makes sense is dynamic_pad=False #@TODO(tzaman) - FIXME + min_after_dequeue=5*self.batch_size, + allow_smaller_final_batch=True, # Happens if total%batch_size!=0 + name='batcher') + else: + batch = tf.train.batch( + single_batch, + batch_size=self.batch_size, + dynamic_pad=True, # Allows us to not supply fixed shape a priori + enqueue_many=False, # Each tensor is a single example + # set number of threads to 1 for tfrecords (used for inference) + num_threads=NUM_THREADS_DATA_LOADER if not self.is_inference else 1, + capacity=max_queue_capacity, # Max amount that will be loaded and queued + allow_smaller_final_batch=True, # Happens if total%batch_size!=0 + name='batcher') + + self.batch_k = batch[0] # Key + self.batch_x = batch[1] # Input + if len(batch) == 3: + # There's a label (unlike during inferencing) + self.batch_y = batch[2] # Output (label) + + +class LmdbLoader(LoaderFactory): + """ Loads files from lmbd files as used in Caffe + """ + def __init__(self): + pass + + def initialize(self): + try: + import lmdb + except ImportError: + logging.error("Attempt to create LMDB Loader but lmdb is not installed.") + exit(-1) + + self.unencoded_data_format = 'chw' + self.unencoded_channel_scheme = 'bgr' + + # Set up the data loader + self.lmdb_env = lmdb.open(self.db_path, readonly=True, lock=False) + self.lmdb_txn = self.lmdb_env.begin(buffers=False) + self.total = self.lmdb_txn.stat()['entries'] + self.keys = [key for key, _ in self.lmdb_txn.cursor()] + + # Read the first entry to get some info + lmdb_val = self.lmdb_txn.get(self.keys[0]) + datum = caffe_tf_pb2.Datum() + datum.ParseFromString(lmdb_val) + + self.channels = datum.channels + self.width = datum.width + self.height = datum.height + self.data_encoded = datum.encoded + self.float_data = datum.float_data + + if self.data_encoded: + # Obtain mime-type + self.data_mime = magic.from_buffer(datum.data, mime=True) + + if not self.float_data: + if self.bitdepth == 8: + self.image_dtype = tf.uint8 + else: + if self.data_mime == 'image/jpeg': + logging.error("Tensorflow does not support 16 bit jpeg decoding.") + exit(-1) + self.image_dtype = tf.uint16 + + def get_queue(self): + return tf.train.string_input_producer( + self.keys, + num_epochs=self.num_epochs, + capacity=self.total, + shuffle=self.shuffle, + seed=self._seed, + name='input_producer' + ) + + def get_tf_data_type(self): + """Returns the type of the data, in tf format. + It takes in account byte-data or floating point data. + It also takes in account the possible seperate lmdb label db. + Returns: + The tensorflow-datatype of the data + """ + return tf.float32 if self.float_data else tf.string + + def get_tf_label_type(self): + """Returns the type of the label, in tf format. + It takes in account byte-data or floating point data. + It also takes in account the possible seperate lmdb label db. 
+ Returns: + The tensorflow-datatype of the label + """ + if self.labels_db_path: + return self.labels_db.get_tf_data_type() + else: + # No seperate db, return scalar label + return tf.int64 + + def generate_data_op(self): + """Generates and returns an op that fetches a single sample of data. + + Args: + self: + + Returns: + A python function that is inserted as an op + """ + def get_data_and_shape(lmdb_txn, key): + val = lmdb_txn.get(key) + datum = caffe_tf_pb2.Datum() + datum.ParseFromString(val) + shape = np.array([datum.channels, datum.height, datum.width], dtype=np.int32) + if datum.float_data: + data = np.asarray(datum.float_data, dtype='float32') + else: + data = datum.data + label = np.asarray([datum.label], dtype=np.int64) # scalar label + return data, shape, label + + def get_data_op(key): + """Fetches a sample of data and its label from lmdb. If a seperate label database + exists, it will also load it from the seperate db inside this function. This is + done the data and its label are loaded at the same time, avoiding multiple queues + and race conditions. + + Args: + self: the current lmdb instance + + Returns: + single_data: One sample of training data + single_data_shape: The shape of the preceeding training data + single_label: The label that is the reference value describing the data + single_label_shape: The shape of the preceeding label data + """ + single_data, single_data_shape, single_label = get_data_and_shape(self.lmdb_txn, key) + single_label_shape = np.array([], dtype=np.int32) + if self.labels_db_path: + single_label, single_label_shape, _ = get_data_and_shape(self.labels_db.lmdb_txn, key) + return single_data, [single_data_shape], single_label, [single_label_shape] + return get_data_op + + def get_single_data(self, key_queue): + """ + Returns: + key, single_data, single_data_shape, single_label, single_label_shape + """ + key = key_queue.dequeue() # Operation that dequeues one key and returns a string with the key + py_func_return_type = [self.get_tf_data_type(), tf.int32, self.get_tf_label_type(), tf.int32] + d, ds, l, ls = tf.py_func(self.generate_data_op(), [key], py_func_return_type, name='data_reader') + return key, d, ds, l, ls + + def __del__(self): + self.lmdb_env.close() + + +class FileListLoader(LoaderFactory): + """ The FileListLoader loads files from a list of string(s) pointing to (a) file(s). + These files are then retrieved by their string and loaded according to their extension. + """ + def __init__(self): + pass + + def initialize(self): + self.float_data = False + self.data_encoded = True + + if self.backend == 'file': + # Single file + self.total = 1 + self.keys = [self.db_path] + first_file_path = self.db_path + elif self.backend == 'filelist': + # Single file with a list of files + with open(self.db_path) as f: + self.keys = f.readlines() + + # Retain only the images in the list + self.keys = [key.split(LIST_DELIMITER)[0].rstrip() for key in self.keys] + + if len(self.keys) > 0: + # Assume the first entry in the line is a pointer to the file path + first_file_path = self.keys[0] + else: + logging.error('Filelist (%s) contains no lines.' 
% (self.db_path)) + exit(-1) + else: + logging.error('Unsupported backend in FileListLoader (%s)' % (self.backend)) + exit(-1) + + self.total = len(self.keys) + + # Check first file for statistics + im = Image.open(first_file_path) + self.width, self.height = im.size + self.channels = 1 if im.mode == 'L' else 3 # @TODO(tzaman): allow more channels + + self.data_mime = magic.from_file(first_file_path, mime=True) + + if self.bitdepth == 8: + self.image_dtype = tf.uint8 + else: + if self.data_mime == 'image/jpeg': + logging.error("Tensorflow does not support 16 bit jpeg decoding.") + exit(-1) + self.image_dtype = tf.uint16 + + self.reader = tf.WholeFileReader() + + def get_queue(self): + return tf.train.string_input_producer( + self.keys, + num_epochs=self.num_epochs, + capacity=self.total, + shuffle=self.shuffle, + seed=self._seed, + name='input_producer' + ) + + def get_single_data(self, key_queue): + """ + Returns: + key, single_data, single_data_shape, single_label, single_label_shape + """ + key, value = self.reader.read(key_queue) + shape = np.array([self.width, self.height, self.channels], dtype=np.int32) # @TODO: this is not dynamic + return key, value, shape # @TODO(tzaman) - Note: will only work for inferencing stage! + + +class TFRecordsLoader(LoaderFactory): + """ The TFRecordsLoader connects directly into the tensorflow graph. + It uses TFRecords, the 'standard' tensorflow data format. + """ + def __init__(self): + pass + + def initialize(self): + self.float_data = False # For now only strings + self.unencoded_data_format = 'hwc' + self.unencoded_channel_scheme = 'rgb' + self.reader = None + if self.bitdepth == 8: + self.image_dtype = tf.uint8 + else: + self.image_dtype = tf.uint16 + + # Count all the records @TODO(tzaman): account for shards! + # Loop the records in path @TODO(tzaman) get this from a txt? + # self.db_path += '/test.tfrecords' # @TODO(tzaman) this is a hack + + self.shard_paths = [] + list_db_files = os.path.join(self.db_path, 'list.txt') + self.total = 0 + if os.path.exists(list_db_files): + files = [os.path.join(self.db_path, f) for f in open(list_db_files, 'r').read().splitlines()] + else: + files = [self.db_path] + for shard_path in files: + # Account for the relative path format in list.txt + record_iter = tf.python_io.tf_record_iterator(shard_path) + for r in record_iter: + self.total += 1 + if not self.total: + raise ValueError('Database or shard contains no records (%s)' % (self.db_path)) + self.shard_paths.append(shard_path) + self.keys = ['%s:0' % p for p in self.shard_paths] + + # Use last record read to extract some preliminary data that is sometimes needed or useful + example_proto = tf.train.Example() + example_proto.ParseFromString(r) + + # @TODO(tzaman) - bitdepth flag? + self.channels = example_proto.features.feature['depth'].int64_list.value[0] + self.height = example_proto.features.feature['height'].int64_list.value[0] + self.width = example_proto.features.feature['width'].int64_list.value[0] + data_encoding_id = example_proto.features.feature['encoding'].int64_list.value[0] + if data_encoding_id: + self.data_encoded = True + self.data_mime = 'image/png' if data_encoding_id == 1 else 'image/jpeg' + else: + self.data_encoded = False + + # Set up the reader + # @TODO(tzaman) there's a filename queue because it can have multiple (sharded) tfrecord files (!) + # .. account for that! 
+ self.reader = tf.TFRecordReader(name='tfrecord_reader') + + def get_queue(self): + return tf.train.string_input_producer(self.shard_paths, + num_epochs=self.num_epochs, + shuffle=self.shuffle, + seed=self._seed, + name='input_producer' + ) + + def get_single_data(self, key_queue): + """ + Returns: + key, single_data, single_data_shape, single_label, single_label_shape + """ + + key, serialized_example = self.reader.read(key_queue) + features = tf.parse_single_example( + serialized_example, + # Defaults are not specified since both keys are required. + features={ + 'image_raw': tf.FixedLenFeature([self.height, self.width, self.channels], tf.float32), + 'label': tf.FixedLenFeature([], tf.int64), + }) + + d = features['image_raw'] + ds = np.array([self.height, self.width, self.channels], dtype=np.int32) # @TODO: this is not dynamic + l = features['label'] # l = tf.cast(features['label'], tf.int32) + ls = np.array([], dtype=np.int32) # @TODO: this is not dynamic + return key, d, ds, l, ls + + +class Hdf5Loader(LoaderFactory): + + def __init__(self): + pass + + def initialize(self): + try: + import h5py + except ImportError: + logging.error("Attempt to create HDF5 Loader but h5py is not installed.") + exit(-1) + + self.data_encoded = False + self.float_data = True # Always stored as float32 + self.keys = None # Not using keys + + self.h5dbs = [] + self.h5dbs_endrange = [] + list_db_files = self.db_path + '/list.txt' + self.total = 0 + with open(list_db_files) as f: + for line in f: + # Account for the relative path format in list.txt + fn = self.db_path + '/' + os.path.basename(line.strip()) + db = h5py.File(fn) + self.check_hdf5_db(db) + self.total += len(db['data']) + self.h5dbs_endrange.append(self.total) + self.h5dbs.append(db) + + # Read the first file to get shape information + self.channels, self.height, self.width = self.h5dbs[0]['data'][0].shape + + def check_hdf5_db(self, db): + # Make sure we have data and labels in the db + if "data" not in db or "label" not in db: + logging.error("The HDF5 loader requires both a 'data' and 'label' group in the HDF5 root.") + exit(-1) + + if len(db['data']) != len(db['label']): + logging.error("HDF5 data and label amount mismatch (%d/%d)" % (len(db['data']), len(db['label']))) + exit(-1) + + if len(db['data']) == 0: + logging.error("HDF5 database contains no data.") + exit(-1) + + def get_queue(self): + return tf.train.range_input_producer( + self.total, + num_epochs=self.num_epochs, + capacity=self.total, + shuffle=self.shuffle, + seed=self._seed, + name='input_producer' + ) + + def get_tf_data_type(self): + """Returns the type of the data, in tf format. + It takes in account byte-data or floating point data. + It also takes in account the possible seperate lmdb label db. + Returns: + The tensorflow-datatype of the data + """ + return tf.float32 if self.float_data else tf.string + + def get_tf_label_type(self): + """Returns the type of the label, in tf format. + It takes in account byte-data or floating point data. + It also takes in account the possible seperate lmdb label db. 
+ Returns: + The tensorflow-datatype of the label + """ + if self.labels_db_path: + return self.labels_db.get_tf_data_type() + else: + # No seperate db, return scalar label + return tf.int64 + + def get_data_and_shape(self, sample_key): + """ Gets a sample across multiple hdf5 databases + """ + prev_end_range = 0 + for i, end_range in enumerate(self.h5dbs_endrange): + if sample_key < end_range: + key_within_db = sample_key-prev_end_range + data = self.h5dbs[i]['data'][key_within_db] + shape = np.asarray(data.shape, dtype=np.int32) + label = self.h5dbs[i]['label'][key_within_db].astype(np.int64) + return data, shape, label + prev_end_range = end_range + + logging.error("Out of range") # @TODO(tzaman) out of range error + exit(-1) + + def generate_data_op(self): + """Generates and returns an op that fetches a single sample of data. + Returns: + A python function that is inserted as an op + """ + def get_data_op(key): + """Fetches a sample of data and its label from db. If a seperate label database + exists, it will also load it from the seperate db inside this function. This is + done the data and its label are loaded at the same time, avoiding multiple queues + and race conditions. + Args: + key: integer key id + Returns: + single_data: One sample of training data + single_data_shape: The shape of the preceeding training data + single_label: The label that is the reference value describing the data + single_label_shape: The shape of the preceeding label data + """ + single_data, single_data_shape, single_label = self.get_data_and_shape(key) + single_label_shape = np.array([], dtype=np.int32) + if self.labels_db_path: + single_label, single_label_shape, _ = self.labels_db.get_data_and_shape(key) + return single_data, [single_data_shape], single_label, [single_label_shape] + return get_data_op + + def get_single_data(self, key_queue): + """ + Returns: + key, single_data, single_data_shape, single_label, single_label_shape + """ + key = key_queue.dequeue() # Operation that dequeues one key and returns a string with the key + py_func_return_type = [self.get_tf_data_type(), tf.int32, self.get_tf_label_type(), tf.int32] + d, ds, l, ls = tf.py_func(self.generate_data_op(), [key], py_func_return_type, name='data_reader') + return key, d, ds, l, ls + + def __del__(self): + for db in self.h5dbs: + db.close() + + +class GanGridLoader(LoaderFactory): + """ + The GanGridLoader generates data for a GAN. 
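+    Rather than reading files, it simply produces a stream of integer indices (keys)
+    that are passed to the model as data.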
+ """ + def __init__(self): + pass + + def initialize(self): + self.float_data = False # For now only strings + self.keys = None # Not using keys + self.unencoded_data_format = 'hwc' + self.unencoded_channel_scheme = 'rgb' + self.reader = None + self.image_dtype = tf.float32 + + self.channels = 1 + self.height = 1 + self.width = 100 + self.data_encoded = False + + self.total = 100000 + + def get_queue(self): + return tf.train.range_input_producer( + self.total, + num_epochs=self.num_epochs, + capacity=self.total, + shuffle=self.shuffle, + seed=self._seed, + name='input_producer' + ) + + def get_single_data(self, key_queue): + """ + Returns: + key, single_data, single_data_shape, single_label, single_label_shape + """ + + key = tf.to_int32(key_queue.dequeue()) # Operation that dequeues an index + + d = key + ds = np.array([1, 1, 1], dtype=np.int32) + + return key, d, ds, None, None diff --git a/digits/tools/tensorflow/utils.py b/digits/tools/tensorflow/utils.py new file mode 100644 index 000000000..3e927e12f --- /dev/null +++ b/digits/tools/tensorflow/utils.py @@ -0,0 +1,109 @@ +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. +# +# This document should comply with PEP-8 Style Guide +# Linter: pylint + +""" +Digits default Tensorflow Ops as helper functions. + +""" + +import functools +import tensorflow as tf +from tensorflow.python.client import device_lib + +STAGE_TRAIN = 'train' +STAGE_VAL = 'val' +STAGE_INF = 'inf' + + +class GraphKeys(object): + TEMPLATE = "model" + QUEUE_RUNNERS = "queue_runner" + MODEL = "model" + LOSS = "loss" # The namescope + LOSSES = "losses" # The collection + LOADER = "data" + + +def model_property(function): + # From https://danijar.com/structuring-your-tensorflow-models/ + attribute = '_cache_' + function.__name__ + + @property + @functools.wraps(function) + def decorator(self): + if not hasattr(self, attribute): + setattr(self, attribute, function(self)) + return getattr(self, attribute) + return decorator + + +def classification_loss(pred, y): + """ + Definition of the loss for regular classification + """ + ssoftmax = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=y, name='cross_entropy_single') + return tf.reduce_mean(ssoftmax, name='cross_entropy_batch') + + +def mse_loss(lhs, rhs): + return tf.reduce_mean(tf.square(lhs - rhs)) + + +def constrastive_loss(lhs, rhs, y, margin=1.0): + """ + Contrastive loss confirming to the Caffe definition + """ + d = tf.reduce_sum(tf.square(tf.subtract(lhs, rhs)), 1) + d_sqrt = tf.sqrt(1e-6 + d) + loss = (y * d) + ((1 - y) * tf.square(tf.maximum(margin - d_sqrt, 0))) + return tf.reduce_mean(loss) # Note: constant component removed (/2) + + +def classification_accuracy_top_n(pred, y, top_n): + single_acc_t = tf.nn.in_top_k(pred, y, top_n) + return tf.reduce_mean(tf.cast(single_acc_t, tf.float32), name='accuracy_top_%d' % top_n) + + +def classification_accuracy(pred, y): + """ + Default definition of accuracy. Something is considered accurate if and only + if a true label exactly matches the highest value in the prediction vector. 
+ """ + single_acc = tf.equal(y, tf.argmax(pred, 1)) + return tf.reduce_mean(tf.cast(single_acc, tf.float32), name='accuracy') + + +def nhwc_to_nchw(x): + return tf.transpose(x, [0, 3, 1, 2]) + + +def hwc_to_chw(x): + return tf.transpose(x, [2, 0, 1]) + + +def nchw_to_nhwc(x): + return tf.transpose(x, [0, 2, 3, 1]) + + +def chw_to_hwc(x): + return tf.transpose(x, [1, 2, 0]) + + +def bgr_to_rgb(x): + return tf.reverse(x, [2]) + + +def rgb_to_bgr(x): + return tf.reverse(x, [2]) + + +def get_available_gpus(): + """ + Queries the CUDA GPU devices visible to Tensorflow. + Returns: + A list with tf-style gpu strings (f.e. ['/gpu:0', '/gpu:1']) + """ + local_device_protos = device_lib.list_local_devices() + return [x.name for x in local_device_protos if x.device_type == 'GPU'] diff --git a/digits/tools/torch/LRPolicy.lua b/digits/tools/torch/LRPolicy.lua index 1f1908afc..b62a20c10 100644 --- a/digits/tools/torch/LRPolicy.lua +++ b/digits/tools/torch/LRPolicy.lua @@ -74,6 +74,7 @@ function LRPolicy:GetLearningRate(iter) else --have to include additional comments print("Unknown learning rate policy: " .. self.policy) + os.exit(-1) end return rate diff --git a/digits/tools/torch/data.lua b/digits/tools/torch/data.lua index 9718896d5..b303a3f44 100644 --- a/digits/tools/torch/data.lua +++ b/digits/tools/torch/data.lua @@ -2,7 +2,6 @@ require 'torch' -- torch require 'nn' -- provides a normalization operator require 'utils' -- various utility functions -require 'hdf5' -- import HDF5 now as it is unsafe to do it from a worker thread local threads = require 'threads' -- for multi-threaded data loader check_require('image') -- for color transforms diff --git a/digits/tools/torch/main.lua b/digits/tools/torch/main.lua index 4dc190ad9..65f6c6e18 100644 --- a/digits/tools/torch/main.lua +++ b/digits/tools/torch/main.lua @@ -328,7 +328,7 @@ if opt.visualizeModel then print('\nModel: \n' .. model:__tostring()) print('\nCriterion: \n' .. loss:__tostring()) logmessage.display(0,'Network definition ends') - os.exit(-1) + os.exit(0) end -- NOTE: currently randomState option wasn't used in DIGITS. This option was provided to be used from command line, if required. diff --git a/digits/tools/torch/test.lua b/digits/tools/torch/test.lua index 9562c508e..f56918d7f 100644 --- a/digits/tools/torch/test.lua +++ b/digits/tools/torch/test.lua @@ -31,7 +31,7 @@ opt = lapp[[ -y,--ccn2 (default no) should be 'yes' if ccn2 is used in network. Default : false -s,--save (default .) save directory ---testMany (default no) If this option is 'yes', then "image" input parameter should specify the file with all the images to be tested +--testMany (default no) If this option is 'yes', then the "image" input parameter should specify the text-file containing a list of image files to be tested --testUntil (default -1) specifies how many images in the "image" file to be tested. This parameter is only valid when testMany is set to "yes" --subtractMean (default 'image') Select mean subtraction method. Possible values are 'image', 'pixel' or 'none'. --labels (default '') file contains label definitions @@ -268,11 +268,11 @@ local function predictBatch(inputs, model) prediction,classes = prediction:float():sort(true) for j=1,topN do -- output format : LABEL_ID (LABEL_NAME) CONFIDENCE - logmessage.display(0,'For image ' .. index ..', predicted class '..tostring(j)..': ' .. classes[j] .. ' (' .. class_labels[classes[j]] .. ') ' .. prediction[j]) + logmessage.display(0,'For image ' .. index .. ', predicted class ' .. tostring(j) .. ': ' .. 
classes[j] .. ' (' .. class_labels[classes[j]] .. ') ' .. prediction[j]) end else allPredictions = utils.dataToJson(prediction) - logmessage.display(0,'Predictions for image ' .. index ..': '..allPredictions) + logmessage.display(0,'Predictions for image ' .. index .. ': ' .. allPredictions) end end end diff --git a/digits/utils/forms.py b/digits/utils/forms.py index 83d503740..dd075fde3 100644 --- a/digits/utils/forms.py +++ b/digits/utils/forms.py @@ -27,10 +27,8 @@ def _validator(form, field): if all_conditions_met: # Verify that data exists if field.data is None \ - or (isinstance(field.data, (str, unicode)) - and not field.data.strip()) \ - or (isinstance(field.data, FileStorage) - and not field.data.filename.strip()): + or (isinstance(field.data, (str, unicode)) and not field.data.strip()) \ + or (isinstance(field.data, FileStorage) and not field.data.filename.strip()): raise validators.ValidationError('This field is required.') else: # This field is not required, ignore other errors @@ -52,11 +50,9 @@ def _validator(form, field): other_field_value = getattr(form, other_field).data if other_field_value: # Verify that data exists - if field.data is None \ - or (isinstance(field.data, (str, unicode)) - and not field.data.strip()) \ - or (isinstance(field.data, FileStorage) - and not field.data.filename.strip()): + if field.data is None or \ + (isinstance(field.data, (str, unicode)) and not field.data.strip()) \ + or (isinstance(field.data, FileStorage) and not field.data.filename.strip()): raise validators.ValidationError('This field is required if %s is set.' % other_field) else: # This field is not required, ignore other errors diff --git a/digits/utils/lmdbreader.py b/digits/utils/lmdbreader.py index 3d5d99efe..56810beaf 100644 --- a/digits/utils/lmdbreader.py +++ b/digits/utils/lmdbreader.py @@ -23,6 +23,8 @@ def __init__(self, location): with self._db.begin() as txn: self.total_entries = txn.stat()['entries'] + self.txn = self._db.begin() + def entries(self): """ Generator returning all entries in the DB @@ -31,3 +33,7 @@ def entries(self): cursor = txn.cursor() for item in cursor: yield item + + def entry(self, key): + """Return single entry""" + return self.txn.get(key) diff --git a/digits/views.py b/digits/views.py index 6068cecc5..a4707b50a 100644 --- a/digits/views.py +++ b/digits/views.py @@ -634,6 +634,10 @@ def handle_error(e): details['trace'] = trace.split('\n') return flask.jsonify({'error': details}), status_code else: + message = message.replace('\\n', '
    ')
+        if isinstance(e, digits.frameworks.errors.NetworkVisualizationError):
+            trace = message
+            message = ''
         return flask.render_template('error.html',
                                      title=error_type,
                                      message=message,
diff --git a/docs/BuildDigits.md b/docs/BuildDigits.md
index 2bc6c4f8d..e1e12241a 100644
--- a/docs/BuildDigits.md
+++ b/docs/BuildDigits.md
@@ -72,6 +72,10 @@ optional arguments:
 
 Now that you're up and running, check out the [Getting Started Guide](GettingStarted.md).
 
+# Development
+
+If you are interested in developing for DIGITS or working with its source code, check out the [Development Setup Guide](DevelopmentSetup.md).
+
 ## Troubleshooting
 
 Most configuration options should have appropriate defaults.
diff --git a/docs/BuildTensorflow.md b/docs/BuildTensorflow.md
new file mode 100644
index 000000000..6b8b36275
--- /dev/null
+++ b/docs/BuildTensorflow.md
@@ -0,0 +1,47 @@
+# Installing TensorFlow
+
+DIGITS now supports TensorFlow as an optional alternative backend to Caffe or Torch.
+
+> NOTE: TensorFlow support is still experimental!
+
+We recommend installing TensorFlow in a separate, dedicated environment. This is because TensorFlow support is still in development and stability is not ensured.
+
+Installation for [Ubuntu](https://www.tensorflow.org/install/install_linux#installing_with_virtualenv)
+
+Installation for [Mac](https://www.tensorflow.org/install/install_mac#installing_with_virtualenv)
+
+## Requirements
+
+DIGITS currently targets tensorflow-gpu v1.2.
+
+TensorFlow for DIGITS requires one or more NVIDIA GPUs with CUDA Compute Capability 3.0 or higher. See [the official GPU support list](https://developer.nvidia.com/cuda-gpus) to check whether your GPU meets this requirement.
+
+Along with that requirement, the following should be installed:
+
+* One or more NVIDIA GPUs ([details](InstallCuda.md#gpu))
+* An NVIDIA driver ([details and installation instructions](InstallCuda.md#driver))
+* A CUDA toolkit ([details and installation instructions](InstallCuda.md#cuda-toolkit))
+* cuDNN 5.1 ([download page](https://developer.nvidia.com/cudnn))
+
+### A Note About cuDNN and TensorFlow
+Currently, TensorFlow v1.2 targets cuDNN 5.1. The latest cuDNN version is 6. **To have TensorFlow running in DIGITS, you must have cuDNN 5.1 installed. Currently, cuDNN 6 is incompatible with TensorFlow.** To install it, use the following command in a terminal
+
+```
+sudo apt-get install libcudnn5
+```
+
+
+## Installation
+
+These instructions are based on [the official TensorFlow instructions](https://www.tensorflow.org/versions/master/install/).
+
+TensorFlow is installed with pip; to install it, simply run
+```
+pip install tensorflow-gpu==1.2.0
+```
+
+TensorFlow should then install effortlessly and pull in all of its required dependencies.
+
+## Getting Started With TensorFlow In DIGITS
+
+Follow [these instructions](GettingStartedTensorflow.md) for information on getting started with TensorFlow in DIGITS.
diff --git a/docs/BuildTorch.md b/docs/BuildTorch.md
index d59b8031a..b354b1eae 100644
--- a/docs/BuildTorch.md
+++ b/docs/BuildTorch.md
@@ -16,7 +16,7 @@ Install some dependencies with Deb packages:
 sudo apt-get install --no-install-recommends git software-properties-common
 ```
 
-## Basic install
+## Basic Installation
 
 These instructions are based on [the official Torch instructions](http://torch.ch/docs/getting-started.html).
```sh diff --git a/docs/DevelopmentSetup.md b/docs/DevelopmentSetup.md new file mode 100644 index 000000000..32fdeb75d --- /dev/null +++ b/docs/DevelopmentSetup.md @@ -0,0 +1,34 @@ +# Development + +The source code for DIGITS is available on [github](https://github.com/NVIDIA/DIGITS). + +To have access to your local machine, you may clone from the github repository with +``` +git clone https://github.com/NVIDIA/DIGITS.git +``` +Or you may download the source code as a zip file from the github website. + +## Running DIGITS in Development + +DIGITS comes with the script to run for a development server. +To run the development server, use +``` +./digits-devserver +``` + +## Running unit tests for DIGITS + +To successfully run all the unit tests, the following plugins have to be installed +``` +sudo pip install -r ./requirements_test.txt +``` + +To run all the tests for DIGITS, use +``` +./digits-test +``` + +If you would like to have a verbose output with the name of the tests, use +``` +./digits-test -v +``` \ No newline at end of file diff --git a/docs/GettingStartedTensorflow.md b/docs/GettingStartedTensorflow.md new file mode 100644 index 000000000..d1128bbbf --- /dev/null +++ b/docs/GettingStartedTensorflow.md @@ -0,0 +1,244 @@ +# Getting Started with TensorFlow™ in DIGITS + +Table of Contents +================= +* [Enabling Support For TensorFlow In DIGITS](#enabling-support-for-tensorflow-in-digits) +* [Selecting TensorFlow When Creating A Model In DIGITS](#selecting-tensorflow-when-creating-a-model-in-digits) +* [Defining A TensorFlow Model In DIGITS](#defining-a-tensorflow-model-in-digits) + * [Provided Properties](#provided-properties) + * [Internal Properties](#internal-properties) + * [Tensors](#tensors) +* [Other TensorFlow Tools in DIGITS](#other-tensorflow-tools-in-digits) + * [Provided Helpful Functions](#provided-helpful-functions) + * [Visualization With TensorBoard](#visualization-with-tensorboard) +* [Examples](#examples) + * [Simple Auto-Encoder Network](#simple-auto-encoder-network) + * [Freezing Variables in Pre-Trained Models by Renaming](#freezing-variables-in-pre-trained-models-by-renaming) + * [Multi-GPU Training](#multi-gpu-training) + +## Enabling Support For TensorFlow In DIGITS + +DIGITS will automatically enable support for TensorFlow if it detects that TensorFlow-gpu is installed in the system. This is done by a line of python code that attempts to ```import tensorflow``` to see if it actually imports. 
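+
+As a quick sanity check (this one-liner is purely illustrative and not part of DIGITS itself), you can try the same import from the Python environment that DIGITS runs in:
+
+```
+python -c "import tensorflow; print(tensorflow.__version__)"
+```
+
+If the import succeeds and prints a version number here, DIGITS should be able to enable TensorFlow support as well.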
+
+If DIGITS cannot enable TensorFlow, a message will be printed in the console saying: ```TensorFlow support is disabled```
+
+## Selecting TensorFlow When Creating A Model In DIGITS
+
+Click on the "TensorFlow" tab on the model creation page
+
+![Select TensorFlow](images/Select_TensorFlow.png)
+
+## Defining A TensorFlow Model In DIGITS
+
+To define a TensorFlow model in DIGITS, you need to write a Python class that follows this basic template:
+
+```python
+class UserModel(Tower):
+
+    @model_property
+    def inference(self):
+        # Your code here
+        return model
+
+    @model_property
+    def loss(self):
+        # Your code here
+        return loss
+```
+
+For example, this is what it looks like for [LeNet-5](http://yann.lecun.com/exdb/lenet/), a model that was created for the classification of handwritten digits by Yann LeCun:
+
+```python
+class UserModel(Tower):
+
+    @model_property
+    def inference(self):
+        x = tf.reshape(self.x, shape=[-1, self.input_shape[0], self.input_shape[1], self.input_shape[2]])
+        # scale (divide by MNIST std)
+        x = x * 0.0125
+        with slim.arg_scope([slim.conv2d, slim.fully_connected],
+                            weights_initializer=tf.contrib.layers.xavier_initializer(),
+                            weights_regularizer=slim.l2_regularizer(0.0005)):
+            model = slim.conv2d(x, 20, [5, 5], padding='VALID', scope='conv1')
+            model = slim.max_pool2d(model, [2, 2], padding='VALID', scope='pool1')
+            model = slim.conv2d(model, 50, [5, 5], padding='VALID', scope='conv2')
+            model = slim.max_pool2d(model, [2, 2], padding='VALID', scope='pool2')
+            model = slim.flatten(model)
+            model = slim.fully_connected(model, 500, scope='fc1')
+            model = slim.dropout(model, 0.5, is_training=self.is_training, scope='do1')
+            model = slim.fully_connected(model, self.nclasses, activation_fn=None, scope='fc2')
+        return model
+
+    @model_property
+    def loss(self):
+        loss = digits.classification_loss(self.inference, self.y)
+        accuracy = digits.classification_accuracy(self.inference, self.y)
+        self.summaries.append(tf.summary.scalar(accuracy.op.name, accuracy))
+        return loss
+```
+
+The properties ```inference``` and ```loss``` must be defined, the class must be called ```UserModel```, and it must inherit from ```Tower```. This is how DIGITS interacts with the Python code.
+
+### Provided Properties
+
+Properties that are accessible through ```self```
+
+Property name | Type | Description
+--------------|-----------|------------
+`nclasses` | number | Number of classes (for classification datasets). For other types of datasets, this is undefined
+`input_shape` | Tensor | Shape (1D Tensor) of the first input Tensor. For image data, this is set to height, width, and channels accessible by [0], [1], and [2] respectively
+`is_training` | boolean | Whether this is a training graph
+`is_inference` | boolean | Whether this graph is created for inference/testing
+`x` | Tensor | The input node, with the shape of [N, H, W, C]
+`y` | Tensor | The label, [N] for scalar labels, [N, H, W, C] otherwise. Defined only if `self.is_training` is True
+
+### Internal Properties
+
+These properties are defined in the `UserModel` class written by the user
+
+Property name | Return Type | Description
+--------------|-------------|------------
+`__init__()` | None | The constructor for the `UserModel` class
+`inference()` | Tensor | Called during training and inference
+`loss()` | Tensor | Called during training to determine the loss and the variables to train
+
+### Tensors
+
+The network is fed with TensorFlow Tensor objects in [N, H, W, C] format.
+
+## Other TensorFlow Tools in DIGITS
+
+DIGITS provides a few useful tools to help with your development with TensorFlow.
+
+### Provided Helpful Functions
+
+DIGITS provides a few helper functions to assist with creating the model. These are the functions provided by the `digits` helper module:
+
+Function Name | Parameters | Description
+--------------------|---------------------|-------------
+`classification_loss` | pred - the images to be classified <br> y - the labels | Used for classification training to calculate the loss of the image classification
+`mse_loss` | lhs - left hand tensor <br> rhs - right hand tensor | Used for calculating the mean squared error between 2 tensors
+`constrastive_loss` | lhs - left hand tensor <br> rhs - right hand tensor <br> y - the labels | Calculates the contrastive loss with respect to the Caffe definition
+`classification_accuracy` | pred - the image to be classified <br> y - the labels | Used to measure how accurate the classification task is
+`nhwc_to_nchw` | x - the tensor to transpose | Transposes a tensor that was originally in NHWC format to NCHW. The tensor must have rank 4
+`nchw_to_nhwc` | x - the tensor to transpose | Transposes a tensor that was originally in NCHW format to NHWC. The tensor must have rank 4
+`hwc_to_chw` | x - the tensor to transpose | Transposes a tensor that was originally in HWC format to CHW. The tensor must have rank 3
+`chw_to_hwc` | x - the tensor to transpose | Transposes a tensor that was originally in CHW format to HWC. The tensor must have rank 3
+`bgr_to_rgb` | x - the tensor to transform | Transforms a tensor that was originally in BGR channel order to RGB
+`rgb_to_bgr` | x - the tensor to transform | Transforms a tensor that was originally in RGB channel order to BGR
+
+### Visualization With TensorBoard
+
+![TensorBoard](images/TensorBoard.png)
+
+TensorBoard is a visualization tool provided by TensorFlow for inspecting the graph of your neural network. DIGITS provides easy access to TensorBoard network visualization for your network while you are creating it. This can be accessed by clicking on the `Visualize` button under `Custom Network`, as seen in the image below.
+
+![Visualize TensorBoard](images/visualize_button.png)
+
+If there is something wrong with the network model, DIGITS will automatically provide you with the stack trace and the error message to help you locate the problem.
+
+You can also spin up the full TensorBoard server while your model is training with the command
+```
+$ tensorboard --logdir <job_dir>/tb/
+```
+where `<job_dir>` is the directory in which the model is being trained, which can be found here:
+
+![Job Dir](images/job-dir.png)
+
+Afterwards, you can open the TensorBoard page by going to
+`http://localhost:6006`
+
+Or you can click the `Tensorboard` link under Visualization
+
+![Visualize Button](images/visualize-btn.png)
+
+To learn more about how TensorBoard works, see the [official TensorFlow documentation](https://www.tensorflow.org/get_started/summaries_and_tensorboard)
+
+## Examples
+
+### Simple Auto-Encoder Network
+
+The following network is a simple auto-encoder that demonstrates how to structure a TensorFlow model in DIGITS. An auto-encoder is a 2-part network that essentially acts as a compression mechanism: the first part tries to compress the image into a representation smaller than the original, while the second part tries to decompress the compressed representation created by the compression network.
+
+```python
+class UserModel(Tower):
+
+    @model_property
+    def inference(self):
+
+        # the order for input shape is [0] -> H, [1] -> W, [2] -> C
+        # this is because tensorflow's default order is NHWC
+        model = tf.reshape(self.x, shape=[-1, self.input_shape[0], self.input_shape[1], self.input_shape[2]])
+        image_dim = self.input_shape[0] * self.input_shape[1]
+
+        with slim.arg_scope([slim.fully_connected],
+                            weights_initializer=tf.contrib.layers.xavier_initializer(),
+                            weights_regularizer=slim.l2_regularizer(0.0005)):
+
+            # first we flatten each image into a vector of length height * width
+            model = tf.reshape(model, shape=[-1, image_dim])
+
+            # encode the image
+            model = slim.fully_connected(model, 300, scope='fc1')
+            model = slim.fully_connected(model, 50, scope='fc2')
+
+            # decode the image
+            model = slim.fully_connected(model, 300, scope='fc3')
+            model = slim.fully_connected(model, image_dim, activation_fn=None, scope='fc4')
+
+            # reshape it back to the original image shape
+            model = tf.reshape(model, shape=[-1, self.input_shape[0], self.input_shape[1], self.input_shape[2]])
+
+        return model
+
+    @model_property
+    def loss(self):
+
+        # In an autoencoder, we compare the encoded-and-then-decoded image with the original
+        original = tf.reshape(self.x, shape=[-1, self.input_shape[0], self.input_shape[1], self.input_shape[2]])
+
+        # self.inference is called to get the processed image
+        model = self.inference
+        loss = digits.mse_loss(original, model)
+
+        return loss
+```
+
+### Freezing Variables in Pre-Trained Models by Renaming
+
+The following demonstrates how to specify which weights we would like to use for training. This works best with a pre-trained model and is applicable when fine-tuning a model.
+
+When you originally train a model, TensorFlow saves the variables under their specified names. When you reload the model to retrain it, TensorFlow simultaneously reloads all of those variables and marks them as available to retrain if they are specified in the model definition. When you change the name of a variable in the model, TensorFlow then knows not to train that variable and thus "freezes" it.
+ +```python +class UserModel(Tower): + + @model_property + def inference(self): + + model = construct_model() + """code to construct the network omitted""" + + # assuming the original model have weight2 and bias2 variables + # in here, we renamed them by adding the suffix _not_in_use + # this tells TensorFlow that these variables in the pre-trained model should + # not be retrained and it should be frozen + # If we would like to freeze a weight, all we have to do is just rename it + self.weights = { + 'weight1': tf.get_variable('weight1', [5, 5, self.input_shape[2], 20], initializer=tf.contrib.layers.xavier_initializer()), + 'weight2': tf.get_variable('weight2_not_in_use', [5, 5, 20, 50], initializer=tf.contrib.layers.xavier_initializer()) + } + + self.biases = { + 'bias1': tf.get_variable('bias1', [20], initializer=tf.constant_initializer(0.0)), + 'bias2': tf.get_variable('bias2_not_in_use', [50], initializer=tf.constant_initializer(0.0)) + } + + return model + + @model_property + def loss(self): + loss = calculate_loss() + """code to calculate loss omitted""" + return loss +``` diff --git a/docs/GettingStartedTorch.md b/docs/GettingStartedTorch.md index b1bb1703b..8eae62386 100644 --- a/docs/GettingStartedTorch.md +++ b/docs/GettingStartedTorch.md @@ -14,9 +14,6 @@ Table of Contents * [Supervised Regression Learning](#supervised-regression-learning) * [Command Line Inference](#command-line-inference) * [Multi-GPU training](#multi-gpu-training) -* [Tutorials](#tutorials) - * [Training an autoencoder](#training-an-autoencoder) - * [Training a regression model](#training-a-regression-model) ## Enabling support for Torch7 in DIGITS diff --git a/docs/ModelStore.md b/docs/ModelStore.md index 818e72dee..9ecffc023 100644 --- a/docs/ModelStore.md +++ b/docs/ModelStore.md @@ -1,6 +1,22 @@ # Model Store ## Introduction +Model Store lists models in user-specified servers. +Users can imports models from Model Store into DIGITS. + + +## Setting up environment variable +The configuration of Model Store requires one environment variable DIGITS_MODEL_STORE_URL to be set. +NVIDIA plans to publish one public Model Store at http://developer.download.nvidia.com/compute/machine-learning/modelstore/5.0. +You can set up the environment variable with that url before launching DIGITS. +For example, run the following command in your Bash shell. +``` shell +export DIGITS_MODEL_STORE_URL='http://developer.download.nvidia.com/compute/machine-learning/modelstore/5.0' +``` +If multiple model stores are available, specify their url's, separated by the comma (,). +``` shell +export DIGITS_MODEL_STORE_URL='http://localhost/mymodelstore,http://dlserver/teammodelstore' +``` DIGITS 5.0 introduces the concept of a "model store," which is a collection of trained models that can be used as pre-trained weights to accelerate training convergence. A DIGITS server can be configured to connect to one or more model stores to download these trained models from the store to the server. 
diff --git a/docs/images/Select_TensorFlow.png b/docs/images/Select_TensorFlow.png new file mode 100644 index 000000000..cd2859a16 Binary files /dev/null and b/docs/images/Select_TensorFlow.png differ diff --git a/docs/images/TensorBoard.png b/docs/images/TensorBoard.png new file mode 100644 index 000000000..daa0137cb Binary files /dev/null and b/docs/images/TensorBoard.png differ diff --git a/docs/images/job-dir.png b/docs/images/job-dir.png new file mode 100644 index 000000000..d0b748770 Binary files /dev/null and b/docs/images/job-dir.png differ diff --git a/docs/images/visualize-btn.png b/docs/images/visualize-btn.png new file mode 100644 index 000000000..80a332606 Binary files /dev/null and b/docs/images/visualize-btn.png differ diff --git a/docs/images/visualize_button.png b/docs/images/visualize_button.png new file mode 100644 index 000000000..dae2c4c8e Binary files /dev/null and b/docs/images/visualize_button.png differ diff --git a/examples/autoencoder/README.md b/examples/autoencoder/README.md index da46652d2..7a6e7963e 100644 --- a/examples/autoencoder/README.md +++ b/examples/autoencoder/README.md @@ -1,10 +1,11 @@ -# Training an image autoencoder with DIGITS and Torch7 +# Training an image autoencoder with DIGITS Table of Contents ================= * [Introduction](#introduction) * [Dataset creation](#dataset-creation) -* [Model definition](#model-creation) +* [Model definition (Torch)](#model-creation-torch) +* [Model definition (Tensorflow)](#model-creation-tensorflow) * [Verification](#verification) ## Introduction @@ -34,7 +35,7 @@ In the generic dataset creation form you need to provide the paths to the train ![Create generic dataset form](create-generic-dataset-form.png) -## Model creation +## Model creation (Torch) Now that you have a generic dataset to train on, you may create a generic model by clicking on `New Model\Images\Other` on the main page: @@ -44,7 +45,7 @@ Select the dataset you just created and under the `Custom network` tab, select ` ```lua local autoencoder = nn.Sequential() autoencoder:add(nn.MulConstant(0.00390625)) -autoencoder:add(nn.View(-1):setNumInputDims(3)) -- 1*28*8 -> 784 +autoencoder:add(nn.View(-1):setNumInputDims(3)) -- 1*28*28 -> 784 autoencoder:add(nn.Linear(784,300)) autoencoder:add(nn.ReLU()) autoencoder:add(nn.Linear(300,50)) @@ -90,6 +91,44 @@ After training for 30 epochs the loss function should look similar to this: ![Training loss](training-loss.png) +## Model creation (Tensorflow) + +The following example was made using TensorFlow-Slim. However you can also do this in vanilla Tensorflow and Keras + +Select the dataset you just created and under the `Custom network` tab, select `Tensorflow`. 
There you can paste the following network definition: +```python +# Tensorflow MNIST autoencoder model using TensorFlow-Slim +# The format of the data in this example is: batch_size * height * width * channel +class UserModel(Tower): + + @model_property + def inference(self): + + with slim.arg_scope([slim.fully_connected], + weights_initializer=tf.contrib.layers.xavier_initializer(), + weights_regularizer=slim.l2_regularizer(0.0005) ): + const = tf.constant(0.00390625) + model = tf.multiply(self.x, const) + model = tf.reshape(model, shape=[-1, 784]) # equivalent to `model = slim.flatten(_x)` + model = slim.fully_connected(model, 300, scope='fc1') + model = slim.fully_connected(model, 50, scope='fc2') + model = slim.fully_connected(model, 300, scope='fc3') + model = slim.fully_connected(model, 784, activation_fn=None, scope='fc4') + model = tf.reshape(model, shape=[-1, self.input_shape[0], self.input_shape[1], self.input_shape[2]]) + + # The below image summary makes it very easy to review your result + tf.summary.image(self.x.op.name, self.x, max_outputs=5, collections=['summaries']) + tf.summary.image(model.op.name, model, max_outputs=5, collections=['summaries']) + + return model + + @model_property + def loss(self): + return digits.mse_loss(self.inference, self.x) +``` + +The result from running the Tensorflow example should produce results that are similar to Torch. + ## Verification Now we can assess the quality of the autoencoder. On the model page, select an image from the MNIST test set (one that the network has never seen during training) and diff --git a/examples/autoencoder/autoencoder-TF.py b/examples/autoencoder/autoencoder-TF.py new file mode 100644 index 000000000..a850514ec --- /dev/null +++ b/examples/autoencoder/autoencoder-TF.py @@ -0,0 +1,34 @@ +# Tensorflow MNIST autoencoder model using TensorFlow-Slim +from model import Tower +from utils import model_property +import tensorflow as tf +import tensorflow.contrib.slim as slim +import utils as digits + + +class UserModel(Tower): + + @model_property + def inference(self): + + with slim.arg_scope([slim.fully_connected], + weights_initializer=tf.contrib.layers.xavier_initializer(), + weights_regularizer=slim.l2_regularizer(0.0005)): + const = tf.constant(0.00390625) + model = tf.multiply(self.x, const) + model = tf.reshape(model, shape=[-1, 784]) # equivalent to `model = slim.flatten(_x)` + model = slim.fully_connected(model, 300, scope='fc1') + model = slim.fully_connected(model, 50, scope='fc2') + model = slim.fully_connected(model, 300, scope='fc3') + model = slim.fully_connected(model, 784, activation_fn=None, scope='fc4') + model = tf.reshape(model, shape=[-1, self.input_shape[0], self.input_shape[1], self.input_shape[2]]) + + # The below image summary makes it very easy to review your result + tf.summary.image(self.x.op.name, self.x, max_outputs=5, collections=['summaries']) + tf.summary.image(model.op.name, model, max_outputs=5, collections=['summaries']) + + return model + + @model_property + def loss(self): + return digits.mse_loss(self.inference, self.x) diff --git a/examples/binary-segmentation/README.md b/examples/binary-segmentation/README.md index a2c46aaea..322accba7 100644 --- a/examples/binary-segmentation/README.md +++ b/examples/binary-segmentation/README.md @@ -61,7 +61,7 @@ Set the number of training epochs to `150`. ### Using Caffe Select the `Custom network` tab then click `Caffe`. -In the text area copy/paste the contents of this [Caffe model](segmentation-model.prototxt). 
+In the text area, copy/paste the contents of this [Caffe model](segmentation-model.prototxt). Set the base learning rate to `1e-7`. You may click `Visualize` to review the network topology: @@ -71,9 +71,15 @@ You may click `Visualize` to review the network topology: ### Using Torch7 Select the `Custom network` tab then click `Torch`. -In the text area copy/paste the contents of this [Torch model](segmentation-model.lua). +In the text area, copy/paste the contents of this [Torch model](segmentation-model.lua). Set the base learning rate to `0.001`. +### Using Tensorflow + +Select the `Custom netowrk` tab then click `Tensorflow`, +In the text area, copy/paste the contents of this [Tensorflow model](binary_segmentation-TF.py). +Set the base learning rate to `1e-5` + ### Some words on the model The proposed network is a simple Fully Convolutional Network (FCN). @@ -99,6 +105,8 @@ The `create_images.py` script created many images we can use for testing. You may also run the command again to produce new images. To help visualize the output of the network, select the `Image` visualization method and set `Pixel Conversion` to `clip`: +For Tensorflow only, select `HWC` for data order + ![select visualization](select-visualization.png) You can test images individually with `Test One`: diff --git a/examples/binary-segmentation/binary_segmentation-TF.py b/examples/binary-segmentation/binary_segmentation-TF.py new file mode 100644 index 000000000..6dc756c5d --- /dev/null +++ b/examples/binary-segmentation/binary_segmentation-TF.py @@ -0,0 +1,32 @@ +# Tensorflow Triangle binary segmentation model using TensorFlow-Slim +from model import Tower +from utils import model_property +import tensorflow as tf +import tensorflow.contrib.slim as slim +import utils as digits + + +class UserModel(Tower): + + @model_property + def inference(self): + _x = tf.reshape(self.x, shape=[-1, self.input_shape[0], self.input_shape[1], self.input_shape[2]]) + with slim.arg_scope([slim.conv2d, slim.conv2d_transpose], + weights_initializer=tf.contrib.layers.xavier_initializer(), + weights_regularizer=slim.l2_regularizer(0.05)): + + # 1*H*W -> 32*H*W + model = slim.conv2d(_x, 32, [3, 3], padding='SAME', scope='conv1') + # 32*H*W -> 1024*H/16*W/16 + model = slim.conv2d(model, 1024, [16, 16], padding='VALID', scope='conv2', stride=16) + model = slim.conv2d_transpose(model, self.input_shape[2], [16, 16], + stride=16, padding='VALID', activation_fn=None, scope='deconv_1') + return model + + @model_property + def loss(self): + y = tf.reshape(self.y, shape=[-1, self.input_shape[0], self.input_shape[1], self.input_shape[2]]) + + # For a fancy tensorboard summary: put the input, label and model side by side (sbs) for a fancy image summary: + # tf.summary.image(sbs.op.name, sbs, max_outputs=3, collections=["training summary"]) + return digits.mse_loss(self.inference, y) diff --git a/examples/binary-segmentation/segmentation-model.lua b/examples/binary-segmentation/segmentation-model.lua index 167795066..6f42b08d6 100644 --- a/examples/binary-segmentation/segmentation-model.lua +++ b/examples/binary-segmentation/segmentation-model.lua @@ -11,7 +11,7 @@ return function(params) end if pcall(function() require('cudnn') end) then - print('Using CuDNN backend') + --print('Using CuDNN backend') backend = cudnn convLayer = cudnn.SpatialConvolution convLayerName = 'cudnn.SpatialConvolution' diff --git a/examples/fine-tuning/README.md b/examples/fine-tuning/README.md index 6ae1aeca2..1864e82ae 100644 --- a/examples/fine-tuning/README.md +++ 
b/examples/fine-tuning/README.md @@ -70,6 +70,12 @@ The description is similar to the standard LeNet model except for the following - the `fineTuneHook` hook sets the `accGradParameters` fields of the original layers to `nil` in order to keep the existing features unchanged - the `fineTuneHook` adds a `nn.Linear(10, 2)` layer on top of the network +### Using Tensorflow + +In the `Custom network` text area, paste the contents of this [python file](lenet-fine-tune-tf.py). +The description is similar to the standard LeNet model except for the following differences: +- the weight and bias with the name `out` had their tensorflow variable renamed to `wout_not_in_use` and `bout_not_in_use`. This signifies that these weights will not be used and the original weights are frozen + ## Verification Now if you give your network a name and click `Create` the loss function should go down very sharply and the validation accuracy should exceed 90%: diff --git a/examples/fine-tuning/lenet-fine-tune-tf.py b/examples/fine-tuning/lenet-fine-tune-tf.py new file mode 100644 index 000000000..d7552b8cb --- /dev/null +++ b/examples/fine-tuning/lenet-fine-tune-tf.py @@ -0,0 +1,92 @@ +from model import Tower +from utils import model_property +import tensorflow as tf +import utils as digits + + +class UserModel(Tower): + + @model_property + def inference(self): + + # Create some wrappers for simplicity + def conv2d(x, W, b, s, name, padding='SAME'): + # Conv2D wrapper, with bias and relu activation + x = tf.nn.conv2d(x, W, strides=[1, s, s, 1], padding=padding, name=name + '_conv2d') + x = tf.nn.bias_add(x, b, name=name + 'bias_add') + return tf.nn.relu(x) + + def maxpool2d(x, k, s, name, padding='VALID'): + # MaxPool2D wrapper + return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, s, s, 1], + padding=padding, name=name + '_maxpool2d') + + # Create model + def conv_net(x, weights, biases): + # scale (divide by MNIST std) + x = x * 0.0125 + + # Convolution Layer + conv1 = conv2d(x, weights['wc1'], biases['bc1'], s=1, name='CONV1', padding='VALID') + # Max Pooling (down-sampling) + conv1 = maxpool2d(conv1, k=2, s=2, name='CONV1', padding='VALID') + + # Convolution Layer + conv2 = conv2d(conv1, weights['wc2'], biases['bc2'], s=1, name='CONV2', padding='VALID') + # Max Pooling (down-sampling) + conv2 = maxpool2d(conv2, k=2, s=2, name='CONV2', padding='VALID') + + # Fully connected layer + # Reshape conv2 output to fit fully connected layer input + fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]], name="FC1_reshape") + fc1 = tf.add(tf.matmul(fc1, weights['wd1'], name='FC1_multi'), biases['bd1'], name='FC1_add') + fc1 = tf.nn.relu(fc1, name='FC1_relu') + + # Apply Dropout + if self.is_training: + fc1 = tf.nn.dropout(fc1, 0.5, name='FC1_drop') + + # Output, class prediction + out = tf.add(tf.matmul(fc1, weights['out'], name='OUT_multi'), biases['out'], name='OUT_add') + + true_out = tf.add(tf.matmul(out, weights['true_out'], name='OUT_multi'), + biases['true_out'], name='TRUE_OUT_add') + return true_out + + # Store layers weight & bias + weights = { + # 5x5 conv, 1 input, 20 outputs + 'wc1': tf.get_variable('wc1', [5, 5, self.input_shape[2], 20], + initializer=tf.contrib.layers.xavier_initializer()), + # 5x5 conv, 20 inputs, 50 outputs + 'wc2': tf.get_variable('wc2', [5, 5, 20, 50], initializer=tf.contrib.layers.xavier_initializer()), + # fully connected, 4*4*16=800 inputs, 500 outputs + 'wd1': tf.get_variable('wd1', [4*4*50, 500], initializer=tf.contrib.layers.xavier_initializer()), + # 500 inputs, 
10 outputs (class prediction) + 'out': tf.get_variable('wout_not_in_use', [500, 10], initializer=tf.contrib.layers.xavier_initializer()), + # adjust from 10 classes to 2 output + 'true_out': tf.get_variable('twout', [10, 2], initializer=tf.contrib.layers.xavier_initializer()) + } + + self.weights = weights + + # Leave the intial biases zero + biases = { + 'bc1': tf.get_variable('bc1', [20], initializer=tf.constant_initializer(0.0)), + 'bc2': tf.get_variable('bc2', [50], initializer=tf.constant_initializer(0.0)), + 'bd1': tf.get_variable('bd1', [500], initializer=tf.constant_initializer(0.0)), + 'out': tf.get_variable('bout_not_in_use', [10], initializer=tf.constant_initializer(0.0)), + 'true_out': tf.get_variable('tbout', [2], initializer=tf.constant_initializer(0.0)) + } + + self.biases = biases + + model = conv_net(self.x, weights, biases) + return model + + @model_property + def loss(self): + loss = digits.classification_loss(self.inference, self.y) + accuracy = digits.classification_accuracy(self.inference, self.y) + self.summaries.append(tf.summary.scalar(accuracy.op.name, accuracy)) + return loss diff --git a/examples/fine-tuning/lenet-fine-tune.lua b/examples/fine-tuning/lenet-fine-tune.lua index 56b8886bc..24c88c271 100644 --- a/examples/fine-tuning/lenet-fine-tune.lua +++ b/examples/fine-tuning/lenet-fine-tune.lua @@ -13,17 +13,17 @@ return function(params) end if pcall(function() require('cudnn') end) then - print('Using CuDNN backend') + --print('Using CuDNN backend') backend = cudnn convLayer = cudnn.SpatialConvolution convLayerName = 'cudnn.SpatialConvolution' else print('Failed to load cudnn backend (is libcudnn.so in your library path?)') if pcall(function() require('cunn') end) then - print('Falling back to legacy cunn backend') + --print('Falling back to legacy cunn backend') else - print('Failed to load cunn backend (is CUDA installed?)') - print('Falling back to legacy nn backend') + --print('Failed to load cunn backend (is CUDA installed?)') + --print('Falling back to legacy nn backend') end backend = nn -- works with cunn or nn convLayer = nn.SpatialConvolutionMM diff --git a/examples/gan/README.md b/examples/gan/README.md new file mode 100644 index 000000000..ad7e029a6 --- /dev/null +++ b/examples/gan/README.md @@ -0,0 +1,373 @@ +# Using DIGITS to train a Generative Adversarial Network + +Table of Contents +================= +* [Introduction](#introduction) +* [Preliminary installation steps](#preliminary-installation-steps) +* [Handwritten digits](#handwritten-digits) + * [Creating the dataset](#creating-the-dataset) + * [Training the model](#training-the-model) + * [Sampling the model](#sampling-the-model) + * [Training an encoder](#training-an-encoder) +* [Celebrity faces](#celebrity-faces) + * [Downloading the CelebA dataset](#downloading-the-celeba-dataset) + * [Creating the CelebA dataset](#creating-the-celeba-dataset) + * [Training the CelebA model](#training-the-celeba-model) + * [Training a CelebA encoder](#training-a-celeba-encoder) + * [Generating attribute vectors](#generating-attribute-vectors) + * [Sampling the CelebA model](#sampling-the-celeba-model) + * [Setting image attributes](#setting-image-attributes) + * [Analogy grid](#analogy-grid) + * [Embeddings visualization](#embeddings-visualization) + +**Disclaimer:** +This walk-through makes use of experimental features in DIGITS. +Please do not report issues on the main DIGITS fork. 
+ +## Introduction + +Generative Adversarial Networks (GAN) were introduced by Ian Goodfellow in [Generative Adversarial Networks](https://arxiv.org/abs/1406.2661), Goodfellow, 2014. +GANs learn a data probability distribution under unsupervised learning. +In practice a GAN learns to draw from a dataset probability distribution in order to generate data. +To this avail, a GAN comprises two pieces: a Generator that generates data and a Discriminator that learns to discriminate between "real" data (from the dataset) and "fake" data (those that were generated). +A latent representation of the data is learnt by ways of a feature vector called `z`. +Through a number of fully-connected and transpose convolutional layers, the generator learns to generate images from `z`. +During training, `z` is sampled from a random distribution. +During inference, `z` may be specified to generate images with carefully chosen attributes. + +The typical training loop comprises the following phases: +- optimize the discriminator on real samples (make it classify them as such), +- draw `z` from a random distribution and have the generator create the corresponding image, +- optimize the discriminator on generator samples (make it classify them as such), +- optimize the generator (make it fool the disciminator). + +Other papers of interest: +- [Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks](https://arxiv.org/abs/1511.06434), Radford, 2015. +- [Sampling Generative Networks](https://arxiv.org/abs/1609.04468), White, 2016. + +## Preliminary installation steps + +You will need a version of DIGITS that includes this document. + +Tensorflow may be installed by doing for example (refer to the Tensorflow homepage for more up-to-date install instructions): + +```sh +$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.1-cp27-none-linux_x86_64.whl +$ pip install --upgrade $TF_BINARY_URL +``` + +If you haven't done so already, install the top-level DIGITS module: +```sh +$ pip install -e $DIGITS_ROOT +``` + +The GAN data and visualization plugins for DIGITS may be installed by doing: +```sh +$ pip install -e $DIGITS_ROOT/plugins/data/gan/ +$ pip install -e $DIGITS_ROOT/plugins/view/gan/ +``` + +## Handwritten digits + +Here we will use the MNIST dataset. You don't need labels to train a GAN however if you do have labels, as is the case for MNIST, you can use them to train a **conditional** GAN. +In this example, we will condition our GAN on the class labels. +Conditioning a GAN in this way is useful because this allows us to dissociate classes from other learnable features that define the "style" of images. +In practice, in our network we will concatenate a one-hot representation of labels to the activations of every layer in both the generator and the discriminator. + +### Creating the dataset + +If you already followed the [GettingStarted](../../docs/GettingStarted.md) example, you should already have a classification dataset for MNIST. +We will reuse the LMDB files that were already created for the MNIST classification dataset to create a new generic dataset. +Creating a generic dataset is required here because GANs do not fall into the category of classification datasets and require specific plug-ins. 
+ +Open the classification dataset page and make good note of the job directory for your MNIST classification dataset: + +![mnist classification dataset](mnist-classification-dataset.png) + +On the home page, click `New Dataset>Images>Other`. +Then specify the path to the training images LMDB and the path to the mean image protobuf file: + +![create generic MNIST](create-mnist-generic-dataset.png) + +Give your dataset a name and click "Create". + +### Training the model + +On the home page, click `New Model>Images>Other`. +- select your dataset, +- set the number of epochs to 60, +- set mean subtraction to `None`, +- select `ADAM` solver, +- select learning rate to `2e-4`, +- use only 1 GPU for this model. + +In the `custom network` tab, select `Tensorflow` and copy-paste this [network definition](./network-mnist.py). + +You can click `Visualize` to browse the model graph (this only works in Chrome): + +![mnist graph](mnist-graph.png) + +Name your model `GAN-MNIST` and click `Create`. + +> Note: when training a neural network it is typical to expect the loss to go down and see there an indication that the model is learning well. +This is not the case in a typical GAN. +If the loss of the discriminator is very low, this means that the generator is not doing a good job at fooling the discriminator. +Conversely, if the loss of the generator is too low, this means the discriminator is not doing a good job at detecting fake samples. +In a balanced set-up, equilibrium is reached when the generator can fool the discriminator in 50% of cases. +From the definition of the cross entropy loss this corresponds to a loss value of `-math.log(0.5)=0.69`. + +![mnist loss](mnist-loss.png) + +Also note that we've added a metric in this model to track the `Chi-square` distance between the histogram of real pixel values and the histogram of generated pixel values. +This is used as a proxy indication that the generator has learnt a good propability distribution. +Getting this distance to go low is a necessary yet insufficient condition. +Note however how the `Chi-square` metric is going down below: + +![mnist chi square](mnist-chi-square.png) + +You can also open Tensorboard and point to the `tb` sub-folder of the job directory to see samples of generated and real images (under the `images` tab). + +To start Tensorboard: + +```sh +$ tensorboard --logdir /tb/ +``` + +To open Tensorboard in a browser, open `http://localhost:6006`. + +In the below image, the first row shows generated images. +The second row shows real images (from the dataset). +This is updated periodically during training so you can monitor how well the network is doing. + +![MNIST TB samples](mnist-tb-samples.png) + +### Sampling the model + +We will see how to sample `z` to produce images. +On the model page, select the `GAN` inference method and the `GAN` inference form. +In the inference form, select the `MNIST Class sweep` task. + +![MNIST class sweep inference form](mnist-inference-form-class-sweep.png) + +Click `Test`. +This shows a grid of digits, all of them were sampled using the same randomly generated `z`. +The `z` vector is then concatenated with various shades of labels, using spherical interpolation. +Every column shows how a digit is slowly morphing into the next digit: + +![MNIST class sweep](mnist-class-sweep.png) + +This can also be visualized with an animated gif: + +![MNIST Animated](mnist-animated.gif) + +Now in the inference form, select `MNIST Style sweep` and click `Test`. +This shows a new grid of digits. 
+Every column shows how a digit is slowly morphing from one "style" (i.e. one randomly generated z vector) into another style: + +![MNIST style sweep](mnist-style-sweep.png) + +### Training an encoder + +In order to avoid picking `z` randomly, we can train an encoder to generate the `z` vector, given an input image. +Here we will fine-tune the discriminator such that it becomes an encoder: +- we replace the last layer of the discriminator with a fully-connected layer that has as many neurons as there are numbers in `z` (100 in our case), +- we reverse the order of things in our GAN: the output of the discriminator/encoder is now connected to the input of the generator, +- we change the loss function: we are now using the L2 distance between the generated image and the image in the dataset. + +> Note: the parameters of the generator must be frozen when training the encoder. + +To train the encoder, do this: +- clone the `GAN-MNIST` model +- in the `Previous networks` tab, select the GAN model, select the last epoch then click `Customize` +- use this [network description](./network-mnist-encoder.py) + +Name your model `GAN-MNIST-Encoder` and click `Create`. + +Notice that the loss converges smoothly towards a low plateau: + +![MNIST encoder loss](mnist-encoder-loss.png) + +Now that we have an encoder we can encode an image and find the corresponding `z` vector. +On the `GAN-MNIST-Encoder` page, +- select the `GAN` visualization method and the `MNIST encoder` task, +- upload an image of a `3` +- click `Test One` + +![MNIST encode image form](mnist-encode-image-form.png) + +>Note: in this GAN-encoder, class "3" is hard-coded in the encoder model **during inference**. +If you want to encode another class you will need to manually update this line in the model description: `self.y = tf.to_int32(3*tf.ones(shape=[self.batch_size]))` + +On the resulting page, you can see the input image (left), the reconstructed image (middle), and the corresponding `z` vector (right): + +![MNIST encode image](mnist-encode-image.png) + +Now that we have a `z` vector, we can do a class sweep for that particular "style": +- copy the encoded `z` vector from the encoder. +- move to the `GAN-MNIST` model page. +- select the `GAN` visualization method and the `Grid` task, +- select the `GAN` inference form and the `Class sweep` task, +- paste the encoded `z` vector into the field: + +![MNIST styled class sweep form](mnist-styled-class-sweep-form.png) + +Now click `Test` and you will see a class sweep using the particular style that you specified: + +![MNIST styled class sweep](mnist-styled-class-sweep.png) + +## Celebrity faces + +### Downloading the CelebA dataset + +The Celebrity Faces (a.k.a. "CelebA") dataset is available from this [location](http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html). +Download [img/img_align_celeba.zip](https://drive.google.com/drive/folders/0B7EVK8r0v71pTUZsaXdaSnZBZzg) and [Anno/list_attr_celeba.txt](https://drive.google.com/drive/folders/0B7EVK8r0v71pOC0wOVZlQnFfaGs). +Extract the ZIP file into a local folder. + +### Creating the CelebA dataset + +On the home page, click `New dataset>Images>GAN`. +Specify the location of the attributes and images: + +![celeba dataset creation](celeba-dataset-form.png). + +Leave other settings in their default state, name your dataset `CelebA-64` then click `Create`. + +You can explore the dataset using the `Explore` button on the model page. 
+See below for some image samples: + +![celeba samples](exploring-celeba.png) + +### Training the CelebA model + +This model is very similar to the MNIST one but differs slightly: +- it receives colour 64x64 images, +- it has a few more layers, +- it is not conditioned on labels. + +On the home page, click `New Model>Images>Other`. +- select your dataset, +- set the number of epochs to 60, +- set the batch size to 64, +- set mean subtraction to `None`, +- select `ADAM` solver, +- select learning rate to `5e-4`, +- use only 1 GPU for this model. + +In the `custom network` tab, select `Tensorflow` and copy-paste this [network definition](./network-celebA.py). + +Name your model `GAN-CelebA` then click create. + +You may notice that the learning curves don't converge towards 0.69 as smoothly as in the MNIST case. +This is because the generator is overpowered by the discriminator. +This is not necessarily an issue as we will see later. + +![CelebA loss](celeba-loss.png) + +### Training a CelebA encoder + +Proceed as in the MNIST example but use this [network description](./network-celebA-encoder.py). + +### Generating attribute vectors + +The CelebA dataset comes with 40 different attributes for each image. +We can use these labels to generate characteristic `z` vectors for each attribute. +A way to do this is to compute the average `z` vector for images that have the attribute and subtract the average `z` vector for images that do not have the attribute. +This can be done with the `gan_features.py` script: + +```sh +$ export DIGITS_JOBS_DIR=/path/to/digits/jobs +$ export GAN_ENCODER_JOB_ID=20170202-100209-72af # replace this with the Job ID of your GAN-CelebA-Encoder +$ ./examples/gan/gan_features.py -j $DIGITS_JOBS_DIR $GAN_ENCODER_JOB_ID -g 0 +``` + +Running the above command will sweep through the 200k images in the dataset and create a file named `attributes_z.pkl` that includes the 40 characteristic `z` vectors. + +### Sampling the CelebA model + +#### Setting image attributes + +You can find `z` vectors by encoding images from the dataset through the GAN-CelebA-Encoder model. +- move to the model page for your `GAN-CelebA-Encoder`, +- select the `GAN` visualization method, select the `CelebA Encoder` task, +- select the `GAN` inference form, select the `Encode list` task, +- specify the path to your attributes text file and the path to the image folder. + +![CelebA encode list form](celeba-encode-list-form.png) + +Click `Test`. You may see something like: + +![CelebA encode list](celeba-encode-list.png). + +Every row is showing the input image (left), the reconstructed image (center) and the corresponding z vector (right). + +Now if you pick a `z` vector, you can add/remove attributes to this image: +- open the `GAN-CelebA` model page, +- select the `Image output` visualization method and set the `Data order` to `HWC`: + +![CelebA select image output](celeba-select-image-output.png) + +- select the `GAN` inference form and select the `CelebA - add/remove attributes` task, +- specify the location of the attributes file you created with `gan_features.py`. +- paste the `z` vector you found when using the `Encode list` task above. +- click `Add row` a number of times to create new rows. +Each row will generate an image with the corresponding attributes. +If you leave all cells in a row blank, you will get the original image. +If you set `Black Hair` to `+1` and `Blond Hair` to `-1`, this will transform a blond person into a person with dark hair. 
+If you set `Smiling` to `-1` this will make a smiling person... not smile. + +See for example: + +![CelebA set attributes form](celeba-set-attributes-form.png) + +This will generate these images: + +![CelebA attributes](celeba-set-attributes.png) + +#### Analogy grid + +If you pick 3 `z` vectors you can generate an analogy similar to `king - man + woman = queen`. +To experience this: +- select the `GAN` visualization method, select the `Grid` task, +- select the `GAN` inference form, select the `CelebA - analogy` task, +- set the `source`, `sink 1` and `sink 2` vectors. + +This will create a grid with the following analogy: `destination = sink 1 + sink 2 - source` with: +- `source` in top-left corner, +- `sink 1` in top-right corner, +- `sink 2` in bottom-left corner, +- `destination` in bottom-right corner. + +![CelebA analogy form](celeba-analogy-form.png) + +This might result in a grid like this: + +``` + man with blond hair looking straight ++ woman with dark hair looking to her left +- woman with blond hair looking straight += man with dark hair looking slightly to his left +``` + +Pretty good, heh? + +![CelebA analogy](celeba-analogy.png) + +The grid can also be visualized through an animated image: + +![animated grid](gan-grid-animated.gif) + +### Embeddings visualization + +You might have noticed another byproduct of the `gan_features.py` script: a file named `embeddings.pkl`. +This file contains `z` vectors for the first 10k images in the CelebA dataset. +We can use this to display image embeddings in Tensorboard: + +```sh +$ ./gan_embeddings.py embeddings.pkl +$ tensorboard --logdir ./gan-tb/ +``` + +Now open a browser on `http://localhost:6006`. +In the `Embeddings` tab you will see something similar to this: + +![animated embeddings](celeba-embeddings.gif) diff --git a/examples/gan/celeba-analogy-form.png b/examples/gan/celeba-analogy-form.png new file mode 100644 index 000000000..4cf71077c Binary files /dev/null and b/examples/gan/celeba-analogy-form.png differ diff --git a/examples/gan/celeba-analogy.png b/examples/gan/celeba-analogy.png new file mode 100644 index 000000000..8ffbf4272 Binary files /dev/null and b/examples/gan/celeba-analogy.png differ diff --git a/examples/gan/celeba-dataset-form.png b/examples/gan/celeba-dataset-form.png new file mode 100644 index 000000000..1d1b4ebd3 Binary files /dev/null and b/examples/gan/celeba-dataset-form.png differ diff --git a/examples/gan/celeba-embeddings.gif b/examples/gan/celeba-embeddings.gif new file mode 100644 index 000000000..f303e29e6 Binary files /dev/null and b/examples/gan/celeba-embeddings.gif differ diff --git a/examples/gan/celeba-encode-list-form.png b/examples/gan/celeba-encode-list-form.png new file mode 100644 index 000000000..ab0f95b1b Binary files /dev/null and b/examples/gan/celeba-encode-list-form.png differ diff --git a/examples/gan/celeba-encode-list.png b/examples/gan/celeba-encode-list.png new file mode 100644 index 000000000..900a83364 Binary files /dev/null and b/examples/gan/celeba-encode-list.png differ diff --git a/examples/gan/celeba-loss.png b/examples/gan/celeba-loss.png new file mode 100644 index 000000000..cb2ed81dd Binary files /dev/null and b/examples/gan/celeba-loss.png differ diff --git a/examples/gan/celeba-select-image-output.png b/examples/gan/celeba-select-image-output.png new file mode 100644 index 000000000..2b932a79a Binary files /dev/null and b/examples/gan/celeba-select-image-output.png differ diff --git a/examples/gan/celeba-set-attributes-form.png 
b/examples/gan/celeba-set-attributes-form.png new file mode 100644 index 000000000..6dbc9d401 Binary files /dev/null and b/examples/gan/celeba-set-attributes-form.png differ diff --git a/examples/gan/celeba-set-attributes.png b/examples/gan/celeba-set-attributes.png new file mode 100644 index 000000000..8ce31fd5d Binary files /dev/null and b/examples/gan/celeba-set-attributes.png differ diff --git a/examples/gan/create-mnist-generic-dataset.png b/examples/gan/create-mnist-generic-dataset.png new file mode 100644 index 000000000..020fea509 Binary files /dev/null and b/examples/gan/create-mnist-generic-dataset.png differ diff --git a/examples/gan/create-mnist-model.png b/examples/gan/create-mnist-model.png new file mode 100644 index 000000000..1f7927586 Binary files /dev/null and b/examples/gan/create-mnist-model.png differ diff --git a/examples/gan/exploring-celeba.png b/examples/gan/exploring-celeba.png new file mode 100644 index 000000000..721495afd Binary files /dev/null and b/examples/gan/exploring-celeba.png differ diff --git a/examples/gan/gan-grid-animated.gif b/examples/gan/gan-grid-animated.gif new file mode 100644 index 000000000..d38d53cc7 Binary files /dev/null and b/examples/gan/gan-grid-animated.gif differ diff --git a/examples/gan/gan_embeddings.py b/examples/gan/gan_embeddings.py new file mode 100755 index 000000000..127033912 --- /dev/null +++ b/examples/gan/gan_embeddings.py @@ -0,0 +1,88 @@ +#!/usr/bin/env python2 +# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + +import argparse +import os +import pickle +import shutil + +import numpy as np +import PIL.Image +import tensorflow as tf +from tensorflow.contrib.tensorboard.plugins import projector + +TB_DIR = os.path.join(os.getcwd(), "gan-tb") +SPRITE_IMAGE_FILENAME = os.path.join(TB_DIR, "sprite.png") + + +def save_tb_embeddings(embeddings_filename): + f = open(embeddings_filename, 'rb') + embeddings = pickle.load(f) + + images = embeddings['images'] + zs = embeddings['zs'] + + # overwrite Tensorboard log dir if necessary + if os.path.exists(TB_DIR): + shutil.rmtree(TB_DIR) + os.makedirs(TB_DIR) + + # create grid image + img_width, img_height = save_sprite_image(images) + + with tf.device('cpu:0'): + # create embedding var + embedding_var = tf.Variable(initial_value=zs) + + # save projector config + summary_writer = tf.summary.FileWriter(TB_DIR) + config = projector.ProjectorConfig() + embedding = config.embeddings.add() + embedding.tensor_name = embedding_var.name + embedding.sprite.image_path = SPRITE_IMAGE_FILENAME + embedding.sprite.single_image_dim.extend([img_width, img_height]) + projector.visualize_embeddings(summary_writer, config) + + # save embeddings + sess = tf.Session() + sess.run(embedding_var.initializer) + saver = tf.train.Saver([embedding_var]) + saver.save(sess, os.path.join(TB_DIR, 'model.ckpt')) + + +def save_sprite_image(images): + n_embeddings = images.shape[0] + grid_cols = int(np.sqrt(n_embeddings)) + grid_rows = int(np.ceil(float(n_embeddings) / grid_cols)) + img_height, img_width, img_channels = images[0].shape + grid_image = np.empty((img_height * grid_rows, img_width * grid_cols, img_channels)) + for i, image in enumerate(images): + row = i / grid_cols + col = i % grid_cols + x = img_width * col + y = img_height * row + grid_image[y:y + img_height, x:x + img_width] = image + grid_image = PIL.Image.fromarray(grid_image.astype('uint8')) + grid_image.save(SPRITE_IMAGE_FILENAME) + return img_width, img_height + + +if __name__ == '__main__': + + parser = 
argparse.ArgumentParser(description='Inference tool - DIGITS') + + # Positional arguments + + parser.add_argument( + 'embeddings_file', + help='Embeddings pickle file') + + args = vars(parser.parse_args()) + + try: + save_tb_embeddings( + args['embeddings_file'], + ) + except Exception as e: + print('%s: %s' % (type(e).__name__, e.message)) + raise diff --git a/examples/gan/gan_features.py b/examples/gan/gan_features.py new file mode 100755 index 000000000..40702a43a --- /dev/null +++ b/examples/gan/gan_features.py @@ -0,0 +1,247 @@ +#!/usr/bin/env python2 +# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + +import argparse +import logging +import numpy as np +import pickle +import PIL.Image +import os +import sys +try: + from cStringIO import StringIO +except ImportError: + from StringIO import StringIO + +# Add path for DIGITS package +sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))) +import digits.config # noqa +from digits import utils, log # noqa +from digits.inference.errors import InferenceError # noqa +from digits.job import Job # noqa +from digits.utils.lmdbreader import DbReader # noqa + +# Import digits.config before caffe to set the path +import caffe_pb2 # noqa + +logger = logging.getLogger('digits.tools.inference') + +# number of image embeddings to store +N_EMBEDDINGS = 10000 + + +def parse_datum(value): + """ + Parse a Caffe datum + """ + datum = caffe_pb2.Datum() + datum.ParseFromString(value) + if datum.encoded: + s = StringIO() + s.write(datum.data) + s.seek(0) + img = PIL.Image.open(s) + img = np.array(img) + else: + import caffe.io + arr = caffe.io.datum_to_array(datum) + # CHW -> HWC + arr = arr.transpose((1, 2, 0)) + if arr.shape[2] == 1: + # HWC -> HW + arr = arr[:, :, 0] + elif arr.shape[2] == 3: + # BGR -> RGB + # XXX see issue #59 + arr = arr[:, :, [2, 1, 0]] + img = arr + return img + + +def save_attributes(attributes): + """ + Save attribute vectors + """ + zs = np.zeros(attributes['positive_attribute_z'].shape) + for i in xrange(attributes['n_attributes']): + zs[i] = attributes['positive_attribute_z'][i] / attributes['positive_count'][i] \ + - attributes['negative_attribute_z'][i] / attributes['negative_count'][i] + output = open('attributes_z.pkl', 'wb') + pickle.dump(zs, output) + + +def save_embeddings(embeddings): + filename = 'embeddings.pkl' + logger.info('Saving embeddings to %s...' 
% filename) + output = open(filename, 'wb') + pickle.dump(embeddings, output) + + +def infer(jobs_dir, + model_id, + epoch, + batch_size, + gpu): + """ + Perform inference on a list of images using the specified model + """ + # job directory defaults to that defined in DIGITS config + if jobs_dir == 'none': + jobs_dir = digits.config.config_value('jobs_dir') + + # load model job + model_dir = os.path.join(jobs_dir, model_id) + assert os.path.isdir(model_dir), "Model dir %s does not exist" % model_dir + model = Job.load(model_dir) + + # load dataset job + dataset_dir = os.path.join(jobs_dir, model.dataset_id) + assert os.path.isdir(dataset_dir), "Dataset dir %s does not exist" % dataset_dir + dataset = Job.load(dataset_dir) + for task in model.tasks: + task.dataset = dataset + + # retrieve snapshot file + task = model.train_task() + snapshot_filename = None + epoch = float(epoch) + if epoch == -1 and len(task.snapshots): + # use last epoch + epoch = task.snapshots[-1][1] + snapshot_filename = task.snapshots[-1][0] + else: + for f, e in task.snapshots: + if e == epoch: + snapshot_filename = f + break + if not snapshot_filename: + raise InferenceError("Unable to find snapshot for epoch=%s" % repr(epoch)) + + input_data = [] # sample data + input_labels = [] # sample labels + + # load images from database + feature_db_path = dataset.get_feature_db_path(utils.constants.TRAIN_DB) + feature_reader = DbReader(feature_db_path) + + label_db_path = dataset.get_label_db_path(utils.constants.TRAIN_DB) + label_reader = DbReader(label_db_path) + + embeddings = {'count': 0, 'images': None, 'zs': None} + + def aggregate(images, labels, attributes, embeddings): + # perform inference + outputs = model.train_task().infer_many( + images, + snapshot_epoch=epoch, + gpu=gpu, + resize=False) + z_vectors = outputs['output'][:, :100] + for image, label, z in zip(images, labels, z_vectors): + if embeddings['images'] is None: + embeddings['images'] = np.empty((N_EMBEDDINGS,) + image.shape) + if embeddings['zs'] is None: + embeddings['zs'] = np.empty((N_EMBEDDINGS,) + z.shape) + if embeddings['count'] < N_EMBEDDINGS: + embeddings['images'][embeddings['count']] = image + embeddings['zs'][embeddings['count']] = z + embeddings['count'] += 1 + if embeddings['count'] == N_EMBEDDINGS: + save_embeddings(embeddings) + + for attribute in range(attributes['n_attributes']): + if label[attribute] > 0: + attributes['positive_attribute_z'][attribute] += z + attributes['positive_count'][attribute] += 1 + else: + attributes['negative_attribute_z'][attribute] += z + attributes['negative_count'][attribute] += 1 + # save + save_attributes(attributes) + + n_input_samples = 0 + label_len = None + z_dim = 100 + for key, value in feature_reader.entries(): + img = parse_datum(value) + label = parse_datum(label_reader.entry(key))[0] + if label_len is None: + label_len = len(label) + attributes = { + 'n_attributes': label_len, + 'negative_count': np.zeros(label_len), + 'positive_count': np.zeros(label_len), + 'negative_attribute_z': np.zeros((label_len, z_dim)), + 'positive_attribute_z': np.zeros((label_len, z_dim)), + } + elif label_len != len(label): + raise ValueError("label len differs: %d vs %d" % (label_len, len(label))) + input_data.append(img) + input_labels.append(label) + n_input_samples = n_input_samples + 1 + if n_input_samples % batch_size == 0: + aggregate(input_data, input_labels, attributes, embeddings) + print("######## %d processed ########" % n_input_samples) + input_data = [] # sample data + input_labels = [] # sample labels 
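+    # any leftover samples that do not fill a complete batch are aggregated one last time below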
+ + if n_input_samples % batch_size != 0: + aggregate(input_data, input_labels, attributes, embeddings) + print("######## %d processed ########" % n_input_samples) + +if __name__ == '__main__': + + parser = argparse.ArgumentParser(description='Inference tool - DIGITS') + + # Positional arguments + + parser.add_argument( + 'model', + help='Model ID') + + # Optional arguments + parser.add_argument( + '-e', + '--epoch', + default='-1', + help="Epoch (-1 for last)" + ) + + parser.add_argument( + '-j', + '--jobs_dir', + default='none', + help='Jobs directory (default: from DIGITS config)', + ) + + parser.add_argument( + '-b', + '--batch_size', + type=int, + default=1024, + help='Batch size', + ) + + parser.add_argument( + '-g', + '--gpu', + type=int, + default=None, + help='GPU to use (as in nvidia-smi output, default: None)', + ) + + parser.set_defaults(resize=True) + + args = vars(parser.parse_args()) + + try: + infer( + args['jobs_dir'], + args['model'], + args['epoch'], + args['batch_size'], + args['gpu'], + ) + except Exception as e: + logger.error('%s: %s' % (type(e).__name__, e.message)) + raise diff --git a/examples/gan/mnist-animated.gif b/examples/gan/mnist-animated.gif new file mode 100644 index 000000000..d1719763f Binary files /dev/null and b/examples/gan/mnist-animated.gif differ diff --git a/examples/gan/mnist-chi-square.png b/examples/gan/mnist-chi-square.png new file mode 100644 index 000000000..af19cb4a3 Binary files /dev/null and b/examples/gan/mnist-chi-square.png differ diff --git a/examples/gan/mnist-class-sweep.png b/examples/gan/mnist-class-sweep.png new file mode 100644 index 000000000..4e47d8dab Binary files /dev/null and b/examples/gan/mnist-class-sweep.png differ diff --git a/examples/gan/mnist-classification-dataset.png b/examples/gan/mnist-classification-dataset.png new file mode 100644 index 000000000..045ac1e5f Binary files /dev/null and b/examples/gan/mnist-classification-dataset.png differ diff --git a/examples/gan/mnist-encode-image-form.png b/examples/gan/mnist-encode-image-form.png new file mode 100644 index 000000000..2e28dc948 Binary files /dev/null and b/examples/gan/mnist-encode-image-form.png differ diff --git a/examples/gan/mnist-encode-image.png b/examples/gan/mnist-encode-image.png new file mode 100644 index 000000000..d8493e95b Binary files /dev/null and b/examples/gan/mnist-encode-image.png differ diff --git a/examples/gan/mnist-encoder-loss.png b/examples/gan/mnist-encoder-loss.png new file mode 100644 index 000000000..1065b9497 Binary files /dev/null and b/examples/gan/mnist-encoder-loss.png differ diff --git a/examples/gan/mnist-graph.png b/examples/gan/mnist-graph.png new file mode 100644 index 000000000..197b30e9a Binary files /dev/null and b/examples/gan/mnist-graph.png differ diff --git a/examples/gan/mnist-inference-form-class-sweep.png b/examples/gan/mnist-inference-form-class-sweep.png new file mode 100644 index 000000000..7dd6e2dc8 Binary files /dev/null and b/examples/gan/mnist-inference-form-class-sweep.png differ diff --git a/examples/gan/mnist-loss.png b/examples/gan/mnist-loss.png new file mode 100644 index 000000000..f6cb49bf6 Binary files /dev/null and b/examples/gan/mnist-loss.png differ diff --git a/examples/gan/mnist-style-sweep.png b/examples/gan/mnist-style-sweep.png new file mode 100644 index 000000000..ea4fc8a52 Binary files /dev/null and b/examples/gan/mnist-style-sweep.png differ diff --git a/examples/gan/mnist-styled-class-sweep-form.png b/examples/gan/mnist-styled-class-sweep-form.png new file mode 100644 index 
000000000..1b85912d6 Binary files /dev/null and b/examples/gan/mnist-styled-class-sweep-form.png differ diff --git a/examples/gan/mnist-styled-class-sweep.png b/examples/gan/mnist-styled-class-sweep.png new file mode 100644 index 000000000..fb2112663 Binary files /dev/null and b/examples/gan/mnist-styled-class-sweep.png differ diff --git a/examples/gan/mnist-tb-samples.png b/examples/gan/mnist-tb-samples.png new file mode 100644 index 000000000..5d70bf67f Binary files /dev/null and b/examples/gan/mnist-tb-samples.png differ diff --git a/examples/gan/network-celebA-encoder.py b/examples/gan/network-celebA-encoder.py new file mode 100644 index 000000000..833633910 --- /dev/null +++ b/examples/gan/network-celebA-encoder.py @@ -0,0 +1,402 @@ +# The MIT License (MIT) +# +# Original work Copyright (c) 2016 Taehoon Kim +# Modified work Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import tensorflow as tf +from model import Tower +from utils import model_property + +image_summary = tf.summary.image +scalar_summary = tf.summary.scalar +histogram_summary = tf.summary.histogram +merge_summary = tf.summary.merge +SummaryWriter = tf.summary.FileWriter + + +class batch_norm(object): + """ + This class creates an op that composes the specified tensor with a batch + normalization layer. + """ + + def __init__(self, epsilon=1e-5, momentum=0.9, name="batch_norm"): + """Instance initialization""" + with tf.variable_scope(name): + self.epsilon = epsilon + self.momentum = momentum + self.name = name + + def __call__(self, x, train=True): + """ + Functional interface + + Args: + x: tensor to compose + train: set to True during training and False otherwise + """ + return tf.contrib.layers.batch_norm(x, + decay=self.momentum, + updates_collections=None, + epsilon=self.epsilon, + scale=True, + is_training=train, + scope=self.name) + + +def conv2d(input_, output_dim, + k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, + name="conv2d"): + """ + Compose specified symbol with 2D convolution layer + + Args: + input_: tensor to compose. Shape: [N, H, W, C] + output_dim: number of output features maps + k_h: kernel height + k_w: kernel width + d_h: horizontal stride + d_w: vertical stride + stddev: standard deviation of gaussian distribution to use for random weight initialization + name: name scope + + Returns: + Composed tensor. 
+ """ + with tf.variable_scope(name): + w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim], + initializer=tf.truncated_normal_initializer(stddev=stddev)) + conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME') + + biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0)) + conv = tf.nn.bias_add(conv, biases) + + return conv + + +def deconv2d(input_, output_shape, + k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, + name="deconv2d", with_w=False): + """ + Compose specified symbol with 2D *transpose* convolution layer + + Args: + input_: tensor to compose. Shape: [N, H, W, C] + output_shape: output shape + k_h: kernel height + k_w: kernel width + d_h: horizontal stride + d_w: vertical stride + stddev: standard deviation of gaussian distribution to use for random weight initialization + name: name scope + + Returns: + Composed tensor. + """ + with tf.variable_scope(name): + # filter : [height, width, output_channels, in_channels] + w = tf.get_variable('w', + [k_h, k_w, output_shape[-1], + input_.get_shape()[-1]], + initializer=tf.random_normal_initializer(stddev=stddev)) + deconv = tf.nn.conv2d_transpose(input_, w, + output_shape=output_shape, + strides=[1, d_h, d_w, 1]) + + biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0)) + deconv = tf.reshape(tf.nn.bias_add(deconv, biases), output_shape) + + if with_w: + return deconv, w, biases + else: + return deconv + + +def lrelu(x, leak=0.2, name="lrelu"): + """Compose specified tensor with leaky Rectifier Linear Unit""" + return tf.maximum(x, leak*x) + + +def linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False): + """ + Compose specified tensor with linear (fully-connected) layer + + Args: + input_: tensor to compose. Shape: [N, M] + output_size: number of output neurons + scope: name scope + stddev: standard deviation of gaussian distribution to use for random weight initialization + name: name scope + with_w: whether to also return parameter variables + + Returns: + Composed tensor. Shape: [N, output_size] + """ + shape = input_.get_shape().as_list() + + with tf.variable_scope(scope or "Linear"): + matrix = tf.get_variable("Matrix", [shape[1], output_size], tf.float32, + tf.random_normal_initializer(stddev=stddev)) + bias = tf.get_variable("bias", [output_size], + initializer=tf.constant_initializer(bias_start)) + if with_w: + return tf.matmul(input_, matrix) + bias, matrix, bias + else: + return tf.matmul(input_, matrix) + bias + + +class UserModel(Tower): + """ + User Model definition + + DIGITS creates an instance of this class for every tower it needs + to create. This includes: + - one for training, + - one for validation, + - one for testing. + + In the case of multi-GPU training, one training instance is created + for every GPU. DIGITS takes care of doing the gradient averaging + across GPUs so this class only needs to define the inference op + and desired loss/cost function. + """ + + def __init__(self, *args, **kwargs): + """ + Identify the correct input nodes. + + In the parent class, DIGITS conveniently sets the following fields: + - self.is_training: whether this is a training graph + - self.is_inference: whether this graph is created for inference/testing + - self.x: input node. Shape: [N, H, W, C] + - self.y: label. Shape: [N] for scalar labels, [N, H, W, C] otherwise. 
+ Only defined if self._is_training is True + """ + super(UserModel, self).__init__(*args, **kwargs) + + image_size = 64 + output_size = 64 + c_dim = 3 + z_dim = 100 + + self.dcgan_init(image_size=image_size, + output_size=output_size, + c_dim=c_dim, + z_dim=z_dim) + + @model_property + def inference(self): + """ op to use for inference """ + + # scale back to [0, 255] range + images = (self.G * 127) + 128 + images_flat = tf.reshape(images, [self.batch_size, self.image_size * self.image_size * self.c_dim]) + # concatenate encoded z and generated image into a single flat structure + zgen_flat = tf.reshape(self.DzGEN, [self.batch_size, self.z_dim]) + return tf.concat([zgen_flat, images_flat], 1) + + @model_property + def loss(self): + """ + Loss function + + Returns either an op or a list of dicts. + If the returned value is an op then DIGITS will optimize against this op + with respect to all trainable variables. + If the returned value is a list then DIGITS will optimize against each + loss in the list with respect to the specified variables. + """ + + # here we are returning a list because we want to alternately optimize the + # discriminator and the generator. + + losses = [ + {'loss': self.dzgen_loss, 'vars': self.d_vars}, + ] + return losses + + def dcgan_init(self, + image_size, + output_size, + z_dim, + c_dim, + gf_dim=64, + df_dim=64, + gfc_dim=1024, + dfc_dim=1024): + """ + + Args: + output_size: (optional) The resolution in pixels of the images. [64] + z_dim: (optional) Dimension of dim for Z. [100] + gf_dim: (optional) Dimension of gen filters in first conv layer. [64] + df_dim: (optional) Dimension of discrim filters in first conv layer. [64] + gfc_dim: (optional) Dimension of gen units for for fully connected layer. [1024] + dfc_dim: (optional) Dimension of discrim units for fully connected layer. [1024] + c_dim: (optional) Dimension of image color. For grayscale input, set to 1. [3] + """ + self.image_size = image_size + self.output_size = output_size + + self.z_dim = z_dim + + self.gf_dim = gf_dim + self.df_dim = df_dim + + self.gfc_dim = gfc_dim + self.dfc_dim = dfc_dim + + self.c_dim = c_dim + + self.batch_size = tf.shape(self.x)[0] + + self.soft_label_margin = 0.1 + + # batch normalization : deals with poor initialization helps gradient flow + self.d_bn1 = batch_norm(name='d_bn1') + self.d_bn2 = batch_norm(name='d_bn2') + self.d_bn3 = batch_norm(name='d_bn3') + + self.g_bn0 = batch_norm(name='g_bn0') + self.g_bn1 = batch_norm(name='g_bn1') + self.g_bn2 = batch_norm(name='g_bn2') + self.g_bn3 = batch_norm(name='g_bn3') + + self.build_model() + + def build_model(self): + + # reshape/rescale x + self.images = (tf.reshape(self.x, shape=[self.batch_size, + self.image_size, + self.image_size, + self.c_dim], + name='x_reshaped') - 128) / 127. 
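+        # (the inference op above undoes this scaling with (self.G * 127) + 128)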
+
+        # create discriminator/encoder
+        self.DzGEN, self.D_logits = self.discriminator(self.images, reuse=False)
+        # create generator
+        self.G = self.generator(self.DzGEN)
+        # loss is now L2 distance between input image and generator output
+        self.dzgen_loss = tf.reduce_mean(tf.square(self.G - self.images), name="loss_DzGEN")
+
+        # debug
+        self.summaries.append(image_summary("G", self.G, max_outputs=3))
+        self.summaries.append(image_summary("X", self.images, max_outputs=3))
+        self.summaries.append(histogram_summary("G_hist", self.G))
+        self.summaries.append(histogram_summary("X_hist", self.images))
+        self.summaries.append(scalar_summary("DzGen_loss", self.dzgen_loss))
+
+        # all trainable variables
+        t_vars = tf.trainable_variables()
+        # d variables
+        self.d_vars = [var for var in t_vars if 'd_' in var.name]
+
+    def discriminator(self, image, y=None, reuse=False):
+        """
+        Create the discriminator/encoder
+
+        This creates a string of layers:
+        - input - [N, 64, 64, 3]
+        - conv layer with 64 5x5 kernels and 2x2 stride - [N, 32, 32, 64]
+        - leaky relu - [N, 32, 32, 64]
+        - conv layer with 128 5x5 kernels and 2x2 stride - [N, 16, 16, 128]
+        - batch norm - [N, 16, 16, 128]
+        - leaky relu - [N, 16, 16, 128]
+        - conv layer with 256 5x5 kernels and 2x2 stride - [N, 8, 8, 256]
+        - batch norm - [N, 8, 8, 256]
+        - leaky relu - [N, 8, 8, 256]
+        - conv layer with 512 5x5 kernels and 2x2 stride - [N, 4, 4, 512]
+        - batch norm - [N, 4, 4, 512]
+        - leaky relu - [N, 4, 4, 512]
+        - flatten - [N, 8192]
+        - linear layer with z_dim output neurons - [N, z_dim]
+
+        Args:
+            image: batch of input images - shape: [N, H, W, C]
+            y: batch of one-hot encoded labels - shape: [N, K]
+            reuse: whether to re-use previously created variables
+        """
+
+        # NOTE: although we are really creating an encoder here we need to re-use the same
+        # variable scope (i.e.
"discriminator") as in the original GAN so we can re-use + # learned parameters + with tf.variable_scope("discriminator") as scope: + if reuse: + scope.reuse_variables() + + h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv')) + h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim*2, name='d_h1_conv'), train=self.is_training)) + h2 = lrelu(self.d_bn2(conv2d(h1, self.df_dim*4, name='d_h2_conv'), train=self.is_training)) + h3 = lrelu(self.d_bn3(conv2d(h2, self.df_dim*8, name='d_h3_conv'), train=self.is_training)) + h3_size = ((self.output_size // 16) ** 2) * self.df_dim * 8 + h4 = linear(tf.reshape(h3, [self.batch_size, h3_size]), self.z_dim, 'd_h3_lin_retrain') + return h4, h4 + + def generator(self, z, y=None): + """ + Create the generator + + This creates a string of layers: + - input - [N, 100] + - linear layer with 8192 output neurons - [N, 8192] + - reshape - [N, 4, 4, 512] + - batch norm - [N, 4, 4, 512] + - relu - [N, 4, 4, 512] + - transpose convolution with 256 filters and stride 2 - [N, 8, 8, 256] + - batch norm - [N, 8, 8, 256] + - relu - [N, 8, 8, 256] + - transpose convolution with 128 filters and stride 2 - [N, 16, 16, 128] + - batch norm - [N, 16, 16, 128] + - relu - [N, 16, 16, 128] + - transpose convolution with 64 filters and stride 2 - [N, 32, 32, 64] + - batch norm - [N, 32, 32, 64] + - relu - [N, 32, 32, 64] + - transpose convolution with 3 filters and stride 2 - [N, 64, 64, 3] + - tanh - [N, 64, 64, 3] + """ + with tf.variable_scope("generator"): + s = self.output_size + s2, s4, s8, s16 = int(s/2), int(s/4), int(s/8), int(s/16) + + # project `z` and reshape + self.z_, self.h0_w, self.h0_b = linear(z, self.gf_dim*8*s16*s16, 'g_h0_lin', with_w=True) + + self.h0 = tf.reshape(self.z_, [-1, s16, s16, self.gf_dim * 8]) + h0 = tf.nn.relu(self.g_bn0(self.h0, train=False)) + + self.h1, self.h1_w, self.h1_b = deconv2d(h0, [self.batch_size, s8, s8, self.gf_dim*4], + name='g_h1', with_w=True) + h1 = tf.nn.relu(self.g_bn1(self.h1, train=False)) + + h2, self.h2_w, self.h2_b = deconv2d(h1, [self.batch_size, s4, s4, self.gf_dim*2], + name='g_h2', with_w=True) + h2 = tf.nn.relu(self.g_bn2(h2, train=False)) + + h3, self.h3_w, self.h3_b = deconv2d(h2, [self.batch_size, s2, s2, self.gf_dim*1], + name='g_h3', with_w=True) + h3 = tf.nn.relu(self.g_bn3(h3, train=False)) + + h4, self.h4_w, self.h4_b = deconv2d(h3, [self.batch_size, s, s, self.c_dim], + name='g_h4', with_w=True) + + return tf.nn.tanh(h4) diff --git a/examples/gan/network-celebA.py b/examples/gan/network-celebA.py new file mode 100644 index 000000000..4ab2855d8 --- /dev/null +++ b/examples/gan/network-celebA.py @@ -0,0 +1,447 @@ +# The MIT License (MIT) +# +# Original work Copyright (c) 2016 Taehoon Kim +# Modified work Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import tensorflow as tf +from model import Tower +from utils import model_property + +image_summary = tf.summary.image +scalar_summary = tf.summary.scalar +histogram_summary = tf.summary.histogram +merge_summary = tf.summary.merge +SummaryWriter = tf.summary.FileWriter + + +class batch_norm(object): + """ + This class creates an op that composes the specified tensor with a batch + normalization layer. + """ + + def __init__(self, epsilon=1e-5, momentum=0.9, name="batch_norm"): + """Instance initialization""" + with tf.variable_scope(name): + self.epsilon = epsilon + self.momentum = momentum + self.name = name + + def __call__(self, x, train=True): + """ + Functional interface + + Args: + x: tensor to compose + train: set to True during training and False otherwise + """ + return tf.contrib.layers.batch_norm(x, + decay=self.momentum, + updates_collections=None, + epsilon=self.epsilon, + scale=True, + is_training=train, + scope=self.name) + + +def conv2d(input_, output_dim, + k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, + name="conv2d"): + """ + Compose specified symbol with 2D convolution layer + + Args: + input_: tensor to compose. Shape: [N, H, W, C] + output_dim: number of output features maps + k_h: kernel height + k_w: kernel width + d_h: horizontal stride + d_w: vertical stride + stddev: standard deviation of gaussian distribution to use for random weight initialization + name: name scope + + Returns: + Composed tensor. + """ + with tf.variable_scope(name): + w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim], + initializer=tf.truncated_normal_initializer(stddev=stddev)) + conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME') + + biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0)) + conv = tf.nn.bias_add(conv, biases) + + return conv + + +def deconv2d(input_, output_shape, + k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, + name="deconv2d", with_w=False): + """ + Compose specified symbol with 2D *transpose* convolution layer + + Args: + input_: tensor to compose. Shape: [N, H, W, C] + output_shape: output shape + k_h: kernel height + k_w: kernel width + d_h: horizontal stride + d_w: vertical stride + stddev: standard deviation of gaussian distribution to use for random weight initialization + name: name scope + + Returns: + Composed tensor. 
+ """ + with tf.variable_scope(name): + # filter : [height, width, output_channels, in_channels] + w = tf.get_variable('w', + [k_h, k_w, output_shape[-1], + input_.get_shape()[-1]], + initializer=tf.random_normal_initializer(stddev=stddev)) + deconv = tf.nn.conv2d_transpose(input_, w, + output_shape=output_shape, + strides=[1, d_h, d_w, 1]) + + biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0)) + deconv = tf.reshape(tf.nn.bias_add(deconv, biases), output_shape) + + if with_w: + return deconv, w, biases + else: + return deconv + + +def lrelu(x, leak=0.2, name="lrelu"): + """Compose specified tensor with leaky Rectifier Linear Unit""" + return tf.maximum(x, leak*x) + + +def linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False): + """ + Compose specified tensor with linear (fully-connected) layer + + Args: + input_: tensor to compose. Shape: [N, M] + output_size: number of output neurons + scope: name scope + stddev: standard deviation of gaussian distribution to use for random weight initialization + name: name scope + with_w: whether to also return parameter variables + + Returns: + Composed tensor. Shape: [N, output_size] + """ + shape = input_.get_shape().as_list() + + with tf.variable_scope(scope or "Linear"): + matrix = tf.get_variable("Matrix", [shape[1], output_size], tf.float32, + tf.random_normal_initializer(stddev=stddev)) + bias = tf.get_variable("bias", [output_size], + initializer=tf.constant_initializer(bias_start)) + if with_w: + return tf.matmul(input_, matrix) + bias, matrix, bias + else: + return tf.matmul(input_, matrix) + bias + + +class UserModel(Tower): + """ + User Model definition + + DIGITS creates an instance of this class for every tower it needs + to create. This includes: + - one for training, + - one for validation, + - one for testing. + + In the case of multi-GPU training, one training instance is created + for every GPU. DIGITS takes care of doing the gradient averaging + across GPUs so this class only needs to define the inference op + and desired loss/cost function. + """ + + def __init__(self, *args, **kwargs): + """ + Identify the correct input nodes. + + In the parent class, DIGITS conveniently sets the following fields: + - self.is_training: whether this is a training graph + - self.is_inference: whether this graph is created for inference/testing + - self.x: input node. Shape: [N, H, W, C] + - self.y: label. Shape: [N] for scalar labels, [N, H, W, C] otherwise. + Only defined if self._is_training is True + """ + super(UserModel, self).__init__(*args, **kwargs) + + image_size = 64 + output_size = 64 + c_dim = 3 + z_dim = 100 + + self.dcgan_init(image_size=image_size, + output_size=output_size, + c_dim=c_dim, + z_dim=z_dim) + + @model_property + def inference(self): + """op to use for inference""" + + # scale back to [0, 255] range + return tf.to_int32((self.G+127) * 128) + + @model_property + def loss(self): + """ + Loss function + + Returns either an op or a list of dicts. + If the returned value is an op then DIGITS will optimize against this op + with respect to all trainable variables. + If the returned value is a list then DIGITS will optimize against each + loss in the list with respect to the specified variables. + """ + + # here we are returning a list because we want to alternately optimize the + # discriminator and the generator. 
+ + losses = [ + {'loss': self.d_loss, 'vars': self.d_vars}, + {'loss': self.g_loss, 'vars': self.g_vars} + ] + return losses + + def dcgan_init(self, + image_size, + output_size, + z_dim, + c_dim, + gf_dim=64, + df_dim=64, + gfc_dim=1024, + dfc_dim=1024): + """ + + Args: + output_size: (optional) The resolution in pixels of the images. [64] + z_dim: (optional) Dimension of dim for Z. [100] + gf_dim: (optional) Dimension of gen filters in first conv layer. [64] + df_dim: (optional) Dimension of discrim filters in first conv layer. [64] + gfc_dim: (optional) Dimension of gen units for for fully connected layer. [1024] + dfc_dim: (optional) Dimension of discrim units for fully connected layer. [1024] + c_dim: (optional) Dimension of image color. For grayscale input, set to 1. [3] + """ + self.image_size = image_size + self.output_size = output_size + + self.z_dim = z_dim + + self.gf_dim = gf_dim + self.df_dim = df_dim + + self.gfc_dim = gfc_dim + self.dfc_dim = dfc_dim + + self.c_dim = c_dim + + self.batch_size = tf.shape(self.x)[0] + + self.soft_label_margin = 0.1 + + # batch normalization : deals with poor initialization helps gradient flow + self.d_bn1 = batch_norm(name='d_bn1') + self.d_bn2 = batch_norm(name='d_bn2') + self.d_bn3 = batch_norm(name='d_bn3') + + self.g_bn0 = batch_norm(name='g_bn0') + self.g_bn1 = batch_norm(name='g_bn1') + self.g_bn2 = batch_norm(name='g_bn2') + self.g_bn3 = batch_norm(name='g_bn3') + + self.build_model() + + def build_model(self): + + if not self.is_inference: + # create both the generator and the discriminator + # self.x is a batch of images - shape: [N, H, W, C] + # self.y is a vector of labels - shape: [N] + + # sample z from a normal distribution + self.z = tf.random_normal(shape=[self.batch_size, self.z_dim], dtype=tf.float32, seed=None, name='z') + + # scale input to [-1, +1] range + self.images = (tf.reshape(self.x, + shape=[self.batch_size, + self.image_size, + self.image_size, + self.c_dim], + name='x_reshaped') - 128) / 127. + + # create generator + self.G = self.generator(self.z) + # create an instance of the discriminator (real samples) + self.D, self.D_logits = self.discriminator(self.images, reuse=False) + # create another identical instance of the discriminator (fake samples) + # NOTE: we are re-using variables here to share weights between the two + # instances of the discriminator + self.D_, self.D_logits_ = self.discriminator(self.G, reuse=True) + + # we are using the cross entropy loss for all these losses + # note the use of the soft label smoothing here to prevent D from getting overly confident + # on real samples + d_real = tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits, + labels=(tf.ones_like(self.D) - self.soft_label_margin), + name="loss_D_real") + self.d_loss_real = tf.reduce_mean(d_real) + d_fake = tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_, + labels=(tf.zeros_like(self.D_)), + name="loss_D_fake") + self.d_loss_fake = tf.reduce_mean(d_fake) + self.d_loss = (self.d_loss_real + self.d_loss_fake) / 2. 
+ # the typical GAN set-up is that of a minimax game where D is trying to minimize + # its own error and G is trying to maximize D's error however note how we are flipping G labels here: + # instead of maximizing D's error, we are minimizing D's error on the 'wrong' label + # this trick helps produce a stronger gradient + g_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_, + labels=(tf.ones_like(self.D_) + self.soft_label_margin), + name="loss_G") + self.g_loss = tf.reduce_mean(g_loss) + + # debug + self.summaries.append(image_summary("G", self.G, max_outputs=3)) + self.summaries.append(image_summary("X", self.images, max_outputs=3)) + self.summaries.append(histogram_summary("G_hist", self.G)) + self.summaries.append(histogram_summary("X_hist", self.images)) + self.summaries.append(scalar_summary("d_loss_real", self.d_loss_real)) + self.summaries.append(scalar_summary("d_loss_fake", self.d_loss_fake)) + self.summaries.append(scalar_summary("g_loss", self.g_loss)) + self.summaries.append(scalar_summary("d_loss", self.d_loss)) + + # all trainable variables + t_vars = tf.trainable_variables() + # G variables + self.g_vars = [var for var in t_vars if 'g_' in var.name] + # D variables + self.d_vars = [var for var in t_vars if 'd_' in var.name] + + # Extra hook for debug: log chi-square distance between G's output histogram and the dataset's histogram + value_range = [0.0, 1.0] + nbins = 100 + hist_g = tf.histogram_fixed_width(self.G, value_range, nbins=nbins, dtype=tf.float32) / nbins + hist_images = tf.histogram_fixed_width(self.images, value_range, nbins=nbins, dtype=tf.float32) / nbins + chi_square = tf.reduce_mean(tf.div(tf.square(hist_g - hist_images), hist_g + hist_images + 1e-5)) + self.summaries.append(scalar_summary("chi_square", chi_square)) + else: + # Create only the generator + self.x = tf.reshape(self.x, shape=[self.batch_size, self.z_dim]) + self.z = self.x[:, :self.z_dim] + self.G = self.generator(self.z) + + def discriminator(self, image, y=None, reuse=False): + """ + Create the discriminator + + This creates a string of layers: + - input - [N, 64, 64, 3] + - conv layer with 64 5x5 kernels and 2x2 stride - [N, 32, 32, 64] + - leaky relu - [N, 32, 32, 64] + - conv layer with 128 5x5 kernels and 2x2 stride - [N, 16, 16, 32] + - batch norm - [N, 16, 16, 32] + - leaky relu - [N, 16, 16, 32] + - conv layer with 256 5x5 kernels and 2x2 stride - [N, 8, 8, 256] + - batch norm - [N, 8, 8, 256] + - leaky relu - [N, 8, 8, 256] + - conv layer with 256 5x5 kernels and 2x2 stride - [N, 4, 4, 512] + - batch norm - [N, 4, 4, 512] + - leaky relu - [N, 4, 4, 512] + - flatten - [N, 8192] + - linear layer with 1 output neurons - [N, 1] + - sigmoid - [N,1] + + Args: + image: batch of input images - shape: [N, H, W, C] + y: batch of one-hot encoded labels - shape: [N, K] + reuse: whether to re-use previously created variables + """ + with tf.variable_scope("discriminator") as scope: + if reuse: + scope.reuse_variables() + + h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv')) + h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim * 2, name='d_h1_conv'), train=self.is_training)) + h2 = lrelu(self.d_bn2(conv2d(h1, self.df_dim * 4, name='d_h2_conv'), train=self.is_training)) + h3 = lrelu(self.d_bn3(conv2d(h2, self.df_dim * 8, name='d_h3_conv'), train=self.is_training)) + h3_size = ((self.output_size // 16) ** 2) * self.df_dim * 8 + h4 = linear(tf.reshape(h3, [self.batch_size, h3_size]), 1, 'd_h3_lin') + + return tf.nn.sigmoid(h4), h4 + + def generator(self, z, y=None): + """ + Create 
the generator + + This creates a string of layers: + - input - [N, 100] + - linear layer with 8192 output neurons - [N, 8192] + - reshape - [N, 4, 4, 512] + - batch norm - [N, 4, 4, 512] + - relu - [N, 4, 4, 512] + - transpose convolution with 256 filters and stride 2 - [N, 8, 8, 256] + - batch norm - [N, 8, 8, 256] + - relu - [N, 8, 8, 256] + - transpose convolution with 128 filters and stride 2 - [N, 16, 16, 128] + - batch norm - [N, 16, 16, 128] + - relu - [N, 16, 16, 128] + - transpose convolution with 64 filters and stride 2 - [N, 32, 32, 64] + - batch norm - [N, 32, 32, 64] + - relu - [N, 32, 32, 64] + - transpose convolution with 3 filters and stride 2 - [N, 64, 64, 3] + - tanh - [N, 64, 64, 3] + """ + with tf.variable_scope("generator"): + + s = self.output_size + s2, s4, s8, s16 = int(s // 2), int(s // 4), int(s // 8), int(s // 16) + + # project `z` and reshape + self.z_, self.h0_w, self.h0_b = linear(z, self.gf_dim * 8 * s16 * s16, 'g_h0_lin', with_w=True) + + self.h0 = tf.reshape(self.z_, [-1, s16, s16, self.gf_dim * 8]) + h0 = tf.nn.relu(self.g_bn0(self.h0, train=self.is_training)) + + self.h1, self.h1_w, self.h1_b = deconv2d(h0, [self.batch_size, s8, s8, self.gf_dim * 4], + name='g_h1', with_w=True) + h1 = tf.nn.relu(self.g_bn1(self.h1, train=self.is_training)) + + h2, self.h2_w, self.h2_b = deconv2d(h1, [self.batch_size, s4, s4, self.gf_dim * 2], + name='g_h2', with_w=True) + h2 = tf.nn.relu(self.g_bn2(h2, train=self.is_training)) + + h3, self.h3_w, self.h3_b = deconv2d(h2, [self.batch_size, s2, s2, self.gf_dim * 1], + name='g_h3', with_w=True) + h3 = tf.nn.relu(self.g_bn3(h3, train=self.is_training)) + + h4, self.h4_w, self.h4_b = deconv2d(h3, [self.batch_size, s, s, self.c_dim], + name='g_h4', with_w=True) + + return tf.nn.tanh(h4) diff --git a/examples/gan/network-mnist-encoder.py b/examples/gan/network-mnist-encoder.py new file mode 100644 index 000000000..9168bb375 --- /dev/null +++ b/examples/gan/network-mnist-encoder.py @@ -0,0 +1,431 @@ +# The MIT License (MIT) +# +# Original work Copyright (c) 2016 Taehoon Kim +# Modified work Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +import tensorflow as tf +from model import Tower +from utils import model_property + +image_summary = tf.summary.image +scalar_summary = tf.summary.scalar +histogram_summary = tf.summary.histogram +merge_summary = tf.summary.merge +SummaryWriter = tf.summary.FileWriter + + +class batch_norm(object): + """ + This class creates an op that composes the specified tensor with a batch + normalization layer. + """ + + def __init__(self, epsilon=1e-5, momentum=0.9, name="batch_norm"): + """Instance initialization""" + with tf.variable_scope(name): + self.epsilon = epsilon + self.momentum = momentum + self.name = name + + def __call__(self, x, train=True): + """ + Functional interface + + Args: + x: tensor to compose + train: set to True during training and False otherwise + """ + return tf.contrib.layers.batch_norm(x, + decay=self.momentum, + updates_collections=None, + epsilon=self.epsilon, + scale=True, + is_training=train, + scope=self.name) + + +def conv_cond_concat(x, y): + """ + Concatenate conditioning matrix across channel axis. + + The specified input tensor is concatenated with K feature maps (K = number of classes) + across the channel dimension. Each of the K feature maps is set to all-zeros except for + the one whose index matches the target class (which is set to all-ones). + + Args: + x: non-conditioned tensor. Shape: [N, H, W, C] + y: one-hot encoded conditioning matrix. Shape: [N, K] + + Returns: + conditioned feature map. Shape: [N, H, W, C + K] + """ + x_shapes = x.get_shape() + y_shapes = y.get_shape() + batch_size = tf.shape(x)[0] + return tf.concat([x, y * tf.ones([batch_size, int(x_shapes[1]), int(x_shapes[2]), int(y_shapes[3])])], 3) + + +def conv2d(input_, output_dim, + k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, + name="conv2d"): + """ + Compose specified symbol with 2D convolution layer + + Args: + input_: tensor to compose. Shape: [N, H, W, C] + output_dim: number of output features maps + k_h: kernel height + k_w: kernel width + d_h: horizontal stride + d_w: vertical stride + stddev: standard deviation of gaussian distribution to use for random weight initialization + name: name scope + + Returns: + Composed tensor. + """ + with tf.variable_scope(name): + w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim], + initializer=tf.truncated_normal_initializer(stddev=stddev)) + conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME') + + biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0)) + conv = tf.nn.bias_add(conv, biases) + + return conv + + +def deconv2d(input_, output_shape, + k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, + name="deconv2d", with_w=False): + """ + Compose specified symbol with 2D *transpose* convolution layer + + Args: + input_: tensor to compose. Shape: [N, H, W, C] + output_shape: output shape + k_h: kernel height + k_w: kernel width + d_h: horizontal stride + d_w: vertical stride + stddev: standard deviation of gaussian distribution to use for random weight initialization + name: name scope + + Returns: + Composed tensor. 
+ """ + with tf.variable_scope(name): + # filter : [height, width, output_channels, in_channels] + w = tf.get_variable('w', + [k_h, k_w, output_shape[-1], + input_.get_shape()[-1]], + initializer=tf.random_normal_initializer(stddev=stddev)) + deconv = tf.nn.conv2d_transpose(input_, w, + output_shape=output_shape, + strides=[1, d_h, d_w, 1]) + + biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0)) + deconv = tf.reshape(tf.nn.bias_add(deconv, biases), output_shape) + + if with_w: + return deconv, w, biases + else: + return deconv + + +def lrelu(x, leak=0.2, name="lrelu"): + """Compose specified tensor with leaky Rectifier Linear Unit""" + return tf.maximum(x, leak*x) + + +def linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False): + """ + Compose specified tensor with linear (fully-connected) layer + + Args: + input_: tensor to compose. Shape: [N, M] + output_size: number of output neurons + scope: name scope + stddev: standard deviation of gaussian distribution to use for random weight initialization + name: name scope + with_w: whether to also return parameter variables + + Returns: + Composed tensor. Shape: [N, output_size] + """ + shape = input_.get_shape().as_list() + + with tf.variable_scope(scope or "Linear"): + matrix = tf.get_variable("Matrix", [shape[1], output_size], tf.float32, + tf.random_normal_initializer(stddev=stddev)) + bias = tf.get_variable("bias", [output_size], + initializer=tf.constant_initializer(bias_start)) + if with_w: + return tf.matmul(input_, matrix) + bias, matrix, bias + else: + return tf.matmul(input_, matrix) + bias + + +class UserModel(Tower): + """ + User Model definition + + DIGITS creates an instance of this class for every tower it needs + to create. This includes: + - one for training, + - one for validation, + - one for testing. + + In the case of multi-GPU training, one training instance is created + for every GPU. DIGITS takes care of doing the gradient averaging + across GPUs so this class only needs to define the inference op + and desired loss/cost function. + """ + + def __init__(self, *args, **kwargs): + """ + Identify the correct input nodes. + + In the parent class, DIGITS conveniently sets the following fields: + - self.is_training: whether this is a training graph + - self.is_inference: whether this graph is created for inference/testing + - self.x: input node. Shape: [N, H, W, C] + - self.y: label. Shape: [N] for scalar labels, [N, H, W, C] otherwise. + Only defined if self._is_training is True + """ + super(UserModel, self).__init__(*args, **kwargs) + + # initialize graph with parameters for MNIST + self.dcgan_init(image_size=28, + y_dim=10, + output_size=28, + c_dim=1) + + @model_property + def inference(self): + """ op to use for inference """ + # rescale + images = self.G * 255 + # flatten G output + images_flat = tf.reshape(images, [self.batch_size, self.image_size * self.image_size * self.c_dim]) + # now return encoded z concatenated with G output + # during inference the visualization script will need to extract + # both z and the generated image to display them separately + zgen_flat = tf.reshape(self.DzGEN, [self.batch_size, self.z_dim]) + return tf.concat([zgen_flat, images_flat], 1) + + @model_property + def loss(self): + """ + Loss function + + Returns either an op or a list of dicts. + If the returned value is an op then DIGITS will optimize against this op + with respect to all trainable variables. 
+ If the returned value is a list then DIGITS will optimize against each + loss in the list with respect to the specified variables. + """ + + # here we are returning a list because we want to alternately optimize the + # discriminator on real samples, the discriminator on fake samples and the + # generator. + losses = [ + {'loss': self.dzgen_loss, 'vars': self.d_vars}, + ] + return losses + + def dcgan_init(self, image_size=108, + output_size=64, y_dim=None, z_dim=100, gf_dim=64, df_dim=64, + gfc_dim=1024, dfc_dim=1024, c_dim=3): + """ + Create the model + + Args: + output_size: (optional) The resolution in pixels of the images. [64] + y_dim: (optional) Dimension of dim for y. [None] + z_dim: (optional) Dimension of dim for Z. [100] + gf_dim: (optional) Dimension of gen filters in first conv layer. [64] + df_dim: (optional) Dimension of discrim filters in first conv layer. [64] + gfc_dim: (optional) Dimension of gen units for for fully connected layer. [1024] + dfc_dim: (optional) Dimension of discrim units for fully connected layer. [1024] + c_dim: (optional) Dimension of image color. For grayscale input, set to 1. [3] + """ + self.image_size = image_size + self.output_size = output_size + + self.y_dim = y_dim + self.z_dim = z_dim + + self.gf_dim = gf_dim + self.df_dim = df_dim + + self.gfc_dim = gfc_dim + self.dfc_dim = dfc_dim + + self.c_dim = c_dim + + self.batch_size = tf.shape(self.x)[0] + + # batch normalization : deals with poor initialization helps gradient flow + self.d_bn1 = batch_norm(name='d_bn1') + self.d_bn2 = batch_norm(name='d_bn2') + + self.g_bn0 = batch_norm(name='g_bn0') + self.g_bn1 = batch_norm(name='g_bn1') + self.g_bn2 = batch_norm(name='g_bn2') + + self.build_model() + + def build_model(self): + """Create the main ops""" + + if self.is_inference: + # HACK: we are hard-coding class 3 during inference + # TODO: find way to pass this from UI + self.y = tf.to_int32(3*tf.ones(shape=[self.batch_size])) + + # create both the generator and the discriminator/encoder + # self.x is a batch of images - shape: [N, H, W, C] + # self.y is a vector of labels - shape: [N] + + # rescale to [0,1] range + x_reshaped = tf.reshape(self.x, shape=[self.batch_size, self.image_size, self.image_size, self.c_dim], + name='x_reshaped') + + self.images = x_reshaped / 255. 
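+        # the generator ends in a sigmoid, so its output is already in the same [0, 1] range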
+
+        # one-hot encode y - shape: [N] -> [N, self.y_dim]
+        self.y = tf.one_hot(self.y, self.y_dim, name='y_onehot')
+
+        # create discriminator/encoder
+        self.DzGEN, self.D_logits = self.discriminator(self.images, self.y, reuse=False)
+
+        # create generator
+        self.G = self.generator(self.DzGEN, self.y)
+
+        # we only have one loss function here (L2 distance between input image and generator output)
+        self.dzgen_loss = tf.reduce_mean(tf.square(self.G - self.images), name="loss_DzGEN")
+
+        # debug
+        self.summaries.append(image_summary("G", self.G, max_outputs=5))
+        self.summaries.append(image_summary("X", self.images, max_outputs=5))
+        self.summaries.append(histogram_summary("G_hist", self.G))
+        self.summaries.append(histogram_summary("X_hist", self.images))
+        self.summaries.append(scalar_summary("DzGen_loss", self.dzgen_loss))
+
+        # all trainable variables
+        t_vars = tf.trainable_variables()
+        # D variables
+        self.d_vars = [var for var in t_vars if 'd_' in var.name]
+
+    def discriminator(self, image, y=None, reuse=False):
+        """
+        Create the discriminator/encoder
+
+        This creates a string of layers:
+        - input - [N, 28, 28, 1]
+        - concat conditioning - [N, 28, 28, 11]
+        - conv layer with 11 5x5 kernels and 2x2 stride - [N, 14, 14, 11]
+        - leaky relu - [N, 14, 14, 11]
+        - concat conditioning - [N, 14, 14, 21]
+        - conv layer with 74 5x5 kernels and 2x2 stride - [N, 7, 7, 74]
+        - batch norm - [N, 7, 7, 74]
+        - leaky relu - [N, 7, 7, 74]
+        - flatten - [N, 3626]
+        - concat conditioning - [N, 3636]
+        - linear layer with 1024 output neurons - [N, 1024]
+        - batch norm - [N, 1024]
+        - leaky relu - [N, 1024]
+        - concat conditioning - [N, 1034]
+        - linear layer with z_dim output neurons - [N, z_dim]
+
+        Args:
+            image: batch of input images - shape: [N, H, W, C]
+            y: batch of one-hot encoded labels - shape: [N, K]
+            reuse: whether to re-use previously created variables
+        """
+
+        # NOTE: although we are really creating an encoder here we need to re-use the same
+        # variable scope (i.e.
"discriminator") as in the original GAN so we can re-use + # learned parameters + with tf.variable_scope("discriminator") as scope: + if reuse: + # re-use (share) variables + scope.reuse_variables() + + yb = tf.reshape(y, [self.batch_size, 1, 1, self.y_dim]) + x = conv_cond_concat(image, yb) + + h0 = lrelu(conv2d(x, self.c_dim + self.y_dim, name='d_h0_conv')) + h0 = conv_cond_concat(h0, yb) + + h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim + self.y_dim, name='d_h1_conv'), train=self.is_training)) + sz = h1.get_shape() + h1 = tf.reshape(h1, [self.batch_size, int(sz[1] * sz[2] * sz[3])]) + h1 = tf.concat([h1, y], 1) + + h2 = lrelu(self.d_bn2(linear(h1, self.dfc_dim, 'd_h2_lin'), train=self.is_training)) + h2 = tf.concat([h2, y], 1) + + h3 = linear(h2, self.z_dim, 'd_h3_lin_retrain') + return h3, h3 + + def generator(self, z, y=None): + """ + Create the generator + + This creates a string of layers: + - input - [N, 100] + - concatenate conditioning - [N, 110] + - linear layer with 1024 output neurons - [N, 1024] + - batch norm - [N, 1024] + - relu - [N, 1024] + - concatenate conditioning - [N, 1034] + - linear layer with 7*7*128=6272 output neurons - [N, 6272] + - reshape 7x7 feature maps - [N, 7, 7, 128] + - concatenate conditioning - [N, 7, 7, 138] + - transpose convolution with 128 filters and stride 2 - [N, 14, 14, 128] + - batch norm - [N, 14, 14, 128] + - relu - [N, 14, 14, 128] + - concatenate conditioing - [N, 14, 14, 138] + - transpose convolution with 1 filter and stride 2 - [N, 28, 28, 1] + """ + with tf.variable_scope("generator"): + s = self.output_size + s2, s4 = int(s/2), int(s/4) + + # yb = tf.expand_dims(tf.expand_dims(y, 1),2) + yb = tf.reshape(y, [self.batch_size, 1, 1, self.y_dim]) + z = tf.concat([z, y], 1) + + h0 = tf.nn.relu(self.g_bn0(linear(z, self.gfc_dim, 'g_h0_lin'), train=False)) + h0 = tf.concat([h0, y], 1) + + h1 = tf.nn.relu(self.g_bn1(linear(h0, self.gf_dim*2*s4*s4, 'g_h1_lin'), train=False)) + h1 = tf.reshape(h1, [self.batch_size, s4, s4, self.gf_dim * 2]) + + h1 = conv_cond_concat(h1, yb) + + h2 = tf.nn.relu(self.g_bn2(deconv2d(h1, [self.batch_size, s2, s2, self.gf_dim * 2], + name='g_h2'), train=False)) + h2 = conv_cond_concat(h2, yb) + + return tf.nn.sigmoid(deconv2d(h2, [self.batch_size, s, s, self.c_dim], name='g_h3')) diff --git a/examples/gan/network-mnist.py b/examples/gan/network-mnist.py new file mode 100644 index 000000000..33cc7c774 --- /dev/null +++ b/examples/gan/network-mnist.py @@ -0,0 +1,472 @@ +# The MIT License (MIT) +# +# Original work Copyright (c) 2016 Taehoon Kim +# Modified work Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import tensorflow as tf +from model import Tower +from utils import model_property + +image_summary = tf.summary.image +scalar_summary = tf.summary.scalar +histogram_summary = tf.summary.histogram +merge_summary = tf.summary.merge +SummaryWriter = tf.summary.FileWriter + + +class batch_norm(object): + """ + This class creates an op that composes the specified tensor with a batch + normalization layer. + """ + + def __init__(self, epsilon=1e-5, momentum=0.9, name="batch_norm"): + """Instance initialization""" + with tf.variable_scope(name): + self.epsilon = epsilon + self.momentum = momentum + self.name = name + + def __call__(self, x, train=True): + """ + Functional interface + + Args: + x: tensor to compose + train: set to True during training and False otherwise + """ + return tf.contrib.layers.batch_norm(x, + decay=self.momentum, + updates_collections=None, + epsilon=self.epsilon, + scale=True, + is_training=train, + scope=self.name) + + +def conv_cond_concat(x, y): + """ + Concatenate conditioning matrix across channel axis. + + The specified input tensor is concatenated with K feature maps (K = number of classes) + across the channel dimension. Each of the K feature maps is set to all-zeros except for + the one whose index matches the target class (which is set to all-ones). + + Args: + x: non-conditioned tensor. Shape: [N, H, W, C] + y: one-hot encoded conditioning matrix. Shape: [N, K] + + Returns: + conditioned feature map. Shape: [N, H, W, C + K] + """ + x_shapes = x.get_shape() + y_shapes = y.get_shape() + batch_size = tf.shape(x)[0] + return tf.concat([x, y * tf.ones([batch_size, int(x_shapes[1]), int(x_shapes[2]), int(y_shapes[3])])], 3) + + +def conv2d(input_, output_dim, + k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, + name="conv2d"): + """ + Compose specified symbol with 2D convolution layer + + Args: + input_: tensor to compose. Shape: [N, H, W, C] + output_dim: number of output features maps + k_h: kernel height + k_w: kernel width + d_h: horizontal stride + d_w: vertical stride + stddev: standard deviation of gaussian distribution to use for random weight initialization + name: name scope + + Returns: + Composed tensor. + """ + with tf.variable_scope(name): + w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim], + initializer=tf.truncated_normal_initializer(stddev=stddev)) + conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME') + + biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0)) + conv = tf.nn.bias_add(conv, biases) + + return conv + + +def deconv2d(input_, output_shape, + k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, + name="deconv2d", with_w=False): + """ + Compose specified symbol with 2D *transpose* convolution layer + + Args: + input_: tensor to compose. Shape: [N, H, W, C] + output_shape: output shape + k_h: kernel height + k_w: kernel width + d_h: horizontal stride + d_w: vertical stride + stddev: standard deviation of gaussian distribution to use for random weight initialization + name: name scope + + Returns: + Composed tensor. 
+ """ + with tf.variable_scope(name): + # filter : [height, width, output_channels, in_channels] + w = tf.get_variable('w', + [k_h, k_w, output_shape[-1], + input_.get_shape()[-1]], + initializer=tf.random_normal_initializer(stddev=stddev)) + deconv = tf.nn.conv2d_transpose(input_, w, + output_shape=output_shape, + strides=[1, d_h, d_w, 1]) + + biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0)) + deconv = tf.reshape(tf.nn.bias_add(deconv, biases), output_shape) + + if with_w: + return deconv, w, biases + else: + return deconv + + +def lrelu(x, leak=0.2, name="lrelu"): + """Compose specified tensor with leaky Rectifier Linear Unit""" + return tf.maximum(x, leak*x) + + +def linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False): + """ + Compose specified tensor with linear (fully-connected) layer + + Args: + input_: tensor to compose. Shape: [N, M] + output_size: number of output neurons + scope: name scope + stddev: standard deviation of gaussian distribution to use for random weight initialization + name: name scope + with_w: whether to also return parameter variables + + Returns: + Composed tensor. Shape: [N, output_size] + """ + shape = input_.get_shape().as_list() + + with tf.variable_scope(scope or "Linear"): + matrix = tf.get_variable("Matrix", [shape[1], output_size], tf.float32, + tf.random_normal_initializer(stddev=stddev)) + bias = tf.get_variable("bias", [output_size], + initializer=tf.constant_initializer(bias_start)) + if with_w: + return tf.matmul(input_, matrix) + bias, matrix, bias + else: + return tf.matmul(input_, matrix) + bias + + +class UserModel(Tower): + """ + User Model definition + + DIGITS creates an instance of this class for every tower it needs + to create. This includes: + - one for training, + - one for validation, + - one for testing. + + In the case of multi-GPU training, one training instance is created + for every GPU. DIGITS takes care of doing the gradient averaging + across GPUs so this class only needs to define the inference op + and desired loss/cost function. + """ + + def __init__(self, *args, **kwargs): + """ + Identify the correct input nodes. + + In the parent class, DIGITS conveniently sets the following fields: + - self.is_training: whether this is a training graph + - self.is_inference: whether this graph is created for inference/testing + - self.x: input node. Shape: [N, H, W, C] + - self.y: label. Shape: [N] for scalar labels, [N, H, W, C] otherwise. + Only defined if self._is_training is True + """ + super(UserModel, self).__init__(*args, **kwargs) + + # initialize graph with parameters for MNIST + self.dcgan_init(image_size=28, + y_dim=10, + output_size=28, + c_dim=1) + + @model_property + def inference(self): + """op to use for inference""" + + # inference op is the output of the generator after rescaling + # to the 8-bit range + return tf.to_int32(self.G * 255) + + @model_property + def loss(self): + """ + Loss function + + Returns either an op or a list of dicts. + If the returned value is an op then DIGITS will optimize against this op + with respect to all trainable variables. + If the returned value is a list then DIGITS will optimize against each + loss in the list with respect to the specified variables. + """ + + # here we are returning a list because we want to alternately optimize the + # discriminator on real samples, the discriminator on fake samples and the + # generator. 
+ losses = [ + {'loss': self.d_loss_real, 'vars': self.d_vars}, + {'loss': self.d_loss_fake, 'vars': self.d_vars}, + {'loss': self.g_loss, 'vars': self.g_vars} + ] + return losses + + def dcgan_init(self, image_size=108, + output_size=64, y_dim=None, z_dim=100, gf_dim=64, df_dim=64, + gfc_dim=1024, dfc_dim=1024, c_dim=3): + """ + Create the model + + Args: + output_size: (optional) The resolution in pixels of the images. [64] + y_dim: (optional) Dimension of dim for y. [None] + z_dim: (optional) Dimension of dim for Z. [100] + gf_dim: (optional) Dimension of gen filters in first conv layer. [64] + df_dim: (optional) Dimension of discrim filters in first conv layer. [64] + gfc_dim: (optional) Dimension of gen units for for fully connected layer. [1024] + dfc_dim: (optional) Dimension of discrim units for fully connected layer. [1024] + c_dim: (optional) Dimension of image color. For grayscale input, set to 1. [3] + """ + self.image_size = image_size + self.output_size = output_size + + self.y_dim = y_dim + self.z_dim = z_dim + + self.gf_dim = gf_dim + self.df_dim = df_dim + + self.gfc_dim = gfc_dim + self.dfc_dim = dfc_dim + + self.c_dim = c_dim + + self.batch_size = tf.shape(self.x)[0] + + # batch normalization : deals with poor initialization helps gradient flow + self.d_bn1 = batch_norm(name='d_bn1') + self.d_bn2 = batch_norm(name='d_bn2') + + self.g_bn0 = batch_norm(name='g_bn0') + self.g_bn1 = batch_norm(name='g_bn1') + self.g_bn2 = batch_norm(name='g_bn2') + + self.build_model() + + def build_model(self): + """Create the main ops""" + + if not self.is_inference: + # create both the generator and the discriminator + # self.x is a batch of images - shape: [N, H, W, C] + # self.y is a vector of labels - shape: [N] + + # sample z from a normal distribution + self.z = tf.random_normal(shape=[self.batch_size, self.z_dim], dtype=tf.float32, seed=None, name='z') + + # rescale x to [0, 1] + x_reshaped = tf.reshape(self.x, shape=[self.batch_size, self.image_size, self.image_size, self.c_dim], + name='x_reshaped') + self.images = x_reshaped / 255. + + # one hot encode the label - shape: [N] -> [N, self.y_dim] + self.y = tf.one_hot(self.y, self.y_dim, name='y_onehot') + + # create the generator + self.G = self.generator(self.z, self.y) + + # create one instance of the discriminator for real images (the input is + # images from the dataset) + self.D, self.D_logits = self.discriminator(self.images, self.y, reuse=False) + + # create another instance of the discriminator for fake images (the input is + # the discriminator). Note how we are reusing variables to share weights between + # both instances of the discriminator + self.D_, self.D_logits_ = self.discriminator(self.G, self.y, reuse=True) + + # aggregate losses across batch + + # we are using the cross entropy loss for all these losses + d_real = tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits, + labels=tf.ones_like(self.D), + name="loss_D_real") + self.d_loss_real = tf.reduce_mean(d_real) + d_fake = tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_, + labels=tf.zeros_like(self.D_), + name="loss_D_fake") + self.d_loss_fake = tf.reduce_mean(d_fake) + self.d_loss = (self.d_loss_real + self.d_loss_fake) / 2. 
+            # the typical GAN set-up is a minimax game in which D tries to minimize
+            # its own error and G tries to maximize D's error. Note, however, that we flip G's labels here:
+            # instead of maximizing D's error, we minimize D's error on the 'wrong' label.
+            # This trick helps produce a stronger gradient.
+            g_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_,
+                                                              labels=tf.ones_like(self.D_),
+                                                              name="loss_G")
+            self.g_loss = tf.reduce_mean(g_loss)
+
+            # create some summaries for debug and monitoring
+            self.summaries.append(histogram_summary("z", self.z))
+            self.summaries.append(histogram_summary("d", self.D))
+            self.summaries.append(histogram_summary("d_", self.D_))
+            self.summaries.append(image_summary("G", self.G, max_outputs=5))
+            self.summaries.append(image_summary("X", self.images, max_outputs=5))
+            self.summaries.append(histogram_summary("G_hist", self.G))
+            self.summaries.append(histogram_summary("X_hist", self.images))
+            self.summaries.append(scalar_summary("d_loss_real", self.d_loss_real))
+            self.summaries.append(scalar_summary("d_loss_fake", self.d_loss_fake))
+            self.summaries.append(scalar_summary("g_loss", self.g_loss))
+            self.summaries.append(scalar_summary("d_loss", self.d_loss))
+
+            # all trainable variables
+            t_vars = tf.trainable_variables()
+            # G's variables
+            self.g_vars = [var for var in t_vars if 'g_' in var.name]
+            # D's variables
+            self.d_vars = [var for var in t_vars if 'd_' in var.name]
+
+            # Extra hook for debug: log chi-square distance between G's output histogram and the dataset's histogram
+            value_range = [0.0, 1.0]
+            nbins = 100
+            hist_g = tf.histogram_fixed_width(self.G, value_range, nbins=nbins, dtype=tf.float32) / nbins
+            hist_images = tf.histogram_fixed_width(self.images, value_range, nbins=nbins, dtype=tf.float32) / nbins
+            chi_square = tf.reduce_mean(tf.div(tf.square(hist_g - hist_images), hist_g + hist_images))
+            self.summaries.append(scalar_summary("chi_square", chi_square))
+        else:
+            # Create only the generator
+
+            # self.x is the conditioned latent representation - shape: [self.batch_size, 1, self.z_dim + self.y_dim]
+            self.x = tf.reshape(self.x, shape=[self.batch_size, self.z_dim + self.y_dim])
+            # extract z and y
+            self.y = self.x[:, self.z_dim:self.z_dim + self.y_dim]
+            self.z = self.x[:, :self.z_dim]
+            # create an instance of the generator
+            self.G = self.generator(self.z, self.y)
+
+    def discriminator(self, image, y=None, reuse=False):
+        """
+        Create the discriminator
+
+        This creates a string of layers:
+        - input - [N, 28, 28, 1]
+        - concat conditioning - [N, 28, 28, 11]
+        - conv layer with 11 5x5 kernels and 2x2 stride - [N, 14, 14, 11]
+        - leaky relu - [N, 14, 14, 11]
+        - concat conditioning - [N, 14, 14, 21]
+        - conv layer with 74 5x5 kernels and 2x2 stride - [N, 7, 7, 74]
+        - batch norm - [N, 7, 7, 74]
+        - leaky relu - [N, 7, 7, 74]
+        - flatten - [N, 3626]
+        - concat conditioning - [N, 3636]
+        - linear layer with 1024 output neurons - [N, 1024]
+        - batch norm - [N, 1024]
+        - leaky relu - [N, 1024]
+        - concat conditioning - [N, 1034]
+        - linear layer with 1 output neuron - [N, 1]
+
+        Args:
+            image: batch of input images - shape: [N, H, W, C]
+            y: batch of one-hot encoded labels - shape: [N, K]
+            reuse: whether to re-use previously created variables
+        """
+        with tf.variable_scope("discriminator") as scope:
+            if reuse:
+                # re-use (share) variables
+                scope.reuse_variables()
+
+            yb = tf.reshape(y, [self.batch_size, 1, 1, self.y_dim])
+            x = conv_cond_concat(image, yb)
+
+            h0 = lrelu(conv2d(x, self.c_dim +
self.y_dim, name='d_h0_conv'))
+            h0 = conv_cond_concat(h0, yb)
+
+            h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim + self.y_dim, name='d_h1_conv'), train=self.is_training))
+            sz = h1.get_shape()
+            h1 = tf.reshape(h1, [self.batch_size, int(sz[1] * sz[2] * sz[3])])
+            h1 = tf.concat([h1, y], 1)
+
+            h2 = lrelu(self.d_bn2(linear(h1, self.dfc_dim, 'd_h2_lin'), train=self.is_training))
+            h2 = tf.concat([h2, y], 1)
+
+            h3 = linear(h2, 1, 'd_h3_lin')
+
+            return tf.nn.sigmoid(h3), h3
+
+    def generator(self, z, y=None):
+        """
+        Create the generator
+
+        This creates a string of layers:
+        - input - [N, 100]
+        - concatenate conditioning - [N, 110]
+        - linear layer with 1024 output neurons - [N, 1024]
+        - batch norm - [N, 1024]
+        - relu - [N, 1024]
+        - concatenate conditioning - [N, 1034]
+        - linear layer with 7*7*128=6272 output neurons - [N, 6272]
+        - reshape 7x7 feature maps - [N, 7, 7, 128]
+        - concatenate conditioning - [N, 7, 7, 138]
+        - transpose convolution with 128 filters and stride 2 - [N, 14, 14, 128]
+        - batch norm - [N, 14, 14, 128]
+        - relu - [N, 14, 14, 128]
+        - concatenate conditioning - [N, 14, 14, 138]
+        - transpose convolution with 1 filter and stride 2 - [N, 28, 28, 1]
+        """
+        with tf.variable_scope("generator"):
+
+            s = self.output_size
+            s2, s4 = int(s/2), int(s/4)
+
+            yb = tf.reshape(y, [self.batch_size, 1, 1, self.y_dim])
+            z = tf.concat([z, y], 1)
+
+            h0 = tf.nn.relu(self.g_bn0(linear(z, self.gfc_dim, 'g_h0_lin'), train=self.is_training))
+            h0 = tf.concat([h0, y], 1)
+
+            h1 = tf.nn.relu(self.g_bn1(linear(h0, self.gf_dim*2*s4*s4, 'g_h1_lin'), train=self.is_training))
+            h1 = tf.reshape(h1, [self.batch_size, s4, s4, self.gf_dim * 2])
+
+            h1 = conv_cond_concat(h1, yb)
+            h2 = tf.nn.relu(self.g_bn2(deconv2d(h1, [self.batch_size, s2, s2, self.gf_dim * 2], name='g_h2'),
+                                       train=self.is_training))
+            h2 = conv_cond_concat(h2, yb)
+
+            return tf.nn.sigmoid(deconv2d(h2, [self.batch_size, s, s, self.c_dim], name='g_h3'))
diff --git a/examples/regression/README.md b/examples/regression/README.md
index b987bdde8..1a2c31235 100644
--- a/examples/regression/README.md
+++ b/examples/regression/README.md
@@ -154,11 +154,50 @@ return function(p)
 end
 ```
+### Using Tensorflow
+Under the `Custom Network` tab, select `Tensorflow`. There you can paste the following network definition:
+```python
+class UserModel(Tower):
+
+    @model_property
+    def inference(self):
+        n_hidden = 32
+
+        const = tf.constant(0.004)
+        normed = tf.multiply(self.x, const)
+
+        # The reshaping has to be done for TensorFlow to get the shape right
+        right_shape = tf.reshape(normed, shape=[-1, 32, 32])
+        transposed = tf.transpose(right_shape, [0, 2, 1])
+        squeezed = tf.reshape(transposed, shape=[-1, 1024])
+
+        # Define weights
+        weights = {
+            'w1': tf.get_variable('w1', [1024, 2])
+        }
+        biases = {
+            'b1': tf.get_variable('b1', [2])
+        }
+
+        # Linear activation
+        model = tf.matmul(squeezed, weights['w1']) + biases['b1']
+        tf.summary.image(model.op.name, model, max_outputs=1, collections=["Training Summary"])
+        return model
+
+    @model_property
+    def loss(self):
+        label = tf.reshape(self.y, shape=[-1, 2])
+        model = self.inference
+        loss = digits.mse_loss(model, label)
+        return loss
+```
+Set the learning rate to `0.01` to ensure a smooth training curve.
+
 ## Verification
 After training for 15 epochs the loss function should look similar to this:
-![Training loss](regression-loss.png))
+![Training loss](regression-loss.png)
 Now we can assess the quality of the model.
 To this end, we can use the test image that was generated by `test_lmdb_creator.py`:
diff --git a/examples/regression/regression_mnist-TF.py b/examples/regression/regression_mnist-TF.py
new file mode 100644
index 000000000..72a1079d7
--- /dev/null
+++ b/examples/regression/regression_mnist-TF.py
@@ -0,0 +1,37 @@
+from model import Tower
+from utils import model_property
+import tensorflow as tf
+import utils as digits
+
+
+class UserModel(Tower):
+
+    @model_property
+    def inference(self):
+        const = tf.constant(0.004)
+        normed = tf.multiply(self.x, const)
+
+        # The reshaping has to be done for TensorFlow to get the shape right
+        right_shape = tf.reshape(normed, shape=[-1, 32, 32])
+        transposed = tf.transpose(right_shape, [0, 2, 1])
+        squeezed = tf.reshape(transposed, shape=[-1, 1024])
+
+        # Define weights
+        weights = {
+            'w1': tf.get_variable('w1', [1024, 2])
+        }
+        biases = {
+            'b1': tf.get_variable('b1', [2])
+        }
+
+        # Linear activation
+        model = tf.matmul(squeezed, weights['w1']) + biases['b1']
+        tf.summary.image(model.op.name, model, max_outputs=1, collections=["Training Summary"])
+        return model
+
+    @model_property
+    def loss(self):
+        label = tf.reshape(self.y, shape=[-1, 2])
+        model = self.inference
+        loss = digits.mse_loss(model, label)
+        return loss
diff --git a/examples/siamese/README.md b/examples/siamese/README.md
index 2237b4f55..d7ee84f54 100644
--- a/examples/siamese/README.md
+++ b/examples/siamese/README.md
@@ -92,6 +92,10 @@ Finally, the `nn.CosineEmbeddingCriterion` criterion is used. Similar to Caffe's
 pull apart images from different classes. However, since the `Cosine` distance is used, the model will learn to minimize the *angle* between features that are extracted from images of the same class and conversely will maximize the angle between features extracted from images from different classes. See below for a visual illustration of the impact this difference has on extracted features.
+## Using Tensorflow
+
+Under the `Custom Network` tab, select `Tensorflow`. There you can paste this [network definition](siamese-TF.py), then give your model a name and click `Create`.
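+
+The key pattern in that file, shown in condensed form below, is that both branches are built by the same `make_tower` helper inside a single variable scope, and `scope.reuse_variables()` makes the second branch share the first branch's weights:
+
+```python
+with tf.variable_scope("siamese") as scope:
+    model_g = make_tower(model_g)   # first branch creates the variables
+    scope.reuse_variables()         # share weights with the second branch
+    model_b = make_tower(model_b)   # second branch reuses the same variables
+```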
+ ## Verification After training the Caffe model for 30 epochs the loss function should look similar to this: diff --git a/examples/siamese/feature_clusters_contrastive_tf.png b/examples/siamese/feature_clusters_contrastive_tf.png new file mode 100644 index 000000000..d5e493fe5 Binary files /dev/null and b/examples/siamese/feature_clusters_contrastive_tf.png differ diff --git a/examples/siamese/siamese-TF.py b/examples/siamese/siamese-TF.py new file mode 100644 index 000000000..fe98d7249 --- /dev/null +++ b/examples/siamese/siamese-TF.py @@ -0,0 +1,47 @@ +from model import Tower +from utils import model_property +import tensorflow as tf +import tensorflow.contrib.slim as slim +import utils as digits + + +class UserModel(Tower): + + @model_property + def inference(self): + _x = tf.reshape(self.x, shape=[-1, self.input_shape[0], self.input_shape[1], self.input_shape[2]]) + # tf.image_summary(_x.op.name, _x, max_images=10, collections=[digits.GraphKeys.SUMMARIES_TRAIN]) + + # Split out the color channels + _, model_g, model_b = tf.split(_x, 3, 3, name='split_channels') + # tf.image_summary(model_g.op.name, model_g, max_images=10, collections=[digits.GraphKeys.SUMMARIES_TRAIN]) + # tf.image_summary(model_b.op.name, model_b, max_images=10, collections=[digits.GraphKeys.SUMMARIES_TRAIN]) + + with slim.arg_scope([slim.conv2d, slim.fully_connected], + weights_initializer=tf.contrib.layers.xavier_initializer(), + weights_regularizer=slim.l2_regularizer(0.0005)): + with tf.variable_scope("siamese") as scope: + def make_tower(net): + net = slim.conv2d(net, 20, [5, 5], padding='VALID', scope='conv1') + net = slim.max_pool2d(net, [2, 2], padding='VALID', scope='pool1') + net = slim.conv2d(net, 50, [5, 5], padding='VALID', scope='conv2') + net = slim.max_pool2d(net, [2, 2], padding='VALID', scope='pool2') + net = slim.flatten(net) + net = slim.fully_connected(net, 500, scope='fc1') + net = slim.fully_connected(net, 2, activation_fn=None, scope='fc2') + return net + + model_g = make_tower(model_g) + model_g = tf.reshape(model_g, shape=[-1, 2]) + scope.reuse_variables() + model_b = make_tower(model_b) + model_b = tf.reshape(model_b, shape=[-1, 2]) + + return [model_g, model_b] + + @model_property + def loss(self): + _y = tf.reshape(self.y, shape=[-1]) + _y = tf.to_float(_y) + model = self.inference + return digits.constrastive_loss(model[0], model[1], _y) diff --git a/packaging/deb/templates/control b/packaging/deb/templates/control index dc9897017..28ed6f308 100644 --- a/packaging/deb/templates/control +++ b/packaging/deb/templates/control @@ -27,4 +27,3 @@ Description: NVIDIA DIGITS webserver DIGITS is the Deep Learning GPU Training System from NVIDIA. It provides an interactive environment for training, evaluating and experimenting with neural networks. - diff --git a/plugins/data/bAbI/MANIFEST.in b/plugins/data/bAbI/MANIFEST.in new file mode 100644 index 000000000..28ff74ce9 --- /dev/null +++ b/plugins/data/bAbI/MANIFEST.in @@ -0,0 +1 @@ +recursive-include digitsDataPluginBAbI *.html diff --git a/plugins/data/bAbI/README b/plugins/data/bAbI/README new file mode 100644 index 000000000..bb61cde0d --- /dev/null +++ b/plugins/data/bAbI/README @@ -0,0 +1,2 @@ +This DIGITS plug-in demonstrates how to load data from the bAbI dataset. 
+The dataset may be found on https://research.facebook.com/research/babi/ diff --git a/plugins/data/bAbI/digitsDataPluginBAbI/__init__.py b/plugins/data/bAbI/digitsDataPluginBAbI/__init__.py new file mode 100644 index 000000000..79071170e --- /dev/null +++ b/plugins/data/bAbI/digitsDataPluginBAbI/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. +from __future__ import absolute_import + +from .data import DataIngestion + +__all__ = ['DataIngestion'] diff --git a/plugins/data/bAbI/digitsDataPluginBAbI/data.py b/plugins/data/bAbI/digitsDataPluginBAbI/data.py new file mode 100644 index 000000000..0ba2a2813 --- /dev/null +++ b/plugins/data/bAbI/digitsDataPluginBAbI/data.py @@ -0,0 +1,113 @@ +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. +from __future__ import absolute_import + +import os + +from digits.utils import subclass, override, constants +from digits.extensions.data.interface import DataIngestionInterface +from .forms import DatasetForm, InferenceForm +from . import utils + + +DATASET_TEMPLATE = "templates/dataset_template.html" +INFERENCE_TEMPLATE = "templates/inference_template.html" + + +@subclass +class DataIngestion(DataIngestionInterface): + """ + A data ingestion extension for the bAbI dataset + """ + + def __init__(self, is_inference_db=False, **kwargs): + super(DataIngestion, self).__init__(**kwargs) + + self.userdata['is_inference_db'] = is_inference_db + + if 'train_text_data' not in self.userdata: + # get task ID + try: + task_id = int(self.task_id) + except: + task_id = None + self.userdata['task_id'] = task_id + + # get data - this doesn't scale well to huge datasets but this makes it + # straightforard to create a mapping of words to indices and figure out max + # dimensions of stories and sentences + self.userdata['train_text_data'] = utils.parse_folder_phase( + self.story_folder, task_id, train=True) + self.userdata['stats'] = utils.get_stats(self.userdata['train_text_data']) + + @override + def encode_entry(self, entry): + stats = self.userdata['stats'] + return utils.encode_sample(entry, stats['word_map'], stats['sentence_size'], stats['story_size']) + + @staticmethod + @override + def get_category(): + return "Text" + + @staticmethod + @override + def get_id(): + return "text-babi" + + @staticmethod + @override + def get_dataset_form(): + return DatasetForm() + + @staticmethod + @override + def get_dataset_template(form): + """ + parameters: + - form: form returned by get_dataset_form(). 
This may be populated + with values if the job was cloned + return: + - (template, context) tuple + - template is a Jinja template to use for rendering dataset creation + options + - context is a dictionary of context variables to use for rendering + the form + """ + extension_dir = os.path.dirname(os.path.abspath(__file__)) + template = open(os.path.join(extension_dir, DATASET_TEMPLATE), "r").read() + context = {'form': form} + return (template, context) + + @override + def get_inference_form(self): + return InferenceForm() + + @staticmethod + @override + def get_inference_template(form): + extension_dir = os.path.dirname(os.path.abspath(__file__)) + template = open(os.path.join(extension_dir, INFERENCE_TEMPLATE), "r").read() + context = {'form': form} + return (template, context) + + @staticmethod + @override + def get_title(): + return "bAbI" + + @override + def itemize_entries(self, stage): + entries = [] + if not self.userdata['is_inference_db']: + data = self.userdata['train_text_data'] + n_val_entries = int(len(data)*self.pct_val/100) + if stage == constants.TRAIN_DB: + entries = data[n_val_entries:] + elif stage == constants.VAL_DB: + entries = data[:n_val_entries] + elif stage == constants.TEST_DB: + if not bool(self.snippet): + raise ValueError("You must write a story and a question") + entries = utils.parse_lines(str(self.snippet).splitlines()) + + return entries diff --git a/plugins/data/bAbI/digitsDataPluginBAbI/forms.py b/plugins/data/bAbI/digitsDataPluginBAbI/forms.py new file mode 100644 index 000000000..90f66b745 --- /dev/null +++ b/plugins/data/bAbI/digitsDataPluginBAbI/forms.py @@ -0,0 +1,99 @@ +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. +from __future__ import absolute_import + +import os + +from digits import utils +from digits.utils import subclass +from flask.ext.wtf import Form +from wtforms import validators + + +@subclass +class DatasetForm(Form): + """ + A form used to create a Sunnybrook dataset + """ + + def validate_folder_path(form, field): + if not field.data: + pass + else: + # make sure the filesystem path exists + if not os.path.exists(field.data) or not os.path.isdir(field.data): + raise validators.ValidationError( + 'Folder does not exist or is not reachable') + else: + return True + + story_folder = utils.forms.StringField( + u'Story folder', + validators=[ + validators.DataRequired(), + validate_folder_path, + ], + tooltip="Specify the path to a folder of stories - filenames are " + "expected to have this format: qa[1-N]*[train|test].txt" + ) + + task_id = utils.forms.SelectField( + 'Task ID', + choices=[ + ('all', 'All'), + ('1', '1'), + ('2', '2'), + ('3', '3'), + ('4', '4'), + ('5', '5'), + ('6', '6'), + ('7', '7'), + ('8', '8'), + ('9', '9'), + ('10', '10'), + ('11', '11'), + ('12', '12'), + ('13', '13'), + ('14', '14'), + ('15', '15'), + ('16', '16'), + ('17', '17'), + ('18', '18'), + ('19', '19'), + ('20', '20'), + ], + default='1', + tooltip="Select a task to train on or 'all' to train a joint model " + "on all tasks." + ) + + pct_val = utils.forms.IntegerField( + u'% for validation', + default=10, + validators=[ + validators.NumberRange(min=0, max=100) + ], + tooltip="You can choose to set apart a certain percentage of images " + "from the training images for the validation set." 
+    )
+
+
+@subclass
+class InferenceForm(Form):
+    """
+    A form used to perform inference on a bAbI dataset
+    """
+
+    def validate_file_path(form, field):
+        if not field.data:
+            pass
+        else:
+            # make sure the filesystem path exists
+            if not os.path.exists(field.data) and not os.path.isdir(field.data):
+                raise validators.ValidationError(
+                    'File does not exist or is not reachable')
+            else:
+                return True
+
+    snippet = utils.forms.TextAreaField(
+        u'Story/Question',
+        tooltip="Write all the story sentences here and end with a question"
+    )
diff --git a/plugins/data/bAbI/digitsDataPluginBAbI/templates/dataset_template.html b/plugins/data/bAbI/digitsDataPluginBAbI/templates/dataset_template.html
new file mode 100644
index 000000000..529dadbde
--- /dev/null
+++ b/plugins/data/bAbI/digitsDataPluginBAbI/templates/dataset_template.html
@@ -0,0 +1,23 @@
+{# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. #}
+
+{% from "helper.html" import print_flashes %}
+{% from "helper.html" import print_errors %}
+{% from "helper.html" import mark_errors %}
+
+
    + {{ form.story_folder.label }} + {{ form.story_folder.tooltip }} + {{ form.story_folder(class='form-control autocomplete_path', placeholder='folder') }} +
    + +
    + {{ form.task_id.label }} + {{ form.task_id.tooltip }} + {{ form.task_id(class='form-control') }} +
    + +
    + {{ form.pct_val.label }} + {{ form.pct_val.tooltip }} + {{ form.pct_val(class='form-control') }} +
    diff --git a/plugins/data/bAbI/digitsDataPluginBAbI/templates/inference_template.html b/plugins/data/bAbI/digitsDataPluginBAbI/templates/inference_template.html new file mode 100644 index 000000000..37761c76a --- /dev/null +++ b/plugins/data/bAbI/digitsDataPluginBAbI/templates/inference_template.html @@ -0,0 +1,18 @@ +{# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. #} + +{% from "helper.html" import print_flashes %} +{% from "helper.html" import print_errors %} +{% from "helper.html" import mark_errors %} + +
    +
    +

    Test a story/question

    +
    +
    + {{ form.snippet.label }} + {{ form.snippet.tooltip }} + {{ form.snippet(class='form-control', placeholder='story and question') }} +
    +
    +
    +
    diff --git a/plugins/data/bAbI/digitsDataPluginBAbI/templates/template.html b/plugins/data/bAbI/digitsDataPluginBAbI/templates/template.html new file mode 100644 index 000000000..2b8cf6067 --- /dev/null +++ b/plugins/data/bAbI/digitsDataPluginBAbI/templates/template.html @@ -0,0 +1,37 @@ +{# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. #} + +{% from "helper.html" import print_flashes %} +{% from "helper.html" import print_errors %} +{% from "helper.html" import mark_errors %} + +{{ form.data_stage(class='form-control') }} + +
    + {{ form.train_data_file.label }} + {{ form.train_data_file.tooltip }} + {{ form.train_data_file(class='form-control autocomplete_path', placeholder='.csv file') }} +
    + +
    + {{ form.val_data_file.label }} + {{ form.val_data_file.tooltip }} + {{ form.val_data_file(class='form-control autocomplete_path', placeholder='.csv file') }} +
    + +
    + {{ form.alphabet.label }} + {{ form.alphabet.tooltip }} + {{ form.alphabet(class='form-control') }} +
    + +
    + {{ form.class_labels_file.label }} + {{ form.class_labels_file.tooltip }} + {{ form.class_labels_file(class='form-control autocomplete_path', placeholder='.txt file') }} +
    + +
    + {{ form.max_chars_per_sample.label }} + {{ form.max_chars_per_sample.tooltip }} + {{ form.max_chars_per_sample(class='form-control') }} +
    diff --git a/plugins/data/bAbI/digitsDataPluginBAbI/utils.py b/plugins/data/bAbI/digitsDataPluginBAbI/utils.py new file mode 100644 index 000000000..08783c4a1 --- /dev/null +++ b/plugins/data/bAbI/digitsDataPluginBAbI/utils.py @@ -0,0 +1,143 @@ + +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. +from __future__ import absolute_import + +import copy +import os +import string + +import numpy as np + + +def encode_field(field, word_map, sentence_size, story_size): + """ + return a 2-D array with shape (story_size, sentence_size) + """ + x = np.zeros((story_size, sentence_size)) + for i, sentence in enumerate(field): + if i >= story_size: + raise ValueError("Field '%s' is longer than max (%d)" % + (field, story_size)) + for j, word in enumerate(sentence): + if j >= sentence_size: + raise ValueError("Sentence '%s' is longer than max (%d)" % + (field, sentence_size)) + try: + idx = word_map[word] + except: + # assign to last index + idx = len(word_map) + 1 + x[i, j] = idx + return x + + +def encode_sample(sample, word_map, sentence_size, story_size): + """ + return an encoded (feature, label) tuple + """ + story = encode_field(sample['story'], word_map, sentence_size, story_size) + question = encode_field(sample['question'], word_map, sentence_size, story_size) + answer = encode_field(sample['answer'], word_map, sentence_size, story_size) + + feature = np.zeros((2, story_size, sentence_size)) + feature[0] = story + feature[1] = question + + label = answer[np.newaxis, :] + + return feature, label + + +def find_files(path, task_id, train): + """ + Find files in specified path with filenames that + match {task}*{phase}.txt where: + task="qa{task_id}_" or "" if task_id==None + phase="train" if train==True or "test" otherwise + """ + task = "qa{}_".format(task_id) if task_id else "" + phase = "train" if train else "test" + + files = [] + for dirpath, dirnames, filenames in os.walk(path, followlinks=True): + for filename in filenames: + if task in filename and phase in filename: + files.append(os.path.join(dirpath, filename)) + + return files + + +def get_stats(dataset): + """ + return dataset statistics + """ + fields = [field for sample in dataset for field in sample.values()] + sentences = [sentence for field in fields for sentence in field] + words = sorted(set([word for sentence in sentences for word in sentence])) + + return {'word_map': dict((word, i) for i, word in enumerate(words, start=1)), + 'sentence_size': max([len(sentence) for sentence in sentences]), + 'story_size': max([len(story) for story in fields])} + + +def parse_folder_phase(path, task_id, train): + """ + Returns a list of samples for a phase by aggregating all samples + from matching files + """ + phase_data = [] + files = find_files(path, task_id, train) + for file in files: + phase_data.extend(parse_file(file)) + return phase_data + + +def parse_file(filename): + with open(filename) as f: + return parse_lines(f.readlines()) + + +def parse_lines(lines): + """ + Returns a list of samples from a collection of lines where each sample + is a dictionary with 'story', 'question', 'answer' keys. Every key + value is a list of words without punctuation. 
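+
+    For illustration, bAbI-formatted input looks like the lines below (answers
+    on question lines are tab-separated):
+        1 Mary moved to the bathroom.
+        2 John went to the hallway.
+        3 Where is Mary?\tbathroom\t1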
+ """ + data = [] + print "lines are %s" % lines + story = None + for line in lines: + # convert to lower case + line = line.lower() + # find line ID (new stories start with line ID = 1) + line_id, line = line.split(' ', 1) + try: + if int(line_id) == 1: + # new story + story = [] + except: + if not story: + story = [] + # this isn't a like id, re-integrate into line + line = "%s %s" % (line_id, line) + # is this a question? + if '?' in line: + items = remove_punctuation(line).split('\t') + question = items[0] + if len(items) > 1: + answer = items[1] + else: + answer = '' + # add to data + data.append({ + 'story': copy.copy(story), + 'question': [question.split()], + 'answer': [answer.split()], + }) + else: + story.append(remove_punctuation(line).split()) + return data + + +def remove_punctuation(s): + return s.translate(string.maketrans("", ""), string.punctuation) diff --git a/plugins/data/bAbI/setup.py b/plugins/data/bAbI/setup.py new file mode 100644 index 000000000..40b266e00 --- /dev/null +++ b/plugins/data/bAbI/setup.py @@ -0,0 +1,27 @@ +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + +import os +from setuptools import setup, find_packages + +from digits.extensions.data import GROUP as DIGITS_PLUGIN_GROUP + + +# Utility function to read the README file. +def read(fname): + return open(os.path.join(os.path.dirname(__file__), fname)).read() + + +setup( + name="digits_bAbI_data_plugin", + version="0.0.1", + author="Greg Heinrich", + description=("A data ingestion plugin for the bAbI dataset"), + long_description=read('README'), + license="Apache", + packages=find_packages(), + entry_points={ + DIGITS_PLUGIN_GROUP: [ + 'class=digitsDataPluginBAbI:DataIngestion', + ]}, + include_package_data=True, +) diff --git a/plugins/data/gan/MANIFEST.in b/plugins/data/gan/MANIFEST.in new file mode 100644 index 000000000..7a2811778 --- /dev/null +++ b/plugins/data/gan/MANIFEST.in @@ -0,0 +1 @@ +recursive-include digitsDataPluginGan *.html diff --git a/plugins/data/gan/README b/plugins/data/gan/README new file mode 100644 index 000000000..e654df110 --- /dev/null +++ b/plugins/data/gan/README @@ -0,0 +1 @@ +This DIGITS plug-in demonstrates how to load data for a Generative Adversarial Network. diff --git a/plugins/data/gan/digitsDataPluginGan/__init__.py b/plugins/data/gan/digitsDataPluginGan/__init__.py new file mode 100644 index 000000000..79071170e --- /dev/null +++ b/plugins/data/gan/digitsDataPluginGan/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. +from __future__ import absolute_import + +from .data import DataIngestion + +__all__ = ['DataIngestion'] diff --git a/plugins/data/gan/digitsDataPluginGan/data.py b/plugins/data/gan/digitsDataPluginGan/data.py new file mode 100644 index 000000000..50d12c84d --- /dev/null +++ b/plugins/data/gan/digitsDataPluginGan/data.py @@ -0,0 +1,312 @@ +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 
+from __future__ import absolute_import + +import os +import pickle + +import numpy as np + +from digits.utils import constants, override, image, subclass +from digits.extensions.data.interface import DataIngestionInterface +from .forms import DatasetForm, InferenceForm + + +DATASET_TEMPLATE = "templates/dataset_template.html" +INFERENCE_TEMPLATE = "templates/inference_template.html" + +CELEBA_ALL_ATTRIBUTES = """ + 5_o_Clock_Shadow Arched_Eyebrows Attractive Bags_Under_Eyes Bald Bangs + Big_Lips Big_Nose Black_Hair Blond_Hair Blurry Brown_Hair Bushy_Eyebrows + Chubby Double_Chin Eyeglasses Goatee Gray_Hair Heavy_Makeup High_Cheekbones + Male Mouth_Slightly_Open Mustache Narrow_Eyes No_Beard Oval_Face Pale_Skin + Pointy_Nose Receding_Hairline Rosy_Cheeks Sideburns Smiling Straight_Hair + Wavy_Hair Wearing_Earrings Wearing_Hat Wearing_Lipstick Wearing_Necklace + Wearing_Necktie Young + """.split() + +CELEBA_EDITABLE_ATTRIBUTES = [ + 'Bald', 'Black_Hair', 'Blond_Hair', 'Male', 'Smiling', 'Wearing_Lipstick', 'Young' +] + +CELEBA_EDITABLE_ATTRIBUTES_IDS = [CELEBA_ALL_ATTRIBUTES.index(attr) for attr in CELEBA_EDITABLE_ATTRIBUTES] + + +def one_hot(val, depth): + x = np.zeros(depth) + x[val] = 1 + return x + + +def slerp(val, low, high): + """Spherical interpolation. val has a range of 0 to 1.""" + if val <= 0: + return low + elif val >= 1: + return high + omega = np.arccos(np.dot(low/np.linalg.norm(low), high/np.linalg.norm(high))) + so = np.sin(omega) + return np.sin((1.0-val)*omega) / so * low + np.sin(val*omega)/so * high + + +def parse_lines_of_floats(s): + return [[float(val) for val in line.split()] for line in s.splitlines()] + + +@subclass +class DataIngestion(DataIngestionInterface): + """ + A data ingestion extension for GANs + """ + + # CONFIG = "mnist" + CONFIG = "celeba" + # CONFIG = "celeba_cond" + + def __init__(self, is_inference_db=False, **kwargs): + super(DataIngestion, self).__init__(**kwargs) + + if 'dataset_type' in self.userdata: + self.CONFIG = self.userdata['dataset_type'] + + self.z_dim = 100 + if self.CONFIG == "mnist": + self.y_dim = 10 + elif self.CONFIG == "celeba": + self.y_dim = 0 + elif self.CONFIG == "celeba_cond": + self.y_dim = 40 + + self.userdata['is_inference_db'] = is_inference_db + + self.input_dim = self.z_dim + self.y_dim + + @override + def encode_entry(self, entry): + if not self.userdata['is_inference_db']: + filename = entry[0] + label = entry[1] + feature = self.scale_image(filename) + label = np.array(label).reshape(1, 1, len(label)) + else: + if self.userdata['task_id'] in ['style', + 'class', + 'genimg', + 'attributes', + 'analogy', + 'animation']: + feature = entry + label = np.array([0]) + elif self.userdata['task_id'] == 'enclist': + filename = entry[0] + label = entry[1] + feature = self.scale_image(filename) + label = np.array(label).reshape(1, 1, len(label)) + else: + raise NotImplementedError + return feature, label + + def encode_PIL_Image(self, image): + # convert to numpy array + image = np.array(image) + # add channel axis if input is grayscale image + if image.ndim == 2: + image = image[..., np.newaxis] + elif image.ndim != 3: + raise ValueError("Unhandled number of channels: %d" % image.ndim) + # transpose to CHW + image = image.transpose(2, 0, 1) + return image + + @staticmethod + @override + def get_category(): + return "Images" + + @staticmethod + @override + def get_id(): + return "image-gan" + + @staticmethod + @override + def get_dataset_form(): + return DatasetForm() + + @staticmethod + @override + def 
get_dataset_template(form): + """ + parameters: + - form: form returned by get_dataset_form(). This may be populated + with values if the job was cloned + return: + - (template, context) tuple + - template is a Jinja template to use for rendering dataset creation + options + - context is a dictionary of context variables to use for rendering + the form + """ + extension_dir = os.path.dirname(os.path.abspath(__file__)) + template = open(os.path.join(extension_dir, DATASET_TEMPLATE), "r").read() + context = {'form': form} + return (template, context) + + @override + def get_inference_form(self): + return InferenceForm(CELEBA_ALL_ATTRIBUTES, CELEBA_EDITABLE_ATTRIBUTES_IDS) + + @staticmethod + @override + def get_inference_template(form): + extension_dir = os.path.dirname(os.path.abspath(__file__)) + template = open(os.path.join(extension_dir, INFERENCE_TEMPLATE), "r").read() + context = {'form': form} + return (template, context) + + @staticmethod + @override + def get_title(): + return "GAN" + + @override + def itemize_entries(self, stage): + entries = [] + if not self.userdata['is_inference_db']: + if stage == constants.TRAIN_DB: + # read file list + with open(self.userdata['file_list']) as f: + lines = f.read().splitlines() + # skip first 2 lines (header) + for line in lines[2:]: + fields = line.split() + filename = fields[0] + # add full path + filename = os.path.join(self.userdata['image_folder'], filename) + label = [int(field) for field in fields[1:]] + entries.append((filename, label)) + elif stage == constants.TEST_DB: + if self.userdata['task_id'] == 'style': + if self.userdata['style_z1_vector']: + z1 = np.array([float(v) for v in self.userdata['style_z1_vector'].split()]) + else: + z1 = np.random.normal(size=(100,)) + if self.userdata['style_z2_vector']: + z2 = np.array([float(v) for v in self.userdata['style_z2_vector'].split()]) + else: + z2 = np.random.normal(size=(100,)) + for val in np.linspace(0, 1, self.userdata['row_count']): + for c in range(10): + z_ = slerp(val, z1, z2) + feature = np.append(z_, one_hot(c, self.y_dim)).reshape((1, 1, self.input_dim)) + entries.append(feature) + elif self.userdata['task_id'] == 'class': + if self.userdata['class_z_vector']: + z = np.array([float(v) for v in self.userdata['class_z_vector'].split()]) + else: + z = np.random.normal(size=(100,)) + for val in np.linspace(0, 1, self.userdata['row_count']): + for i in range(10): + c_0 = i + c_1 = (i + 1) % 10 + feature_0 = np.append(z, one_hot(c_0, self.y_dim)) + feature_1 = np.append(z, one_hot(c_1, self.y_dim)) + feature = slerp(val, feature_0, feature_1).reshape((1, 1, self.input_dim)) + entries.append(feature) + elif self.userdata['task_id'] == 'genimg': + c = int(self.userdata['genimg_class_id']) + if self.userdata['genimg_z_vector']: + z = np.array([float(v) for v in self.userdata['genimg_z_vector'].split()]) + else: + z = np.random.normal(size=(100,)) + if self.y_dim > 0: + z = np.append(z, one_hot(c, self.y_dim)) + feature = z.reshape((1, 1, self.input_dim)) + entries.append(feature) + elif self.userdata['task_id'] == 'attributes': + if self.userdata['attributes_z_vector']: + z = np.array([float(v) for v in self.userdata['attributes_z_vector'].split()]) + else: + z = np.random.normal(size=(100,)) + with open(self.userdata['attributes_file'], 'rb') as f: + attributes_z = pickle.load(f) + params = parse_lines_of_floats(self.userdata['attributes_params']) + for img_params in params: + z_img = np.copy(z) + for i, coeff in enumerate(img_params): + z_img += coeff * 
attributes_z[CELEBA_EDITABLE_ATTRIBUTES_IDS[i]] + entries.append(z_img.reshape((1, 1, self.input_dim))) + elif self.userdata['task_id'] == 'enclist': + with open(self.userdata['enc_file_list']) as f: + lines = f.read().splitlines() + # skip first 2 lines (header) + max_images = self.userdata['enc_num_images'] + for line in lines[2:max_images + 2]: + fields = line.split() + filename = fields[0] + # add full path + filename = os.path.join(self.userdata['enc_image_folder'], filename) + label = [int(field) for field in fields[1:]] + entries.append((filename, label)) + elif self.userdata['task_id'] == 'analogy': + if self.userdata['attributes_z1_vector']: + z1 = np.array([float(v) for v in self.userdata['attributes_z1_vector'].split()]) + else: + z1 = np.random.normal(size=(100,)) + if self.userdata['attributes_z2_vector']: + z2 = np.array([float(v) for v in self.userdata['attributes_z2_vector'].split()]) + else: + z2 = np.random.normal(size=(100,)) + if self.userdata['attributes_z3_vector']: + z3 = np.array([float(v) for v in self.userdata['attributes_z3_vector'].split()]) + else: + z3 = np.random.normal(size=(100,)) + + # create analogy vector + z4 = z2 + z3 - z1 + + grid_size = self.userdata['row_count'] + + # now interpolate across columns + for row in xrange(grid_size): + row_k = row / float(grid_size - 1) + z_left = slerp(row_k, z1, z3) + z_right = slerp(row_k, z2, z4) + entries.append(z_left.reshape((1, 1, self.input_dim))) + for col in xrange(1, grid_size - 1): + col_k = col / float(grid_size - 1) + z = slerp(col_k, z_left, z_right) + entries.append(z.reshape((1, 1, self.input_dim))) + entries.append(z_right.reshape((1, 1, self.input_dim))) + elif self.userdata['task_id'] == 'animation': + zs = parse_lines_of_floats(self.userdata['animation_z_vectors']) + zs = [np.array(z) for z in zs] + num_transitions = self.userdata['animation_num_transitions'] + for i, z in enumerate(zs): + z_next = zs[(i + 1) % len(zs)] + for k in xrange(num_transitions): + z_ = slerp(float(k) / num_transitions, z, z_next) + entries.append(z_.reshape((1, 1, self.input_dim))) + else: + raise ValueError("Unknown task: %s" % self.userdata['task_id']) + return entries + + def scale_image(self, filename): + im = np.array(image.load_image(filename)) + + # center crop + if self.userdata['center_crop_size']: + crop_size = int(self.userdata['center_crop_size']) + width, height = im.shape[0:2] + i = (width // 2) - crop_size // 2 + j = (height // 2) - crop_size // 2 + im = im[i:i + crop_size, j:j + crop_size, :] + + # resize + if self.userdata['resize']: + resize = int(self.userdata['resize']) + im = image.resize_image(im, resize, resize, resize_mode='squash') + + # transpose to CHW + feature = im.transpose(2, 0, 1) + + return feature diff --git a/plugins/data/gan/digitsDataPluginGan/forms.py b/plugins/data/gan/digitsDataPluginGan/forms.py new file mode 100644 index 000000000..97f38c0b3 --- /dev/null +++ b/plugins/data/gan/digitsDataPluginGan/forms.py @@ -0,0 +1,228 @@ +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 
+from __future__ import absolute_import + +import os + +from flask.ext.wtf import Form +from wtforms import HiddenField, TextAreaField, validators + +from digits import utils +from digits.utils import subclass + + +@subclass +class DatasetForm(Form): + """ + A form used to create a Sunnybrook dataset + """ + + def validate_file_path(form, field): + if not field.data: + pass + else: + # make sure the filesystem path exists + if not os.path.exists(field.data) and not os.path.isdir(field.data): + raise validators.ValidationError( + 'File does not exist or is not reachable') + else: + return True + + def validate_folder_path(form, field): + if not field.data: + pass + else: + # make sure the filesystem path exists + if not os.path.exists(field.data) or not os.path.isdir(field.data): + raise validators.ValidationError( + 'Folder does not exist or is not reachable') + else: + return True + + file_list = utils.forms.StringField( + u'File list (with attributes) in CelebA format', + validators=[ + validate_file_path, + ], + tooltip="Provide file list in CelebA format" + ) + + image_folder = utils.forms.StringField( + u'Image folder', + validators=[ + validators.DataRequired(), + validate_folder_path, + ], + tooltip="Specify the path to a folder of images." + ) + + center_crop_size = utils.forms.IntegerField( + u'Center crop size', + default=108, + validators=[ + validators.NumberRange(min=0) + ], + tooltip="Specify center crop." + ) + + resize = utils.forms.IntegerField( + u'Resize after crop', + default=64, + tooltip="Resize after crop." + ) + + +@subclass +class InferenceForm(Form): + """ + A form used to perform inference on a text classification dataset + """ + + def __init__(self, attributes, editable_attribute_ids, **kwargs): + super(InferenceForm, self).__init__(**kwargs) + self.attributes = attributes + self.editable_attribute_ids = editable_attribute_ids + + def validate_file_path(form, field): + if not field.data: + pass + else: + # make sure the filesystem path exists + if not os.path.exists(field.data) and not os.path.isdir(field.data): + raise validators.ValidationError( + 'File does not exist or is not reachable') + else: + return True + + def validate_folder_path(form, field): + if not field.data: + pass + else: + # make sure the filesystem path exists + if not os.path.exists(field.data) or not os.path.isdir(field.data): + raise validators.ValidationError( + 'Folder does not exist or is not reachable') + else: + return True + + row_count = utils.forms.IntegerField( + u'Rows', + default=10, + validators=[ + validators.NumberRange(min=1) + ], + tooltip="Rows to generate in output grid." + ) + + dataset_type = utils.forms.SelectField( + 'Dataset', + choices=[ + ('mnist', 'MNIST'), + ('celeba', 'CelebA'), + ], + default='celeba', + tooltip="Select a dataset." + ) + + task_id = utils.forms.SelectField( + 'Task ID', + choices=[ + ('class', 'MNIST - Class sweep'), + ('style', 'MNIST - Style sweep'), + ('genimg', 'Generate single image'), + ('attributes', 'CelebA - add/remove attributes'), + ('enclist', 'CelebA - Encode list of images'), + ('analogy', 'CelebA - Analogy'), + ('animation', 'CelebA - Animation'), + ], + default='class', + tooltip="Select a task to execute." 
+ ) + + class_z_vector = utils.forms.StringField( + u'Z vector (leave blank for random)', + ) + + style_z1_vector = utils.forms.StringField( + u'Z1 vector (leave blank for random)', + ) + + style_z2_vector = utils.forms.StringField( + u'Z2 vector (leave blank for random)', + ) + + genimg_z_vector = utils.forms.StringField( + u'Z vector (leave blank for random)', + ) + + genimg_class_id = utils.forms.IntegerField( + u'Class ID', + default=0, + validators=[ + validators.NumberRange(min=0, max=9) + ], + tooltip="Class of image to generate (leave blank for CelebA)." + ) + + attributes_z_vector = utils.forms.StringField( + u'Z vector (leave blank for random)', + ) + + attributes_file = utils.forms.StringField( + u'Attributes vector file', + validators=[ + validate_file_path, + ], + tooltip="Specify the path to a file that contains attributes vectors." + ) + + attributes_params = HiddenField() + + enc_file_list = utils.forms.StringField( + u'File list', + validators=[ + validate_file_path, + ], + tooltip="Specify the path to a file that contains a list of files." + ) + + enc_image_folder = utils.forms.StringField( + u'Image folder', + validators=[ + validate_folder_path, + ], + tooltip="Specify the path to a folder of images." + ) + + enc_num_images = utils.forms.IntegerField( + u'Number of images to encode', + default=100, + validators=[ + validators.NumberRange(min=0) + ], + tooltip="Max number of images to encode." + ) + + attributes_z1_vector = utils.forms.StringField( + u'Source Z vector (leave blank for random)', + ) + + attributes_z2_vector = utils.forms.StringField( + u'First Sink Z vector (leave blank for random)', + ) + + attributes_z3_vector = utils.forms.StringField( + u'Second Sink Z vector (leave blank for random)', + ) + + animation_num_transitions = utils.forms.IntegerField( + u'Number of transitions per image', + default=10, + validators=[ + validators.NumberRange(min=1, max=100) + ], + tooltip="Number of transitions between each of the specified images" + ) + + animation_z_vectors = TextAreaField( + u'z vectors (one per line)', + ) diff --git a/plugins/data/gan/digitsDataPluginGan/templates/dataset_template.html b/plugins/data/gan/digitsDataPluginGan/templates/dataset_template.html new file mode 100644 index 000000000..4b1ec2712 --- /dev/null +++ b/plugins/data/gan/digitsDataPluginGan/templates/dataset_template.html @@ -0,0 +1,29 @@ +{# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. #} + +{% from "helper.html" import print_flashes %} +{% from "helper.html" import print_errors %} +{% from "helper.html" import mark_errors %} + +
    + {{ form.file_list.label }} + {{ form.file_list.tooltip }} + {{ form.file_list(class='form-control autocomplete_path', placeholder='file') }} +
    + +
    + {{ form.image_folder.label }} + {{ form.image_folder.tooltip }} + {{ form.image_folder(class='form-control autocomplete_path', placeholder='folder') }} +
    + +
    + {{ form.center_crop_size.label }} + {{ form.center_crop_size.tooltip }} + {{ form.center_crop_size(class='form-control') }} +
    + +
    + {{ form.resize.label }} + {{ form.resize.tooltip }} + {{ form.resize(class='form-control') }} +
    diff --git a/plugins/data/gan/digitsDataPluginGan/templates/inference_template.html b/plugins/data/gan/digitsDataPluginGan/templates/inference_template.html new file mode 100644 index 000000000..6b979b71c --- /dev/null +++ b/plugins/data/gan/digitsDataPluginGan/templates/inference_template.html @@ -0,0 +1,255 @@ +{# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. #} + +{% from "helper.html" import print_flashes %} +{% from "helper.html" import print_errors %} +{% from "helper.html" import mark_errors %} + + + +

    GAN inference Options

    + +
    + +

    Choose a type of dataset

    + +
    + {{ form.dataset_type.label }} + {{ form.dataset_type.tooltip }} + {{ form.dataset_type(class='form-control') }} +
    + +

    Choose a task

    + +
    + {{ form.task_id.label }} + {{ form.task_id.tooltip }} + {{ form.task_id(class='form-control') }} +
    + +
    +

    MNIST Class sweep parameters

    + +
    Use with "GAN" visualization method (select "Grid" task).
    + +
    + {{ form.class_z_vector.label }} + {{ form.class_z_vector.tooltip }} + {{ form.class_z_vector(class='form-control') }} +
    +
    + +
    +

    MNIST Style sweep parameters

    + +
    Use with "GAN" visualization method (select "Grid" task).
    + +
    + {{ form.style_z1_vector.label }} + {{ form.style_z1_vector.tooltip }} + {{ form.style_z1_vector(class='form-control') }} +
    + +
    + {{ form.style_z2_vector.label }} + {{ form.style_z2_vector.tooltip }} + {{ form.style_z2_vector(class='form-control') }} +
    +
    + +
    +

    Image generation parameters

    + +
    Use with "Image Output" visualization method (select "HWC" data order)
    + +
    + {{ form.genimg_z_vector.label }} + {{ form.genimg_z_vector.tooltip }} + {{ form.genimg_z_vector(class='form-control') }} +
    + +
    + {{ form.genimg_class_id.label }} + {{ form.genimg_class_id.tooltip }} + {{ form.genimg_class_id(class='form-control') }} +
    +
    + +
    +

    CelebA Additive Attributes

    + +
    Use with "Image Output" visualization method (HWC data order).
    + +
    + {{ form.attributes_file.label }} + {{ form.attributes_file.tooltip }} + {{ form.attributes_file(class='form-control autocomplete_path') }} +
    + +
    + {{ form.attributes_z_vector.label }} + {{ form.attributes_z_vector.tooltip }} + {{ form.attributes_z_vector(class='form-control') }} +
    + +
    Add or remove attributes by filling the corresponding box with +1 or -1 + (or any other multiplier).
    + +
    + {{ form.attributes_params.label }} + {{ form.attributes_params.tooltip }} + {{ form.attributes_params(class='form-control') }} +
    + + + + {% for attr_id in form.editable_attribute_ids %} + + {% endfor %} + + + + +
    {{form.attributes[attr_id]}}
    + +
    + +
    +

    Encode file list

    + +
    Use with "GAN" visualization method (select "Encoder" task).
    + +
    + {{ form.enc_file_list.label }} + {{ form.enc_file_list.tooltip }} + {{ form.enc_file_list(class='form-control autocomplete_path', placeholder='file') }} +
    + +
    + {{ form.enc_image_folder.label }} + {{ form.enc_image_folder.tooltip }} + {{ form.enc_image_folder(class='form-control autocomplete_path', placeholder='folder') }} +
    + +
    + {{ form.enc_num_images.label }} + {{ form.enc_num_images.tooltip }} + {{ form.enc_num_images(class='form-control') }} +
    +
    + +
    +

    Analogy

    + +
    Use with "GAN" visualization method
    + +
    + {{ form.attributes_z1_vector.label }} + {{ form.attributes_z1_vector.tooltip }} + {{ form.attributes_z1_vector(class='form-control') }} +
    + +
    + {{ form.attributes_z2_vector.label }} + {{ form.attributes_z2_vector.tooltip }} + {{ form.attributes_z2_vector(class='form-control') }} +
    + +
    + {{ form.attributes_z3_vector.label }} + {{ form.attributes_z3_vector.tooltip }} + {{ form.attributes_z3_vector(class='form-control') }} +
    +
    + +
    +

    Animation

    + +
    Use with "GAN" visualization method
    + +
    + {{ form.animation_num_transitions.label }} + {{ form.animation_num_transitions.tooltip }} + {{ form.animation_num_transitions(class='form-control') }} +
    + +
    + {{ form.animation_z_vectors.label }} + {{ form.animation_z_vectors.tooltip }} + {{ form.animation_z_vectors(class='form-control', placeholder='z vectors') }} +
    +
    + +
    + + diff --git a/plugins/data/gan/digitsDataPluginGan/templates/template.html b/plugins/data/gan/digitsDataPluginGan/templates/template.html new file mode 100644 index 000000000..2b8cf6067 --- /dev/null +++ b/plugins/data/gan/digitsDataPluginGan/templates/template.html @@ -0,0 +1,37 @@ +{# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. #} + +{% from "helper.html" import print_flashes %} +{% from "helper.html" import print_errors %} +{% from "helper.html" import mark_errors %} + +{{ form.data_stage(class='form-control') }} + +
    + {{ form.train_data_file.label }} + {{ form.train_data_file.tooltip }} + {{ form.train_data_file(class='form-control autocomplete_path', placeholder='.csv file') }} +
    + +
    + {{ form.val_data_file.label }} + {{ form.val_data_file.tooltip }} + {{ form.val_data_file(class='form-control autocomplete_path', placeholder='.csv file') }} +
    + +
    + {{ form.alphabet.label }} + {{ form.alphabet.tooltip }} + {{ form.alphabet(class='form-control') }} +
    + +
    + {{ form.class_labels_file.label }} + {{ form.class_labels_file.tooltip }} + {{ form.class_labels_file(class='form-control autocomplete_path', placeholder='.txt file') }} +
    + +
    + {{ form.max_chars_per_sample.label }} + {{ form.max_chars_per_sample.tooltip }} + {{ form.max_chars_per_sample(class='form-control') }} +
    diff --git a/plugins/data/gan/setup.py b/plugins/data/gan/setup.py new file mode 100644 index 000000000..5fc97b523 --- /dev/null +++ b/plugins/data/gan/setup.py @@ -0,0 +1,27 @@ +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + +import os +from setuptools import setup, find_packages + +from digits.extensions.data import GROUP as DIGITS_PLUGIN_GROUP + + +# Utility function to read the README file. +def read(fname): + return open(os.path.join(os.path.dirname(__file__), fname)).read() + + +setup( + name="digits_gan_data_plugin", + version="0.0.1", + author="Greg Heinrich", + description=("A data ingestion plugin for GANs"), + long_description=read('README'), + license="Apache", + packages=find_packages(), + entry_points={ + DIGITS_PLUGIN_GROUP: [ + 'class=digitsDataPluginGan:DataIngestion', + ]}, + include_package_data=True, +) diff --git a/plugins/view/gan/MANIFEST.in b/plugins/view/gan/MANIFEST.in new file mode 100644 index 000000000..36a8647ad --- /dev/null +++ b/plugins/view/gan/MANIFEST.in @@ -0,0 +1 @@ +recursive-include digitsViewPluginGan *.html diff --git a/plugins/view/gan/README b/plugins/view/gan/README new file mode 100644 index 000000000..8b0ecd5a1 --- /dev/null +++ b/plugins/view/gan/README @@ -0,0 +1 @@ +This DIGITS plug-in visualizes the output of a GAN. \ No newline at end of file diff --git a/plugins/view/gan/digitsViewPluginGan/__init__.py b/plugins/view/gan/digitsViewPluginGan/__init__.py new file mode 100644 index 000000000..af82aa2f8 --- /dev/null +++ b/plugins/view/gan/digitsViewPluginGan/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. +from __future__ import absolute_import + +from .view import Visualization + +__all__ = ['Visualization'] diff --git a/plugins/view/gan/digitsViewPluginGan/forms.py b/plugins/view/gan/digitsViewPluginGan/forms.py new file mode 100644 index 000000000..fb3a19728 --- /dev/null +++ b/plugins/view/gan/digitsViewPluginGan/forms.py @@ -0,0 +1,49 @@ +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. +from __future__ import absolute_import + +import os + +from digits import utils +from digits.utils import subclass +from flask_wtf import Form +import wtforms.validators + + +@subclass +class ConfigForm(Form): + """ + A form used to configure gradient visualization + """ + + def validate_file_path(form, field): + if not field.data: + pass + else: + # make sure the filesystem path exists + if not os.path.exists(field.data) and not os.path.isdir(field.data): + raise wtforms.validators.ValidationError('File does not exist or is not reachable') + else: + return True + + gan_view_task_id = utils.forms.SelectField( + 'Task', + choices=[ + ('grid', 'Grid'), + ('mnist_encoder', 'MNIST Encoder'), + ('celeba_encoder', 'CelebA Encoder'), + ('animation', 'Animation'), + ('attributes', 'CelebA get attributes'), + ], + default='grid', + tooltip="Select a task." + ) + + attributes_file = utils.forms.StringField( + u'Attributes vector file', + validators=[ + validate_file_path, + ], + tooltip="Specify the path to a file that contains attributes vectors." + ) + + pass diff --git a/plugins/view/gan/digitsViewPluginGan/templates/config_template.html b/plugins/view/gan/digitsViewPluginGan/templates/config_template.html new file mode 100644 index 000000000..ecb3dce24 --- /dev/null +++ b/plugins/view/gan/digitsViewPluginGan/templates/config_template.html @@ -0,0 +1,19 @@ +{# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 
+{# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. #}
+
+{% from "helper.html" import print_flashes %}
+{% from "helper.html" import print_errors %}
+{% from "helper.html" import mark_errors %}
+
+Show the output of a GAN
+
+{{ form.gan_view_task_id.label }}
+{{ form.gan_view_task_id.tooltip }}
+{{ form.gan_view_task_id(class='form-control autocomplete_path', placeholder='folder') }}
+
+{{ form.attributes_file.label }}
+{{ form.attributes_file.tooltip }}
+{{ form.attributes_file(class='form-control autocomplete_path') }}
diff --git a/plugins/view/gan/digitsViewPluginGan/templates/header_template.html b/plugins/view/gan/digitsViewPluginGan/templates/header_template.html
new file mode 100644
index 000000000..313fa8348
--- /dev/null
+++ b/plugins/view/gan/digitsViewPluginGan/templates/header_template.html
@@ -0,0 +1,28 @@
+{# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. #}
+
+{% if animated_image %}
+Animation
+{% endif %}
+{% if task_id == 'grid' %}
+Grid
+{% for row_id in rows %}
+{% for col_id in cols %}
+{% endfor %}
+{% endfor %}
+{% endif %}
diff --git a/plugins/view/gan/digitsViewPluginGan/templates/view_template.html b/plugins/view/gan/digitsViewPluginGan/templates/view_template.html
new file mode 100644
index 000000000..d7d234b18
--- /dev/null
+++ b/plugins/view/gan/digitsViewPluginGan/templates/view_template.html
@@ -0,0 +1,47 @@
+{# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. #}
+
+{% from "helper.html" import print_flashes %}
+{% from "helper.html" import print_errors %}
+{% from "helper.html" import mark_errors %}
+
+{% if task_id=='grid' %}
+{% endif %}
+{% if task_id=='grid' or task_id=='animation' %}
+{{key}}
+{% elif task_id=='encoder' %}
+{{z}}
+{% elif task_id=='attributes' %}
+{% for attribute in top5 %}
+{{attribute[1]}} {{ attribute[0] }}
+{% endfor %}
+{% endif %}
diff --git a/plugins/view/gan/digitsViewPluginGan/view.py b/plugins/view/gan/digitsViewPluginGan/view.py
new file mode 100644
index 000000000..eb64c60c1
--- /dev/null
+++ b/plugins/view/gan/digitsViewPluginGan/view.py
@@ -0,0 +1,263 @@
+# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+from __future__ import absolute_import
+
+import os
+
+# Find the best implementation available
+try:
+    from cStringIO import StringIO
+except ImportError:
+    from StringIO import StringIO
+import pickle
+
+import imageio
+import numpy as np
+import PIL.Image
+import PIL.ImageDraw
+
+import digits
+from digits.utils import subclass, override
+from digits.extensions.view.interface import VisualizationInterface
+from .forms import ConfigForm
+
+
+CONFIG_TEMPLATE = "templates/config_template.html"
+HEADER_TEMPLATE = "templates/header_template.html"
+VIEW_TEMPLATE = "templates/view_template.html"
+
+CELEBA_ATTRIBUTES = """
+    5_o_Clock_Shadow Arched_Eyebrows Attractive Bags_Under_Eyes Bald Bangs
+    Big_Lips Big_Nose Black_Hair Blond_Hair Blurry Brown_Hair Bushy_Eyebrows
+    Chubby Double_Chin Eyeglasses Goatee Gray_Hair Heavy_Makeup High_Cheekbones
+    Male Mouth_Slightly_Open Mustache Narrow_Eyes No_Beard Oval_Face Pale_Skin
+    Pointy_Nose Receding_Hairline Rosy_Cheeks Sideburns Smiling Straight_Hair
+    Wavy_Hair Wearing_Earrings Wearing_Hat Wearing_Lipstick Wearing_Necklace
+    Wearing_Necktie Young
+    """.split()
+
+
+@subclass
+class Visualization(VisualizationInterface):
+    """
+    A visualization extension to display the output of a GAN
+    """
+
+    def __init__(self, dataset, **kwargs):
+        """
+        Init
+        """
+        # memorize view template for later use
+        extension_dir = os.path.dirname(os.path.abspath(__file__))
+        self.view_template = open(
+            os.path.join(extension_dir, VIEW_TEMPLATE), "r").read()
+
+        self.normalize = True
+        self.grid_size = 10
+
+        # view options
+        self.task_id = kwargs['gan_view_task_id']
+        self.attributes_file = kwargs['attributes_file']
+
+    @staticmethod
+    def get_config_form():
+        return ConfigForm()
+
+    @staticmethod
+    def get_config_template(form):
+        """
+        parameters:
+        - form: form returned by get_config_form(). This may be populated
+        with values if the job was cloned
+        returns:
+        - (template, context) tuple
+          - template is a Jinja template to use for rendering config options
+          - context is a dictionary of context variables to use for rendering
+          the form
+        """
+        extension_dir = os.path.dirname(os.path.abspath(__file__))
+        template = open(
+            os.path.join(extension_dir, CONFIG_TEMPLATE), "r").read()
+        context = {'form': form}
+        return (template, context)
+
+    @override
+    def get_header_template(self):
+        """
+        Implements get_header_template() method from view extension interface
+        """
+
+        extension_dir = os.path.dirname(os.path.abspath(__file__))
+        template = open(
+            os.path.join(extension_dir, HEADER_TEMPLATE), "r").read()
+
+        context = {'task_id': self.task_id,
+                   'cols': range(self.grid_size),
+                   'rows': range(self.grid_size),
+                   'animated_image': None}
+
+        if hasattr(self, 'animated_images'):
+            # create animated gif
+            string_buf = StringIO()
+            fmt = "gif"
+            imageio.mimsave(string_buf, self.animated_images, format=fmt)
+            data = string_buf.getvalue().encode('base64').replace('\n', '')
+            animated_image_html = 'data:image/%s;base64,%s' % (fmt, data)
+            context['animated_image'] = animated_image_html
+
+        return template, context
+
+    @staticmethod
+    def get_id():
+        return "image-gan"
+
+    @staticmethod
+    def get_title():
+        return "GAN"
+
+    def get_image_html(self, image):
+        # assume 8-bit
+        if self.normalize:
+            image -= image.min()
+            if image.max() > 0:
+                image /= image.max()
+                image *= 255
+        else:
+            # clip
+            image = image.clip(0, 255)
+
+        # convert to uint8
+        image = image.astype('uint8')
+
+        # convert to PIL image
+        channels = image.shape[2]
+        if channels == 1:
+            # drop channel axis
+            image = PIL.Image.fromarray(image[:, :, 0])
+        elif channels == 3:
+            image = PIL.Image.fromarray(image)
+        else:
+            raise ValueError("Unhandled number of channels: %d" % channels)
+
+        # image.save(fname)
+
+        image_html = digits.utils.image.embed_image_html(image)
+
+        return image_html
+
+    @override
+    def get_view_template(self, data):
+        """
+        parameters:
+        - data: data returned by process_data()
+        returns:
+        - (template, context) tuple
+          - template is a Jinja template to use for rendering config
+          options
+          - context is a dictionary of context variables to use for
+          rendering the form
+        """
+        context = {'task_id': self.task_id}
+        context.update(data)
+        if self.task_id in ['celeba_encoder', 'mnist_encoder']:
+            context.update({'task_id': 'encoder'})
+        return self.view_template, context
+
+    @override
+    def process_data(self, input_id, input_data, output_data):
+        """
+        Process one inference and return data to visualize
+        """
+        data = output_data[output_data.keys()[0]].astype('float32')
+
+        if self.task_id == 'grid':
+            col_id = int(input_id) // self.grid_size
+            row_id = int(input_id) % self.grid_size
+            image_html = self.get_image_html(data)
+
+            img_size = data.shape[0]
+            if img_size == 28:
+                # MNIST
+                if not hasattr(self, 'animated_images'):
+                    self.animated_images = [None] * (self.grid_size ** 2)
+                self.animated_images[row_id * self.grid_size + col_id] = data.astype('uint8')
+            elif img_size == 64:
+                # CelebA
+                if not hasattr(self, 'animated_images'):
+                    self.animated_images = [None] * (4 * self.grid_size - 4)
+                    print("animated: %s" % repr(self.animated_images))
+
+                if (col_id == 0 or row_id == 0 or col_id == (self.grid_size - 1) or row_id == (self.grid_size - 1)):
+                    if row_id == 0:
+                        idx = col_id
+                    elif col_id == (self.grid_size - 1):
+                        idx = self.grid_size - 1 + row_id
+                    elif row_id == (self.grid_size - 1):
+                        idx = 3 * self.grid_size - 3 - col_id
+                    else:
+                        idx = 4 * self.grid_size - 4 - row_id
+                    self.animated_images[idx] = data.astype('uint8')
+                    print("set idx %d " % idx)
+            else:
+                raise ValueError("Unhandled image size: %d" % img_size)
+
+            return {'image': image_html,
+                    'col_id': col_id,
+                    'row_id': row_id,
+                    'key': input_id}
+        elif self.task_id == 'mnist_encoder':
+            self.z_dim = 100
+            z = data[:self.z_dim]
+            image = data[self.z_dim:].reshape(28, 28)
+            input_data = input_data.astype('float32')
+            input_data = input_data[:, :, np.newaxis]
+            image = image[:, :, np.newaxis]
+            image_input_html = self.get_image_html(input_data)
+            image_output_html = self.get_image_html(image)
+            return {'z': z,
+                    'image_input': image_input_html,
+                    'image_output': image_output_html,
+                    'key': input_id}
+        elif self.task_id == 'celeba_encoder':
+            self.z_dim = 100
+            z = data[:self.z_dim]
+            image = data[self.z_dim:].reshape(64, 64, 3)
+            input_data = input_data.astype('float32')
+            image_input_html = self.get_image_html(input_data)
+            image_output_html = self.get_image_html(image)
+            return {'z': z,
+                    'image_input': image_input_html,
+                    'image_output': image_output_html,
+                    'key': input_id}
+        elif self.task_id == 'animation':
+            image_html = self.get_image_html(data)
+            if not hasattr(self, 'animated_images'):
+                self.animated_images = []
+            self.animated_images.append(data.astype('uint8'))
+            return {'image': image_html,
+                    'key': input_id}
+        elif self.task_id == 'attributes':
+            self.z_dim = 100
+            z = data[:self.z_dim]
+            input_data = input_data.astype('float32')
+            image_input_html = self.get_image_html(input_data)
+            image = data[self.z_dim:].reshape(64, 64, 3)
+            image_output_html = self.get_image_html(image)
+            with open(self.attributes_file, 'rb') as f:
+                attributes_z = pickle.load(f)
+
+            # inner_products = np.inner(z, attributes_z)
+            inner_products = np.empty((40))
+            for i in range(40):
+                if True:
+                    attr = attributes_z[i]
+                    inner_products[i] = np.inner(z, attr) / np.linalg.norm(attr)
+                else:
+                    inner_products[i] = 0
+
+            top_5_indices = np.argsort(inner_products)[::-1][:5]
+            top_5 = [(CELEBA_ATTRIBUTES[idx], "%.2f" % inner_products[idx]) for idx in top_5_indices]
+            return {'image_input': image_input_html,
+                    'image_output': image_output_html,
+                    'top5': top_5}
+        else:
+            raise ValueError("Unknown task: %s" % self.task_id)
diff --git a/plugins/view/gan/setup.py b/plugins/view/gan/setup.py
new file mode 100644
index 000000000..c6c59cb42
--- /dev/null
+++ b/plugins/view/gan/setup.py
@@ -0,0 +1,29 @@
+# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+
+import os
+from setuptools import setup, find_packages
+
+from digits.extensions.view import GROUP as DIGITS_PLUGIN_GROUP
+
+
+# Utility function to read the README file.
+def read(fname):
+    return open(os.path.join(os.path.dirname(__file__), fname)).read()
+
+
+setup(
+    name="digits_gan_view_plugin",
+    version="0.0.1",
+    author="Greg Heinrich",
+    description=("A view plugin for GANs"),
+    long_description=read('README'),
+    license="Apache",
+    packages=find_packages(),
+    entry_points={
+        DIGITS_PLUGIN_GROUP: [
+            'class=digitsViewPluginGan:Visualization',
+        ]
+    },
+    include_package_data=True,
+    install_requires=['imageio>=2.1.2'],
+)
diff --git a/requirements.txt b/requirements.txt
index 0a6202f08..3104e670b 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -17,3 +17,4 @@ pydot>=1.0.28,<=1.0.29
 psutil>=1.2.1,<=3.4.2
 matplotlib>=1.3.1,<=1.5.1
 scikit-fmm>=0.0.9
+python-magic>=0.2
diff --git a/scripts/travis/install-openblas.sh b/scripts/travis/install-openblas.sh
deleted file mode 100755
index 9ce8b4f2f..000000000
--- a/scripts/travis/install-openblas.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/bash
-# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
-set -e
-
-LOCAL_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
-
-if [ "$#" -ne 1 ];
-then
-    echo "Usage: $0 INSTALL_DIR"
-    exit 1
-fi
-
-INSTALL_DIR=$(readlink -f "$1")
-if [ -d "$INSTALL_DIR" ] && ls "$INSTALL_DIR/"*.so >/dev/null 2>&1; then
-    echo "Using cached build at $INSTALL_DIR ..."
-else
-    rm -rf "$INSTALL_DIR"
-    git clone https://github.com/xianyi/OpenBLAS.git "$INSTALL_DIR" -b v0.2.18 --depth 1
-    cd "$INSTALL_DIR"
-
-    # Redirect build output to a log and only show it if an error occurs
-    # Otherwise there is too much output for TravisCI to display properly
-    LOG_FILE="$LOCAL_DIR/openblas-build.log"
-    make NO_AFFINITY=1 USE_OPENMP=1 >"$LOG_FILE" 2>&1 || (cat "$LOG_FILE" && false)
-fi
-
-cd "$INSTALL_DIR"
-sudo make install PREFIX=/usr/local
diff --git a/scripts/travis/install-tensorflow.sh b/scripts/travis/install-tensorflow.sh
new file mode 100755
index 000000000..f691f67fd
--- /dev/null
+++ b/scripts/travis/install-tensorflow.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+set -e
+
+LOCAL_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
+
+if [ ! -z "$DEB_BUILD" ]; then
+    echo "Skipping for deb build"
+    exit 0
+fi
+
+set -x
+
+pip install https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.2.0rc0-cp27-none-linux_x86_64.whl --upgrade