diff --git a/.appveyor.yml b/.appveyor.yml index d0d2f3620..6fa458511 100644 --- a/.appveyor.yml +++ b/.appveyor.yml @@ -4,7 +4,7 @@ image: Visual Studio 2019 environment: MODE: test - PYTHON_VERSION: C:\Python36-x64 + PYTHON_VERSION: C:\Python37-x64 NUMPY_VERSION: numpy TF_VERSION: tensorflow NENGO_VERSION: nengo[tests] @@ -18,7 +18,7 @@ environment: - NENGO_VERSION: nengo[tests]==3.0.0 TF_VERSION: tensorflow==2.2.0 NUMPY_VERSION: numpy==1.16.0 - PYTHON_VERSION: C:\Python35-x64 + PYTHON_VERSION: C:\Python36-x64 init: - set PATH=%PYTHON_VERSION%;%PYTHON_VERSION%\Scripts;%PATH% diff --git a/.nengobones.yml b/.nengobones.yml index 6c2bcacef..633650b76 100644 --- a/.nengobones.yml +++ b/.nengobones.yml @@ -231,7 +231,7 @@ ci_scripts: - template: deploy travis_yml: - python: 3.6 + python: 3.7 global_vars: NUMPY_VERSION: numpy>=1.16.0 TF_VERSION: tensorflow @@ -256,7 +256,7 @@ travis_yml: NENGO_VERSION: nengo[tests]==3.0.0 TF_VERSION: tensorflow==2.2.0 NUMPY_VERSION: numpy==1.16.0 - python: 3.5 + python: 3.6 - script: test-coverage test_args: --graph-mode - stage: advanced @@ -315,7 +315,6 @@ setup_py: - "Operating System :: Microsoft :: Windows" - "Operating System :: POSIX :: Linux" - "Programming Language :: Python" - - "Programming Language :: Python :: 3.5" - "Programming Language :: Python :: 3.6" - "Programming Language :: Python :: 3.7" - "Programming Language :: Python :: 3.8" diff --git a/.templates/setup.py.template b/.templates/setup.py.template index b72c0164f..6562038ad 100644 --- a/.templates/setup.py.template +++ b/.templates/setup.py.template @@ -19,7 +19,7 @@ else: # environment, so we can't just look up the tensorflow version in the current # environment. but the pip package will be in the isolated sys.path, so we can use # that to look up the site-packages directory of the original environment. - target_path = os.path.join("site-packages", "pip") + target_path = str(pathlib.Path("site-packages", "pip")) for path in sys.path: if target_path in path: source_path = [path[: path.index("pip")]] @@ -44,7 +44,7 @@ else: install_req = [ "nengo>=3.0.0", "numpy>=1.16.0", - "%s>=2.2.0" % tf_req, + "{}>=2.2.0".format(tf_req), "jinja2>=2.10.1", "packaging>=20.0", "progressbar2>=3.39.0", diff --git a/.travis.yml b/.travis.yml index 5525f7e36..3b61f08be 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,7 +1,7 @@ # Automatically generated by nengo-bones, do not edit this file directly language: python -python: 3.6 +python: 3.7 notifications: email: on_success: change @@ -55,7 +55,7 @@ jobs: TF_VERSION="tensorflow==2.2.0" NUMPY_VERSION="numpy==1.16.0" SCRIPT="test-coverage" - python: 3.5 + python: 3.6 - env: SCRIPT="test-coverage" @@ -111,11 +111,7 @@ before_install: # install/run nengo-bones - pip install git+https://github.com/nengo/nengo-bones#egg=nengo-bones - bones-generate --output-dir .ci ci-scripts - - if [[ "$TRAVIS_PYTHON_VERSION" < "3.6" ]]; then - echo "Skipping bones-check because Python $TRAVIS_PYTHON_VERSION < 3.6"; - else - bones-check --verbose; - fi + - bones-check --verbose # display environment info - pip freeze diff --git a/CHANGES.rst b/CHANGES.rst index 527457c88..a0b967a0c 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -25,6 +25,13 @@ Release history *Compatible with TensorFlow 2.2.0 - 2.4.0* +**Removed** + +- Dropped support for Python 3.5 (which reached its end of life in September 2020). + (`#184`_) + +.. 
_#184: https://github.com/nengo/nengo-dl/pull/184 + 3.4.0 (November 26, 2020) ------------------------- diff --git a/docs/conf.py b/docs/conf.py index 394f94db1..e270f2b55 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -2,7 +2,7 @@ # # Automatically generated by nengo-bones, do not edit this file directly -import os +import pathlib import nengo_dl @@ -100,11 +100,11 @@ templates_path = ["_templates"] html_static_path = ["_static"] html_theme = "nengo_sphinx_theme" -html_title = "NengoDL {0} docs".format(release) +html_title = f"NengoDL {release} docs" htmlhelp_basename = "NengoDL" html_last_updated_fmt = "" # Default output format (suppressed) html_show_sphinx = False -html_favicon = os.path.join("_static", "favicon.ico") +html_favicon = str(pathlib.Path("_static", "favicon.ico")) html_theme_options = { "nengo_logo": "nengo-dl-full-light.svg", "nengo_logo_color": "#ff6600", diff --git a/docs/config.rst b/docs/config.rst index 72f500f5f..dd61fa94b 100644 --- a/docs/config.rst +++ b/docs/config.rst @@ -300,3 +300,33 @@ setting ``nengo_dl.Simulator(..., unroll_simulation=x)``. This will explicitly b ``x`` timesteps into the model (without using a loop). So if we use ``unroll_simulation=x`` and ``use_loop=False``, then the simulation will always run for exactly ``x`` timesteps. + +learning_phase +-------------- + +NengoDL (and Keras in general) can run models in two different modes, or "phases": +training and inference. Typically the mode is set automatically based on the function +used to execute the model; ``sim.fit`` will run in training mode, and all other +functions (e.g., ``sim.predict``, ``sim.run``, and ``sim.evaluate``) run in inference +mode. + +The most important way that the learning phase affects a NengoDL model is that it +controls whether spiking neurons are simulated in spiking or non-spiking mode. +Normally, spiking neurons will automatically swap their behaviour for a non-spiking +equivalent during training, and use the spiking behaviour during inference. + +However, sometimes it can be useful to override this default behaviour. For example, +we might want to evaluate a spiking model in training mode, to get more insight into +how it is behaving during training. That is the role of the ``learning_phase`` +config option. It can be set during Network construction to build a network that +will *always* run in one particular mode (training or inference), regardless of +which function is being called. For example: + +..
testcode:: + + with nengo.Network() as net: + nengo_dl.configure_settings(learning_phase=True) + + # this ensemble will always use the "training" mode of LIF + # (equivalent to LIFRate) + ens = nengo.Ensemble(10, 1, neuron_type=nengo.LIF()) diff --git a/docs/examples/keras-to-snn.ipynb b/docs/examples/keras-to-snn.ipynb index 7f8293b3a..2ffaa26e7 100644 --- a/docs/examples/keras-to-snn.ipynb +++ b/docs/examples/keras-to-snn.ipynb @@ -260,7 +260,7 @@ " # last timestep\n", " predictions = np.argmax(data[nengo_output][:, -1], axis=-1)\n", " accuracy = (predictions == test_labels[:n_test, 0, 0]).mean()\n", - " print(\"Test accuracy: %.2f%%\" % (100 * accuracy))\n", + " print(f\"Test accuracy: {100 * accuracy:.2f}%\")\n", "\n", " # plot the results\n", " for ii in range(3):\n", @@ -282,7 +282,8 @@ " plt.ylabel(\"Firing rates (Hz)\")\n", " plt.xlabel(\"Timestep\")\n", " plt.title(\n", - " \"Neural activities (conv0 mean=%dHz max=%dHz)\" % (rates.mean(), rates.max())\n", + " f\"Neural activities (conv0 mean={rates.mean():.1f} Hz, \"\n", + " f\"max={rates.max():.1f} Hz)\"\n", " )\n", " plt.plot(scaled_data)\n", "\n", @@ -387,7 +388,7 @@ "outputs": [], "source": [ "for s in [0.001, 0.005, 0.01]:\n", - " print(\"Synapse=%.3f\" % s)\n", + " print(f\"Synapse={s:.3f}\")\n", " run_network(\n", " activation=nengo.SpikingRectifiedLinear(),\n", " n_steps=60,\n", @@ -445,7 +446,7 @@ "outputs": [], "source": [ "for scale in [2, 5, 10]:\n", - " print(\"Scale=%d\" % scale)\n", + " print(f\"Scale={scale}\")\n", " run_network(\n", " activation=nengo.SpikingRectifiedLinear(),\n", " scale_firing_rates=scale,\n", diff --git a/docs/examples/spa-memory.ipynb b/docs/examples/spa-memory.ipynb index f6ea62c2b..17a18c6a7 100644 --- a/docs/examples/spa-memory.ipynb +++ b/docs/examples/spa-memory.ipynb @@ -77,7 +77,7 @@ "\n", " # iterate through examples to be generated, fill arrays\n", " for n in range(n_inputs):\n", - " name = \"SP_%d\" % n\n", + " name = f\"SP_{n}\"\n", " vocab.add(name, vocab.create_pointer())\n", "\n", " # create inputs and target memory for first pair\n", @@ -163,7 +163,7 @@ "def plot_memory_example(plot_sim, vocab, example_input=0):\n", " plt.figure(figsize=(8, 8))\n", "\n", - " name = \"SP_%d\" % example_input\n", + " name = f\"SP_{example_input}\"\n", "\n", " plt.subplot(3, 1, 1)\n", " plt.plot(\n", @@ -351,8 +351,8 @@ "\n", " # iterate through examples to be generated, fill arrays\n", " for n in range(n_items):\n", - " role_names = [\"ROLE_%d_%d\" % (n, i) for i in range(pairs_per_item)]\n", - " filler_names = [\"FILLER_%d_%d\" % (n, i) for i in range(pairs_per_item)]\n", + " role_names = [f\"ROLE_{n}_{i}\" for i in range(pairs_per_item)]\n", + " filler_names = [f\"FILLER_{n}_{i}\" for i in range(pairs_per_item)]\n", "\n", " # each role/filler pair is presented for presentation_time seconds\n", " for i in range(pairs_per_item):\n", @@ -361,7 +361,7 @@ " filler_names[i]\n", " ).v\n", " binding[n, i * int_steps : (i + 1) * int_steps] = vocab.parse(\n", - " \"%s*%s\" % (role_names[i], filler_names[i])\n", + " f\"{role_names[i]}*{filler_names[i]}\"\n", " ).v\n", "\n", " # randomly select a cue\n", diff --git a/docs/examples/spa-retrieval.ipynb b/docs/examples/spa-retrieval.ipynb index 5ed740936..fa9364934 100644 --- a/docs/examples/spa-retrieval.ipynb +++ b/docs/examples/spa-retrieval.ipynb @@ -79,14 +79,14 @@ "\n", " # iterate through all of the examples to be generated\n", " for n in range(n_items):\n", - " role_names = [\"ROLE_%d_%d\" % (n, i) for i in range(pairs_per_item)]\n", - " filler_names = 
[\"FILLER_%d_%d\" % (n, i) for i in range(pairs_per_item)]\n", + " role_names = [f\"ROLE_{n}_{i}\" for i in range(pairs_per_item)]\n", + " filler_names = [f\"FILLER_{n}_{i}\" for i in range(pairs_per_item)]\n", "\n", " # create key for the 'trace' of bound pairs (i.e. a\n", " # structured semantic pointer)\n", " trace_key = \"TRACE_\" + str(n)\n", " trace_ptr = vocab.parse(\n", - " \"+\".join(\"%s * %s\" % (x, y) for x, y in zip(role_names, filler_names))\n", + " \"+\".join(f\"{x} * {y}\" for x, y in zip(role_names, filler_names))\n", " )\n", " trace_ptr.normalize()\n", " vocab.add(trace_key, trace_ptr)\n", @@ -96,8 +96,8 @@ "\n", " # fill array elements correspond to this example\n", " traces[n, 0, :] = vocab[trace_key].v\n", - " cues[n, 0, :] = vocab[\"ROLE_%d_%d\" % (n, cue_idx)].v\n", - " targets[n, 0, :] = vocab[\"FILLER_%d_%d\" % (n, cue_idx)].v\n", + " cues[n, 0, :] = vocab[f\"ROLE_{n}_{cue_idx}\"].v\n", + " targets[n, 0, :] = vocab[f\"FILLER_{n}_{cue_idx}\"].v\n", "\n", " return traces, cues, targets, vocab" ] diff --git a/docs/installation.rst b/docs/installation.rst index 8ec47754a..c374a047d 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -13,7 +13,7 @@ That's it! Requirements ------------ -NengoDL works with Python 3.5 or later. ``pip`` will do its best to install +NengoDL works with Python 3.6 or later. ``pip`` will do its best to install all of NengoDL's requirements when it installs NengoDL. However, if anything goes wrong during this process you can install the requirements manually and then try to ``pip install nengo-dl`` again. diff --git a/nengo_dl/__init__.py b/nengo_dl/__init__.py index 56c0adeb6..c6ed07208 100644 --- a/nengo_dl/__init__.py +++ b/nengo_dl/__init__.py @@ -5,32 +5,6 @@ __license__ = "Free for non-commercial use; see LICENSE.rst" from nengo_dl.version import version as __version__ -# check python version -import sys - -if sys.version_info < (3, 5): - raise ImportError( - """ -You are running Python version %s with NengoDL version %s. NengoDL requires -at least Python 3.5. 
- -The fact that this version was installed on your system probably means that you -are using an older version of pip; you should consider upgrading with - - $ pip install pip setuptools --upgrade - -There are two options for getting NengoDL working: - -- Upgrade to Python >= 3.5 - -- Install an older version of NengoDL: - - $ pip install 'nengo-dl<2.0' -""" - % (sys.version, __version__) - ) -del sys - # filter out "INFO" level log messages import os diff --git a/nengo_dl/benchmarks.py b/nengo_dl/benchmarks.py index fcb578fb9..679bce04a 100644 --- a/nengo_dl/benchmarks.py +++ b/nengo_dl/benchmarks.py @@ -747,8 +747,8 @@ def build(obj, benchmark, dimensions, neurons_per_d, neuron_type, kwarg): # build benchmark and add to context for chaining print( - "Building %s with %s" - % (nengo_dl.utils.function_name(benchmark, sanitize=False), kwargs) + f"Building {nengo_dl.utils.function_name(benchmark, sanitize=False)} " + f"with {kwargs}" ) obj["net"] = benchmark(**kwargs) diff --git a/nengo_dl/builder.py b/nengo_dl/builder.py index 995089ea6..c39c40e38 100644 --- a/nengo_dl/builder.py +++ b/nengo_dl/builder.py @@ -38,7 +38,7 @@ def __init__(self, plan): for ops in self.plan: if type(ops[0]) not in Builder.builders: raise BuildError( - "No registered builder for operators of type %r" % type(ops[0]) + f"No registered builder for operators of type {type(ops[0])!r}" ) self.op_builds[ops] = Builder.builders[type(ops[0])](ops) @@ -157,7 +157,7 @@ def register_builder(build_class): if nengo_op in cls.builders: warnings.warn( - "Operator '%s' already has a builder. Overwriting." % nengo_op + f"Operator '{nengo_op}' already has a builder. Overwriting." ) cls.builders[nengo_op] = build_class diff --git a/nengo_dl/callbacks.py b/nengo_dl/callbacks.py index 60e0e3d6b..69e9bc47f 100644 --- a/nengo_dl/callbacks.py +++ b/nengo_dl/callbacks.py @@ -47,7 +47,7 @@ def __init__(self, log_dir, sim, objects): self.sim = sim with contextlib.suppress() if compat.eager_enabled() else context.eager_mode(): - self.writer = tf.summary.create_file_writer(log_dir) + self.writer = tf.summary.create_file_writer(str(log_dir)) self.summaries = [] for obj in objects: @@ -56,26 +56,26 @@ def __init__(self, log_dir, sim, objects): ): if isinstance(obj, nengo.Ensemble): param = "encoders" - name = "Ensemble_%s" % obj.label + name = f"Ensemble_{obj.label}" elif isinstance(obj, nengo.ensemble.Neurons): param = "bias" - name = "Ensemble.neurons_%s" % obj.ensemble.label + name = f"Ensemble.neurons_{obj.ensemble.label}" elif isinstance(obj, nengo.Connection): if not compat.conn_has_weights(obj): raise ValidationError( - "Connection '%s' does not have any weights to log" % obj, + f"Connection '{obj}' does not have any weights to log", "objects", ) param = "weights" - name = "Connection_%s" % obj.label + name = f"Connection_{obj.label}" self.summaries.append( - (utils.sanitize_name("%s_%s" % (name, param)), obj, param) + (utils.sanitize_name(f"{name}_{param}"), obj, param) ) else: raise ValidationError( - "Unknown summary object %s; should be an Ensemble, Neurons, or " - "Connection" % obj, + f"Unknown summary object {obj}; should be an Ensemble, Neurons, or " + "Connection", "objects", ) diff --git a/nengo_dl/compat.py b/nengo_dl/compat.py index d9d568806..1ec222b9f 100644 --- a/nengo_dl/compat.py +++ b/nengo_dl/compat.py @@ -65,7 +65,7 @@ def filter(self, record): "deprecation.py" in record.pathname or "deprecated" in record.msg.lower() ): msg = record.getMessage() - raise AttributeError("Deprecation warning detected:\n%s" % msg) + raise 
AttributeError(f"Deprecation warning detected:\n{msg}") return True @@ -190,7 +190,7 @@ def neuron_state(neuron_op): 3: ] assert len(names) == len(neuron_op.states) - return collections.OrderedDict((n, s) for n, s in zip(names, neuron_op.states)) + return dict(zip(names, neuron_op.states)) def neuron_step(neuron_op, dt, J, output, state): # pragma: no cover (runs in TF) """Call step_math instead of step.""" @@ -260,7 +260,7 @@ class FrozenOrderedSet(collections.abc.Set): """Backport of `nengo.utils.stdlib.FrozenOrderedSet`.""" def __init__(self, data): - self.data = collections.OrderedDict((d, None) for d in data) + self.data = dict((d, None) for d in data) def __contains__(self, elem): return elem in self.data diff --git a/nengo_dl/config.py b/nengo_dl/config.py index c00b36cce..f15739983 100644 --- a/nengo_dl/config.py +++ b/nengo_dl/config.py @@ -130,7 +130,7 @@ def configure_settings(**kwargs): ): params.set_param(attr, Parameter(attr, val)) else: - raise ConfigError("%s is not a valid config parameter" % attr) + raise ConfigError(f"{attr} is not a valid config parameter") def get_setting(model, setting, default=None, obj=None): diff --git a/nengo_dl/converter.py b/nengo_dl/converter.py index a0475973c..8369db5fb 100644 --- a/nengo_dl/converter.py +++ b/nengo_dl/converter.py @@ -140,8 +140,8 @@ def __init__( if self.swap_activations.unused_keys(): warnings.warn( - "swap_activations contained %s, but there were no layers in the model " - "with that activation type" % (self.swap_activations.unused_keys(),) + f"swap_activations contained {self.swap_activations.unused_keys()}, " + f"but there were no layers in the model with that activation type" ) self.layers = self.net.layers @@ -245,9 +245,9 @@ def verify(self, training=False, inputs=None, atol=1e-8, rtol=1e-5): ) if keras_params != nengo_params: raise ValueError( - "Number of trainable parameters in Nengo network (%d) does not " - "match number of trainable parameters in Keras model (%d)" - % (nengo_params, keras_params) + f"Number of trainable parameters in Nengo network " + f"({nengo_params}) does not match number of trainable " + f"parameters in Keras model ({keras_params})" ) out_vals = [np.reshape(x, (batch_size, n_steps, -1)) for x in out_vals] @@ -268,8 +268,9 @@ def verify(self, training=False, inputs=None, atol=1e-8, rtol=1e-5): logger.info("Nengo:\n%s", nengo_vals[fails]) raise ValueError( "Output of Keras model does not match output of converted " - "Nengo network (max difference=%.2E; set log level to INFO to see " - "all failures)" % max(abs(keras_vals[fails] - nengo_vals[fails])) + "Nengo network (max difference=" + f"{max(abs(keras_vals[fails] - nengo_vals[fails])):.2E}; " + "set log level to INFO to see all failures)" ) return True @@ -298,8 +299,8 @@ def get_converter(self, layer): if converter.has_weights and not self.split_shared_weights: # TODO: allow fallback raise ValueError( - "Multiple applications of layer %s detected; this is not supported " - "unless split_shared_weights=True" % layer + f"Multiple applications of layer {layer} detected; this is not " + f"supported unless split_shared_weights=True" ) return converter @@ -309,8 +310,9 @@ def get_converter(self, layer): # perform custom checks in layer converters if ConverterClass is None: convertible = False - error_msg = "Layer type %s does not have a registered converter" % type( - layer + error_msg = ( + f"Layer type {type(layer).__name__} does not have a registered " + f"converter" ) else: convertible, error_msg = ConverterClass.convertible(layer, self) @@ 
-322,14 +324,14 @@ def get_converter(self, layer): can_fallback = ConverterClass is None or ConverterClass.allow_fallback if self.allow_fallback and can_fallback: warnings.warn( - "%sFalling back to TensorNode." - % (error_msg + ". " if error_msg else "") + f"{error_msg + '. ' if error_msg else ''}" + f"Falling back to TensorNode." ) ConverterClass = self.converters[None] else: - msg = "%sUnable to convert layer %s to native Nengo objects; " % ( - error_msg + ". " if error_msg else "", - layer.name, + msg = ( + f"{error_msg + '. ' if error_msg else ''}Unable to convert layer " + f"'{layer.name}' to native Nengo objects; " ) if not self.allow_fallback and can_fallback: msg += ( @@ -361,7 +363,7 @@ def register(cls, keras_layer): def register_converter(convert_cls): if keras_layer in cls.converters: warnings.warn( - "Layer '%s' already has a converter. Overwriting." % keras_layer + f"Layer '{keras_layer}' already has a converter. Overwriting." ) cls.converters[keras_layer] = convert_cls @@ -376,15 +378,14 @@ class KerasTensorDict(collections.abc.Mapping): """ def __init__(self): - self.dict = collections.OrderedDict() + self.dict = {} def _get_key(self, key): if isinstance(key, tf.keras.layers.Layer): if len(key.inbound_nodes) > 1: raise KeyError( - "Layer %s is ambiguous because it has been called multiple " + f"Layer {key} is ambiguous because it has been called multiple " "times; use a specific set of layer outputs as key instead" - % key ) # get output tensor @@ -514,7 +515,7 @@ def add_nengo_obj(self, node_id, biases=None, activation=None): The Nengo object whose output corresponds to the output of the given Keras Node. """ - name = self.layer.name + ".%d" % node_id + name = f"{self.layer.name}.{node_id}" # apply manually specified swaps activation = self.converter.swap_activations.get(activation, activation) @@ -565,9 +566,10 @@ def add_nengo_obj(self, node_id, biases=None, activation=None): ] /= scale_firing_rates else: warnings.warn( - "Firing rate scaling being applied to activation type " - "that does not support amplitude (%s); this will change " - "the output" % type(activation) + f"Firing rate scaling being applied to activation type " + f"that does not support amplitude " + f"({type(activation).__name__}); " + f"this will change the output" ) obj = nengo.Ensemble( @@ -584,8 +586,8 @@ def add_nengo_obj(self, node_id, biases=None, activation=None): self.set_trainable(obj, False) elif self.converter.allow_fallback: warnings.warn( - "Activation type %s does not have a native Nengo equivalent; " - "falling back to a TensorNode" % activation + f"Activation type {activation} does not have a native Nengo " + f"equivalent; falling back to a TensorNode" ) obj = TensorNode( activation, @@ -594,12 +596,12 @@ def add_nengo_obj(self, node_id, biases=None, activation=None): label=name, ) else: - raise TypeError("Unsupported activation type (%s)" % self.layer.activation) + raise TypeError(f"Unsupported activation type ({self.layer.activation})") if biases is not None and isinstance(obj, (nengo.Node, TensorNode)): # obj doesn't have its own biases, so use a connection from a constant node # (so that the bias values will be trainable) - bias_node = nengo.Node([1], label="%s.bias" % name) + bias_node = nengo.Node([1], label=f"{name}.bias") nengo.Connection(bias_node, obj, transform=biases[:, None], synapse=None) logger.info("Created %s (size=%d)", obj, obj.size_out) @@ -705,7 +707,7 @@ def _get_shape(self, input_output, node_id, full_shape=False): # note: layer.get_input/output_shape_at is 
generally equivalent to # layer.input/output_shape, except when the layer is called multiple times # with different shapes, in which case input/output_shape is not well defined - func = getattr(self.layer, "get_%s_shape_at" % input_output) + func = getattr(self.layer, f"get_{input_output}_shape_at") # get the shape shape = func(node_id) @@ -802,11 +804,9 @@ def convertible(cls, layer, converter): val = getattr(layer, arg) if val != default: - msg = "%s.%s has value %s != %s, which is not supported" % ( - layer.name, - arg, - val, - default, + msg = ( + f"{layer.name}.{arg} has value {val} != {default}, " + "which is not supported" ) if arg in cls.unsupported_training_args: msg += " (unless inference_only=True)" @@ -855,7 +855,7 @@ def convert(self, node_id): ] with nengo.Network( - label=self.layer.name + ("" if node_id is None else ".%d" % node_id) + label=self.layer.name + ("" if node_id is None else f".{node_id}") ) as net: # add the "trainable" attribute to all objects configure_settings(trainable=None) @@ -1206,7 +1206,7 @@ def convert(self, node_id): # connect up bias node to output bias_node = nengo.Node( - broadcast_bias, label="%s.%d.bias" % (self.layer.name, node_id) + broadcast_bias, label=f"{self.layer.name}.{node_id}.bias" ) conn = nengo.Connection(bias_node, output, synapse=None) self.set_trainable(conn, False) @@ -1286,10 +1286,9 @@ def convert(self, node_id, dimensions): # bias parameter shared across all the spatial dimensions # add trainable bias weights - bias_node = nengo.Node([1], label="%s.%d.bias" % (self.layer.name, node_id)) + bias_node = nengo.Node([1], label=f"{self.layer.name}.{node_id}.bias") bias_relay = nengo.Node( - size_in=len(biases), - label="%s.%d.bias_relay" % (self.layer.name, node_id), + size_in=len(biases), label=f"{self.layer.name}.{node_id}.bias_relay" ) nengo.Connection( bias_node, bias_relay, transform=biases[:, None], synapse=None @@ -1448,9 +1447,9 @@ def convert(self, node_id): shape = self.output_shape(node_id) if any(x is None for x in shape): raise ValueError( - "Input shapes must be fully specified; got %s. If inputs contain " - "`None` in the first axis to indicate a variable number of timesteps, " - "set `temporal_model=True` on the `Converter`." % (shape,) + f"Input shapes must be fully specified; got {shape}. If inputs contain " + f"`None` in the first axis to indicate a variable number of timesteps, " + f"set `temporal_model=True` on the `Converter`." ) output = nengo.Node(size_in=np.prod(shape), label=self.layer.name) @@ -1682,13 +1681,13 @@ def convert(self, node_id): # match the layer dt). # TODO: add some kind of callback to check that sim.dt matches layer.dt? warnings.warn( - "Ignoring %s.dt=%f parameter; dt will be controlled by Simulator.dt" - % (type(self.layer).__name__, self.layer.dt) + f"Ignoring {type(self.layer).__name__}.dt={self.layer.dt:f} parameter; " + f"dt will be controlled by Simulator.dt" ) if self.layer.stateful: warnings.warn( - "Ignoring %s.stateful=True parameter; statefulness will " - "be controlled by Simulator" % type(self.layer).__name__ + f"Ignoring {type(self.layer).__name__}.stateful=True parameter; " + f"statefulness will be controlled by Simulator" ) @@ -1709,8 +1708,8 @@ def convert(self, node_id): if activation is None: # TODO: allow fallback within SpikingActivation? 
raise TypeError( - "SpikingActivation activation type (%s) does not have a native Nengo " - "equivalent" % self.layer.activation + f"SpikingActivation activation type ({self.layer.activation}) does not " + f"have a native Nengo equivalent" ) initial_state = tf.keras.backend.get_value( @@ -1761,9 +1760,8 @@ def convert(self, node_id): def convertible(cls, layer, converter): if not converter.inference_only and layer.trainable: msg = ( - "Cannot convert %s layer to native Nengo objects " - "unless inference_only=True or layer.trainable=False" - % type(layer).__name__ + f"Cannot convert a {type(layer).__name__} layer to native Nengo " + f"objects unless inference_only=True or layer.trainable=False" ) return False, msg @@ -1771,19 +1769,18 @@ def convertible(cls, layer, converter): tf.keras.backend.get_value(layer.layer.cell.initial_level), 0 ): msg = ( - "Cannot convert a %s layer to native Nengo objects with " - "initial_level != 0 (this probably means that training has been " - "applied to the layer before conversion)" % type(layer).__name__ + f"Cannot convert a {type(layer).__name__} layer to native Nengo " + f"objects with initial_level != 0 (this probably means that training " + f"has been applied to the layer before conversion)" ) return False, msg tau = tf.keras.backend.get_value(layer.layer.cell.tau_var) if not np.allclose(tau, np.ravel(tau)[0]): msg = ( - "Cannot convert a %s layer to native Nengo objects with different " - "tau values for each element (this probably means that training " - "has been applied to the layer before conversion)" - % type(layer).__name__ + f"Cannot convert a {type(layer).__name__} layer to native Nengo " + f"objects with different tau values for each element (this probably " + f"means that training has been applied to the layer before conversion)" ) return False, msg diff --git a/nengo_dl/graph_optimizer.py b/nengo_dl/graph_optimizer.py index 814c7e20d..b2a6ec144 100644 --- a/nengo_dl/graph_optimizer.py +++ b/nengo_dl/graph_optimizer.py @@ -5,7 +5,7 @@ import logging import warnings -from collections import OrderedDict, defaultdict +from collections import defaultdict import numpy as np from nengo.builder.operator import Copy, DotInc, ElementwiseInc, Reset, SparseDotInc @@ -575,18 +575,16 @@ def order_signals(plan, n_passes=10): of signals """ - # get all the unique base signals (we use OrderedDict to drop the duplicate + # get all the unique base signals (we use a dict to drop the duplicate # bases without changing their order, so that signal order will be # deterministic for a given model) all_signals = list( - OrderedDict( - [(s.base, None) for ops in plan for op in ops for s in op.all_signals] - ).keys() + {s.base: None for ops in plan for op in ops for s in op.all_signals}.keys() ) # figure out all the read/write blocks in the plan (in theory we would like # each block to become a contiguous chunk in the base array) - io_blocks = OrderedDict() + io_blocks = {} op_sigs = {} for ops in plan: @@ -604,12 +602,9 @@ def order_signals(plan, n_passes=10): io_blocks[(ops, i)] = frozenset(op_sigs[op][i].base for op in ops) # get rid of duplicate io blocks - duplicates = OrderedDict() + duplicates = defaultdict(int) for x in io_blocks.values(): - if x in duplicates: - duplicates[x] += 1 - else: - duplicates[x] = 1 + duplicates[x] += 1 # sort by the size of the block (descending order) # note: we multiply by the number of duplicates, since blocks that @@ -1190,7 +1185,7 @@ def remove_reset_incs(operators): for op in operators: for s in op.incs: if type(op) not in 
valid_inc_types: - warnings.warn("Unknown incer type %s in remove_reset_incs" % type(op)) + warnings.warn(f"Unknown incer type {type(op)} in remove_reset_incs") elif getattr(op, "dst_slice", None) is None: # don't include copy ops with dst_slice, as they aren't incrementing # the whole signal @@ -1426,7 +1421,7 @@ def is_identity(x, sig): other_src, op.Y, inc=len(op.incs) > 0, - tag="%s.identity_mul" % op.tag, + tag=f"{op.tag}.identity_mul", ) ) break diff --git a/nengo_dl/neuron_builders.py b/nengo_dl/neuron_builders.py index e827da303..83490fbad 100644 --- a/nengo_dl/neuron_builders.py +++ b/nengo_dl/neuron_builders.py @@ -5,7 +5,6 @@ import contextlib import logging import warnings -from collections import OrderedDict import numpy as np import tensorflow as tf @@ -157,13 +156,10 @@ def build_pre(self, signals, config): self.J_data = signals.combine([op.J for op in self.ops]) self.output_data = signals.combine([op.output for op in self.ops]) - self.state_data = OrderedDict( - ( - state, - signals.combine([compat.neuron_state(op)[state] for op in self.ops]), - ) + self.state_data = { + state: signals.combine([compat.neuron_state(op)[state] for op in self.ops]) for state in compat.neuron_state(self.ops[0]) - ) + } def step(self, J, dt, **state): """Implements the logic for a single inference step.""" @@ -180,7 +176,7 @@ def training_step(self, J, dt, **state): def build_step(self, signals, **step_kwargs): J = signals.gather(self.J_data) - state = OrderedDict((s, signals.gather(d)) for s, d in self.state_data.items()) + state = {s: signals.gather(d) for s, d in self.state_data.items()} step_output = tf.nest.flatten(self.step(J, signals.dt, **state)) @@ -523,8 +519,8 @@ def __init__(self, ops): self.built_neurons = self.TF_NEURON_IMPL[neuron_type](ops) else: warnings.warn( - "%s does not have a native TensorFlow implementation; " - "falling back to Python implementation" % neuron_type + f"{neuron_type} does not have a native TensorFlow implementation; " + "falling back to Python implementation" ) self.built_neurons = GenericNeuronBuilder(ops) diff --git a/nengo_dl/neurons.py b/nengo_dl/neurons.py index e2917e580..e4ac701ce 100644 --- a/nengo_dl/neurons.py +++ b/nengo_dl/neurons.py @@ -53,7 +53,7 @@ def __init__(self, sigma=1.0, **lif_args): def _argreprs(self): args = super()._argreprs if self.sigma != 1.0: - args.append("sigma=%s" % self.sigma) + args.append(f"sigma={self.sigma}") return args def rates(self, x, gain, bias): diff --git a/nengo_dl/process_builders.py b/nengo_dl/process_builders.py index 83475c7ea..1c97792c2 100644 --- a/nengo_dl/process_builders.py +++ b/nengo_dl/process_builders.py @@ -4,7 +4,6 @@ import contextlib import logging -from collections import OrderedDict import numpy as np import tensorflow as tf @@ -54,7 +53,7 @@ def build_pre(self, signals, config): # combines the result def merged_func(time, *input_state): # pragma: no cover (runs in TF) if not hasattr(self, "step_fs"): - raise SimulationError("build_post has not been called for %s" % self) + raise SimulationError(f"build_post has not been called for {self}") if self.input_data is None: input = None @@ -438,12 +437,9 @@ class SimProcessBuilder(OpBuilder): a custom builder will use the generic builder). 
""" - # we use OrderedDict because it is important that Lowpass come before - # LinearFilter (since we'll be using isinstance to find the right builder, - # and Lowpass is a subclass of LinearFilter) - TF_PROCESS_IMPL = OrderedDict( - [(Lowpass, LowpassBuilder), (LinearFilter, LinearFilterBuilder)] - ) + # it is important that Lowpass come before LinearFilter because we'll be using + # isinstance to find the right builder, and Lowpass is a subclass of LinearFilter + TF_PROCESS_IMPL = {Lowpass: LowpassBuilder, LinearFilter: LinearFilterBuilder} def __init__(self, ops): super().__init__(ops) diff --git a/nengo_dl/signals.py b/nengo_dl/signals.py index 73e0ff9c6..cfbbcc52a 100644 --- a/nengo_dl/signals.py +++ b/nengo_dl/signals.py @@ -3,7 +3,8 @@ """ import logging -from collections import Mapping, OrderedDict, defaultdict +from collections import defaultdict +from collections.abc import Mapping import numpy as np import tensorflow as tf @@ -76,11 +77,7 @@ def ndim(self): return len(self.shape) def __repr__(self): - return "TensorSignal(key=%s, shape=%s, label=%s)" % ( - self.key, - self.shape, - self.label, - ) + return f"TensorSignal(key={self.key}, shape={self.shape}, label={self.label})" def __getitem__(self, indices): """ @@ -167,7 +164,7 @@ def reshape(self, shape): self.dtype, shape, self.minibatch_size, - label=self.label + ".reshape(%s)" % (shape,), + label=self.label + f".reshape({shape})", ) @property @@ -294,7 +291,7 @@ def reset(self): as opposed to data that is constant for a given Nengo model. """ # these values will be re-generated whenever the model is rebuilt - self.bases = OrderedDict() + self.bases = {} # reset TensorSignals for sig in self.sig_map.values(): @@ -329,8 +326,8 @@ def scatter(self, dst, val, mode="update"): if val.dtype.is_floating and val.dtype.base_dtype != self.dtype: raise BuildError( - "Tensor detected with wrong dtype (%s), should " - "be %s." % (val.dtype.base_dtype, self.dtype) + f"Tensor detected with wrong dtype ({val.dtype.base_dtype}), should " + f"be {self.dtype}." 
) # should never be writing to a variable diff --git a/nengo_dl/simulator.py b/nengo_dl/simulator.py index 46324c382..b65c25ddc 100644 --- a/nengo_dl/simulator.py +++ b/nengo_dl/simulator.py @@ -61,7 +61,7 @@ def require_open(wrapped, instance, args, kwargs): if instance.closed: raise SimulatorClosed( - "Cannot call %s after simulator is closed" % wrapped.__name__ + f"Cannot call {wrapped.__name__} after simulator is closed" ) return wrapped(*args, **kwargs) @@ -484,15 +484,14 @@ def __init__( if model is None: self.model = NengoModel( dt=float(dt), - label="%s, dt=%f" % (network, dt), + label=f"{network}, dt={dt:f}", builder=NengoBuilder(), fail_fast=False, ) else: if dt != model.dt: warnings.warn( - "Model dt (%g) does not match Simulator " - "dt (%g)" % (model.dt, dt), + f"Model dt ({model.dt:g}) does not match Simulator dt ({dt:g})", NengoWarning, ) self.model = model @@ -951,7 +950,7 @@ def _call_keras( # rather than the total number of elements in the data) warnings.warn( "Batch size is determined statically via Simulator.minibatch_size; " - "ignoring value passed to `%s`" % func_type + f"ignoring value passed to `{func_type}`" ) if "on_batch" not in func_type: kwargs["batch_size"] = ( @@ -1052,10 +1051,10 @@ def _call_keras( # reorganize results (will be flattened) back into dict if not isinstance(outputs, list): outputs = [outputs] - return collections.OrderedDict(zip(self.model.probes, outputs)) + return dict(zip(self.model.probes, outputs)) elif func_type.startswith("evaluate"): # return outputs as named dict - return collections.OrderedDict(zip(self.keras_model.metrics_names, outputs)) + return dict(zip(self.keras_model.metrics_names, outputs)) else: # return training history return outputs @@ -1092,15 +1091,15 @@ def run(self, time_in_seconds, **kwargs): if time_in_seconds < 0: raise ValidationError( - "Must be positive (got %g)" % (time_in_seconds,), attr="time_in_seconds" + f"Must be non-negative (got {time_in_seconds:g})", attr="time_in_seconds" ) steps = int(np.round(float(time_in_seconds) / self.dt)) if steps == 0: warnings.warn( - "%g results in running for 0 timesteps. Simulator " - "still at time %g." % (time_in_seconds, self.time) + f"{time_in_seconds:g} results in running for 0 timesteps. Simulator " + f"still at time {self.time:g}." ) else: self.run_steps(steps, **kwargs) @@ -1135,10 +1134,9 @@ def run_steps(self, n_steps, data=None, progress_bar=None, stateful=True): # error checking if actual_steps != n_steps: warnings.warn( - "Number of steps (%d) is not an even multiple of " - "`unroll_simulation` (%d). Simulation will run for %d steps, " - "which may have unintended side effects." - % (n_steps, self.unroll, actual_steps), + f"Number of steps ({n_steps}) is not an even multiple of " + f"`unroll_simulation` ({self.unroll}). 
Simulation will run for " + f"{actual_steps} steps, which may have unintended side effects.", RuntimeWarning, ) @@ -1224,7 +1222,9 @@ def save_params(self, path, include_state=False, include_non_trainable=None): if include_state: params.extend(self.tensor_graph.saved_state.values()) - np.savez_compressed(path + ".npz", *tf.keras.backend.batch_get_value(params)) + np.savez_compressed( + str(path) + ".npz", *tf.keras.backend.batch_get_value(params) + ) logger.info("Model parameters saved to %s.npz", path) @@ -1262,14 +1262,14 @@ def load_params(self, path, include_state=False, include_non_trainable=None): if include_state: params.extend(self.tensor_graph.saved_state.values()) - with np.load(path + ".npz") as vals: + with np.load(str(path) + ".npz") as vals: if len(params) != len(vals.files): raise SimulationError( - "Number of saved parameters in %s (%d) != number of variables in " - "the model (%d)" % (path, len(vals.files), len(params)) + f"Number of saved parameters in {path} ({len(vals.files)}) != " + f"number of variables in the model ({len(params)})" ) tf.keras.backend.batch_set_value( - zip(params, (vals["arr_%d" % i] for i in range(len(vals.files)))) + zip(params, (vals[f"arr_{i}"] for i in range(len(vals.files)))) ) logger.info("Model parameters loaded from %s.npz", path) @@ -1331,7 +1331,7 @@ def freeze_params(self, objs): if not isinstance(obj, (Network, Ensemble, Connection)): raise TypeError( - "Objects of type %s do not have parameters to store" % type(obj) + f"Objects of type {type(obj)} do not have parameters to store" ) if isinstance(obj, Network): @@ -1483,8 +1483,8 @@ def get_nengo_params(self, nengo_objs, as_dict=False): params.append({"transform": weights}) else: raise NotImplementedError( - "Cannot get parameters of Connections with transform type '%s'" - % type(obj.transform).__name__ + f"Cannot get parameters of Connections with transform type " + f"'{type(obj.transform).__name__}'" ) else: # note: we don't want to change the original gain (even though @@ -1521,8 +1521,8 @@ def get_nengo_params(self, nengo_objs, as_dict=False): for obj, p in zip(nengo_objs, params): if obj.label in param_dict: raise ValueError( - "Duplicate label ('%s') detected; cannot return " - "parameters with as_dict=True" % obj.label + f"Duplicate label ('{obj.label}') detected; cannot return " + "parameters with as_dict=True" ) else: param_dict[obj.label] = p @@ -1638,9 +1638,9 @@ def arg_func(*args, output=None): fail = abs(a - n) >= atol + rtol * abs(n) if np.any(fail): raise SimulationError( - "Gradient check failed\n" - "numeric values:\n%s\n" - "analytic values:\n%s\n" % (n[fail], a[fail]) + f"Gradient check failed\n" + f"numeric values:\n{n[fail]}\n" + f"analytic values:\n{a[fail]}\n" ) logger.info("Gradient check passed") @@ -1714,17 +1714,17 @@ def get_name(self, obj): if isinstance(obj, Node): if obj not in self.node_inputs: raise ValidationError( - "%s is not an input Node (a nengo.Node with " - "size_in==0), or is from a different network." % obj, + f"{obj} is not an input Node (a nengo.Node with size_in==0), or is " + f"from a different network.", "obj", ) elif isinstance(obj, Probe): if obj not in self.tensor_graph.probe_arrays: - raise ValidationError("%s is from a different network." 
% obj, "obj") + raise ValidationError(f"{obj} is from a different network.", "obj") else: raise ValidationError( - "%s is of an unknown type (%s); should be nengo.Node " - "or nengo.Probe" % (obj, type(obj)), + f"{obj} is of an unknown type ({type(obj)}); should be nengo.Node or " + f"nengo.Probe", "obj", ) return self.tensor_graph.io_names[obj] @@ -1763,27 +1763,21 @@ def _standardize_data(self, data, objects, broadcast_unary=False): if isinstance(data, (list, tuple)): if len(data) != len(objects): warnings.warn( - "Number of elements (%d) in %s does not match number of " - "%ss (%d); consider using an explicit input dictionary in this " - "case, so that the assignment of data to objects is unambiguous." - % ( - len(data), - [type(d).__name__ for d in data], - type(objects[0]).__name__, - len(objects), - ) + f"Number of elements ({len(data)}) in " + f"{[type(d).__name__ for d in data]} does not match number of " + f"{type(objects[0]).__name__}s ({len(objects)}); consider " + f"using an explicit input dictionary in this case, so that the " + f"assignment of data to objects is unambiguous." ) # convert list to named dict - data = collections.OrderedDict( - (self.get_name(obj), val) for obj, val in zip(objects, data) - ) + data = {self.get_name(obj): val for obj, val in zip(objects, data)} elif isinstance(data, dict): # convert objects to string names - data = collections.OrderedDict( - (obj if isinstance(obj, str) else self.get_name(obj), val) + data = { + obj if isinstance(obj, str) else self.get_name(obj): val for obj, val in data.items() - ) + } return data @@ -1816,10 +1810,10 @@ def _generate_inputs(self, data=None, n_steps=None): # different types of generators this could be) if n_steps is not None: raise SimulationError( - "Cannot automatically add n_steps to generator with type %s; " - "please specify n_steps manually as the first element in the " - "values yielded from generator, remembering that it needs to " - "be repeated to have shape (batch_size, 1)" % type(data) + f"Cannot automatically add n_steps to generator with type " + f"{type(data)}; please specify n_steps manually as the first " + f"element in the values yielded from generator, remembering that " + f"it needs to be repeated to have shape (batch_size, 1)" ) return data @@ -1839,7 +1833,7 @@ def _generate_inputs(self, data=None, n_steps=None): ) n_steps = data_steps - input_vals = collections.OrderedDict() + input_vals = {} # fill in data for input nodes for node, output in self.tensor_graph.input_funcs.items(): @@ -1873,8 +1867,8 @@ def _generate_inputs(self, data=None, n_steps=None): for name in data: if name not in input_vals: raise ValidationError( - "Input contained entry for '%s', which is not a valid input name" - % name, + f"Input contained entry for '{name}', which is not a valid input " + f"name", "data", ) @@ -1925,9 +1919,9 @@ def _check_data(self, data, batch_size=None, n_steps=None, nodes=True): and data_batch % self.minibatch_size != 0 ): warnings.warn( - "Number of elements in input data (%d) is not evenly divisible by " - "Simulator.minibatch_size (%d); input data will be truncated." - % (data_batch, self.minibatch_size) + f"Number of elements in input data ({data_batch}) is not " + f"evenly divisible by Simulator.minibatch_size " + f"({self.minibatch_size}); input data will be truncated." 
) data_batch -= data_batch % self.minibatch_size data[k] = v[:data_batch] @@ -1942,40 +1936,40 @@ def _check_data(self, data, batch_size=None, n_steps=None, nodes=True): valid_names = [self.get_name(n) for n in self.node_inputs] if name not in valid_names: raise ValidationError( - "'%s' is not a valid node name; perhaps the name is wrong (it " - "should match the `label` on the Node), or this is not an " - "input Node (a Node with size_in==0) in this network. " - "Valid names are: %s." % (name, valid_names), + f"'{name}' is not a valid node name; perhaps the name is wrong " + f"(it should match the `label` on the Node), or this is not an " + f"input Node (a Node with size_in==0) in this network. " + f"Valid names are: {valid_names}.", "data", ) else: valid_names = [self.get_name(p) for p in self.model.probes] if name not in valid_names: raise ValidationError( - "'%s' is not a valid probe name; perhaps the name is wrong (it " - "should match the `label` on the Probe), or this is not a " - "Probe in this network. Valid names are: %s." - % (name, valid_names), + f"'{name}' is not a valid probe name; perhaps the name is " + f"wrong (it should match the `label` on the Probe), or this " + f"is not a Probe in this network. " + f"Valid names are: {valid_names}.", "data", ) # generic shape checks if len(x.shape) != 3: raise ValidationError( - "should have rank 3 (batch_size, n_steps, dimensions), " - "found rank %d" % len(x.shape), - "%s data" % name, + f"should have rank 3 (batch_size, n_steps, dimensions), found rank " + f"{len(x.shape)}", + f"{name} data", ) if x.shape[0] < self.minibatch_size: raise ValidationError( - "Batch size of data (%d) less than Simulator `minibatch_size` (%d)" - % (x.shape[0], self.minibatch_size), - "%s data" % name, + f"Batch size of data ({x.shape[0]}) less than Simulator " + f"`minibatch_size` ({self.minibatch_size})", + f"{name} data", ) if nodes and x.shape[1] % self.unroll != 0: raise ValidationError( - "The number of timesteps in input data (%s) must be evenly " - "divisible by unroll_simulation (%s)" % (x.shape[1], self.unroll), + f"The number of timesteps in input data ({x.shape[1]}) must be " + f"evenly divisible by unroll_simulation ({self.unroll})", "data", ) @@ -1997,16 +1991,16 @@ def _check_data(self, data, batch_size=None, n_steps=None, nodes=True): for n, x in data.items(): if x.shape[i] != val: raise ValidationError( - "Elements have different %s: %s vs %s" - % (labels[i], val, x.shape[i]), + f"Elements have different {labels[i]}: {val} vs " + f"{x.shape[i]}", "data", ) else: for n, x in data.items(): if x.shape[i] != args[i]: raise ValidationError( - "Data for %s has %s=%s, which does not match " - "expected size (%s)" % (n, labels[i], x.shape[i], args[i]), + f"Data for {n} has {labels[i]}={x.shape[i]}, which does " + f"not match expected size ({args[i]})", "data", ) @@ -2016,8 +2010,8 @@ def _check_data(self, data, batch_size=None, n_steps=None, nodes=True): and n_steps != self.unroll ): raise ValidationError( - "When use_loop=False, n_steps (%d) must exactly match " - "unroll_simulation (%d)" % (n_steps, self.unroll), + f"When use_loop=False, n_steps ({n_steps}) must exactly match " + f"unroll_simulation ({self.unroll})", "n_steps", ) @@ -2031,8 +2025,8 @@ def _check_data(self, data, batch_size=None, n_steps=None, nodes=True): and (data_n_steps.ndim != 2 or data_n_steps.shape[1] != 1) ) or (batch_size is not None and data_n_steps.shape != (batch_size, 1)): raise ValidationError( - "'n_steps' has wrong shape; should be %s (note that this is just " - 
"the integer n_steps value repeated)" % ((batch_size, 1),), + f"'n_steps' has wrong shape; should be {(batch_size, 1)} (note that" + f" this is just the integer n_steps value repeated)", "data", ) if not np.all(data_n_steps == data_n_steps[0, 0]): @@ -2101,9 +2095,9 @@ def __del__(self): if self.closed is not None and not self.closed: warnings.warn( - "Simulator with model=%s was deallocated while open. " - "Simulators should be closed manually to ensure resources " - "are properly freed." % self.model, + f"Simulator with model={self.model} was deallocated while open. " + f"Simulators should be closed manually to ensure resources are " + f"properly freed.", RuntimeWarning, ) self.close() @@ -2121,13 +2115,13 @@ def __getattribute__(self, name): "_closed_attrs" ): raise SimulatorClosed( - "Cannot access Simulator.%s after Simulator is closed" % name + f"Cannot access Simulator.{name} after Simulator is closed" ) return super().__getattribute__(name) -class SimulationData(collections.Mapping): +class SimulationData(collections.abc.Mapping): """ Data structure used to access simulation data from the model. @@ -2175,7 +2169,7 @@ def __getitem__(self, obj): if obj not in self.sim.model.params: raise ValidationError( - "Object is not in parameters of model %s" % self.sim.model, str(obj) + f"Object is not in parameters of model {self.sim.model}", str(obj) ) data = self.sim.model.params[obj] diff --git a/nengo_dl/tensor_graph.py b/nengo_dl/tensor_graph.py index 3bb997813..98feff96a 100644 --- a/nengo_dl/tensor_graph.py +++ b/nengo_dl/tensor_graph.py @@ -5,7 +5,7 @@ import logging import warnings -from collections import OrderedDict, defaultdict +from collections import defaultdict import numpy as np import tensorflow as tf @@ -85,13 +85,13 @@ def __init__( # than the simulation time). we'll compute these outside the simulation # and feed in the result. 
if self.model.toplevel is None: - self.invariant_inputs = OrderedDict() + self.invariant_inputs = {} else: - self.invariant_inputs = OrderedDict( - (n, n.output) + self.invariant_inputs = { + n: n.output for n in self.model.toplevel.all_nodes if n.size_in == 0 and not isinstance(n, tensor_node.TensorNode) - ) + } # remove input nodes because they are executed outside the simulation node_processes = [ @@ -174,7 +174,7 @@ def __init__( key = name.lower() if name_count[key] > 0: - name += "_%d" % name_count[key] + name += f"_{name_count[key]}" self.io_names[obj] = name name_count[key] += 1 @@ -203,7 +203,7 @@ def build_inputs(self): """ # input placeholders - inputs = OrderedDict() + inputs = {} for n in self.invariant_inputs: inputs[n] = tf.keras.layers.Input( shape=(None, n.size_out), @@ -273,7 +273,7 @@ def get_initializer(init_vals): # variables for model parameters with trackable.no_automatic_dependency_tracking_scope(self): - self.base_params = OrderedDict() + self.base_params = {} assert len(self.base_params) == 0 for sig_type in ("trainable", "non_trainable"): for k, v in self.base_arrays_init[sig_type].items(): @@ -284,8 +284,8 @@ def get_initializer(init_vals): shape=shape, dtype=dtype, trainable=sig_type == "trainable", - name="base_params/%s_%s_%s" - % (sig_type, dtype, "_".join(str(x) for x in shape)), + name=f"base_params/{sig_type}_{dtype}_" + f"{'_'.join(str(x) for x in shape)}", ) self.initial_values[k] = initializer @@ -295,7 +295,7 @@ def get_initializer(init_vals): # variables to save the internal state of simulation between runs with trackable.no_automatic_dependency_tracking_scope(self): - self.saved_state = OrderedDict() + self.saved_state = {} for k, v in self.base_arrays_init["state"].items(): initializer, shape, dtype = get_initializer(v) if initializer is not None: @@ -306,7 +306,7 @@ def get_initializer(init_vals): shape=shape, dtype=dtype, trainable=False, - name="saved_state/%s_%s" % (dtype, "_".join(str(x) for x in shape)), + name=f"saved_state/{dtype}_{'_'.join(str(x) for x in shape)}", ) self.initial_values[k] = initializer @@ -435,8 +435,8 @@ def call(self, inputs, training=None, progress=None, stateful=False): if training is True and self.inference_only: raise BuildError( - "TensorGraph was created with inference_only=True; cannot be called " - "with training=%s" % training + f"TensorGraph was created with inference_only=True; cannot be called " + f"with training={training}" ) tf.random.set_seed(self.seed) @@ -645,7 +645,7 @@ def update_probes(probe_tensors, loop_i): # change to shape (minibatch_size,) (required by keras) instead of a scalar steps_run = tf.tile(tf.expand_dims(loop_vars[0], 0), (self.minibatch_size,)) - probe_arrays = OrderedDict() + probe_arrays = {} for p, a in zip(self.model.probes, loop_vars[2]): x = a.stack() @@ -708,7 +708,7 @@ def update_probes(probe_tensors, _): # change to shape (minibatch_size,) (required by keras) instead of a scalar steps_run = tf.tile(tf.expand_dims(loop_i, 0), (self.minibatch_size,)) - probe_arrays = OrderedDict() + probe_arrays = {} for p, a in zip(self.model.probes, probe_data): if self.model.sig[p]["in"].minibatched: x = tf.stack(a, axis=1) @@ -747,7 +747,7 @@ def _build_inner_loop(self, loop_i, update_probes, progress): for unroll_iter in range(self.unroll): logger.debug("BUILDING ITERATION %d", unroll_iter) - with tf.name_scope("iteration_%d" % unroll_iter): + with tf.name_scope(f"iteration_{unroll_iter}"): # fill in invariant input data for n in self.node_inputs: if self.model.sig[n]["out"] in 
self.signals: @@ -964,9 +964,9 @@ def mark_network(parent_configs, net): if self.model.sig[obj][attr].trainable is True: warnings.warn( - "%s has a learning rule and is also set " - "to be trainable; this is likely to " - "produce strange training behaviour." % obj + f"{obj} has a learning rule and is also set to be " + f"trainable; this is likely to produce strange " + f"training behaviour." ) else: self.model.sig[obj][attr].trainable = False @@ -1026,13 +1026,7 @@ def create_signals(self, sigs): memory (e.g., output from `.graph_optimizer.order_signals`) """ - base_arrays = OrderedDict( - [ - ("trainable", OrderedDict()), - ("non_trainable", OrderedDict()), - ("state", OrderedDict()), - ] - ) + base_arrays = {"trainable": {}, "non_trainable": {}, "state": {}} curr_keys = {} sig_idxs = {s: i for i, s in enumerate(sigs)} diff --git a/nengo_dl/tensor_node.py b/nengo_dl/tensor_node.py index c220455a9..804c21203 100644 --- a/nengo_dl/tensor_node.py +++ b/nengo_dl/tensor_node.py @@ -43,28 +43,27 @@ def validate_output(output, minibatch_size=None, output_d=None, dtype=None): if not isinstance(output, tf.TensorSpec) and not tf.is_tensor(output): raise ValidationError( - "TensorNode function must return a Tensor (got %s)" % type(output), + f"TensorNode function must return a Tensor (got {type(output)})", attr="tensor_func", ) if minibatch_size is not None and output.shape[0] != minibatch_size: raise ValidationError( - "TensorNode output should have batch size %d (got %d)" - % (minibatch_size, output.shape[0]), + f"TensorNode output should have batch size {minibatch_size} (got " + f"{output.shape[0]})", attr="tensor_func", ) if output_d is not None and np.prod(output.shape[1:]) != output_d: raise ValidationError( - "TensorNode output should have size %d (got shape %s with size %d)" - % (minibatch_size, output.shape[1:], np.prod(output.shape[1:])), + f"TensorNode output should have size {output_d} (got shape " + f"{output.shape[1:]} with size {np.prod(output.shape[1:])})", attr="tensor_func", ) if dtype is not None and output.dtype != dtype: raise ValidationError( - "TensorNode output should have dtype %s " - "(got %s)" % (dtype, output.dtype), + f"TensorNode output should have dtype {dtype} (got {output.dtype})", attr="tensor_func", ) @@ -143,11 +142,10 @@ def coerce(self, node, func): result = func(*args) except Exception as e: raise ValidationError( - "Attempting to automatically determine TensorNode output shape " - "by calling TensorNode function produced an error. " - "If you would like to avoid this step, try manually setting " - "`TensorNode(..., shape_out=x)`. The error is shown below:\n%s" - % e, + "Attempting to automatically determine TensorNode output " + "shape by calling TensorNode function produced an error. 
If " + "you would like to avoid this step, try manually setting " + "`TensorNode(..., shape_out=x)`.", attr=self.name, obj=node, ) from e @@ -251,12 +249,12 @@ def build_tensor_node(model, node): # input signal if node.shape_in is not None: - sig_in = builder.Signal(shape=(node.size_in,), name="%s.in" % node) + sig_in = builder.Signal(shape=(node.size_in,), name=f"{node}.in") model.add_op(Reset(sig_in)) else: sig_in = None - sig_out = builder.Signal(shape=(node.size_out,), name="%s.out" % node) + sig_out = builder.Signal(shape=(node.size_out,), name=f"{node}.out") model.sig[node]["in"] = sig_in model.sig[node]["out"] = sig_out @@ -407,7 +405,7 @@ def __call__( shape_in=None, synapse=None, return_conn=False, - **layer_args + **layer_args, ): """ Apply the TensorNode layer to the given input object. @@ -475,12 +473,12 @@ def __call__( return (obj, conn) if return_conn else obj def __str__(self): - - return "Layer(%s)" % getattr( + name = getattr( self.layer_func, "name", getattr(self.layer_func, "__name__", self.layer_func), ) + return f"Layer({name})" def tensor_layer(input, layer_func, **kwargs): diff --git a/nengo_dl/tests/dummies.py b/nengo_dl/tests/dummies.py index 0cc40de23..f68d99d34 100644 --- a/nengo_dl/tests/dummies.py +++ b/nengo_dl/tests/dummies.py @@ -30,7 +30,7 @@ def __init__( self._base = ( self if base_shape is None - else Signal(shape=base_shape, dtype=self.dtype, label="%s.base" % label) + else Signal(shape=base_shape, dtype=self.dtype, label=f"{label}.base") ) self._elemoffset = offset self.name = label @@ -87,7 +87,7 @@ def may_share_memory(self, _): return False def __repr__(self): - return "DummySignal(%s)" % self.name + return f"DummySignal({self.name})" class Op: @@ -106,13 +106,13 @@ def __init__(self, sets=None, incs=None, reads=None, updates=None): def __repr__(self): rep = "DummyOp(" if len(self.sets) > 0: - rep += "sets=%s" % self.sets + rep += f"sets={self.sets}" if len(self.incs) > 0: - rep += "incs=%s" % self.incs + rep += f"incs={self.incs}" if len(self.reads) > 0: - rep += "reads=%s" % self.reads + rep += f"reads={self.reads}" if len(self.updates) > 0: - rep += "updates=%s" % self.updates + rep += f"updates={self.updates}" rep += ")" return rep diff --git a/nengo_dl/tests/test_benchmarks.py b/nengo_dl/tests/test_benchmarks.py index d507173e2..fbd8bb4a9 100644 --- a/nengo_dl/tests/test_benchmarks.py +++ b/nengo_dl/tests/test_benchmarks.py @@ -105,8 +105,8 @@ def _test_random( @pytest.mark.parametrize( "network, train", [("integrator", True), ("cconv", False), ("test", True)] ) -def test_run_profile(network, train, pytestconfig, monkeypatch, tmpdir): - monkeypatch.chdir(tmpdir) +def test_run_profile(network, train, pytestconfig, monkeypatch, tmp_path): + monkeypatch.chdir(tmp_path) if network == "integrator": net = benchmarks.integrator(3, 2, nengo.SpikingRectifiedLinear()) @@ -139,12 +139,15 @@ def test_cli(): old_argv = sys.argv sys.argv = [sys.argv[0]] + ( - "build --benchmark random_network --dimensions %d " - "--neurons_per_d %d --neuron_type SoftLIFRate " - "--kwarg n_ensembles=%d --kwarg connections_per_ensemble=%d " - "profile --no-train --n_steps 10 --batch_size 2 --device /cpu:0 " - "--unroll 5 --time-only" - % (dimensions, neurons_per_d, n_ensembles, n_connections) + f"build " + f"--benchmark random_network " + f"--dimensions {dimensions} " + f"--neurons_per_d {neurons_per_d} " + f"--neuron_type SoftLIFRate " + f"--kwarg n_ensembles={n_ensembles} " + f"--kwarg connections_per_ensemble={n_connections} " + f"profile " + f"--no-train --n_steps 10 
--batch_size 2 --device /cpu:0 --unroll 5 --time-only" ).split() obj = {} with pytest.raises(SystemExit): diff --git a/nengo_dl/tests/test_graph_optimizer.py b/nengo_dl/tests/test_graph_optimizer.py index c13f617d4..8cadec2bc 100644 --- a/nengo_dl/tests/test_graph_optimizer.py +++ b/nengo_dl/tests/test_graph_optimizer.py @@ -612,7 +612,7 @@ def test_order_signals_views(): sig = dummies.Signal(shape=(7,), label="sig") sig2 = dummies.Signal(shape=(7,), label="sig2") views = [ - dummies.Signal(shape=(1,), base_shape=(5,), offset=1 + i, label="view_%d" % i) + dummies.Signal(shape=(1,), base_shape=(5,), offset=1 + i, label=f"view_{i}") for i in range(5) ] for v in views: diff --git a/nengo_dl/tests/test_keras.py b/nengo_dl/tests/test_keras.py index e2d4fde82..69f1863f7 100644 --- a/nengo_dl/tests/test_keras.py +++ b/nengo_dl/tests/test_keras.py @@ -348,20 +348,20 @@ def test_learning_phase_warning(Simulator): pass -def test_save_load_weights(Simulator, tmpdir): +def test_save_load_weights(Simulator, tmp_path): net = dummies.linear_net()[0] net.connections[0].transform = 2 with Simulator(net, minibatch_size=1) as sim0: - sim0.keras_model.save_weights(str(tmpdir.join("tmp"))) + sim0.keras_model.save_weights(str(tmp_path / "tmp")) net.connections[0].transform = 3 with Simulator(net, minibatch_size=2) as sim1: assert np.allclose(sim1.data[net.connections[0]].weights, 3) - sim1.keras_model.load_weights(str(tmpdir.join("tmp"))) + sim1.keras_model.load_weights(str(tmp_path / "tmp")) assert np.allclose(sim1.data[net.connections[0]].weights, 2) diff --git a/nengo_dl/tests/test_learning_rules.py b/nengo_dl/tests/test_learning_rules.py index 71510cde4..438f80d6c 100644 --- a/nengo_dl/tests/test_learning_rules.py +++ b/nengo_dl/tests/test_learning_rules.py @@ -79,7 +79,7 @@ def test_merged_learning(Simulator, rule, weights, seed): assert np.allclose(sim.data[p1][i], canonical[1]) -def test_online_learning_reset(Simulator, tmpdir, seed): +def test_online_learning_reset(Simulator, tmp_path, seed): with nengo.Network(seed=seed) as net: inp = nengo.Ensemble(10, 1) out = nengo.Node(size_in=1) @@ -93,7 +93,7 @@ def test_online_learning_reset(Simulator, tmpdir, seed): w1 = np.array(sim.data[conn].weights) - sim.save_params(str(tmpdir.join("tmp"))) + sim.save_params(tmp_path / "tmp") # test that learning has changed weights assert not np.allclose(w0, w1) @@ -110,6 +110,6 @@ def test_online_learning_reset(Simulator, tmpdir, seed): with Simulator(net) as sim: assert not np.allclose(w1, sim.data[conn].weights) - sim.load_params(str(tmpdir.join("tmp"))) + sim.load_params(tmp_path / "tmp") assert np.allclose(w1, sim.data[conn].weights) diff --git a/nengo_dl/tests/test_nengo_tests.py b/nengo_dl/tests/test_nengo_tests.py index 42ce485d7..6988ba61d 100644 --- a/nengo_dl/tests/test_nengo_tests.py +++ b/nengo_dl/tests/test_nengo_tests.py @@ -112,11 +112,11 @@ def test_dtype(Simulator, request, seed, bits): default = nengo.rc.get("precision", "bits") request.addfinalizer(lambda: nengo.rc.set("precision", "bits", default)) - float_dtype = np.dtype(getattr(np, "float%s" % bits)) - int_dtype = np.dtype(getattr(np, "int%s" % bits)) + float_dtype = np.dtype(getattr(np, f"float{bits}")) + int_dtype = np.dtype(getattr(np, f"int{bits}")) with nengo.Network() as model: - nengo_dl.configure_settings(dtype="float%s" % bits) + nengo_dl.configure_settings(dtype=f"float{bits}") u = nengo.Node([0.5, -0.4]) a = nengo.Ensemble(10, 2) @@ -129,9 +129,7 @@ def test_dtype(Simulator, request, seed, bits): # check that the builder has created 
signals of the correct dtype # (note that we may not necessarily use that dtype during simulation) for sig in sim.tensor_graph.signals: - assert sig.dtype in (float_dtype, int_dtype), ( - "Signal '%s' wrong dtype" % sig - ) + assert sig.dtype in (float_dtype, int_dtype), f"Signal '{sig}' wrong dtype" objs = (obj for obj in model.all_objects if sim.data[obj] is not None) for obj in objs: diff --git a/nengo_dl/tests/test_neurons.py b/nengo_dl/tests/test_neurons.py index 145af6e5a..4542ba47a 100644 --- a/nengo_dl/tests/test_neurons.py +++ b/nengo_dl/tests/test_neurons.py @@ -84,7 +84,7 @@ def test_soft_lif(Simulator, sigma, seed): if sigma == 1: assert "sigma" not in x else: - assert "sigma=%s" % sigma in x + assert f"sigma={sigma}" in x with nengo.Simulator(net) as sim: _, nengo_curves = nengo.utils.ensemble.tuning_curves(ens, sim) diff --git a/nengo_dl/tests/test_setup.py b/nengo_dl/tests/test_setup.py new file mode 100644 index 000000000..fe0c2e086 --- /dev/null +++ b/nengo_dl/tests/test_setup.py @@ -0,0 +1,25 @@ +# pylint: disable=missing-docstring + +import ast +import pathlib +import sys + +import pytest + +import nengo_dl + + +@pytest.mark.skipif( + sys.version_info < (3, 8, 0), + reason="ast.parse `feature_version` added in Python 3.8", +) +@pytest.mark.parametrize("feature_version", [(3, 4), (3, 5)]) +def test_setup_compat(feature_version): + setup_py_path = pathlib.Path(nengo_dl.__file__).parents[1] / "setup.py" + + assert setup_py_path.exists() + with setup_py_path.open("r") as fh: + source = fh.read() + + parsed = ast.parse(source, feature_version=feature_version) + assert parsed is not None diff --git a/nengo_dl/tests/test_simulator.py b/nengo_dl/tests/test_simulator.py index 0bd0acbb7..97cf8665a 100644 --- a/nengo_dl/tests/test_simulator.py +++ b/nengo_dl/tests/test_simulator.py @@ -2,10 +2,8 @@ import contextlib import logging -import os import pickle import sys -from collections import OrderedDict import nengo import numpy as np @@ -512,12 +510,12 @@ def test_generate_inputs(Simulator, seed): np.ones((minibatch_size, n_steps, 1)) * 2, ] for i, x in enumerate(vals): - assert np.allclose(feed["node_%d" % i if i > 0 else "node"], x) + assert np.allclose(feed[f"node_{i}" if i > 0 else "node"], x) assert np.allclose(sim.data[p[i]], x) # check that unseeded process was different in each minibatch item assert not np.allclose( - feed["node_%d" % (len(inp) - 1)][0], feed["node_%d" % (len(inp) - 1)][1] + feed[f"node_{len(inp) - 1}"][0], feed[f"node_{len(inp) - 1}"][1] ) with pytest.raises(SimulationError, match="automatically add n_steps"): @@ -528,7 +526,7 @@ def test_generate_inputs(Simulator, seed): @pytest.mark.parametrize("include_state", (True, False)) -def test_save_load_params(Simulator, include_state, tmpdir): +def test_save_load_params(Simulator, include_state, tmp_path): def get_network(seed): with nengo.Network(seed=seed) as net: configure_settings(simplifications=[]) @@ -554,7 +552,7 @@ def get_network(seed): sim_save.run_steps(10) - sim_save.save_params(str(tmpdir), include_state=include_state) + sim_save.save_params(tmp_path, include_state=include_state) sim_save.run_steps(10) @@ -575,7 +573,7 @@ def get_network(seed): pre_model = sim_load.keras_model - sim_load.load_params(str(tmpdir), include_state=include_state) + sim_load.load_params(tmp_path, include_state=include_state) weights2, enc2, bias2 = sim_load.data.get_params( (conn1, "weights"), (ens1, "encoders"), (ens1, "bias") @@ -603,10 +601,10 @@ def get_network(seed): with Simulator(nengo.Network()) as sim: with 
pytest.raises(SimulationError, match="!= number of variables"): - sim.load_params(str(tmpdir)) + sim.load_params(tmp_path) -def test_save_load_params_deprecation(Simulator, tmpdir): +def test_save_load_params_deprecation(Simulator, tmp_path): with nengo.Network() as net: a = nengo.Node([1]) p = nengo.Probe(a, synapse=0.1) @@ -617,7 +615,7 @@ def test_save_load_params_deprecation(Simulator, tmpdir): with pytest.warns( DeprecationWarning, match="include_non_trainable is deprecated" ): - sim0.save_params(str(tmpdir.join("tmp")), include_non_trainable=True) + sim0.save_params(tmp_path / "tmp", include_non_trainable=True) sim0.run_steps(5) @@ -625,7 +623,7 @@ def test_save_load_params_deprecation(Simulator, tmpdir): with pytest.warns( DeprecationWarning, match="include_non_trainable is deprecated" ): - sim1.load_params(str(tmpdir.join("tmp")), include_non_trainable=True) + sim1.load_params(tmp_path / "tmp", include_non_trainable=True) sim1.run_steps(5) @@ -725,7 +723,7 @@ def __call__(self, t): @pytest.mark.training -def test_tensorboard(Simulator, tmpdir): +def test_tensorboard(Simulator, tmp_path): with nengo.Network() as net: a = nengo.Node([0]) b = nengo.Ensemble(10, 1, neuron_type=nengo.LIFRate()) @@ -738,7 +736,7 @@ def test_tensorboard(Simulator, tmpdir): n_epochs = 3 with Simulator(net) as sim: - log_dir = str(tmpdir.join("a_run")) + log_dir = tmp_path / "a_run" sim.compile( tf.optimizers.SGD(0.0), @@ -752,7 +750,7 @@ def test_tensorboard(Simulator, tmpdir): callbacks=[ tf.keras.callbacks.TensorBoard(log_dir=log_dir, profile_batch=0), callbacks.NengoSummaries( - log_dir=os.path.join(log_dir, "nengo"), + log_dir=log_dir / "nengo", sim=sim, objects=[b, b.neurons, c], ), @@ -761,18 +759,18 @@ def test_tensorboard(Simulator, tmpdir): # look up name of event file event_dir = ( - os.path.join(log_dir, "train") + log_dir / "train" if version.parse(tf.__version__) < version.parse("2.3.0rc0") or eager_enabled() else log_dir ) event_file = [ x - for x in os.listdir(event_dir) - if x.startswith("events.out.tfevents") and not x.endswith(".profile-empty") + for x in event_dir.glob("events.out.tfevents*") + if x.suffix != ".profile-empty" ] assert len(event_file) == 1 - event_file = os.path.join(event_dir, event_file[0]) - assert os.path.exists(event_file) + event_file = event_file[0] + assert event_file.exists() summaries = ["epoch_loss", "epoch_probe_loss", "epoch_probe_1_loss"] # metadata stuff in event file @@ -782,30 +780,27 @@ def test_tensorboard(Simulator, tmpdir): else 3 ) with contextlib.suppress() if eager_enabled() else context.eager_mode(): - for i, record in enumerate(tf.data.TFRecordDataset(event_file)): + for i, record in enumerate(tf.data.TFRecordDataset(str(event_file))): event = event_pb2.Event.FromString(record.numpy()) if i >= meta_steps: curr_step = (i - meta_steps) // len(summaries) assert event.step == curr_step - # assert event.summary.value[0].tag - # == summaries[(i - 2) % len(summaries)] - # log order non-deterministic in python 3.5 so we use this less - # stringent check - assert event.summary.value[0].tag in summaries + assert ( + event.summary.value[0].tag + == summaries[(i - meta_steps) % len(summaries)] + ) assert i == len(summaries) * n_epochs + ( # pylint: disable=undefined-loop-variable meta_steps - 1 ) # look up name of event file - event_file = [ - x for x in os.listdir(os.path.join(log_dir, "nengo")) if x.endswith(".v2") - ] + event_file = list((log_dir / "nengo").glob("*.v2")) assert len(event_file) == 1 - event_file = os.path.join(log_dir, "nengo", 
event_file[0]) - assert os.path.exists(event_file) + event_file = event_file[0] + assert event_file.exists() summaries = [ "Ensemble_None_encoders", @@ -813,7 +808,7 @@ def test_tensorboard(Simulator, tmpdir): "Connection_None_weights", ] with contextlib.suppress() if eager_enabled() else context.eager_mode(): - for i, record in enumerate(tf.data.TFRecordDataset(event_file)): + for i, record in enumerate(tf.data.TFRecordDataset(str(event_file))): event = event_pb2.Event.FromString(record.numpy()) if i < 1: @@ -828,16 +823,16 @@ def test_tensorboard(Simulator, tmpdir): # check for error on invalid object with pytest.raises(ValidationError, match="Unknown summary object"): - callbacks.NengoSummaries(log_dir=log_dir + "/nengo", sim=sim, objects=[a]) + callbacks.NengoSummaries(log_dir=log_dir / "nengo", sim=sim, objects=[a]) if version.parse(nengo.__version__) >= version.parse("3.1.0"): with pytest.raises(ValidationError, match="does not have any weights"): - callbacks.NengoSummaries(log_dir=log_dir + "/nengo", sim=sim, objects=[c0]) + callbacks.NengoSummaries(log_dir=log_dir / "nengo", sim=sim, objects=[c0]) @pytest.mark.parametrize("mode", ("predict", "train")) @pytest.mark.training -def test_profile(Simulator, mode, tmpdir, pytestconfig): +def test_profile(Simulator, mode, tmp_path, pytestconfig): if ( pytestconfig.getoption("--graph-mode") and version.parse(tf.__version__) >= version.parse("2.4.0rc0") @@ -853,7 +848,7 @@ def test_profile(Simulator, mode, tmpdir, pytestconfig): # note: TensorFlow bug if using profile_batch=1, see # https://github.com/tensorflow/tensorflow/issues/37543 callback = callbacks.TensorBoard( - log_dir=str(tmpdir.join("profile")), profile_batch=2 + log_dir=str(tmp_path / "profile"), profile_batch=2 ) if mode == "predict": @@ -864,13 +859,10 @@ def test_profile(Simulator, mode, tmpdir, pytestconfig): {a: np.zeros((2, 5, 1))}, {p: np.zeros((2, 5, 1))}, callbacks=[callback] ) - assert os.path.exists( - str( - tmpdir.join("profile", "train") - if version.parse(tf.__version__) < version.parse("2.3.0rc0") - else tmpdir.join("profile") - ) - ) + path = tmp_path / "profile" + if version.parse(tf.__version__) < version.parse("2.3.0rc0"): + path /= "train" + assert path.exists() def test_dt_readonly(Simulator): @@ -887,12 +879,10 @@ def test_probe_data(): sim = dummies.Simulator() a = dummies.Probe(add_to_container=False) b = dummies.Probe(add_to_container=False) - sim.model.params = OrderedDict( - { - a: [np.zeros((5, 1, 3)), np.ones((5, 1, 3))], - b: [np.ones((1, 1, 3)), np.zeros((1, 1, 3))], - } - ) + sim.model.params = { + a: [np.zeros((5, 1, 3)), np.ones((5, 1, 3))], + b: [np.ones((1, 1, 3)), np.zeros((1, 1, 3))], + } sim.model.probes = (a, b) data = SimulationData(sim, True) assert data[a].shape == (5, 2, 3) @@ -1884,7 +1874,7 @@ def test_logging(Simulator, caplog): sim.run_steps(10) for rec in caplog.records: - assert rec.getMessage(), "Record %s has empty message" % rec + assert rec.getMessage(), f"Record {rec} has empty message" def test_floatx_context(Simulator): diff --git a/nengo_dl/tests/test_tensor_graph.py b/nengo_dl/tests/test_tensor_graph.py index debc0dcdc..326235b65 100644 --- a/nengo_dl/tests/test_tensor_graph.py +++ b/nengo_dl/tests/test_tensor_graph.py @@ -579,15 +579,15 @@ def test_deterministic_order(planner, tmp_path): # we could make this work by backporting the deterministic toposort from # nengo>3.1.0, but that doesn't seem worth it to get these two niche planners # to be deterministic - pytest.skip("'%s' is nondeterministic in Nengo<=3.1.0" % 
planner)
+        pytest.skip(f"'{planner}' is nondeterministic in Nengo<=3.1.0")
 
     code = textwrap.dedent(
-        """
+        f"""
 import nengo
 import nengo_dl
 
 with nengo.Network(seed=0) as net:
-    nengo_dl.configure_settings(planner=nengo_dl.graph_optimizer.%s)
+    nengo_dl.configure_settings(planner=nengo_dl.graph_optimizer.{planner})
 
     # use ensemblearrays as they create a lot of parallel ops
     ens0 = nengo.networks.EnsembleArray(1, 10)
@@ -629,7 +629,6 @@ def test_deterministic_order(planner, tmp_path):
     print(s.shape)
     print(s.initial_value)
 """
-        % planner
     )
     tmp_path = tmp_path / "test.py"
     tmp_path.write_text(code, encoding="utf-8")
diff --git a/nengo_dl/tests/test_tensor_node.py b/nengo_dl/tests/test_tensor_node.py
index 27c5d3882..a93c343e1 100644
--- a/nengo_dl/tests/test_tensor_node.py
+++ b/nengo_dl/tests/test_tensor_node.py
@@ -1,5 +1,6 @@
 # pylint: disable=missing-docstring
 
+import re
 from functools import partial
 
 import nengo
@@ -354,7 +355,10 @@ def test_func(x):
     layer_w_func = Layer(test_func)
     assert str(layer_w_func) == "Layer(test_func)"
     assert "TensorNode (unlabeled)" in str(layer_w_func(node))
-    assert str(layer_w_func(node, label="test_func")) == '<TensorNode "test_func">'
+    # nengo <= 3.1 uses double quotes around the name, newer versions use single quotes
+    assert re.compile("<TensorNode ['\"]test_func['\"]>").match(
+        str(layer_w_func(node, label="test_func"))
+    )
 
 
 class TestLayer(tf.keras.layers.Layer):
     pass
diff --git a/nengo_dl/transform_builders.py b/nengo_dl/transform_builders.py
index 97cf4e673..818586885 100644
--- a/nengo_dl/transform_builders.py
+++ b/nengo_dl/transform_builders.py
@@ -61,7 +61,7 @@ def build_pre(self, signals, config):
 
         if self.conv.dimensions > len(fmts):
             raise NotImplementedError(
-                "Convolutions > %d dimensions are not supported" % len(fmts)
+                f"Convolutions > {len(fmts)} dimensions are not supported"
             )
 
         fmt = fmts[self.conv.dimensions - 1]
diff --git a/nengo_dl/utils.py b/nengo_dl/utils.py
index 8e3361257..71e480fcb 100644
--- a/nengo_dl/utils.py
+++ b/nengo_dl/utils.py
@@ -108,19 +108,19 @@ def aligned_func(*args):
 
         if output is None:
             raise SimulationError(
-                "Function %r returned None" % function_name(func, sanitize=False)
+                f"Function '{function_name(func, sanitize=False)}' returned None"
            )
 
         try:
             if not np.all(np.isfinite(output)):
                 raise SimulationError(
-                    "Function %r returned invalid value %r"
-                    % (function_name(func, sanitize=False), output)
+                    f"Function '{function_name(func, sanitize=False)}' returned "
+                    f"invalid value {output!r}"
                 )
         except (TypeError, ValueError) as e:
             raise SimulationError(
-                "Function %r returned a value %r of invalid type %r"
-                % (function_name(func, sanitize=False), output, type(output))
+                f"Function '{function_name(func, sanitize=False)}' returned a "
+                f"value {output!r} of invalid type '{type(output).__name__}'"
             ) from e
 
         output = np.asarray(output, dtype=output_dtype)
@@ -161,7 +161,7 @@ def __call__(self, progress, data, width):
         if data["percentage"] is None:
             msg = self.msg
         else:
-            msg = "%s (%d%%)" % (self.msg, data["percentage"])
+            msg = f"{self.msg} ({int(data['percentage'])}%)"
 
         offset = width // 2 - len(msg) // 2
 
@@ -196,7 +196,7 @@ def __init__(self, present="", past=None, max_value=1, **kwargs):
         if past is None:
             past = present
 
-        self.msg_bar = MessageBar(msg=present, finish_msg="%s finished in" % past)
+        self.msg_bar = MessageBar(msg=present, finish_msg=f"{past} finished in")
         widgets = [self.msg_bar, " "]
 
         if max_value is None:
@@ -268,7 +268,7 @@ def sub(self, msg=None, **kwargs):
             self.sub_bar.finish()
 
         self.sub_bar = SubProgressBar(
-            present="%s: %s" % (self.present, msg) if msg else 
self.present, **kwargs + present=f"{self.present}: {msg}" if msg else self.present, **kwargs ) return self.sub_bar diff --git a/nengo_dl/version.py b/nengo_dl/version.py index 1eee49038..e2dd9921b 100644 --- a/nengo_dl/version.py +++ b/nengo_dl/version.py @@ -3,6 +3,9 @@ and conform to PEP440 (see https://www.python.org/dev/peps/pep-0440/). '.devN' will be added to the version unless the code base represents a release version. Release versions are git tagged with the version. + +We avoid the use of f-strings so that this can be imported in Python < 3.6, +since it is required by setup.py. """ import warnings @@ -32,12 +35,17 @@ if nengo.version.version_info < minimum_nengo_version: # pragma: no cover raise ValueError( - "NengoDL does not support Nengo version %s. Upgrade " - "with 'pip install --upgrade --no-deps nengo'." % (nengo.version.version,) + ( + "NengoDL does not support Nengo version {nengo_version}. " + "Upgrade with 'pip install --upgrade --no-deps nengo'." + ).format(nengo_version=nengo.version.version) ) elif nengo.version.version_info > latest_nengo_version: # pragma: no cover - warnings.warn( - "This version of NengoDL has not been tested with your Nengo " - "version (%s). The latest fully supported version is %d.%d.%d." - % ((nengo.version.version,) + latest_nengo_version) + warnstr = ( + "This version of NengoDL has not been tested with your Nengo version " + "({nengo_version}). The latest fully supported version is {latest_version}." + ).format( + nengo_version=nengo.version.version, + latest_version=".".join(str(x) for x in latest_nengo_version), ) + warnings.warn(warnstr) diff --git a/pyproject.toml b/pyproject.toml index ee1a02bee..ee2d381ec 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ requires = ["setuptools", "wheel"] [tool.black] -target-version = ['py35', 'py36', 'py37', 'py38'] +target-version = ['py36', 'py37', 'py38'] exclude = ''' ( '*/whitepaper2018_code.py' diff --git a/setup.cfg b/setup.cfg index a0f0d9408..df055be4e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -6,7 +6,7 @@ build-dir = docs/_build all_files = 1 [coverage:run] -source = nengo_dl +source = ./ [coverage:report] # Regexes for lines to exclude from consideration diff --git a/setup.py b/setup.py index e3658b9a3..427b3d66a 100755 --- a/setup.py +++ b/setup.py @@ -3,7 +3,7 @@ # Automatically generated by nengo-bones, do not edit this file directly import io -import os +import pathlib import runpy try: @@ -26,8 +26,8 @@ def read(*filenames, **kwargs): return sep.join(buf) -root = os.path.dirname(os.path.realpath(__file__)) -version = runpy.run_path(os.path.join(root, "nengo_dl", "version.py"))["version"] +root = pathlib.Path(__file__).parent +version = runpy.run_path(str(root / "nengo_dl" / "version.py"))["version"] import sys @@ -47,7 +47,7 @@ def read(*filenames, **kwargs): # environment, so we can't just look up the tensorflow version in the current # environment. but the pip package will be in the isolated sys.path, so we can use # that to look up the site-packages directory of the original environment. 
- target_path = os.path.join("site-packages", "pip") + target_path = str(pathlib.Path("site-packages", "pip")) for path in sys.path: if target_path in path: source_path = [path[: path.index("pip")]] @@ -72,7 +72,7 @@ def read(*filenames, **kwargs): install_req = [ "nengo>=3.0.0", "numpy>=1.16.0", - "%s>=2.2.0" % tf_req, + "{}>=2.2.0".format(tf_req), "jinja2>=2.10.1", "packaging>=20.0", "progressbar2>=3.39.0", @@ -122,7 +122,7 @@ def read(*filenames, **kwargs): "optional": optional_req, "tests": tests_req, }, - python_requires=">=3.5", + python_requires=">=3.6", entry_points={ "nengo.backends": [ "dl = nengo_dl:Simulator", @@ -136,7 +136,6 @@ def read(*filenames, **kwargs): "Operating System :: Microsoft :: Windows", "Operating System :: POSIX :: Linux", "Programming Language :: Python", - "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8",
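
A note on the new nengo_dl/tests/test_setup.py introduced above: it relies on the feature_version argument that ast.parse gained in Python 3.8 to check that setup.py still parses under the Python 3.4 and 3.5 grammars. This complements version.py deliberately avoiding f-strings, presumably so that an interpreter older than the new ">=3.6" floor still gets as far as the informative python_requires error instead of failing with a bare SyntaxError. The following is a minimal standalone sketch of the same kind of check, not the project's test itself; it assumes Python >= 3.8 and a setup.py in the current working directory:

    import ast
    import pathlib

    # Read setup.py and try to parse it with the Python 3.5 grammar.
    # f-strings and other 3.6+ syntax raise a SyntaxError here, even though
    # the interpreter running the check supports them.
    source = pathlib.Path("setup.py").read_text(encoding="utf-8")
    try:
        ast.parse(source, feature_version=(3, 5))
        print("setup.py contains no post-3.5 syntax")
    except SyntaxError as exc:
        print(f"setup.py uses post-3.5 syntax: {exc}")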