Commit 4e49431

[batchnorm_to_affine]: epsilon value is now read out from the attributes. (#21)

[test_batchnorm_to_affine]: added a test case for various epsilon values.
mmrahorovic authored Feb 20, 2021
1 parent 915f835 commit 4e49431
Showing 2 changed files with 56 additions and 1 deletion.
4 changes: 3 additions & 1 deletion src/finn/transformation/batchnorm_to_affine.py
@@ -32,6 +32,7 @@
 
 from finn.transformation.base import Transformation
 from finn.transformation.infer_shapes import InferShapes
+from finn.util.basic import get_by_name
 
 
 class BatchNormToAffine(Transformation):
@@ -52,7 +53,8 @@ def apply(self, model):
                 bias = model.get_initializer(n.input[2])
                 mean = model.get_initializer(n.input[3])
                 variance = model.get_initializer(n.input[4])
-                epsilon = 1e-5
+                epsilon = get_by_name(n.attribute, "epsilon")
+                epsilon = getattr(epsilon, "f", 1e-5)
                 # find A and B to compute batchnorm as affine transpose Ax+B
                 # TODO is a division by moving avg factor needed for variance?
                 A = scale / np.sqrt(epsilon + variance)
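
For reference, a minimal standalone sketch (plain onnx only, hypothetical node names, not part of the commit) of what the new lookup does: get_by_name returns the AttributeProto named "epsilon" when the node carries one, and the getattr fallback yields the ONNX default of 1e-5 when it does not.

import onnx

# Node that carries an explicit epsilon attribute.
node_with_eps = onnx.helper.make_node(
    "BatchNormalization",
    inputs=["x", "s", "b", "m", "v"],
    outputs=["y"],
    epsilon=1e-3,
)

# Node that relies on the ONNX default instead.
node_default = onnx.helper.make_node(
    "BatchNormalization",
    inputs=["x", "s", "b", "m", "v"],
    outputs=["y"],
)

def read_epsilon(node):
    # Stand-in for finn.util.basic.get_by_name: find the AttributeProto
    # called "epsilon"; float attributes store their value in the .f field.
    eps_attr = next((a for a in node.attribute if a.name == "epsilon"), None)
    # getattr on None falls through to the default, mirroring the patched code.
    return getattr(eps_attr, "f", 1e-5)

assert abs(read_epsilon(node_with_eps) - 1e-3) < 1e-6
assert read_epsilon(node_default) == 1e-5

The two-step fallback keeps the previous hard-coded behaviour (1e-5) for models whose batchnorm nodes omit the attribute.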
53 changes: 53 additions & 0 deletions tests/transformation/test_batchnorm_to_affine.py
@@ -29,6 +29,7 @@
 import pytest
 
 import numpy as np
+import onnx
 import os
 import urllib.request as ureq
 
@@ -65,3 +66,55 @@ def test_batchnorm_to_affine_shufflenet():
     produced = oxe.execute_onnx(new_model, input_dict)[oname]
     assert np.isclose(expected, produced).all()
     os.remove(export_onnx_path)
+
+
+@pytest.mark.parametrize("epsilon", [0.0, 0.00001, 0.001])
+def test_batchnorm_to_affine_epsilon(epsilon):
+    """ Dummy batchnorm node to test out the epsilon attribute. """
+
+    batchnorm_node = onnx.helper.make_node(
+        "BatchNormalization",
+        inputs=["x", "s", "bias", "mean", "var"],
+        outputs=["y"],
+        epsilon=epsilon,
+    )
+
+    x = onnx.helper.make_tensor_value_info("x", onnx.TensorProto.FLOAT, [1, 3, 5, 5])
+    s = onnx.helper.make_tensor_value_info("s", onnx.TensorProto.FLOAT, [3])
+    bias = onnx.helper.make_tensor_value_info("bias", onnx.TensorProto.FLOAT, [3])
+    mean = onnx.helper.make_tensor_value_info("mean", onnx.TensorProto.FLOAT, [3])
+    var = onnx.helper.make_tensor_value_info("var", onnx.TensorProto.FLOAT, [3])
+    y = onnx.helper.make_tensor_value_info("y", onnx.TensorProto.FLOAT, [1, 3, 5, 5])
+
+    # Graph
+    graph = onnx.helper.make_graph(
+        nodes=[batchnorm_node],
+        name="test_batchnorm_graph",
+        inputs=[x],
+        outputs=[y],
+        value_info=[s, bias, mean, var],
+    )
+
+    onnx_model = onnx.helper.make_model(graph, producer_name="test_batchnorm-model")
+    model = ModelWrapper(onnx_model)
+
+    model.set_initializer("s", np.array([1, 2, 3]).astype(np.float32))
+    model.set_initializer("bias", np.array([1, 2, 3]).astype(np.float32))
+    model.set_initializer("mean", np.array([3, 4, 5]).astype(np.float32))
+    model.set_initializer("var", np.array([0.5, 0.7, 0.3]).astype(np.float32))
+
+    i_val = np.arange(0, 3 * 5 * 5, dtype=np.float32)
+    i_val = np.reshape(i_val, [1, 3, 5, 5])
+    input_dict = {"x": i_val}
+    output_node_name = "y"
+
+    output_dict = oxe.execute_onnx(model, input_dict, return_full_exec_context=True)
+    output_original = output_dict[output_node_name]
+
+    model_lowered = model.transform(BatchNormToAffine())
+    output_dict = oxe.execute_onnx(
+        model_lowered, input_dict, return_full_exec_context=True
+    )
+    output_lowered = output_dict[output_node_name]
+
+    assert (output_original == output_lowered).all()
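
The test can demand that the original and lowered outputs agree because inference-time batchnorm is itself an affine map per channel. A small NumPy sketch (reusing the test's initializer values; not part of the commit) of the folding that BatchNormToAffine performs:

import numpy as np

# Inference-time batchnorm per channel:
#   y = scale * (x - mean) / sqrt(variance + epsilon) + bias
# which folds into y = A * x + B, with A and B as in the transformation above.
scale = np.array([1, 2, 3], dtype=np.float32)
bias = np.array([1, 2, 3], dtype=np.float32)
mean = np.array([3, 4, 5], dtype=np.float32)
variance = np.array([0.5, 0.7, 0.3], dtype=np.float32)
epsilon = 0.001  # one of the parametrized values; now read from the node attribute

A = scale / np.sqrt(epsilon + variance)
B = bias - A * mean

x = np.array([0.5, -1.0, 2.0], dtype=np.float32)
bn_out = scale * (x - mean) / np.sqrt(variance + epsilon) + bias
assert np.isclose(bn_out, A * x + B).all()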
