diff --git a/README.md b/README.md
index 95a607c..c5e20ce 100644
--- a/README.md
+++ b/README.md
@@ -79,7 +79,7 @@ jupyter-notebook --no-browser --allow-root --port=8888
 
 First, create & source a virtual environment:
 ```shell
-conda create -n <env_name> python=3.10
+conda create -n <env_name> python=3.8
 conda activate <env_name>
 ```
 
diff --git a/build/cybersecurity-mlp/build.py b/build/cybersecurity-mlp/build.py
index b5fb380..f97058f 100644
--- a/build/cybersecurity-mlp/build.py
+++ b/build/cybersecurity-mlp/build.py
@@ -31,7 +31,6 @@ from finn.util.basic import alveo_default_platform
 
 import os
 import shutil
-from custom_steps import custom_step_mlp_export
 
 # Which platforms to build the networks for
 zynq_platforms = ["Pynq-Z1", "Ultra96", "ZCU104"]
@@ -90,8 +89,7 @@ def platform_to_shell(platform):
         save_intermediate_models=True,
     )
 
-    # Export MLP model to FINN-ONNX
-    model = custom_step_mlp_export(model_name)
+    model = "models/%s.onnx" % model_name
     # Launch FINN compiler to generate bitfile
     build.build_dataflow_cfg(model, cfg)
     # Copy bitfiles into release dir if found
diff --git a/build/cybersecurity-mlp/custom_steps.py b/build/cybersecurity-mlp/custom_steps.py
deleted file mode 100644
index 30ac9bb..0000000
--- a/build/cybersecurity-mlp/custom_steps.py
+++ /dev/null
@@ -1,118 +0,0 @@
-# Copyright (C) 2023, Advanced Micro Devices, Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of FINN nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import numpy as np
-import pkg_resources as pk
-import os
-
-from brevitas.nn import QuantLinear, QuantReLU, QuantIdentity
-import torch
-import torch.nn as nn
-import brevitas.onnx as bo
-from qonnx.core.modelwrapper import ModelWrapper
-from qonnx.core.datatype import DataType
-
-
-# Define export wrapper
-class CybSecMLPForExport(nn.Module):
-    def __init__(self, my_pretrained_model):
-        super(CybSecMLPForExport, self).__init__()
-        self.pretrained = my_pretrained_model
-        self.qnt_output = QuantIdentity(
-            quant_type="binary", scaling_impl_type="const", bit_width=1, min_val=-1.0, max_val=1.0
-        )
-
-    def forward(self, x):
-        # assume x contains bipolar {-1,1} elems
-        # shift from {-1,1} -> {0,1} since that is the
-        # input range for the trained network
-        x = (x + torch.tensor([1.0]).to("cpu")) / 2.0
-        out_original = self.pretrained(x)
-        out_final = self.qnt_output(out_original)  # output as {-1, 1}
-        return out_final
-
-
-def custom_step_mlp_export(model_name):
-    # Define model parameters
-    input_size = 593
-    hidden1 = 64
-    hidden2 = 64
-    hidden3 = 64
-    weight_bit_width = 2
-    act_bit_width = 2
-    num_classes = 1
-
-    # Create model definition from Brevitas library
-    model = nn.Sequential(
-        QuantLinear(input_size, hidden1, bias=True, weight_bit_width=weight_bit_width),
-        nn.BatchNorm1d(hidden1),
-        nn.Dropout(0.5),
-        QuantReLU(act_bit_width=act_bit_width),
-        QuantLinear(hidden1, hidden2, bias=True, weight_bit_width=weight_bit_width),
-        nn.BatchNorm1d(hidden2),
-        nn.Dropout(0.5),
-        QuantReLU(bit_width=act_bit_width),
-        QuantLinear(hidden2, hidden3, bias=True, weight_bit_width=weight_bit_width),
-        nn.BatchNorm1d(hidden3),
-        nn.Dropout(0.5),
-        QuantReLU(bit_width=act_bit_width),
-        QuantLinear(hidden3, num_classes, bias=True, weight_bit_width=weight_bit_width),
-    )
-
-    # Load pre-trained weights
-    assets_dir = pk.resource_filename("finn.qnn-data", "cybsec-mlp/")
-    trained_state_dict = torch.load(assets_dir + "/state_dict.pth")["models_state_dict"][0]
-    model.load_state_dict(trained_state_dict, strict=False)
-
-    # Network surgery: pad input size from 593 to 600 and convert bipolar to binary
-    W_orig = model[0].weight.data.detach().numpy()
-    W_new = np.pad(W_orig, [(0, 0), (0, 7)])
-    model[0].weight.data = torch.from_numpy(W_new)
-
-    model_for_export = CybSecMLPForExport(model)
-
-    # Create directory to save model
-    os.makedirs("models", exist_ok=True)
-    ready_model_filename = "models/%s.onnx" % model_name
-    input_shape = (1, 600)
-
-    # create a QuantTensor instance to mark input as bipolar during export
-    input_a = np.random.randint(0, 1, size=input_shape).astype(np.float32)
-    input_a = 2 * input_a - 1
-    scale = 1.0
-    input_t = torch.from_numpy(input_a * scale)
-
-    # Export to ONNX
-    bo.export_qonnx(model_for_export, export_path=ready_model_filename, input_t=input_t)
-
-    # Set input datatype for FINN's InferDataType transformation to infer the right datatypes
-    model = ModelWrapper(ready_model_filename)
-    model.set_tensor_datatype(model.graph.input[0].name, DataType["BIPOLAR"])
-    model.save(ready_model_filename)
-
-    return ready_model_filename
diff --git a/build/cybersecurity-mlp/models/download-model.sh b/build/cybersecurity-mlp/models/download-model.sh
new file mode 100755
index 0000000..e05c889
--- /dev/null
+++ b/build/cybersecurity-mlp/models/download-model.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+wget https://github.com/Xilinx/finn-examples/releases/download/v0.0.7a/onnx-models-cybersecurity.zip
+unzip -j onnx-models-cybersecurity.zip
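With `custom_step_mlp_export` removed, `build.py` above assumes the exported ONNX file already sits in `models/`, placed there by the new `download-model.sh`. A minimal sketch of the resulting flow with an early failure check — the `model_name` value and the guard itself are illustrative assumptions, not part of this patch:

```python
import os

# Hypothetical model name for illustration; build.py defines the real value.
model_name = "unsw_nb15-mlp-w2a2"
model = "models/%s.onnx" % model_name

# The model is no longer exported on the fly, so fail early if it was
# not fetched by models/download-model.sh beforehand.
if not os.path.isfile(model):
    raise FileNotFoundError("missing %s - run models/download-model.sh first" % model)
```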
diff --git a/build/gtsrb/models/download-model.sh b/build/gtsrb/models/download-model.sh
index 417fb1b..144b82d 100755
--- a/build/gtsrb/models/download-model.sh
+++ b/build/gtsrb/models/download-model.sh
@@ -1,2 +1,3 @@
 #!/bin/bash
-wget https://github.com/fastmachinelearning/qonnx_model_zoo/raw/main/models/GTSRB/Brevitas_CNV1W1A/cnv_1w1a_gtsrb.onnx
+wget https://github.com/Xilinx/finn-examples/releases/download/v0.0.7a/onnx-models-gtsrb.zip
+unzip -j onnx-models-gtsrb.zip
diff --git a/build/kws/build.py b/build/kws/build.py
index 7ffd513..b73cee9 100644
--- a/build/kws/build.py
+++ b/build/kws/build.py
@@ -67,7 +67,7 @@ def step_preprocess(model: ModelWrapper, cfg: DataflowBuildConfig):
     build_cfg.VerificationStepType.FOLDED_HLS_CPPSIM,
 ]
 
-model_name = "MLP_W3A3_python_speech_features_pre-processing_QONNX"
+model_name = "MLP_W3A3_python_speech_features_pre-processing_QONNX_opset-11"
 model_file = "models/" + model_name + ".onnx"
 
 # Change the ONNX opset from version 9 to 11, which adds support for the TopK node
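The `_opset-11` suffix matches the comment retained in `build.py`: the exported QONNX graph targets opset 9, and the TopK node requires opset 11. This patch does not show how the pre-converted model was produced, but a plausible sketch using the stock ONNX version converter (file names taken from the hunk above) would be:

```python
import onnx
from onnx import version_converter

# Upgrade the exported graph from opset 9 to 11, which adds TopK support
model = onnx.load("models/MLP_W3A3_python_speech_features_pre-processing_QONNX.onnx")
converted = version_converter.convert_version(model, 11)
onnx.save(converted, "models/MLP_W3A3_python_speech_features_pre-processing_QONNX_opset-11.onnx")
```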
diff --git a/build/kws/models/download-model.sh b/build/kws/models/download-model.sh
index 50c2314..ba63cf7 100755
--- a/build/kws/models/download-model.sh
+++ b/build/kws/models/download-model.sh
@@ -29,4 +29,5 @@
 
 # Download validation data and model
 wget https://github.com/Xilinx/finn-examples/releases/download/kws/python_speech_preprocessing_all_validation_KWS_data.npz
-wget https://github.com/Xilinx/finn-examples/releases/download/kws/MLP_W3A3_python_speech_features_pre-processing_QONNX.onnx
+wget https://github.com/Xilinx/finn-examples/releases/download/v0.0.7a/onnx-models-kws.zip
+unzip -j onnx-models-kws.zip
diff --git a/build/mobilenet-v1/build.py b/build/mobilenet-v1/build.py
index 7f6974c..59b655d 100644
--- a/build/mobilenet-v1/build.py
+++ b/build/mobilenet-v1/build.py
@@ -148,7 +148,7 @@ def select_build_steps(platform):
         specialize_layers_config_file="specialize_layers_config/%s_specialize_layers.json"
         % platform_name,
     )
-    model_file = "models/%s_pre_post_tidy.onnx" % model_name
+    model_file = "models/%s_pre_post_tidy_opset-11.onnx" % model_name
     build.build_dataflow_cfg(model_file, cfg)
 
     # copy bitfiles and runtime weights into release dir if found
diff --git a/build/mobilenet-v1/models/download-model.sh b/build/mobilenet-v1/models/download-model.sh
index fe54a6c..5caa91b 100755
--- a/build/mobilenet-v1/models/download-model.sh
+++ b/build/mobilenet-v1/models/download-model.sh
@@ -1,4 +1,4 @@
 #!/bin/sh
 
-wget https://github.com/Xilinx/finn-examples/releases/download/v0.0.1a/onnx-models-mobilenetv1.zip
-unzip onnx-models-mobilenetv1.zip
+wget https://github.com/Xilinx/finn-examples/releases/download/v0.0.7a/onnx-models-mobilenetv1.zip
+unzip -j onnx-models-mobilenetv1.zip
diff --git a/build/resnet50/models/download-model.sh b/build/resnet50/models/download-model.sh
index 0521649..80c82bf 100755
--- a/build/resnet50/models/download-model.sh
+++ b/build/resnet50/models/download-model.sh
@@ -1,4 +1,4 @@
 #!/bin/sh
 
-wget https://github.com/Xilinx/finn-examples/releases/download/v0.0.1a/onnx-models-resnet50.zip
-unzip onnx-models-resnet50.zip
+wget https://github.com/Xilinx/finn-examples/releases/download/v0.0.7a/onnx-models-resnet50.zip
+unzip -j onnx-models-resnet50.zip
diff --git a/build/vgg10-radioml/models/download-model.sh b/build/vgg10-radioml/models/download-model.sh
index 1d6bae6..3dfe24c 100755
--- a/build/vgg10-radioml/models/download-model.sh
+++ b/build/vgg10-radioml/models/download-model.sh
@@ -1,3 +1,4 @@
 #!/bin/sh
 
-wget https://github.com/Xilinx/finn-examples/releases/download/radioml/radioml_w4a4_small_tidy.onnx
+wget https://github.com/Xilinx/finn-examples/releases/download/v0.0.7a/onnx-models-radioml.zip
+unzip -j onnx-models-radioml.zip
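All of the download scripts now fetch a versioned release archive and extract it with `unzip -j`, i.e. with directory paths stripped so the ONNX files land directly in `models/`. For reference, a rough Python equivalent of that fetch-and-flatten step (URL taken from the radioml script above):

```python
import os
import urllib.request
import zipfile

url = "https://github.com/Xilinx/finn-examples/releases/download/v0.0.7a/onnx-models-radioml.zip"
archive = os.path.basename(url)
urllib.request.urlretrieve(url, archive)

# Mimic `unzip -j`: extract each file into the current directory, junking paths
with zipfile.ZipFile(archive) as zf:
    for member in zf.infolist():
        if member.is_dir():
            continue
        with zf.open(member) as src, open(os.path.basename(member.filename), "wb") as dst:
            dst.write(src.read())
```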
diff --git a/finn_examples/bitfiles/bitfiles.zip.link b/finn_examples/bitfiles/bitfiles.zip.link
index c8d3762..bea9c49 100644
--- a/finn_examples/bitfiles/bitfiles.zip.link
+++ b/finn_examples/bitfiles/bitfiles.zip.link
@@ -1,22 +1,22 @@
 {
   "Pynq-Z1": {
-    "url": "https://github.com/Xilinx/finn-examples/releases/download/v0.0.6/Pynq-Z1.zip",
-    "md5sum": "54b4691e5195ff10fb0c5339b78d6ea5"
+    "url": "https://github.com/Xilinx/finn-examples/releases/download/v0.0.7a/Pynq-Z1.zip",
+    "md5sum": "402ab9dfa4612ae340715bf80e4717bd"
   },
   "Ultra96": {
-    "url": "https://github.com/Xilinx/finn-examples/releases/download/v0.0.6/Ultra96.zip",
-    "md5sum": "a297d1f5657f624ffce16c901f1f3f30"
+    "url": "https://github.com/Xilinx/finn-examples/releases/download/v0.0.7a/Ultra96.zip",
+    "md5sum": "f777342ebc7f589950cf96843aeb6781"
   },
   "ZCU104": {
-    "url": "https://github.com/Xilinx/finn-examples/releases/download/v0.0.6/ZCU104.zip",
-    "md5sum": "17dd6ba3d24002275b6622ae0c098378"
+    "url": "https://github.com/Xilinx/finn-examples/releases/download/v0.0.7a/ZCU104.zip",
+    "md5sum": "0e701e154a78e050cea69d2c9efefccf"
   },
   "xilinx_u250_xdma_201830_2": {
     "url": "https://github.com/Xilinx/finn-examples/releases/download/rn50-u250/xilinx_u250_xdma_201830_2.zip",
     "md5sum": "042cc5602c8a39d7541f1d79946c0b68"
   },
   "xilinx_u250_gen3x16_xdma_2_1_202010_1": {
-    "url": "https://github.com/Xilinx/finn-examples/releases/download/v0.0.6/xilinx_u250_gen3x16_xdma_2_1_202010_1.zip",
-    "md5sum": "59a61f233376ab0e340835034db50449"
+    "url": "https://github.com/Xilinx/finn-examples/releases/download/v0.0.7a/xilinx_u250_gen3x16_xdma_2_1_202010_1.zip",
+    "md5sum": "59d0089ebc135b0509c8ee5a8666b88f"
   }
 }
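Every refreshed archive in `bitfiles.zip.link` carries a new `md5sum`, which the deployment flow can use to validate downloads. A hedged sketch of checking one of the v0.0.7a archives against this link file (the local archive name is an assumption for illustration):

```python
import hashlib
import json

with open("finn_examples/bitfiles/bitfiles.zip.link") as f:
    links = json.load(f)

# Compute the digest in chunks so large bitfile archives don't fill memory
md5 = hashlib.md5()
with open("Pynq-Z1.zip", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        md5.update(chunk)

assert md5.hexdigest() == links["Pynq-Z1"]["md5sum"], "stale or corrupted archive"
```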
diff --git a/finn_examples/models.py b/finn_examples/models.py
index 3a34a61..0db17ec 100644
--- a/finn_examples/models.py
+++ b/finn_examples/models.py
@@ -356,7 +356,7 @@ def mobilenetv1_w4a4_imagenet(target_platform=None, bitfile_path=None, rt_weight
 def resnet50_w1a2_imagenet(target_platform=None, bitfile_path=None, rt_weights_path=None):
     target_platform = resolve_target_platform(target_platform)
     driver_mode = get_driver_mode()
-    model_name = "resnet50-w1a2"
+    model_name = "resnet50_w1a2"
     filename = find_bitfile(model_name, target_platform, bitfile_path)
     return FINNExampleOverlay(
         filename,
@@ -393,6 +393,6 @@ def mlp_w2a2_unsw_nb15(target_platform=None, bitfile_path=None):
 def cnv_w1a1_gtsrb(target_platform=None, bitfile_path=None):
     target_platform = resolve_target_platform(target_platform)
     driver_mode = get_driver_mode()
-    model_name = "cnv-gtsrb-w1a1"
+    model_name = "cnv_1w1a_gtsrb"
     filename = find_bitfile(model_name, target_platform, bitfile_path)
     return FINNExampleOverlay(filename, driver_mode, _gtsrb_cnv_io_shape_dict)
diff --git a/finn_examples/notebooks/7_traffic_sign_recognition_gtsrb.ipynb b/finn_examples/notebooks/7_traffic_sign_recognition_gtsrb.ipynb
index a8c49fd..003ff1b 100644
--- a/finn_examples/notebooks/7_traffic_sign_recognition_gtsrb.ipynb
+++ b/finn_examples/notebooks/7_traffic_sign_recognition_gtsrb.ipynb
@@ -200,7 +200,7 @@
     "ok = 0\n",
     "nok = 0\n",
     "for i in range(n_batches):\n",
-    "    ibuf_normal = batch_imgs[i].reshape(accel.ibuf_packed_device.shape)\n",
+    "    ibuf_normal = batch_imgs[i].reshape(np.shape(accel.ibuf_packed_device))\n",
     "    exp = batch_labels[i]\n",
     "    # to avoid the slower software implementation during data unpacking,\n",
     "    # we make manual calls to buffer copies and execute_on_buffers\n",
diff --git a/pyproject.toml b/pyproject.toml
index 7c60c34..8f218ea 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,5 +1,5 @@
 [build-system]
-requires = ["setuptools>=45", "setuptools_scm[toml]>=6.2", "wheel", "pynq>=2.5.1"]
+requires = ["setuptools>=45", "setuptools_scm[toml]>=6.2", "wheel", "pynq==3.0.1", "ipython"]
 
 [tool.setuptools_scm]
 write_to = "finn_examples/_version.py"
diff --git a/setup.py b/setup.py
index 1775a5d..1e55402 100644
--- a/setup.py
+++ b/setup.py
@@ -116,16 +116,16 @@ def extend_package(path):
     python_requires=">=3.5.2",
     # keeping 'setup_requires' only for readability - relying on
     # pyproject.toml and PEP 517/518
-    setup_requires=["pynq>=2.5.1", "setuptools_scm"],
+    setup_requires=["pynq==3.0.1", "setuptools_scm"],
     install_requires=[
-        "pynq>=2.5.1",
+        "pynq==3.0.1",
         "bitstring>=3.1.7",
-        "numpy",
+        "numpy==1.21.5",
         "finn-dataset_loading==0.0.5",  # noqa
     ],
     extras_require={
         ':python_version<"3.6"': ["matplotlib<3.1", "ipython==7.9"],
-        ':python_version>="3.6"': ["matplotlib"],
+        ':python_version>="3.6"': ["matplotlib", "ipython==8.9.0"],
     },
     entry_points={"pynq.notebooks": ["finn_examples = {}.notebooks".format(module_name)]},
     cmdclass={"build_py": build_py, "unzip_overlays": _unzip_overlays},
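The renamed bitfiles (`resnet50_w1a2`, `cnv_1w1a_gtsrb`) stay invisible to users, since the entry points in `finn_examples/models.py` resolve them internally. On a board or Alveo shell with the matching bitfile installed, usage is unchanged, e.g.:

```python
from finn_examples import models

# Resolves the renamed "cnv_1w1a_gtsrb" bitfile for the detected platform
accel = models.cnv_w1a1_gtsrb()

# Likewise resolves "resnet50_w1a2" (Alveo U250 platforms only)
# accel = models.resnet50_w1a2_imagenet()
```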