diff --git a/.daq_pm/configs/all_v7 b/.daq_pm/configs/all_v7 new file mode 100644 index 0000000..46e05de --- /dev/null +++ b/.daq_pm/configs/all_v7 @@ -0,0 +1,5 @@ +# It is a configuration file for [project_manager.vim](https://github.com/daquexian/project_manager.vim) +name binary-nn +type cpp +build_dir build_v7 +cmake_options -DCMAKE_TOOLCHAIN_FILE=~/Android/Sdk/ndk-bundle/build/cmake/android.toolchain.cmake -DANDROID_PLATFORM=android-21 -DANDROID_ABI="armeabi-v7a with NEON" -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DCMAKE_BUILD_TYPE=Release -GNinja diff --git a/.daq_pm/configs/bconv_test_x86 b/.daq_pm/configs/bconv_test_x86 new file mode 100644 index 0000000..360a39b --- /dev/null +++ b/.daq_pm/configs/bconv_test_x86 @@ -0,0 +1,7 @@ +# It is a configuration file for [project_manager.vim](https://github.com/daquexian/project_manager.vim) +name binary-nn +type cpp +build_dir build_test_x86 +target bconv_test +cmake_options -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DBNN_BUILD_MAIN_LIB=ON -GNinja +binary tests/bconv_test diff --git a/.daq_pm/configs/bgemm_test b/.daq_pm/configs/bgemm_test index 73e84b4..c049daa 100644 --- a/.daq_pm/configs/bgemm_test +++ b/.daq_pm/configs/bgemm_test @@ -1,7 +1,7 @@ # It is a configuration file for [project_manager.vim](https://github.com/daquexian/project_manager.vim) name binary-nn type cpp -build_dir build_bgemm_test +build_dir build_test target bgemm_test -cmake_options -DCMAKE_TOOLCHAIN_FILE=~/Android/Sdk/ndk-bundle/build/cmake/android.toolchain.cmake -DANDROID_PLATFORM=android-28 -DANDROID_ABI=arm64-v8a -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DCMAKE_BUILD_TYPE=Release +cmake_options -DCMAKE_TOOLCHAIN_FILE=~/Android/Sdk/ndk-bundle/build/cmake/android.toolchain.cmake -DANDROID_PLATFORM=android-28 -DANDROID_ABI=arm64-v8a -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DCMAKE_BUILD_TYPE=Release -GNinja binary ~/adb_push_and_run.sh tests/bgemm_test diff --git a/.daq_pm/configs/bgemm_test_v7 b/.daq_pm/configs/bgemm_test_v7 new file mode 100644 index 0000000..d8a6f3d --- /dev/null +++ b/.daq_pm/configs/bgemm_test_v7 @@ -0,0 +1,7 @@ +# It is a configuration file for [project_manager.vim](https://github.com/daquexian/project_manager.vim) +name binary-nn +type cpp +build_dir build_test_v7 +target bgemm_test +cmake_options -DCMAKE_TOOLCHAIN_FILE=~/Android/Sdk/ndk-bundle/build/cmake/android.toolchain.cmake -DANDROID_PLATFORM=android-25 -DANDROID_ABI="armeabi-v7a with NEON" -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DCMAKE_BUILD_TYPE=Release -GNinja +binary ~/adb_push_and_run.sh tests/bgemm_test diff --git a/.daq_pm/configs/bgemm_test_x86 b/.daq_pm/configs/bgemm_test_x86 new file mode 100644 index 0000000..1ad0276 --- /dev/null +++ b/.daq_pm/configs/bgemm_test_x86 @@ -0,0 +1,7 @@ +# It is a configuration file for [project_manager.vim](https://github.com/daquexian/project_manager.vim) +name binary-nn +type cpp +build_dir build_test_x86 +target bgemm_test +cmake_options -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DBNN_BUILD_MAIN_LIB=ON -GNinja +binary tests/bgemm_test diff --git a/.daq_pm/configs/bitpack_test b/.daq_pm/configs/bitpack_test index f4a4b43..5515f0b 100644 --- a/.daq_pm/configs/bitpack_test +++ b/.daq_pm/configs/bitpack_test @@ -1,7 +1,7 @@ # It is a configuration file for [project_manager.vim](https://github.com/daquexian/project_manager.vim) name binary-nn type cpp -build_dir build +build_dir build_test target bitpack_test -cmake_options -DCMAKE_TOOLCHAIN_FILE=~/Android/Sdk/ndk-bundle/build/cmake/android.toolchain.cmake -DANDROID_PLATFORM=android-28 -DANDROID_ABI=arm64-v8a 
-DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DCMAKE_BUILD_TYPE=Release +cmake_options -DCMAKE_TOOLCHAIN_FILE=~/Android/Sdk/ndk-bundle/build/cmake/android.toolchain.cmake -DANDROID_PLATFORM=android-28 -DANDROID_ABI=arm64-v8a -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DCMAKE_BUILD_TYPE=Release -GNinja binary ~/adb_push_and_run.sh tests/bitpack_test diff --git a/.daq_pm/configs/net_test b/.daq_pm/configs/net_test index 6bf0eae..58ec92c 100644 --- a/.daq_pm/configs/net_test +++ b/.daq_pm/configs/net_test @@ -3,5 +3,5 @@ name binary-nn type cpp build_dir build_net_test target net_test -cmake_options -DCMAKE_TOOLCHAIN_FILE=~/Android/Sdk/ndk-bundle/build/cmake/android.toolchain.cmake -DANDROID_PLATFORM=android-28 -DANDROID_ABI=arm64-v8a -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DCMAKE_BUILD_TYPE=Release +cmake_options -DCMAKE_TOOLCHAIN_FILE=~/Android/Sdk/ndk-bundle/build/cmake/android.toolchain.cmake -DANDROID_PLATFORM=android-21 -DANDROID_ABI=arm64-v8a -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DCMAKE_BUILD_TYPE=Release -GNinja binary ~/adb_push_and_run.sh tests/net_test diff --git a/.daq_pm/configs/net_test_v7 b/.daq_pm/configs/net_test_v7 new file mode 100644 index 0000000..d2e3baf --- /dev/null +++ b/.daq_pm/configs/net_test_v7 @@ -0,0 +1,7 @@ +# It is a configuration file for [project_manager.vim](https://github.com/daquexian/project_manager.vim) +name binary-nn +type cpp +build_dir build_test_v7 +target net_test +cmake_options -DCMAKE_TOOLCHAIN_FILE=~/Android/Sdk/ndk-bundle/build/cmake/android.toolchain.cmake -DANDROID_PLATFORM=android-21 -DANDROID_ABI="armeabi-v7a with NEON" -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DCMAKE_BUILD_TYPE=Release -GNinja +binary ~/adb_push_and_run.sh tests/net_test diff --git a/.daq_pm/configs/net_test_x86 b/.daq_pm/configs/net_test_x86 new file mode 100644 index 0000000..9d5268d --- /dev/null +++ b/.daq_pm/configs/net_test_x86 @@ -0,0 +1,7 @@ +# It is a configuration file for [project_manager.vim](https://github.com/daquexian/project_manager.vim) +name binary-nn +type cpp +build_dir build_net_test_x86 +target net_test +cmake_options -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DCMAKE_BUILD_TYPE=Release -DBNN_BUILD_MAIN_LIB=ON -GNinja +binary tests/net_test diff --git a/CMakeLists.txt b/CMakeLists.txt index 9525b24..a7558d3 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -22,11 +22,16 @@ option(BNN_SYSTEM_PROTOBUF "Use system protobuf to build onnx2bnn" ON) option(BNN_BUILD_PYTHON "Build onnx2bnn python interface" OFF) option(BNN_USE_MSVC_STATIC_RUNTIME "Link onnx2bnn to msvc static runtime" ON) -if (${CMAKE_SYSTEM_NAME} STREQUAL "Android") - set(BNN_BUILD_ANDROID ON) -else() - set(BNN_BUILD_ANDROID OFF) +message(STATUS "Target architecture: ${CMAKE_SYSTEM_PROCESSOR}") +if (NOT DEFINED BNN_BUILD_MAIN_LIB) + if (CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64" OR + CMAKE_SYSTEM_PROCESSOR STREQUAL "armv7-a") + set(BNN_BUILD_MAIN_LIB ON) + else() + set(BNN_BUILD_MAIN_LIB OFF) + endif() endif() +message(STATUS "BNN_BUILD_MAIN_LIB: ${BNN_BUILD_MAIN_LIB}") include(cmake/utils.cmake) bnn_add_msvc_runtime_flag() @@ -42,7 +47,7 @@ if (${BNN_NET_BENCHMARK}) add_compile_options("-DBNN_BENCHMARK") endif() -if (${BNN_BUILD_ANDROID}) +if (BNN_BUILD_MAIN_LIB) set(CMAKE_CXX_STANDARD 17) set(CMAKE_CXX_FLAGS_RELEASE "-O3 -DNDEBUG") @@ -64,7 +69,6 @@ if (${BNN_BUILD_ANDROID}) configure_benchmark() add_subdirectory(benchmark) endif() - else() set(CMAKE_CXX_STANDARD 11) diff --git a/README.md b/README.md index 8ab627b..e099b1a 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ Join chat at [Gitter 
(English)](https://gitter.im/dabnn/dabnn) or QQ Group (Chin Binary neural networks (BNNs) have great potential on edge devices since they replace float operations by efficient bit-wise operations. However, to leverage the efficiency of bit-wise operations, the reimplmentation of convolution layer and also other layers is needed. -To our best knowledge, dabnn is the first highly-optimized binary neural networks inference framework for mobile platform. We implemented binary convolutions with armv8 assembly. On Google Pixel 1, our dabnn is as **800%~2400% faster** as [BMXNet](https://github.com/hpi-xnor/BMXNet) (the only one open-sourced BNN inference framework except dabnn to our best knowledge) on a single binary convolution, and as about **700% faster** as it on binarized ResNet-18. +To the best of our knowledge, dabnn is the first highly optimized binary neural network inference framework for mobile platforms. We implemented binary convolutions in ARM assembly. On Google Pixel 1, our dabnn is **800%~2400% faster** than [BMXNet](https://github.com/hpi-xnor/BMXNet) (to the best of our knowledge, the only other open-source BNN inference framework) on a single binary convolution, and about **700% faster** than it on binarized ResNet-18. ## Benchmark and Comparison @@ -46,11 +46,11 @@ The following is the comparison between our dabnn and [Caffe](http://caffe.berke ## Convert ONNX Model -We provide a conversion tool, named onnx2bnn, to convert an ONNX model to a dabnn model. To get the conversion tool, just build the project using the native toolchain (instead of arm cross-compiling toolchain). For Linux users, we provide pre-built onnx2bnn AppImage. Linux users can download it from [GitHub Releases](https://github.com/JDAI-CV/dabnn/releases). For the usage and other information about AppImage, please check out https://appimage.org . +We provide a conversion tool, named onnx2bnn, to convert an ONNX model to a dabnn model. Pre-built onnx2bnn binaries for all platforms are available in [GitHub Releases](https://github.com/JDAI-CV/dabnn/releases). For Linux users, the pre-built onnx2bnn binary is in [AppImage](https://appimage.org) format; see https://appimage.org for details. -Note: Binary convolution is a custom operator, so whether the ONNX model is dabnn-comptabile heavily depends on the implementation of the binary convolution in the training code. We will soon provide an dabnn-comptabile PyTorch implementation of binary convolution. +Note: Binary convolution is a custom operator, so whether the ONNX model is dabnn-compatible heavily depends on the implementation of the binary convolution in the training code. Please check out [our wiki](https://github.com/JDAI-CV/dabnn/wiki/Train,-export-and-convert-a-dabnn-model) for further information. -After conversion, the generated dabnn model can be deployed on armv8 devices. For Android developer, we have provided Android AAR package and published it on [jcenter](https://bintray.com/daquexian566/maven/dabnn/_latestVersion), for the usage please check out [example project](https://github.com/JDAI-CV/dabnn-example). +After conversion, the generated dabnn model can be deployed on ARM devices (e.g., mobile phones and embedded devices). For Android developers, we have provided an Android AAR package and published it on [jcenter](https://bintray.com/daquexian566/maven/dabnn/_latestVersion); for the usage, please check out the [example project](https://github.com/JDAI-CV/dabnn-example).
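As a concrete illustration of the bit-wise operations this README refers to, the sketch below shows the XOR + popcount accumulation that dabnn's binary GEMM performs instead of floating-point multiply-accumulate. It is a minimal, self-contained example; the function name and the +1/-1 encoding convention are assumptions made for illustration, not part of dabnn's API.

```cpp
#include <cstdint>
#include <cstddef>
#include <vector>

// Illustrative only: a dot product between two +1/-1 vectors that have been
// bit-packed into 64-bit words (1 bit per element). One XOR + popcount
// replaces 64 floating-point multiply-accumulates. dabnn's bgemm_naive
// accumulates exactly the popcount-of-XOR term; the final rescaling to a
// +1/-1 dot product shown here depends on the binarization convention.
inline int binary_dot(const std::vector<uint64_t>& a,
                      const std::vector<uint64_t>& b) {
    int differing = 0;
    for (std::size_t i = 0; i < a.size(); ++i) {
        differing += __builtin_popcountll(a[i] ^ b[i]);  // bits where signs differ
    }
    const int n = static_cast<int>(a.size()) * 64;  // total number of elements
    return n - 2 * differing;  // matches minus mismatches
}
```

The assembly kernels in this patch vectorize the same reduction with NEON `vcnt`/`vpaddl` instructions.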
## Pretrained Models diff --git a/README_CN.md b/README_CN.md index 0d7a4ef..7a31187 100644 --- a/README_CN.md +++ b/README_CN.md @@ -18,7 +18,7 @@ QQ 群:1021964010, 入群答案: nndab 然而,想发挥出位运算的高效率,就需要用位运算对卷积进行重新实现。一直以来,始终没有人针对二值网络推出一个高度优化的 inference 框架,这让二值网络令人遗憾的失去了可以部署在现有通用设备上这个巨大的优势。 -为了填补这个巨大的空白,我们推出了 dabnn,一个用 armv8 汇编重写了卷积,高度优化的二值网络 inference 框架。实验显示 dabnn 相对于现有的二值网络和浮点网络 inference 框架均能带来极大的加速。在运行单个二值卷积时,我们的速度是 [BMXNet](https://github.com/hpi-xnor/BMXNet) (除 dabnn 外唯一一个二值网络 inference 框架) 的 **800%~2400%**,在运行二值化的 ResNet-18 时,我们的速度是 BMXNet 的约 **700%**。我们希望 dabnn 的开源可以成为二值网络部署在终端设备的关键一步,也可以有助于使这一领域的爱好者了解二值网络在真实设备上的表现。 +为了填补这个巨大的空白,我们推出了 dabnn,一个用 ARM 汇编重写了卷积,高度优化的二值网络 inference 框架。实验显示 dabnn 相对于现有的二值网络和浮点网络 inference 框架均能带来极大的加速。在运行单个二值卷积时,我们的速度是 [BMXNet](https://github.com/hpi-xnor/BMXNet) (除 dabnn 外唯一一个二值网络 inference 框架) 的 **800%~2400%**,在运行二值化的 ResNet-18 时,我们的速度是 BMXNet 的约 **700%**。我们希望 dabnn 的开源可以成为二值网络部署在终端设备的关键一步,也可以有助于使这一领域的爱好者了解二值网络在真实设备上的表现。 ## 速度 @@ -48,11 +48,11 @@ dabnn_bireal18_imagenet_stem 43279353 ns 41533009 ns 14 <--- ## 如何转换 ONNX 模型 -我们提供模型转换工具 onnx2bnn 将 ONNX 模型转换为 dabnn 格式的模型。用本地编译工具链(而不是 arm 交叉编译工具链)编译这个项目就可以编译出 onnx2dnn。对 Linux 用户我们提供可以在 Linux 下无需编译直接运行的 AppImage,从 [GitHub Releases](https://github.com/JDAI-CV/dabnn/releases) 下载即可。AppImage 的使用方法和其它相关信息请参考 https://appimage.org/。 +我们提供模型转换工具 onnx2bnn 将 ONNX 模型转换为 dabnn 格式的模型。在 [GitHub Releases](https://github.com/JDAI-CV/dabnn/releases) 里有各个平台的 onnx2bnn 预编译二进制文件,可以直接下载运行。Linux 用户我们提供的是 AppImage 格式的二进制文件,AppImage 的使用方法和其它相关信息请参考 https://appimage.org/。 -注意:因为二值卷积是一种自定义操作,所以 ONNX 模型是否与 dabnn 兼容极大程度上依赖于训练代码中二值卷积的实现。我们很快会提供一个与 dabnn 兼容的二值卷积 PyTorch 实现。 +注意:因为二值卷积是一种自定义操作,所以 ONNX 模型是否与 dabnn 兼容极大程度上依赖于训练代码中二值卷积的实现,在 [wiki](https://github.com/JDAI-CV/dabnn/wiki/Train,-export-and-convert-a-dabnn-model) 中有详细的进一步描述。 -转换完成后得到的 dabnn 模型就可以在 armv8 设备上使用。对 Android 开发者我们已经把 Android AAR 包上传到了 [jcenter](https://bintray.com/daquexian566/maven/dabnn/_latestVersion),使用方法请看[示例工程](https://github.com/JDAI-CV/dabnn-example)。 +转换完成后得到的 dabnn 模型就可以在 ARM 设备(例如手机和嵌入式设备)上使用。对 Android 开发者我们已经把 Android AAR 包上传到了 [jcenter](https://bintray.com/daquexian566/maven/dabnn/_latestVersion),使用方法请看[示例工程](https://github.com/JDAI-CV/dabnn-example)。 ## 预训练模型 diff --git a/benchmark/benchmark.cpp b/benchmark/benchmark.cpp index 9fcbfcc..be940c4 100644 --- a/benchmark/benchmark.cpp +++ b/benchmark/benchmark.cpp @@ -21,6 +21,7 @@ static void BM_pack_mat_64_small(benchmark::State &state) { } } +#ifdef __aarch64__ static void BM_pack_mat_128_small(benchmark::State &state) { const bnn::Mat a(1, 32, 32, 128, bnn::DataType::Float, 0); bnn::Mat b(1, 32, 32, 128, bnn::DataType::Bit, 0); @@ -28,6 +29,7 @@ static void BM_pack_mat_128_small(benchmark::State &state) { pack_mat_128(a, b); } } +#endif // __aarch64__ static void BM_pack_mat_64(benchmark::State &state) { const bnn::Mat a(1, 64, 64, 128, bnn::DataType::Float); @@ -37,6 +39,7 @@ static void BM_pack_mat_64(benchmark::State &state) { } } +#ifdef __aarch64__ static void BM_pack_mat_128(benchmark::State &state) { const bnn::Mat a(1, 64, 64, 128, bnn::DataType::Float); bnn::Mat b(1, 64, 64, 128, bnn::DataType::Bit); @@ -44,6 +47,7 @@ static void BM_pack_mat_128(benchmark::State &state) { pack_mat_128(a, b); } } +#endif // __aarch64__ #define SETUP_BCONV_FLOAT(size_a, size_b, num_output) \ const size_t AHEIGHT = size_a; \ @@ -73,6 +77,7 @@ static void BM_pack_mat_128(benchmark::State &state) { \ bnn::Mat c(CHEIGHT, CWIDTH, NUM_OUTPUT, bnn::DataType::Float); +#ifdef __aarch64__ static void 
BM_bconv_float_3x3_128(benchmark::State &state) { SETUP_BCONV_FLOAT(30, 3, 128); for (auto _ : state) { @@ -88,6 +93,7 @@ static void BM_bconv_float_1x1_128(benchmark::State &state) { bnn::bconv_1x1_128(a_binary, b, c); } } +#endif // __aarch64__ #undef SETUP_BCONV_FLOAT @@ -135,6 +141,7 @@ static void BM_bnn_bconv_1x1_naive_128(benchmark::State &state) { } } +#ifdef __aarch64__ static void BM_bnn_bconv_1x1_64(benchmark::State &state) { SETUP_BCONV(56, 1, 64, 1); for (auto _ : state) { @@ -162,6 +169,7 @@ static void BM_bnn_bconv_1x1_512(benchmark::State &state) { bnn::bconv_1x1_512(a, b, c); } } +#endif // __aarch64__ static void BM_bnn_bconv_3x3_64(benchmark::State &state) { SETUP_BCONV(58, 3, 64, 1); diff --git a/ci/build_dabnn_v7.sh b/ci/build_dabnn_v7.sh new file mode 100755 index 0000000..6ee2ba5 --- /dev/null +++ b/ci/build_dabnn_v7.sh @@ -0,0 +1,11 @@ +#! /usr/bin/env bash +set -e + +echo "y" | $ANDROID_HOME/tools/bin/sdkmanager --install 'ndk-bundle' +nproc=$(ci/get_cores.sh) + +mkdir build_dabnn && cd build_dabnn +cmake -DCMAKE_TOOLCHAIN_FILE=$ANDROID_HOME/ndk-bundle/build/cmake/android.toolchain.cmake -DANDROID_PLATFORM=android-21 -DANDROID_ABI="armeabi-v7a with NEON" -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DCMAKE_BUILD_TYPE=Release .. +cmake --build . -- -j$nproc +cd - + diff --git a/ci/dabnn_build_and_test.yml b/ci/dabnn_build_and_test.yml index 94c9373..284805e 100644 --- a/ci/dabnn_build_and_test.yml +++ b/ci/dabnn_build_and_test.yml @@ -22,26 +22,34 @@ pr: - README.md - docs/* -pool: - vmImage: 'macOS-10.14' -steps: -- checkout: self - submodules: true -- bash: brew install watch gnu-sed - displayName: Install watch and gnu-sed -- bash: ci/build_dabnn.sh - displayName: Build -- bash: ci/start_android_emulator.sh - displayName: Start Android Emulator -- bash: ci/adb_push_and_run.sh build_dabnn/tests/bconv_test - displayName: Binary Conv Test -- bash: ci/adb_push_and_run.sh build_dabnn/tests/bgemm_test - displayName: Binary Gemm Test -- bash: ci/download_models.sh - displayName: Download Models -- bash: ci/adb_push_and_run.sh build_dabnn/tests/net_test - displayName: Model Test -- bash: ci/build_aar.sh - env: - BINTRAY_KEY: $(bintrayKey) - displayName: Build and Publish AAR package +jobs: +- job: aarch64 + pool: + vmImage: 'macOS-10.14' + steps: + - checkout: self + submodules: true + - bash: brew install watch gnu-sed + displayName: Install watch and gnu-sed + - bash: ci/build_dabnn.sh + displayName: Build + - bash: ci/start_android_emulator.sh + displayName: Start Android Emulator + - template: template_dabnn_run_test.yml + - bash: ci/build_aar.sh + env: + BINTRAY_KEY: $(bintrayKey) + displayName: Build and Publish AAR package +- job: armv7a_with_NEON + pool: + vmImage: 'macOS-10.14' + steps: + - checkout: self + submodules: true + - bash: brew install watch gnu-sed + displayName: Install watch and gnu-sed + - bash: ci/build_dabnn_v7.sh + displayName: Build + - bash: ci/start_android_emulator_v7.sh + displayName: Start Android Emulator + - template: template_dabnn_run_test.yml diff --git a/ci/start_android_emulator_v7.sh b/ci/start_android_emulator_v7.sh new file mode 100755 index 0000000..05e37e9 --- /dev/null +++ b/ci/start_android_emulator_v7.sh @@ -0,0 +1,25 @@ +#! 
/usr/bin/env bash +set -e + +export TERM=xterm + +echo "y" | $ANDROID_HOME/tools/bin/sdkmanager --install 'system-images;android-25;google_apis;armeabi-v7a' + +echo "no" | $ANDROID_HOME/tools/bin/avdmanager create avd -n android_emulator -k 'system-images;android-25;google_apis;armeabi-v7a' --force + +echo "Starting emulator" + +# Start emulator in background +nohup $ANDROID_HOME/emulator/emulator -avd android_emulator -no-snapshot -no-audio & + +# start server in advance, so that the result of watch will only change when device gets online +$ANDROID_HOME/platform-tools/adb start-server + +watch -g -n 1 '$ANDROID_HOME/platform-tools/adb devices | grep -c device$' + +echo "Emulator is online" + +$ANDROID_HOME/platform-tools/adb devices + +echo "Emulator started" + diff --git a/ci/template_dabnn_run_test.yml b/ci/template_dabnn_run_test.yml new file mode 100644 index 0000000..a8923d7 --- /dev/null +++ b/ci/template_dabnn_run_test.yml @@ -0,0 +1,9 @@ +steps: +- bash: ci/adb_push_and_run.sh build_dabnn/tests/bconv_test + displayName: Binary Conv Test +- bash: ci/adb_push_and_run.sh build_dabnn/tests/bgemm_test + displayName: Binary Gemm Test +- bash: ci/download_models.sh + displayName: Download Models +- bash: ci/adb_push_and_run.sh build_dabnn/tests/net_test + displayName: Model Test diff --git a/cmake/system.cmake b/cmake/system.cmake index 2beb253..794c559 100644 --- a/cmake/system.cmake +++ b/cmake/system.cmake @@ -1,6 +1,7 @@ # Copyright 2019 JD.com Inc. JD AI -if (${CMAKE_SYSTEM_NAME} STREQUAL "Android") +if ((NOT CMAKE_SYSTEM_NAME STREQUAL CMAKE_HOST_SYSTEM_NAME) OR + (NOT CMAKE_SYSTEM_PROCESSOR STREQUAL CMAKE_HOST_SYSTEM_PROCESSOR)) set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) diff --git a/common/baseline.h b/common/baseline.h index 397d3e1..9888089 100644 --- a/common/baseline.h +++ b/common/baseline.h @@ -10,8 +10,12 @@ #include inline int bitcount(uint64_t x) { +#ifdef __aarch64__ + return __builtin_popcountl(x); +#else std::bitset<64> bs(x); return bs.count(); +#endif } inline void baseline_pack_mat(const bnn::Mat &float_mat, bnn::Mat &binary_mat) { diff --git a/dabnn/bconv.h b/dabnn/bconv.h index 3dd43c0..fad4800 100644 --- a/dabnn/bconv.h +++ b/dabnn/bconv.h @@ -7,11 +7,15 @@ #include #endif // __ARM_NEON +#if not defined (__aarch64__) +#include +#endif #include #include #include "mat.h" namespace bnn { +#ifdef __aarch64__ inline void bconv_1x1_64(const Mat &bottom_blob, const Mat &weight, Mat &top_blob); inline void bconv_1x1_128(const Mat &bottom_blob, const Mat &weight, @@ -20,8 +24,10 @@ inline void bconv_1x1_256(const Mat &bottom_blob, const Mat &weight, Mat &top_blob); inline void bconv_1x1_512(const Mat &bottom_blob, const Mat &weight, Mat &top_blob); +#endif inline void bconv_3x3(const Mat &bottom_blob, const Mat &weight, Mat &top_blob, const int stride = 1); +#ifdef __aarch64__ inline void bconv_3x3_64(const Mat &bottom_blob, const Mat &weight, Mat &top_blob, const int stride = 1); inline void bconv_3x3_64_fallback(const Mat &bottom_blob, const Mat &weight, @@ -44,8 +50,10 @@ inline void bconv_3x3_128_internal_s1(const uint64_t *bottom_ptr, const int b_w, inline void bconv_3x3_128_internal_fallback( const uint64_t *bottom_ptr, const int b_w, const uint64_t *weight_ptr, float *top_ptr, const int top_h, const int top_w, const int stride = 1); +#endif } // namespace bnn +#ifdef __aarch64__ inline void bnn::bconv_3x3_64(const Mat &bottom_blob, const Mat &weight, Mat &top_blob, const int 
stride) { bconv_3x3_64_opt4(bottom_blob, weight, top_blob, 0, stride); @@ -819,9 +827,11 @@ inline void unpack_output(float *b, float *a, int width, int height, #undef A } +#endif // __aarch64__ inline void bnn::bconv_3x3(const Mat &bottom_blob, const Mat &weight, Mat &top_blob, const int stride) { +#ifdef __aarch64__ // TODO: more elegant way static uint64_t packed_weight[999999]; static uint64_t packed_input[9999999]; @@ -884,8 +894,12 @@ inline void bnn::bconv_3x3(const Mat &bottom_blob, const Mat &weight, unpack_output(packed_output, static_cast(top_blob.data), top_blob.w, top_blob.h, top_blob.c); } +#else // __aarch64__ + baseline_bconv(bottom_blob, weight, 3, 3, 0, 0, stride, stride, 1, 1, top_blob.c, top_blob); +#endif // __aarch64__ } +#ifdef __aarch64__ inline void bnn::bconv_1x1_512(const Mat &bottom_blob, const Mat &weight, Mat &top_blob) { FORZS(th, top_blob.h, 2) { @@ -1673,5 +1687,6 @@ inline void bnn::bconv_1x1_64(const Mat &bottom_blob, const Mat &weight, } } } +#endif // __aarch64__ #endif diff --git a/dabnn/bgemm.h b/dabnn/bgemm.h index dc530b0..9563833 100644 --- a/dabnn/bgemm.h +++ b/dabnn/bgemm.h @@ -6,16 +6,26 @@ #if __ARM_NEON #include #endif // __ARM_NEON +#include #include +#if __ARM_NEON +#ifdef __aarch64__ #define P 8 #define R 6 +#else +#define P 4 +#define R 4 +#endif // __aarch64__ +#endif // __ARM_NEON + #define A(i, j) a[(j)*lda + (i)] // A(y, x) #define B(i, j) b[(j)*ldb + (i)] // B(y, x) #define C(i, j) c[(j)*ldc + (i)] // C(y, x) #define min(i, j) ((i) < (j) ? (i) : (j)) +#ifdef __ARM_NEON inline void pack_a(const int kc, const uint64_t *a, const int lda, uint64_t *a_to); inline void pack_b(const int kc, const uint64_t *b, const int ldb, @@ -28,6 +38,7 @@ inline void inner_kernel(const int m, const int n, const int k, const uint64_t *a, const int lda, const uint64_t *b, const int ldb, float *c, const int ldc, const int first_time); +#endif // __ARM_NEON inline void bgemm_naive(const int m, const int n, const int k, const uint64_t *a, const int lda, const uint64_t *b, const int ldb, float *c, const int ldc); @@ -35,6 +46,7 @@ inline void bgemm_naive(const int m, const int n, const int k, inline void bgemm(const int m, const int n, const int k, const uint64_t *a, int lda, const uint64_t *b, const int ldb, float *c, const int ldc) { +#ifdef __ARM_NEON int kc = 32; int mc = 32; int i, q, qb, ib; @@ -48,13 +60,17 @@ inline void bgemm(const int m, const int n, const int k, const uint64_t *a, i == 0); } } +#else + bgemm_naive(m, n, k, a, lda, b, ldb, c, ldc); +#endif // __ARM_NEON } +#if __ARM_NEON inline void inner_kernel(const int m, const int n, const int k, const uint64_t *a, const int lda, const uint64_t *b, const int ldb, float *c, const int ldc, const int first_time) { - BNN_ASSERT(k % 2 == 0, "k % 2 should be 0"); + BNN_ASSERT(k % 2 == 0, "k % 2 should be 0, k =", k); BNN_ASSERT(k * P < 128000, ""); BNN_ASSERT(k * R < 128000, ""); @@ -78,27 +94,21 @@ inline void inner_kernel(const int m, const int n, const int k, if (i != m) { FOR(_j, 0, j) { FOR(_i, i, m) { - FORZ(_k, k) { - C(_i, _j) += __builtin_popcountl(A(_i, _k) ^ B(_k, _j)); - } + FORZ(_k, k) { C(_i, _j) += bitcount(A(_i, _k) ^ B(_k, _j)); } } } } if (j != n) { FOR(_j, j, n) { FOR(_i, 0, i) { - FORZ(_k, k) { - C(_i, _j) += __builtin_popcountl(A(_i, _k) ^ B(_k, _j)); - } + FORZ(_k, k) { C(_i, _j) += bitcount(A(_i, _k) ^ B(_k, _j)); } } } } if (i != m || j != n) { FOR(_j, j, n) { FOR(_i, i, m) { - FORZ(_k, k) { - C(_i, _j) += __builtin_popcountl(A(_i, _k) ^ B(_k, _j)); - } + FORZ(_k, k) { C(_i, _j) 
+= bitcount(A(_i, _k) ^ B(_k, _j)); } } } } @@ -135,11 +145,13 @@ inline void unpack_c(const float *c_from, const int ldc, float *c, inline void micro_kernel(int64_t kc, float *c, const uint64_t *a, const uint64_t *b) { +#ifdef __aarch64__ // C: 8x6(float 32, 6x2=12regs), A: 8*K(8regs), B: K*6(6regs) // v0~v11 contains C, v12~v17 contains 6*128 of B, v18~v25 contains 128*8 of - // A v26~v30 store temporary values A is packed as 8*128 - // ----- - // 8*128 + // A v26~v30 store temporary values A is packed as + // 8*128 + // ----- + // 8*128 // B is packed as 128*6 | 128*6 asm volatile( "mov x0, %1 \n" @@ -413,15 +425,172 @@ inline void micro_kernel(int64_t kc, float *c, const uint64_t *a, "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30"); +#else // __aarch64__ + + // C: 4x4(float 32, 4x1=4), A: 4*K(4regs), B: K*4(4regs) + // q0~q3 contains C, q4~q7 contains 4*128 of B, q8~q11 contains 128*4 of A + // q12~q15 store temporary values + // + // A is packed as + // 4*128 + // ----- + // 4*128 + // B is packed as 128*4 | 128*4 + asm volatile( + "mov r0, %1 \n" + "vld1.8 {q0-q1}, [r0]! \n" + "vld1.8 {q2-q3}, [r0]! \n" + "0: \n" + "vld1.8 {q8-q9}, [%3]! \n" + "vld1.8 {q4-q5}, [%2]! \n" + "vld1.8 {q10-q11}, [%3]! \n" + "veor.u8 q12, q4, q8 \n" + "veor.u8 q13, q4, q9 \n" + "vcnt.u8 q12, q12 \n" + "vcnt.u8 q13, q13 \n" + "vld1.8 {q6-q7}, [%2]! \n" + "veor.u8 q14, q4, q10 \n" + "veor.u8 q15, q4, q11 \n" + "vcnt.u8 q14, q14 \n" + "vcnt.u8 q15, q15 \n" + "vpaddl.u8 q12, q12 \n" + "vpaddl.u8 q13, q13 \n" + "vpaddl.u8 q14, q14 \n" + "vpaddl.u8 q15, q15 \n" + "vpaddl.u16 q12, q12 \n" + "vpaddl.u16 q13, q13 \n" + "vpaddl.u16 q14, q14 \n" + "vpaddl.u16 q15, q15 \n" + "vpaddl.u32 q12, q12 \n" + "vpaddl.u32 q13, q13 \n" + "vpaddl.u32 q14, q14 \n" + "vpaddl.u32 q15, q15 \n" + "vadd.u32 d24, d24, d25 \n" + "vadd.u32 d26, d26, d27 \n" + "vadd.u32 d28, d28, d29 \n" + "vadd.u32 d30, d30, d31 \n" + "vzip.u32 q12, q14 \n" + "vzip.u32 q13, q15 \n" + "vzip.u32 q12, q13 \n" + "vadd.u32 q0, q0, q12 \n" + + "veor.u8 q12, q5, q8 \n" + "veor.u8 q13, q5, q9 \n" + "veor.u8 q14, q5, q10 \n" + "veor.u8 q15, q5, q11 \n" + "vcnt.u8 q12, q12 \n" + "vcnt.u8 q13, q13 \n" + "vcnt.u8 q14, q14 \n" + "vcnt.u8 q15, q15 \n" + "vpaddl.u8 q12, q12 \n" + "vpaddl.u8 q13, q13 \n" + "vpaddl.u8 q14, q14 \n" + "vpaddl.u8 q15, q15 \n" + "vpaddl.u16 q12, q12 \n" + "vpaddl.u16 q13, q13 \n" + "vpaddl.u16 q14, q14 \n" + "vpaddl.u16 q15, q15 \n" + "vpaddl.u32 q12, q12 \n" + "vpaddl.u32 q13, q13 \n" + "vpaddl.u32 q14, q14 \n" + "vpaddl.u32 q15, q15 \n" + "vadd.u32 d24, d24, d25 \n" + "vadd.u32 d26, d26, d27 \n" + "vadd.u32 d28, d28, d29 \n" + "vadd.u32 d30, d30, d31 \n" + "vzip.u32 q12, q14 \n" + "vzip.u32 q13, q15 \n" + "vzip.u32 q12, q13 \n" + "vadd.u32 q1, q1, q12 \n" + + "veor.u8 q12, q6, q8 \n" + "veor.u8 q13, q6, q9 \n" + "veor.u8 q14, q6, q10 \n" + "veor.u8 q15, q6, q11 \n" + "vcnt.u8 q12, q12 \n" + "vcnt.u8 q13, q13 \n" + "vcnt.u8 q14, q14 \n" + "vcnt.u8 q15, q15 \n" + "vpaddl.u8 q12, q12 \n" + "vpaddl.u8 q13, q13 \n" + "vpaddl.u8 q14, q14 \n" + "vpaddl.u8 q15, q15 \n" + "vpaddl.u16 q12, q12 \n" + "vpaddl.u16 q13, q13 \n" + "vpaddl.u16 q14, q14 \n" + "vpaddl.u16 q15, q15 \n" + "vpaddl.u32 q12, q12 \n" + "vpaddl.u32 q13, q13 \n" + "vpaddl.u32 q14, q14 \n" + "vpaddl.u32 q15, q15 \n" + "vadd.u32 d24, d24, d25 \n" + "vadd.u32 d26, d26, d27 \n" + "vadd.u32 d28, d28, d29 \n" + "vadd.u32 d30, d30, d31 \n" + "vzip.u32 q12, q14 \n" + "vzip.u32 q13, 
q15 \n" + "vzip.u32 q12, q13 \n" + "vadd.u32 q2, q2, q12 \n" + + "subs %0, %0, #1 \n" + + "veor.u8 q12, q7, q8 \n" + "veor.u8 q13, q7, q9 \n" + "veor.u8 q14, q7, q10 \n" + "veor.u8 q15, q7, q11 \n" + "vcnt.u8 q12, q12 \n" + "vcnt.u8 q13, q13 \n" + "vcnt.u8 q14, q14 \n" + "vcnt.u8 q15, q15 \n" + "vpaddl.u8 q12, q12 \n" + "vpaddl.u8 q13, q13 \n" + "vpaddl.u8 q14, q14 \n" + "vpaddl.u8 q15, q15 \n" + "vpaddl.u16 q12, q12 \n" + "vpaddl.u16 q13, q13 \n" + "vpaddl.u16 q14, q14 \n" + "vpaddl.u16 q15, q15 \n" + "vpaddl.u32 q12, q12 \n" + "vpaddl.u32 q13, q13 \n" + "vpaddl.u32 q14, q14 \n" + "vpaddl.u32 q15, q15 \n" + "vadd.u32 d24, d24, d25 \n" + "vadd.u32 d26, d26, d27 \n" + "vadd.u32 d28, d28, d29 \n" + "vadd.u32 d30, d30, d31 \n" + "vzip.u32 q12, q14 \n" + "vzip.u32 q13, q15 \n" + "vzip.u32 q12, q13 \n" + "vadd.u32 q3, q3, q12 \n" + + "bne 0b \n" + + "vcvt.f32.u32 q0, q0 \n" + "vcvt.f32.u32 q1, q1 \n" + "vcvt.f32.u32 q2, q2 \n" + "vcvt.f32.u32 q3, q3 \n" + "vst1.32 q0, [%1]! \n" + "vst1.32 q1, [%1]! \n" + "vst1.32 q2, [%1]! \n" + "vst1.32 q3, [%1]! \n" + : "+r"(kc), // %0 + "+r"(c), // %1 + "+r"(b), // %2 + "+r"(a) // %3 + : + : "cc", "memory", "r0", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" + ); +#endif // __aarch64__ } +#endif // __ARM_NEON -inline void bgemm_naive(int m, int n, int k, uint64_t *a, int lda, uint64_t *b, - int ldb, float *c, int ldc) { +inline void bgemm_naive(const int m, const int n, const int k, + const uint64_t *a, const int lda, const uint64_t *b, + const int ldb, float *c, const int ldc) { FORZ(i, m) { FORZ(j, n) { FORZ(h, k) { - C(i, j) += static_cast( - __builtin_popcountl((A(i, h) ^ B(h, j)))); + C(i, j) += static_cast(bitcount((A(i, h) ^ B(h, j)))); } } } diff --git a/dabnn/bitpack.h b/dabnn/bitpack.h index 8be37da..b90532c 100644 --- a/dabnn/bitpack.h +++ b/dabnn/bitpack.h @@ -16,6 +16,7 @@ #include #include "mat.h" +#ifdef __aarch64__ inline void pack_128_2(const float *float_ptr, void *binary_ptr, size_t size) { size_t nn_size = size >> 7; @@ -198,6 +199,7 @@ inline void pack_mat_128(const bnn::Mat &float_mat, bnn::Mat &binary_mat) { pack_128(static_cast(float_mat.data), binary_mat.data, float_mat.total()); } +#endif // __aarch64__ inline void pack_mat_64(const bnn::Mat &float_mat, bnn::Mat &binary_mat) { BNN_ASSERT( @@ -220,11 +222,15 @@ inline void pack_mat_64(const bnn::Mat &float_mat, bnn::Mat &binary_mat) { inline void pack_mat(const bnn::Mat &float_mat, bnn::Mat &binary_mat) { BNN_ASSERT(float_mat.c % 64 == 0, float_mat.c); +#ifdef __aarch64__ if (float_mat.c % 128 == 0) { pack_mat_128_2(float_mat, binary_mat); } else { pack_mat_64(float_mat, binary_mat); } +#else + pack_mat_64(float_mat, binary_mat); +#endif // __aarch64__ } #endif /* BITPACK_H */ diff --git a/dabnn/layers/AvePool.cpp b/dabnn/layers/AvePool.cpp index 3ef86a3..f8d2658 100644 --- a/dabnn/layers/AvePool.cpp +++ b/dabnn/layers/AvePool.cpp @@ -7,6 +7,7 @@ namespace bnn { +#ifdef __ARM_NEON void ave_pool_2x2_s2(const bnn::Mat &input, bnn::Mat &output) { FORZ(h, output.h) { FORZ(w, output.w) { @@ -16,6 +17,7 @@ void ave_pool_2x2_s2(const bnn::Mat &input, bnn::Mat &output) { const float *ptr3 = input.point(h * 2 + 1, w * 2 + 1); float *output_ptr = output.point(h, w); size_t nn = input.c >> 2; +#ifdef __aarch64__ asm volatile( "fmov s30, #4.0 \n" "dup v30.4s, v30.s[0] \n" @@ -45,9 +47,39 @@ void ave_pool_2x2_s2(const bnn::Mat &input, bnn::Mat &output) { : : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", 
"v11", "v12", "v30"); +#else // __aarch64__ + asm volatile( + "vmov.f32 q13, #0.25 \n" + "0: \n" + "vld1.32 q0, [%0]! \n" + "pld [%0, #128] \n" + "vld1.32 q1, [%1]! \n" + "pld [%1, #128] \n" + "vld1.32 q2, [%2]! \n" + "pld [%2, #128] \n" + "vld1.32 q3, [%3]! \n" + "pld [%3, #128] \n" + "vadd.f32 q0, q0, q1 \n" + "vadd.f32 q2, q2, q3 \n" + "vadd.f32 q0, q0, q2 \n" + "vmul.f32 q0, q0, q13 \n" + "subs %5, %5, #1 \n" + "vst1.32 q0, [%4]! \n" + "bne 0b \n" + + : "+r"(ptr0), // %0 + "+r"(ptr1), // %1 + "+r"(ptr2), // %2 + "+r"(ptr3), // %3 + "+r"(output_ptr), // %4 + "+r"(nn) // %5 + : + : "cc", "memory", "q0", "q1", "q2", "q3", "q13"); +#endif // __aarch64__ } } } +#endif // __ARM_NEON void ave_pool_fallback(const bnn::Mat &input, const size_t pad_h, const size_t pad_w, const size_t stride_h, @@ -114,6 +146,7 @@ AvePool::AvePool(NetCP net, const std::string &name, css input, css output, } void AvePool::forward_impl() const { +#ifdef __ARM_NEON if (stride_h == 2 && stride_w == 2 && kernel_h == 2 && kernel_w == 2 && input_mat->c % 4 == 0) { pad(*input_mat, pad_h, pad_w, *padded_mat); @@ -122,6 +155,10 @@ void AvePool::forward_impl() const { ave_pool_fallback(*input_mat, pad_h, pad_w, stride_h, stride_w, kernel_h, kernel_w, *output_mat); } +#else + ave_pool_fallback(*input_mat, pad_h, pad_w, stride_h, stride_w, + kernel_h, kernel_w, *output_mat); +#endif // __ARM_NEON } } // namespace bnn diff --git a/dabnn/layers/BinConv.cpp b/dabnn/layers/BinConv.cpp index 79cdd8f..e6bdf79 100644 --- a/dabnn/layers/BinConv.cpp +++ b/dabnn/layers/BinConv.cpp @@ -30,8 +30,9 @@ BinConv::BinConv(NetCP net, const std::string &name, css input, css weight, } const auto col_mat_name = "col_mat"; if (mat_map.find(col_mat_name) == mat_map.end()) { + const auto len = output_mat->h * output_mat->w * weight_mat->h * weight_mat->w * input_mat->elem_c; mat_map[col_mat_name] = - std::make_shared(999999, bnn::DataType::Bit); + std::make_shared(len, bnn::DataType::Bit); } padded_mat = mat(pad_name); @@ -45,7 +46,7 @@ BinConv::BinConv(NetCP net, const std::string &name, css input, css weight, const int k = weight_mat->h * weight_mat->w * weight_mat->c; transposed_weight_mat = std::make_shared(weight_mat->n, weight_mat->h, weight_mat->w, - weight_mat->elem_c, DataType::Bit); + weight_mat->elem_c, DataType::Bit, false); auto *trans_data_ptr = static_cast(transposed_weight_mat->data); auto *data_ptr = static_cast(weight_mat->data); @@ -57,6 +58,7 @@ BinConv::BinConv(NetCP net, const std::string &name, css input, css weight, } bool BinConv::direct_conv_compatible() const { +#ifdef __aarch64__ if (weight_mat->h == 3 && weight_mat->w == 3 && input_mat->c == 1 && stride_h == stride_w) { return true; @@ -78,10 +80,17 @@ bool BinConv::direct_conv_compatible() const { return true; } return false; +#else + return false; +#endif } bool BinConv::gemm_compatible() const { - return weight_mat->h * weight_mat->n * weight_mat->c % 2 == 0; +#ifdef __ARM_NEON + return weight_mat->h * weight_mat->w * weight_mat->c % 2 == 0; +#else + return false; +#endif } void BinConv::forward_impl() const { @@ -90,6 +99,7 @@ void BinConv::forward_impl() const { pad(*input_mat, pad_h, pad_w, *padded_mat); bconv_3x3(*padded_mat, *weight_mat, *output_mat, stride_h); } else if (gemm_compatible()) { + output_mat->fill(0.f); bnn::im2col(*input_mat, weight_mat->h, weight_mat->w, pad_h, pad_w, stride_h, stride_w, 1, 1, *col_mat); const int m = weight_mat->n; diff --git a/dabnn/layers/MaxPool.cpp b/dabnn/layers/MaxPool.cpp index fe2080d..68079d4 100644 --- 
a/dabnn/layers/MaxPool.cpp +++ b/dabnn/layers/MaxPool.cpp @@ -9,6 +9,7 @@ namespace bnn { +#ifdef __ARM_NEON void maxpool2x2(const bnn::Mat &input, bnn::Mat &output, const int stride_h = 1, const int stride_w = 1) { FORZ(h, output.h) { @@ -23,6 +24,7 @@ void maxpool2x2(const bnn::Mat &input, bnn::Mat &output, const int stride_h = 1, input.point(h * stride_h + 1, w * stride_w + 1); float *output_ptr = output.point(h, w); size_t nn = input.c >> 2; +#ifdef __aarch64__ asm volatile( "0: \n" "ld1 {v0.4s}, [%0], #16 \n" @@ -49,6 +51,33 @@ void maxpool2x2(const bnn::Mat &input, bnn::Mat &output, const int stride_h = 1, : : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12"); +#else // __aarch64__ + asm volatile( + "0: \n" + "vld1.32 q0, [%0]! \n" + "pld [%0, #128] \n" + "vld1.32 q1, [%1]! \n" + "pld [%1, #128] \n" + "vld1.32 q2, [%2]! \n" + "pld [%2, #128] \n" + "vld1.32 q3, [%3]! \n" + "pld [%3, #128] \n" + "vmax.f32 q0, q0, q1 \n" + "vmax.f32 q2, q2, q3 \n" + "vmax.f32 q0, q0, q2 \n" + "subs %5, %5, #1 \n" + "vst1.32 q0, [%4]! \n" + "bne 0b \n" + + : "+r"(ptr0), // %0 + "+r"(ptr1), // %1 + "+r"(ptr2), // %2 + "+r"(ptr3), // %3 + "+r"(output_ptr), // %4 + "+r"(nn) // %5 + : + : "cc", "memory", "q0", "q1", "q2", "q3"); +#endif // __aarch64__ } } } @@ -77,6 +106,7 @@ void maxpool3x3(const bnn::Mat &input, bnn::Mat &output, const int stride_h = 1, input.point(h * stride_h + 2, w * stride_w + 2); float *output_ptr = output.point(h, w); size_t nn = input.c >> 2; +#ifdef __aarch64__ asm volatile( "0: \n" "ld1 {v0.4s}, [%0], #16 \n" @@ -123,7 +153,94 @@ void maxpool3x3(const bnn::Mat &input, bnn::Mat &output, const int stride_h = 1, : : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12"); +#else + asm volatile( + "0: \n" + "vld1.32 q0, [%0]! \n" + "pld [%0, #128] \n" + "vld1.32 q1, [%1]! \n" + "pld [%1, #128] \n" + "vld1.32 q2, [%2]! \n" + "pld [%2, #128] \n" + "vld1.32 q3, [%3]! \n" + "pld [%3, #128] \n" + "vmax.f32 q0, q0, q1 \n" + "vld1.32 q4, [%4]! \n" + "pld [%4, #128] \n" + "vmax.f32 q2, q2, q3 \n" + "vld1.32 q5, [%5]! \n" + "pld [%5, #128] \n" + "vld1.32 q6, [%6]! \n" + "pld [%6, #128] \n" + "vmax.f32 q4, q4, q5 \n" + "vld1.32 q7, [%7]! \n" + "pld [%7, #128] \n" + "vld1.32 q8, [%8]! \n" + "pld [%8, #128] \n" + "vmax.f32 q2, q2, q8 \n" + "vmax.f32 q6, q6, q7 \n" + "vmax.f32 q0, q0, q2 \n" + "subs %10, %10, #1 \n" + "vmax.f32 q4, q4, q6 \n" + "vmax.f32 q0, q0, q4 \n" + "vst1.32 q0, [%9]! 
\n" + "bne 0b \n" + + : "+r"(ptr0), // %0 + "+r"(ptr1), // %1 + "+r"(ptr2), // %2 + "+r"(ptr3), // %3 + "+r"(ptr4), // %4 + "+r"(ptr5), // %5 + "+r"(ptr6), // %6 + "+r"(ptr7), // %7 + "+r"(ptr8), // %8 + "+r"(output_ptr), // %9 + "+r"(nn) // %10 + : + : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8"); +#endif + } + } +} +#endif // __ARM_NEON + +void max_pool_fallback(const bnn::Mat &input, const size_t pad_h, + const size_t pad_w, const size_t stride_h, + const size_t stride_w, const size_t kernel_h, + const size_t kernel_w, bnn::Mat &output) { + const int output_h = + (input.h + 2 * pad_h - ((kernel_h - 1) + 1)) / stride_h + 1; + const int output_w = + (input.w + 2 * pad_w - ((kernel_w - 1) + 1)) / stride_w + 1; + + BNN_ASSERT(input.w * input.c * input.elemsize % 16 == 0, "Not align"); + BNN_ASSERT(output.w * output.c * output.elemsize % 16 == 0, "Not align"); + + int input_y = 0; + FORZ(output_y, output_h) { + int input_x = 0; + FORZ(output_x, output_w) { + FORZ(output_c, input.c) { + float m = -std::numeric_limits::max(); + FORZ(kh, kernel_h) { + int y = input_y - pad_h + kh; + const float *input_ptr = input.point(y, 0); + FORZ(kw, kernel_w) { + int x = input_x - pad_w + kw; + if (!(y < 0 || y >= input.h || x < 0 || x >= input.w)) { + const auto val = input_ptr[x * input.c + output_c]; + m = std::max(m, val); + } + } + } + + output[output_y * output_w * input.c + output_x * input.c + + output_c] = m; + } + input_x += stride_w; } + input_y += stride_h; } } @@ -150,20 +267,25 @@ MaxPool::MaxPool(NetCP net, const std::string &name, css input, css output, padded_mat = mat_map[pad_name]; } void MaxPool::forward_impl() const { - // std::numeric_limits::min() is the closest value to 0, so we uses - // -max() - pad(*input_mat, pad_h, pad_w, *padded_mat, - -std::numeric_limits::max()); - BNN_ASSERT( - (kernel_h == 3 && kernel_w == 3) || (kernel_h == 2 && kernel_w == 2), - "Not supported max_pool"); +#ifdef __ARM_NEON if (kernel_h == 3 && kernel_w == 3) { + // std::numeric_limits::min() is the closest value to 0, so we uses + // -max() + pad(*input_mat, pad_h, pad_w, *padded_mat, + -std::numeric_limits::max()); maxpool3x3(*padded_mat, *output_mat, stride_h, stride_w); } else if (kernel_h == 2 && kernel_w == 2) { + pad(*input_mat, pad_h, pad_w, *padded_mat, + -std::numeric_limits::max()); maxpool2x2(*padded_mat, *output_mat, stride_h, stride_w); } else { - std::invalid_argument("Not supported max_pool"); + max_pool_fallback(*input_mat, pad_h, pad_w, stride_h, stride_w, + kernel_h, kernel_w, *output_mat); } +#else + max_pool_fallback(*input_mat, pad_h, pad_w, stride_h, stride_w, + kernel_h, kernel_w, *output_mat); +#endif // __aarch64__ } std::string MaxPool::to_str() const { diff --git a/dabnn/layers/Relu.cpp b/dabnn/layers/Relu.cpp index 37f0061..51ead33 100644 --- a/dabnn/layers/Relu.cpp +++ b/dabnn/layers/Relu.cpp @@ -8,6 +8,7 @@ namespace bnn { void Relu::forward_impl() const { +#if __ARM_NEON float32x4_t _zero = vdupq_n_f32(0.f); float *ptr = static_cast(*data_mat); FORZ(i, data_mat->total() / 4) { @@ -17,5 +18,11 @@ void Relu::forward_impl() const { ptr += 4; } +#else + float *ptr = static_cast(*data_mat); + FORZ(i, data_mat->total()) { + *ptr = std::max(*ptr, 0.f); + } +#endif // __ARM_NEON } } // namespace bnn diff --git a/dabnn/mat.h b/dabnn/mat.h index c12fe80..d31eaed 100644 --- a/dabnn/mat.h +++ b/dabnn/mat.h @@ -271,12 +271,32 @@ inline bool Mat::operator==(const Mat &m) const { h == m.h && c == m.c && data_type == m.data_type)) { return false; } - FORZ(i, 
total()) { - if (std::abs(static_cast(data)[i] - m[i]) > 1e-5) { - PNT(static_cast(data)[i]); - PNT(m[i]); - return false; + if (m.data_type == DataType::Float) { + FORZ(i, total()) { + const auto elem = static_cast(data)[i]; + if (std::isnan(elem) && !std::isnan(m[i])) { + PNT(elem, m[i]); + return false; + } + if (!std::isnan(elem) && std::isnan(m[i])) { + PNT(elem, m[i]); + return false; + } + if (std::abs(elem - m[i]) > 1e-5) { + PNT(i, elem, m[i]); + return false; + } } + } else if (m.data_type == DataType::Bit) { + FORZ(i, total()) { + const auto elem = static_cast(data)[i]; + if (elem != m[i]) { + PNT(elem, m[i]); + return false; + } + } + } else { + throw std::invalid_argument("Unknown datatype"); } return true; } diff --git a/dabnn/net.cpp b/dabnn/net.cpp index b2c4870..fcdb3a8 100644 --- a/dabnn/net.cpp +++ b/dabnn/net.cpp @@ -78,6 +78,7 @@ void Net::prepare() { shaper.AddShape(name, shape); +#ifdef __aarch64__ if (Shaper::c(shape) % 128 == 0) { // Re-arrange the bit order const auto len = shaper.total(shape); @@ -95,11 +96,14 @@ void Net::prepare() { bnn::DataType::Bit, false)); pack_mat_128_2(*tmp, *mat_map_[name]); } else { +#endif // __aarch64__ add_mat(name, std::make_shared( shape[0], shape[1], shape[2], shape[3], const_cast(data), bnn::DataType::Bit, false)); +#ifdef __aarch64__ } +#endif // __aarch64__ } else if (tensor->data_type() == flatbnn::DataType::Float32) { Shaper::Shape shape(tensor->shape()->begin(), tensor->shape()->end()); diff --git a/images/comparison_cn.png b/images/comparison_cn.png index 00df92d..415e4b4 100644 Binary files a/images/comparison_cn.png and b/images/comparison_cn.png differ diff --git a/images/comparison_en.png b/images/comparison_en.png index 2983a5f..422a73e 100644 Binary files a/images/comparison_en.png and b/images/comparison_en.png differ diff --git a/tests/bconv_test.cpp b/tests/bconv_test.cpp index d6e2bfe..b719255 100644 --- a/tests/bconv_test.cpp +++ b/tests/bconv_test.cpp @@ -190,7 +190,7 @@ TEST(bconv_test, bconv_test_3x3_64) { bnn::Mat c(CHEIGHT, CWIDTH, NUM_OUTPUT, bnn::DataType::Float); c.fill(0); - bnn::bconv_3x3_64(padded, b, c); + bnn::bconv_3x3(padded, b, c); bnn::Mat expected(CHEIGHT, CWIDTH, NUM_OUTPUT, bnn::DataType::Float); expected.fill(0); @@ -229,7 +229,7 @@ TEST(bconv_test, bconv_test_3x3_64_s2) { bnn::Mat c(CHEIGHT, CWIDTH, NUM_OUTPUT, bnn::DataType::Float); c.fill(0); - bnn::bconv_3x3_64(padded, b, c, 2); + bnn::bconv_3x3(padded, b, c, 2); bnn::Mat expected(CHEIGHT, CWIDTH, NUM_OUTPUT, bnn::DataType::Float); expected.fill(0); diff --git a/tests/bitpack_test.cpp b/tests/bitpack_test.cpp index 31e8cfc..dd7c97f 100644 --- a/tests/bitpack_test.cpp +++ b/tests/bitpack_test.cpp @@ -9,6 +9,7 @@ #include #include +#ifdef __aarch64__ TEST(bitpack, pack_mat_128) { const size_t AHEIGHT = 64; const size_t AWIDTH = 64; @@ -32,6 +33,7 @@ TEST(bitpack, pack_mat_128) { bitcount(*(static_cast(expected) + i + 1))); } } +#endif // __aarch64__ TEST(bitpack, pack_mat_64) { const size_t AHEIGHT = 64; @@ -57,6 +59,7 @@ TEST(bitpack, pack_mat_64) { } } +#ifdef __aarch64__ TEST(bitpack, pack_mat_fallback) { const size_t AHEIGHT = 64; const size_t AWIDTH = 64; @@ -74,3 +77,18 @@ TEST(bitpack, pack_mat_fallback) { ASSERT_EQ(a_binary, expected); } +#endif // __aarch64__ + +TEST(bitpack, addv_v7) { + uint64_t data[2]; + fill_rand_uint64(data, 2); + uint8x16_t v = vld1q_u8(reinterpret_cast(data)); + auto v1 = vcntq_u8(v); + auto v2 = vpaddlq_u8(v1); + auto v3 = vpaddlq_u16(v2); + auto v4 = vpaddlq_u32(v3); + + auto res = vgetq_lane_u64(v4, 0) 
+ vgetq_lane_u64(v4, 1); + + ASSERT_EQ(res, __builtin_popcountl(data[0]) + __builtin_popcountl(data[1])); +}
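The `addv_v7` test above exercises the NEON `vcnt`/`vpaddl` reduction that the new armv7 `micro_kernel` relies on. For readers who do not want to trace the inline assembly, the following scalar sketch describes what the micro-kernels accumulate for one register block; the packing layout and step granularity are simplified (the real kernels process 128 bits per step and convert the counters to float at the end), and the helper name is illustrative rather than part of dabnn.

```cpp
#include <cstdint>

// Scalar reference for the P x R register block computed by micro_kernel:
// aarch64 uses P = 8, R = 6; the new armv7 path uses P = 4, R = 4.
// Each step XORs one packed word of a row of A with one packed word of a
// column of B and adds the popcount to the corresponding C entry.
// Layout is simplified: a holds P words per step, b holds R words per step,
// and c is a row-major P x R block of counters.
template <int P, int R>
inline void micro_kernel_reference(int64_t kc, float* c, const uint64_t* a,
                                   const uint64_t* b) {
    for (int64_t step = 0; step < kc; ++step) {
        for (int i = 0; i < P; ++i) {
            for (int j = 0; j < R; ++j) {
                c[i * R + j] += static_cast<float>(
                    __builtin_popcountll(a[step * P + i] ^ b[step * R + j]));
            }
        }
    }
}
```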