
Commit f06f1bc

Merge pull request #57 from JDAI-CV/prelu
Add the support for prelu
daquexian authored Oct 11, 2019
2 parents 55d7534 + e801568 commit f06f1bc
Showing 8 changed files with 208 additions and 11 deletions.
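
For reference, PRelu (parametric ReLU) leaves non-negative values unchanged and scales negative values by a learned slope; in this PR the slope is either a single scalar or one value per channel. A minimal sketch of the element-wise rule (illustrative only, not part of the diff):

```cpp
// Element-wise PRelu rule: identity for x >= 0, slope * x otherwise.
inline float prelu(float x, float slope) {
    return x >= 0.0f ? x : slope * x;
}
```
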
10 changes: 9 additions & 1 deletion common/dab.fbs
@@ -2,7 +2,7 @@ namespace flatbnn;

enum DataType:byte { Float32 = 0, Bit }
enum LayerType:byte { FpConv2D = 0, AvePool, MaxPool, Relu, Softmax, FC, Add, Concat,
BinConv2D, Affine, Binarize, Split, Shuffle}
BinConv2D, Affine, Binarize, Split, Shuffle, PRelu}

table Tensor {
data_type:DataType;
@@ -117,6 +117,12 @@ table Affine {
output:string;
}

table PRelu {
input:string;
slope:string;
output:string;
}

table Layer {
type:LayerType;
fp_conv2d_param:FpConv2D;
@@ -133,6 +139,8 @@ table Layer {
split_param:Split;
shuffle_param:Shuffle;
name:string;
prelu_param:PRelu;
// Note: new fields should only be added at the end
}

table Model {
112 changes: 104 additions & 8 deletions common/dab_generated.h
@@ -38,6 +38,8 @@ struct Split;

struct Affine;

struct PRelu;

struct Layer;

struct Model;
@@ -86,11 +88,12 @@ enum class LayerType : int8_t {
Binarize = 10,
Split = 11,
Shuffle = 12,
PRelu = 13,
MIN = FpConv2D,
MAX = Shuffle
MAX = PRelu
};

inline const LayerType (&EnumValuesLayerType())[13] {
inline const LayerType (&EnumValuesLayerType())[14] {
static const LayerType values[] = {
LayerType::FpConv2D,
LayerType::AvePool,
@@ -104,7 +107,8 @@ inline const LayerType (&EnumValuesLayerType())[13] {
LayerType::Affine,
LayerType::Binarize,
LayerType::Split,
LayerType::Shuffle
LayerType::Shuffle,
LayerType::PRelu
};
return values;
}
@@ -124,13 +128,14 @@ inline const char * const *EnumNamesLayerType() {
"Binarize",
"Split",
"Shuffle",
"PRelu",
nullptr
};
return names;
}

inline const char *EnumNameLayerType(LayerType e) {
if (e < LayerType::FpConv2D || e > LayerType::Shuffle) return "";
if (e < LayerType::FpConv2D || e > LayerType::PRelu) return "";
const size_t index = static_cast<size_t>(e);
return EnumNamesLayerType()[index];
}
@@ -1467,6 +1472,84 @@ inline flatbuffers::Offset<Affine> CreateAffineDirect(
output__);
}

struct PRelu FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_INPUT = 4,
VT_SLOPE = 6,
VT_OUTPUT = 8
};
const flatbuffers::String *input() const {
return GetPointer<const flatbuffers::String *>(VT_INPUT);
}
const flatbuffers::String *slope() const {
return GetPointer<const flatbuffers::String *>(VT_SLOPE);
}
const flatbuffers::String *output() const {
return GetPointer<const flatbuffers::String *>(VT_OUTPUT);
}
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffset(verifier, VT_INPUT) &&
verifier.VerifyString(input()) &&
VerifyOffset(verifier, VT_SLOPE) &&
verifier.VerifyString(slope()) &&
VerifyOffset(verifier, VT_OUTPUT) &&
verifier.VerifyString(output()) &&
verifier.EndTable();
}
};

struct PReluBuilder {
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;
void add_input(flatbuffers::Offset<flatbuffers::String> input) {
fbb_.AddOffset(PRelu::VT_INPUT, input);
}
void add_slope(flatbuffers::Offset<flatbuffers::String> slope) {
fbb_.AddOffset(PRelu::VT_SLOPE, slope);
}
void add_output(flatbuffers::Offset<flatbuffers::String> output) {
fbb_.AddOffset(PRelu::VT_OUTPUT, output);
}
explicit PReluBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
PReluBuilder &operator=(const PReluBuilder &);
flatbuffers::Offset<PRelu> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<PRelu>(end);
return o;
}
};

inline flatbuffers::Offset<PRelu> CreatePRelu(
flatbuffers::FlatBufferBuilder &_fbb,
flatbuffers::Offset<flatbuffers::String> input = 0,
flatbuffers::Offset<flatbuffers::String> slope = 0,
flatbuffers::Offset<flatbuffers::String> output = 0) {
PReluBuilder builder_(_fbb);
builder_.add_output(output);
builder_.add_slope(slope);
builder_.add_input(input);
return builder_.Finish();
}

inline flatbuffers::Offset<PRelu> CreatePReluDirect(
flatbuffers::FlatBufferBuilder &_fbb,
const char *input = nullptr,
const char *slope = nullptr,
const char *output = nullptr) {
auto input__ = input ? _fbb.CreateString(input) : 0;
auto slope__ = slope ? _fbb.CreateString(slope) : 0;
auto output__ = output ? _fbb.CreateString(output) : 0;
return flatbnn::CreatePRelu(
_fbb,
input__,
slope__,
output__);
}

struct Layer FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_TYPE = 4,
@@ -1483,7 +1566,8 @@ struct Layer FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
VT_BINARIZE_PARAM = 26,
VT_SPLIT_PARAM = 28,
VT_SHUFFLE_PARAM = 30,
VT_NAME = 32
VT_NAME = 32,
VT_PRELU_PARAM = 34
};
LayerType type() const {
return static_cast<LayerType>(GetField<int8_t>(VT_TYPE, 0));
@@ -1530,6 +1614,9 @@ struct Layer FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
const flatbuffers::String *name() const {
return GetPointer<const flatbuffers::String *>(VT_NAME);
}
const PRelu *prelu_param() const {
return GetPointer<const PRelu *>(VT_PRELU_PARAM);
}
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyField<int8_t>(verifier, VT_TYPE) &&
@@ -1561,6 +1648,8 @@
verifier.VerifyTable(shuffle_param()) &&
VerifyOffset(verifier, VT_NAME) &&
verifier.VerifyString(name()) &&
VerifyOffset(verifier, VT_PRELU_PARAM) &&
verifier.VerifyTable(prelu_param()) &&
verifier.EndTable();
}
};
@@ -1613,6 +1702,9 @@ struct LayerBuilder {
void add_name(flatbuffers::Offset<flatbuffers::String> name) {
fbb_.AddOffset(Layer::VT_NAME, name);
}
void add_prelu_param(flatbuffers::Offset<PRelu> prelu_param) {
fbb_.AddOffset(Layer::VT_PRELU_PARAM, prelu_param);
}
explicit LayerBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
@@ -1641,8 +1733,10 @@ inline flatbuffers::Offset<Layer> CreateLayer(
flatbuffers::Offset<Binarize> binarize_param = 0,
flatbuffers::Offset<Split> split_param = 0,
flatbuffers::Offset<Shuffle> shuffle_param = 0,
flatbuffers::Offset<flatbuffers::String> name = 0) {
flatbuffers::Offset<flatbuffers::String> name = 0,
flatbuffers::Offset<PRelu> prelu_param = 0) {
LayerBuilder builder_(_fbb);
builder_.add_prelu_param(prelu_param);
builder_.add_name(name);
builder_.add_shuffle_param(shuffle_param);
builder_.add_split_param(split_param);
@@ -1677,7 +1771,8 @@ inline flatbuffers::Offset<Layer> CreateLayerDirect(
flatbuffers::Offset<Binarize> binarize_param = 0,
flatbuffers::Offset<Split> split_param = 0,
flatbuffers::Offset<Shuffle> shuffle_param = 0,
const char *name = nullptr) {
const char *name = nullptr,
flatbuffers::Offset<PRelu> prelu_param = 0) {
auto name__ = name ? _fbb.CreateString(name) : 0;
return flatbnn::CreateLayer(
_fbb,
Expand All @@ -1695,7 +1790,8 @@ inline flatbuffers::Offset<Layer> CreateLayerDirect(
binarize_param,
split_param,
shuffle_param,
name__);
name__,
prelu_param);
}

struct Model FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
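
With the regenerated header above, a PRelu layer can be serialized through the usual FlatBuffers builder pattern. A hedged sketch using the helpers shown in this diff (the blob names are placeholders, and `add_type` is the standard flatc-generated setter for the `type` field, not shown in these hunks):

```cpp
// Illustrative sketch, assuming #include "dab_generated.h".
// "conv1_out", "prelu1_slope", "prelu1_out" and "prelu1" are placeholder names.
flatbuffers::FlatBufferBuilder fbb;
auto prelu = flatbnn::CreatePReluDirect(fbb, "conv1_out", "prelu1_slope",
                                        "prelu1_out");
auto name = fbb.CreateString("prelu1");

flatbnn::LayerBuilder layer_builder(fbb);
layer_builder.add_type(flatbnn::LayerType::PRelu);  // assumed generated setter
layer_builder.add_name(name);
layer_builder.add_prelu_param(prelu);
auto layer = layer_builder.Finish();
```
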
2 changes: 2 additions & 0 deletions common/flatbuffers_helper.h
@@ -150,6 +150,8 @@ inline std::string layer_type_to_str(flatbnn::LayerType type) {
return "split";
case flatbnn::LayerType::Shuffle:
return "shuffle";
case flatbnn::LayerType::PRelu:
return "prelu";
default:
BNN_ASSERT(false, "Missing type in this function");
}
2 changes: 2 additions & 0 deletions dabnn/CMakeLists.txt
@@ -31,6 +31,8 @@ add_library(dabnn
layers/Shuffle.h
layers/Split.cpp
layers/Split.h
layers/PRelu.cpp
layers/PRelu.h
layer.h
layer.cpp
${PROJECT_SOURCE_DIR}/common/Shaper.cpp
32 changes: 32 additions & 0 deletions dabnn/layers/PRelu.cpp
@@ -0,0 +1,32 @@
// Copyright 2019 JD.com Inc. JD AI

#include "PRelu.h"

namespace bnn {
void PRelu::forward_impl() const {
BNN_ASSERT(slope_mat->total() == 1 ||
slope_mat->total() == static_cast<size_t>(data_mat->c),
"slope must have size 1 or input.channels");
float *ptr = static_cast<float *>(*data_mat);
float *slope_ptr = static_cast<float *>(*slope_mat);
if (slope_mat->total() == 1) {
const auto slope = *slope_ptr;
FORZ(i, data_mat->total()) {
if (*ptr < 0) {
*ptr = (*ptr) * slope;
}
ptr++;
}
} else if (slope_mat->total() == static_cast<size_t>(data_mat->c)) {
const auto nhw = data_mat->n * data_mat->h * data_mat->w;
FORZ(i, nhw) {
FORZ(j, data_mat->c) {
if (*ptr < 0) {
*ptr = (*ptr) * slope_ptr[j];
}
ptr++;
}
}
}
}
} // namespace bnn
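
In the per-channel branch above, the channel index is the innermost loop and the data pointer advances one element at a time, so the layer assumes a channel-last memory layout. A self-contained sketch of the same update (illustrative, not dabnn code):

```cpp
// Illustrative sketch: per-channel PRelu over a channel-last buffer,
// mirroring the second branch of PRelu::forward_impl above.
#include <cstddef>

void prelu_per_channel(float *data, const float *slope,
                       std::size_t nhw, std::size_t channels) {
    for (std::size_t i = 0; i < nhw; ++i) {
        for (std::size_t c = 0; c < channels; ++c) {
            float &v = data[i * channels + c];
            if (v < 0.0f) v *= slope[c];  // scale negatives by the channel slope
        }
    }
}
```
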
21 changes: 21 additions & 0 deletions dabnn/layers/PRelu.h
@@ -0,0 +1,21 @@
// Copyright 2019 JD.com Inc. JD AI

#ifndef BNN_PRELU_H
#define BNN_PRELU_H

#include <dabnn/layer.h>

namespace bnn {
class PRelu : public Layer {
public:
MatCP data_mat;
MatCP slope_mat;

PRelu(NetCP net, const std::string &name, css data, css slope)
: Layer(net, name, "PRelu"), data_mat(mat(data)), slope_mat(mat(slope)) {}
virtual void forward_impl() const;
};
} // namespace bnn

#endif /* BNN_PRELU_H */

7 changes: 7 additions & 0 deletions dabnn/net.cpp
@@ -22,6 +22,7 @@
#include <dabnn/layers/Relu.h>
#include <dabnn/layers/Shuffle.h>
#include <dabnn/layers/Split.h>
#include <dabnn/layers/PRelu.h>

using std::string;
using std::vector;
@@ -253,6 +254,12 @@ void Net::prepare() {
std::make_shared<Shuffle>(get_weak(), name, input));
break;
}
case flatbnn::LayerType::PRelu: {
ADD_INPLACE_LAYER(prelu, Eltwise, input, slope, output);
layers.push_back(
std::make_shared<PRelu>(get_weak(), name, input, slope));
break;
}
default: {
throw std::runtime_error("Not supported op " +
layer_type_to_str(layer->type()));
33 changes: 31 additions & 2 deletions tools/onnx2bnn/OnnxConverter.cpp
@@ -298,7 +298,8 @@ std::vector<std::string> OnnxConverter::Convert(
}
}
if (!precede_bn) {
throw std::invalid_argument("Binary convolutions should precede BatchNorm");
throw std::invalid_argument(
"Binary convolutions should precede BatchNorm");
}
}
AddConv(m(node.input(0)), strides, pads, dilations, group,
@@ -354,6 +355,34 @@ std::vector<std::string> OnnxConverter::Convert(
}
layers_.push_back(layer);
VLOG(5) << "Converting Pool completed";
} else if (op == "PRelu") {
VLOG(5) << "Start converting PRelu";
auto input_name = m(node.input(0));
auto slope_name = m(node.input(1));
const auto onnx_slope_tensor = onnx_float_tensors_.at(slope_name);
BNN_ASSERT(shaper_[input_name].size() == 4,
"PRelu only support 4-d tensor input for now");
const auto slope_shape = onnx_slope_tensor.shape;
BNN_ASSERT(
(slope_shape.size() == 3 && slope_shape[1] == 1 &&
slope_shape[2] == 1) ||
onnx_slope_tensor.data == std::vector<float>{1},
"PRelu only support scalr slope or per-channel slope for now");
const Shape flat_slope_shape{slope_shape[0]};
auto flat_slope_tensor = flatbnn::CreateTensorDirect(
builder_, flatbnn::DataType::Float32, nullptr,
&onnx_slope_tensor.data, &flat_slope_shape, slope_name.c_str());
tensors_.push_back(flat_slope_tensor);
auto output_name = m(node.output(0));
shaper_.Relu(input_name, output_name);
auto param = flatbnn::CreatePReluDirect(
builder_, input_name.c_str(), slope_name.c_str(),
output_name.c_str());
auto layer =
flatbnn::CreateLayer(builder_, flatbnn::LayerType::PRelu, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, param);
layers_.push_back(layer);
VLOG(5) << "Converting PRelu completed";
} else if (op == "Relu") {
VLOG(5) << "Start converting Relu";
auto input_name = m(node.input(0));
@@ -569,7 +598,7 @@ void OnnxConverter::CalculateCoeff(const ONNX_NAMESPACE::NodeProto &node,
}
if (node2.input_size() == 3) {
const auto &bias = onnx_float_tensors_[node2.input(2)];

FORZ(i, coeff_b_data.size()) {
coeff_b_data[i] += coeff_a_data[i] * bias.data[i];
}
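
In the PRelu branch of OnnxConverter::Convert above, the converter accepts either a single-element slope or an ONNX per-channel slope of shape [C, 1, 1] and flattens the latter to a one-dimensional [C] tensor before serializing it. A small sketch of that shape test (illustrative only; the shape element type is assumed to be a 32-bit unsigned integer):

```cpp
// Illustrative sketch: per-channel slope detection mirrored from the
// converter above. ONNX stores a per-channel PRelu slope for a 4-d
// input as [C, 1, 1]; the flat dabnn tensor keeps only [C].
#include <cstdint>
#include <vector>

bool is_per_channel_slope(const std::vector<uint32_t> &slope_shape) {
    return slope_shape.size() == 3 && slope_shape[1] == 1 &&
           slope_shape[2] == 1;
}
```
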
