[TOP] Level 3 complete (#7)
tqchen committed May 29, 2018
1 parent 1f26ede commit da251d0
Showing 15 changed files with 665 additions and 258 deletions.
5 changes: 4 additions & 1 deletion nnvm/CMakeLists.txt
@@ -53,7 +53,10 @@ mxnet_source_group("Source\\core" GLOB "src/core/*.cc")
mxnet_source_group("Source\\pass" GLOB "src/pass/*.cc")


FILE(GLOB_RECURSE SOURCE "src/*.cc" "src/*.h" "include/*.h")
FILE(GLOB_RECURSE SOURCE
src/c_api/*.cc
src/core/*.cc
src/pass/*.cc)

if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/dmlc-core/CMakeLists.txt)
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/dmlc-core/include)
6 changes: 3 additions & 3 deletions nnvm/Makefile
@@ -51,10 +51,10 @@ else
NO_WHOLE_ARCH= --no-whole-archive
endif

-all: lib/libnnvm.a lib/libnnvm_example.$(SHARED_LIBRARY_SUFFIX)
+all: lib/libnnvm.a lib/libnnvm_top.$(SHARED_LIBRARY_SUFFIX)

SRC = $(wildcard src/*.cc src/c_api/*.cc src/core/*.cc src/pass/*.cc)
-SRC_TOP = $(wildcard src/top/*.cc)
+SRC_TOP = $(wildcard src/top/*.cc src/top/*/*.cc)
ALL_OBJ = $(patsubst %.cc, build/%.o, $(SRC))
TOP_OBJ = $(patsubst %.cc, build/%.o, $(SRC_TOP))
ALL_DEP = $(ALL_OBJ)
@@ -72,7 +72,7 @@ lib/libnnvm.a: $(ALL_DEP)
@mkdir -p $(@D)
ar crv $@ $(filter %.o, $?)

-lib/libnnvm_example.$(SHARED_LIBRARY_SUFFIX): lib/libnnvm.a ${TOP_OBJ}
+lib/libnnvm_top.$(SHARED_LIBRARY_SUFFIX): lib/libnnvm.a ${TOP_OBJ}
@mkdir -p $(@D)
$(CXX) $(CFLAGS) -shared -o $@ $(filter %.o, $^) $(LDFLAGS) -Wl,${WHOLE_ARCH} lib/libnnvm.a -Wl,${NO_WHOLE_ARCH}

20 changes: 11 additions & 9 deletions nnvm/include/nnvm/top/nn.h
@@ -54,15 +54,15 @@ struct DropoutParam : public dmlc::Parameter<DropoutParam> {

struct BatchNormParam : public dmlc::Parameter<BatchNormParam> {
int axis;
-float epsilon;
-float momentum;
+double epsilon;
+double momentum;
bool center;
bool scale;

DMLC_DECLARE_PARAMETER(BatchNormParam) {
DMLC_DECLARE_FIELD(axis).set_default(1)
.describe("Specify which shape axis the channel is specified.");
-DMLC_DECLARE_FIELD(epsilon).set_default(1e-5f)
+DMLC_DECLARE_FIELD(epsilon).set_default(1e-5)
.describe("Small float added to variance to avoid dividing by zero.");
DMLC_DECLARE_FIELD(center).set_default(true)
.describe("If True, add offset of `beta` to normalized tensor."
@@ -81,21 +81,23 @@ struct BatchNormParam : public dmlc::Parameter<BatchNormParam> {
static const constexpr int kMovingVariance = 4;
};


+// Shared by softmax and log_softmax
struct SoftmaxParam : public dmlc::Parameter<SoftmaxParam> {
int axis;

DMLC_DECLARE_PARAMETER(SoftmaxParam) {
DMLC_DECLARE_FIELD(axis).set_default(-1)
.describe("The axis to sum over when computing softmax.");
.describe("The axis to sum over when computing softmax.");
}
};

-struct LogSoftmaxParam : public dmlc::Parameter<LogSoftmaxParam> {
-int axis;
+struct LeakyReLUParam : public dmlc::Parameter<LeakyReLUParam> {
+double alpha;

-DMLC_DECLARE_PARAMETER(LogSoftmaxParam) {
-DMLC_DECLARE_FIELD(axis).set_default(-1)
-.describe("The axis to sum over when computing softmax.");
+DMLC_DECLARE_PARAMETER(LeakyReLUParam) {
+DMLC_DECLARE_FIELD(alpha).set_lower_bound(0.0).set_default(0.25)
+.describe("slope coefficient for the negative half axis.");
}
};

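The header changes above widen epsilon and momentum from float to double, fold log_softmax onto the shared SoftmaxParam, and introduce LeakyReLUParam. As a quick illustration of what the DMLC_DECLARE_FIELD chain buys, here is a minimal standalone sketch (assumptions: only dmlc-core is available, and the struct body is copied verbatim from the header above) of how such a parameter struct is filled from string key/value pairs:

#include <dmlc/parameter.h>
#include <map>
#include <string>

struct LeakyReLUParam : public dmlc::Parameter<LeakyReLUParam> {
  double alpha;
  DMLC_DECLARE_PARAMETER(LeakyReLUParam) {
    DMLC_DECLARE_FIELD(alpha).set_lower_bound(0.0).set_default(0.25)
      .describe("slope coefficient for the negative half axis.");
  }
};
DMLC_REGISTER_PARAMETER(LeakyReLUParam);

int main() {
  LeakyReLUParam p;
  p.Init(std::map<std::string, std::string>{});                  // no kwargs: alpha falls back to 0.25
  p.Init(std::map<std::string, std::string>{{"alpha", "0.1"}});  // value parsed from its string form
  // p.Init({{"alpha", "-1.0"}});  // would throw: -1.0 violates set_lower_bound(0.0)
  return 0;
}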
18 changes: 18 additions & 0 deletions nnvm/include/nnvm/top/tensor.h
@@ -40,6 +40,24 @@ struct CastParam : public dmlc::Parameter<CastParam> {
}
};

+struct ReshapeParam : public dmlc::Parameter<ReshapeParam> {
+Tuple<int64_t> shape;
+
+DMLC_DECLARE_PARAMETER(ReshapeParam) {
+DMLC_DECLARE_FIELD(shape);
+}
+};

+struct ScalarParam : public dmlc::Parameter<ScalarParam> {
+double scalar;
+
+DMLC_DECLARE_PARAMETER(ScalarParam) {
+DMLC_DECLARE_FIELD(scalar);
+}
+};



} // namespace top
} // namespace nnvm

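ReshapeParam above declares shape without a set_default, which makes it a required field; Tuple<int64_t> values are parsed from their textual form. A small sketch of both behaviors (assuming nnvm::Tuple's stream operators, which dmlc-core uses when parsing string kwargs):

#include <dmlc/parameter.h>
#include <nnvm/tuple.h>
#include <map>
#include <string>

struct ReshapeParam : public dmlc::Parameter<ReshapeParam> {
  nnvm::Tuple<int64_t> shape;
  DMLC_DECLARE_PARAMETER(ReshapeParam) {
    DMLC_DECLARE_FIELD(shape);  // no default: callers must supply it
  }
};
DMLC_REGISTER_PARAMETER(ReshapeParam);

int main() {
  ReshapeParam p;
  p.Init(std::map<std::string, std::string>{{"shape", "(2, -1)"}});  // parsed into Tuple<int64_t>
  // p.Init(std::map<std::string, std::string>{});  // would throw: required field "shape" missing
  return 0;
}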
2 changes: 1 addition & 1 deletion nnvm/python/nnvm/libinfo.py
@@ -26,7 +26,7 @@ def find_lib_path():
if hasattr(__builtin__, "NNVM_LIBRARY_NAME"):
lib_name = __builtin__.NNVM_LIBRARY_NAME
else:
lib_name = "libnnvm_example"
lib_name = "libnnvm_top"

api_path = os.path.join(base_path, '../../lib/')
cmake_build_path = os.path.join(base_path, '../../build/Release/')
45 changes: 40 additions & 5 deletions nnvm/src/top/nn.cc → nnvm/src/top/nn/nn.cc
@@ -7,8 +7,8 @@
#include <nnvm/node.h>
#include <nnvm/op_attr_types.h>
#include <nnvm/top/nn.h>
#include "./op_common.h"
#include "./elemwise_op_common.h"
#include "../op_common.h"
#include "../elemwise_op_common.h"

namespace nnvm {
namespace top {
@@ -126,6 +126,25 @@ NNVM_REGISTER_OP(dropout)
// batchnorm
DMLC_REGISTER_PARAMETER(BatchNormParam);

+inline bool BatchNormInferShape(const nnvm::NodeAttrs& attrs,
+                                std::vector<TShape> *in_shape,
+                                std::vector<TShape> *out_shape) {
+  CHECK_EQ(in_shape->size(), 5U)
+      << "Input:[data, gamma, beta, moving_mean, moving_var]";
+  CHECK_EQ(out_shape->size(), 3U);
+  const TShape &dshape = in_shape->at(0);
+  if (dshape.ndim() == 0) return false;
+  TShape bshape({dshape[1]});
+  NNVM_ASSIGN_INPUT_SHAPE(attrs, *in_shape, 1, bshape);
+  NNVM_ASSIGN_INPUT_SHAPE(attrs, *in_shape, 2, bshape);
+  NNVM_ASSIGN_INPUT_SHAPE(attrs, *in_shape, 3, bshape);
+  NNVM_ASSIGN_INPUT_SHAPE(attrs, *in_shape, 4, bshape);
+  NNVM_ASSIGN_OUTPUT_SHAPE(attrs, *out_shape, 0, dshape);
+  NNVM_ASSIGN_OUTPUT_SHAPE(attrs, *out_shape, 1, bshape);
+  NNVM_ASSIGN_OUTPUT_SHAPE(attrs, *out_shape, 2, bshape);
+  return true;
+}

NNVM_REGISTER_OP(batch_norm)
.describe(R"(Batch normalization layer (Ioffe and Szegedy, 2014).
Normalizes the input at each batch, i.e. applies a transformation
@@ -167,6 +186,8 @@ axis to be the last item in the input shape.
.set_num_inputs(5)
.set_num_outputs(3)
.set_attr_parser(ParamParser<BatchNormParam>)
.set_attr<FInferShape>("FInferShape", BatchNormInferShape)
.set_attr<FInferType>("FInferType", ElemwiseType<5, 3>)
.set_attr<FListInputNames>("FListInputNames", [](const NodeAttrs& attrs) {
return std::vector<std::string>{"data", "gamma", "beta", "moving_mean", "moving_var"};
})
@@ -198,8 +219,6 @@ NNVM_REGISTER_OP(softmax)
.set_support_level(1);

// log_softmax
-DMLC_REGISTER_PARAMETER(LogSoftmaxParam);
-
NNVM_REGISTER_OP(log_softmax)
.describe(R"code(Computes softmax.
@@ -208,7 +227,23 @@ NNVM_REGISTER_OP(log_softmax)
)code" NNVM_ADD_FILELINE)
.set_num_inputs(1)
.set_num_outputs(1)
.set_attr_parser(ParamParser<LogSoftmaxParam>)
.set_attr_parser(ParamParser<SoftmaxParam>)
.set_attr<FInferShape>("FInferShape", ElemwiseShape<1, 1>)
.set_attr<FInferType>("FInferType", ElemwiseType<1, 1>)
.set_support_level(1);

+// leaky_relu
+DMLC_REGISTER_PARAMETER(LeakyReLUParam);
+
+NNVM_REGISTER_OP(leaky_relu)
+.describe(R"code(Leaky version of a Rectified Linear Unit.
+
+`y = x > 0 ? x : alpha * x`
+
+)code" NNVM_ADD_FILELINE)
+.set_num_inputs(1)
+.set_num_outputs(1)
+.set_attr_parser(ParamParser<LeakyReLUParam>)
+.set_attr<FInferShape>("FInferShape", ElemwiseShape<1, 1>)
+.set_attr<FInferType>("FInferType", ElemwiseType<1, 1>)
+.set_support_level(1);
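
To make the one-line semantics in the describe block concrete, a reference implementation of the forward computation (a plain sketch over std::vector, not NNVM's actual kernel machinery):

#include <cstddef>
#include <vector>

// Elementwise y = x > 0 ? x : alpha * x; alpha defaults to 0.25 per LeakyReLUParam.
std::vector<double> LeakyReLUForward(const std::vector<double>& x, double alpha = 0.25) {
  std::vector<double> y(x.size());
  for (std::size_t i = 0; i < x.size(); ++i) {
    y[i] = x[i] > 0 ? x[i] : alpha * x[i];
  }
  return y;
}

int main() {
  std::vector<double> y = LeakyReLUForward({-2.0, 3.0});  // yields {-0.5, 3.0}
  return 0;
}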
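Stepping back to the BatchNormInferShape function added earlier in this file: it hardwires the channel dimension to axis 1 (bshape = {dshape[1]}) and broadcasts the four auxiliary inputs and the two auxiliary outputs to that length; the standard transform out = (data - moving_mean) / sqrt(moving_var + epsilon) * gamma + beta then applies per channel. A standalone restatement of the shape rule (hypothetical helper name, plain vectors instead of TShape):

#include <cassert>
#include <cstdint>
#include <vector>

// For data of shape (N, C, ...): gamma/beta/moving_mean/moving_var and the
// saved mean/var outputs are all fixed to shape (C,), mirroring BatchNormInferShape.
std::vector<std::int64_t> BatchNormAuxShape(const std::vector<std::int64_t>& dshape) {
  assert(dshape.size() >= 2);  // at least (batch, channel), like the dshape[1] access above
  return {dshape[1]};
}

int main() {
  std::vector<std::int64_t> data{32, 64, 28, 28};  // NCHW input
  assert(BatchNormAuxShape(data) == std::vector<std::int64_t>({64}));
  return 0;
}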
(9 additional changed files not shown)
