Skip to content
This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

reorg code #12

Merged
merged 3 commits into from
Aug 11, 2015
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 5 additions & 5 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -81,16 +81,16 @@ engine.o: src/dag_engine/simple_engine.cc
narray.o: src/narray/narray.cc
narray_op_cpu.o: src/narray/narray_op_cpu.cc src/narray/narray_op-inl.h
narray_op_gpu.o: src/narray/narray_op_gpu.cu src/narray/narray_op-inl.h
static_operator.o: src/static_operator/static_operator.cc
static_operator_cpu.o: src/static_operator/static_operator_cpu.cc
static_operator_gpu.o: src/static_operator/static_operator_gpu.cu
static_operator.o: src/operator/static_operator/static_operator.cc
static_operator_cpu.o: src/operator/static_operator/static_operator_cpu.cc
static_operator_gpu.o: src/operator/static_operator/static_operator_gpu.cu
symbol.o: src/symbol/symbol.cc
static_graph.o : src/symbol/static_graph.cc
registry.o: src/registry.cc
c_api.o: src/c_api.cc
operator.o: src/operator/static_operator_wrapper.cc
fully_connect_op_cpu.o: src/static_operator/fully_connect_op.cc
fully_connect_op_gpu.o: src/static_operator/fully_connect_op.cu
fully_connect_op_cpu.o: src/operator/static_operator/fully_connect_op.cc
fully_connect_op_gpu.o: src/operator/static_operator/fully_connect_op.cu


lib/libmxnet.a: $(OBJ) $(OBJCXX11) $(CUOBJ)
Expand Down
94 changes: 0 additions & 94 deletions include/mxnet/atomic_symbol.h

This file was deleted.

42 changes: 42 additions & 0 deletions include/mxnet/base.h
Original file line number Diff line number Diff line change
Expand Up @@ -74,5 +74,47 @@ enum Property {
kForwardRequireRnd = 2,
};

/*! \brief context information about the execution environment */
struct Context {
  /*! \brief the device type we run the op on, can be cpu::kDevMask or gpu::kDevMask */
  int dev_mask;
  /*! \brief device id we are going to run it on */
  int dev_id;
  /*! \brief default constructor: CPU device 0 */
  Context() : dev_mask(cpu::kDevMask), dev_id(0) {}
  /*!
   * \brief constructor of context
   * \param dev_mask the device mask
   * \param dev_id the device id
   */
  Context(int dev_mask, int dev_id)
      : dev_mask(dev_mask), dev_id(dev_id) {}
  /*!
   * \brief check if current context equals another one
   * \param b another context to compare
   * \return whether dev mask and id are same
   */
  inline bool operator==(const Context &b) const {
    return dev_mask == b.dev_mask && dev_id == b.dev_id;
  }
  /*!
   * \brief check if current context differs from another one
   *  (not synthesized automatically before C++20, so provide it
   *  explicitly alongside operator==)
   * \param b another context to compare
   * \return whether dev mask or dev id differ
   */
  inline bool operator!=(const Context &b) const {
    return !(*this == b);
  }
};


/*!
 * \brief execution context provides the information needed
 *  in runtime to actually execute the operation
 */
struct RunContext {
  /*!
   * \brief the stream of the device, can be NULL or Stream<gpu>* in GPU mode
   *  NOTE(review): kept as void* so this header does not depend on the
   *  concrete stream type; presumably the executing operator casts it
   *  back to the device stream — confirm at the call sites.
   */
  void *stream;
};

/*! \brief dynamic shape type */
typedef mshadow::TShape TShape;
/*! \brief storage container type */
typedef mshadow::TBlob TBlob;

} // namespace mxnet
#endif // MXNET_BASE_H_
10 changes: 10 additions & 0 deletions include/mxnet/c_api.h
Original file line number Diff line number Diff line change
Expand Up @@ -245,6 +245,16 @@ MXNET_DLL int MXSymbolCreateFromAtomicSymbol(AtomicSymbolCreator creator,
* \return 0 when success, -1 when failure happens
*/
MXNET_DLL int MXSymbolCreateVariable(const char *name, SymbolHandle *out);
/*!
 * \brief Create a Symbol by grouping a list of symbols together
 * \param num_symbols number of symbols to be grouped
 * \param symbols array of symbol handles to be grouped
 *  (NOTE(review): ownership of the input handles after grouping is not
 *  visible from this declaration — confirm against the implementation)
 * \param out pointer that receives the created symbol handle
 * \return 0 when success, -1 when failure happens
 */
MXNET_DLL int MXSymbolCreateGroup(mx_uint num_symbols,
                                  SymbolHandle *symbols,
                                  SymbolHandle *out);
/*!
* \brief Create symbol from config.
* \param cfg configuration string
Expand Down
1 change: 0 additions & 1 deletion include/mxnet/dag_engine.h
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,6 @@
#include <functional>
#include <vector>
#include "./base.h"
#include "./tensor_blob.h"

namespace mxnet {
/*!
Expand Down
1 change: 0 additions & 1 deletion include/mxnet/narray.h
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,6 @@
#include <memory>
#include "./base.h"
#include "./storage.h"
#include "./tensor_blob.h"
#include "./dag_engine.h"
// check c++11
#if DMLC_USE_CXX11 == 0
Expand Down
65 changes: 62 additions & 3 deletions include/mxnet/operator.h
Original file line number Diff line number Diff line change
Expand Up @@ -10,12 +10,70 @@
#include <dmlc/base.h>
#include <vector>
#include "./base.h"
#include "./tensor_blob.h"
#include "./static_operator.h"
#if DMLC_USE_CXX11 == 1
#include "./narray.h"
#include "./dag_engine.h"

#endif
namespace mxnet {
/*!
 * \brief StaticOperator interface
 *  StaticOperator is a stateful object that can be used to call forward and backprop
 *
 *  This interface relies on pre-allocated memory in TBlob; the caller needs to set
 *  the memory region in TBlob correctly before calling Forward and Backward
 *
 * \sa TBlob, TShape
 */
class StaticOperator {
 public:
  /*! \brief virtual destructor so derived operators are destroyed correctly
   *  when deleted through a StaticOperator pointer */
  virtual ~StaticOperator() {}
  /*!
   * \brief describe property of op
   * \return a bit map in int
   */
  virtual int DescribeProperty() const {
    // default: most layers only contain internal state
    return kContainInteralState;
  }
  /*!
   * \brief perform a forward operation of StaticOperator, save the output to TBlob
   * \param opt option on Forward such as whether this is training phase
   * \param ctx runtime context
   * \param in_data array of input data, it is const
   * \param out_data array of output data;
   *  the space of TBlob in out_data must be pre-allocated with InferShape
   */
  virtual void Forward(Option opt,
                       RunContext ctx,
                       const std::vector<TBlob> &in_data,
                       const std::vector<TBlob> &out_data) = 0;
  /*!
   * \brief perform a backward operation of the StaticOperator to get the gradient
   * \param ctx runtime context
   * \param grad_next the gradient value we get from output of the StaticOperator
   * \param in_data the array of input data
   * \param out_grad array of output gradient; there could be three possible TBlob
   *  in each element of the array
   * \param req request types of the gradient saving operation,
   *  one entry per output gradient; only kWriteInplace will change input data
   * \sa GradReqType
   */
  virtual void Backward(RunContext ctx,
                        const std::vector<TBlob> &grad_next,
                        const std::vector<TBlob> &in_data,
                        const std::vector<TBlob> &out_grad,
                        const std::vector<GradReqType> &req) = 0;
  /*!
   * \brief factory function, create a new StaticOperator
   * \param type the type name of StaticOperator to create
   * \param ctx the context (device) the StaticOperator should run on
   * \return a pointer to a newly created StaticOperator object
   *  (NOTE(review): caller presumably owns and must delete it — confirm
   *  against the factory implementation)
   */
  static StaticOperator *Create(const char *type, Context ctx);
};

#if DMLC_USE_CXX11 == 1
/*!
* \brief operator interface
* operator is an object can be scheduled by DAG engine directly.
Expand Down Expand Up @@ -74,5 +132,6 @@ class Operator {
*/
static Operator *CreateWrapper(StaticOperator *op, Context ctx);
}; // class operator
#endif
} // namespace mxnet
#endif // MXNET_OPERATOR_H_
2 changes: 1 addition & 1 deletion include/mxnet/registry.h
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@
#include <vector>
#include "./base.h"
#include "./narray.h"
#include "./symbol.h"
#include "./symbolic.h"

namespace mxnet {

Expand Down
84 changes: 0 additions & 84 deletions include/mxnet/static_graph.h

This file was deleted.

Loading