From 47e09a4ec82173c0737a7d30a695c811dd9180f7 Mon Sep 17 00:00:00 2001 From: Seungbaek Hong Date: Fri, 30 Aug 2024 16:00:02 +0900 Subject: [PATCH] [Layer] add "divide layer" - added "divide layer" for division. **Self evaluation:** 1. Build test: [X]Passed [ ]Failed [ ]Skipped 2. Run test: [X]Passed [ ]Failed [ ]Skipped Signed-off-by: Seungbaek Hong --- api/ccapi/include/layer.h | 9 ++ api/nntrainer-api-common.h | 1 + nntrainer/app_context.cpp | 3 + nntrainer/layers/divide_layer.cpp | 52 +++++++++ nntrainer/layers/divide_layer.h | 102 ++++++++++++++++++ nntrainer/layers/meson.build | 1 + test/input_gen/genModelTests_v2.py | 13 +++ test/unittest/layers/meson.build | 1 + .../layers/unittest_layers_divide.cpp | 28 +++++ test/unittest/models/unittest_models.cpp | 21 ++++ 10 files changed, 231 insertions(+) create mode 100644 nntrainer/layers/divide_layer.cpp create mode 100644 nntrainer/layers/divide_layer.h create mode 100644 test/unittest/layers/unittest_layers_divide.cpp diff --git a/api/ccapi/include/layer.h b/api/ccapi/include/layer.h index 183ed353f..34e9ea8d3 100644 --- a/api/ccapi/include/layer.h +++ b/api/ccapi/include/layer.h @@ -40,6 +40,7 @@ enum LayerType { LAYER_ADD = ML_TRAIN_LAYER_TYPE_ADD, /**< Add Layer type */ LAYER_SUBTRACT = ML_TRAIN_LAYER_TYPE_SUBTRACT, /**< Subtract Layer type */ LAYER_MULTIPLY = ML_TRAIN_LAYER_TYPE_MULTIPLY, /**< Multiply Layer type */ + LAYER_DIVIDE = ML_TRAIN_LAYER_TYPE_DIVIDE, /**< Divide Layer type */ LAYER_FC = ML_TRAIN_LAYER_TYPE_FC, /**< Fully Connected Layer type */ LAYER_SWIGLU = ML_TRAIN_LAYER_TYPE_SWIGLU, /**< Swiglu Layer type */ LAYER_BN = ML_TRAIN_LAYER_TYPE_BN, /**< Batch Normalization Layer type */ @@ -332,6 +333,14 @@ MultiplyLayer(const std::vector &properties = {}) { return createLayer(LayerType::LAYER_MULTIPLY, properties); } +/** + * @brief Helper function to create divide layer + */ +inline std::unique_ptr +DivideLayer(const std::vector &properties = {}) { + return createLayer(LayerType::LAYER_DIVIDE, 
properties); +} + /** * @brief Helper function to create fully connected layer */ diff --git a/api/nntrainer-api-common.h b/api/nntrainer-api-common.h index e749a8c8f..35ae0a3bd 100644 --- a/api/nntrainer-api-common.h +++ b/api/nntrainer-api-common.h @@ -68,6 +68,7 @@ typedef enum { ML_TRAIN_LAYER_TYPE_ADD = 32, /**< Add Layer type (Since 9.0)*/ ML_TRAIN_LAYER_TYPE_SUBTRACT = 33, /**< Subtract Layer type (Since 9.0)*/ ML_TRAIN_LAYER_TYPE_MULTIPLY = 34, /**< Multiply Layer type (Since 9.0)*/ + ML_TRAIN_LAYER_TYPE_DIVIDE = 35, /**< Divide Layer type (Since 9.0)*/ ML_TRAIN_LAYER_TYPE_PREPROCESS_FLIP = 300, /**< Preprocess flip Layer (Since 6.5) */ ML_TRAIN_LAYER_TYPE_PREPROCESS_TRANSLATE = diff --git a/nntrainer/app_context.cpp b/nntrainer/app_context.cpp index 53d7915cb..7a2877320 100644 --- a/nntrainer/app_context.cpp +++ b/nntrainer/app_context.cpp @@ -42,6 +42,7 @@ #include #include #include +#include #include #include #include @@ -262,6 +263,8 @@ static void add_default_object(AppContext &ac) { LayerType::LAYER_SUBTRACT); ac.registerFactory(nntrainer::createLayer, MultiplyLayer::type, LayerType::LAYER_MULTIPLY); + ac.registerFactory(nntrainer::createLayer, DivideLayer::type, + LayerType::LAYER_DIVIDE); ac.registerFactory(nntrainer::createLayer, FullyConnectedLayer::type, LayerType::LAYER_FC); ac.registerFactory(nntrainer::createLayer, diff --git a/nntrainer/layers/divide_layer.cpp b/nntrainer/layers/divide_layer.cpp new file mode 100644 index 000000000..6f378e7bd --- /dev/null +++ b/nntrainer/layers/divide_layer.cpp @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: Apache-2.0 +/** + * Copyright (C) 2024 SeungBaek Hong + * + * @file divide_layer.cpp + * @date 10 Oct 2024 + * @see https://github.com/nnstreamer/nntrainer + * @author SeungBaek Hong + * @bug No known bugs except for NYI items + * @brief This is div layer class (operation layer) + * + */ + +#include +#include +#include +#include +#include + +#include + +namespace nntrainer { + +void 
DivideLayer::finalize(InitLayerContext &context) { + context.setOutputDimensions({context.getInputDimensions()[0]}); +} + +void DivideLayer::forwarding_operation(const Tensor &input0, + const Tensor &input1, Tensor &hidden) { + input0.divide(input1, hidden); +} + +void DivideLayer::calcDerivative(RunLayerContext &context) { + context.getOutgoingDerivative(0).copy( + context.getIncomingDerivative(SINGLE_INOUT_IDX) + .divide(context.getInput(1))); + + context.getOutgoingDerivative(1).copy( + context.getIncomingDerivative(SINGLE_INOUT_IDX) + .multiply(context.getInput(0).multiply(-1)) + .divide(context.getInput(1).pow(2))); +} + +void DivideLayer::setProperty(const std::vector &values) { + auto remain_props = loadProperties(values, divide_props); + if (!remain_props.empty()) { + std::string msg = "[DivideLayer] Unknown Layer Properties count " + + std::to_string(values.size()); + throw exception::not_supported(msg); + } +} +} /* namespace nntrainer */ diff --git a/nntrainer/layers/divide_layer.h b/nntrainer/layers/divide_layer.h new file mode 100644 index 000000000..9a2bc85c9 --- /dev/null +++ b/nntrainer/layers/divide_layer.h @@ -0,0 +1,102 @@ +// SPDX-License-Identifier: Apache-2.0 +/** + * Copyright (C) 2024 SeungBaek Hong + * + * @file divide_layer.h + * @date 10 Oct 2024 + * @see https://github.com/nnstreamer/nntrainer + * @author SeungBaek Hong + * @bug No known bugs except for NYI items + * @brief This is div layer class (operation layer) + * + */ + +#ifndef __DIVIDE_LAYER_H__ +#define __DIVIDE_LAYER_H__ +#ifdef __cplusplus + +#include +#include +#include + +namespace nntrainer { + +/** + * @class Divide Layer + * @brief Divide Layer + */ +class DivideLayer : public BinaryOperationLayer { +public: + /** + * @brief Constructor of Divide Layer + */ + DivideLayer() : BinaryOperationLayer(), divide_props(props::Print()) {} + + /** + * @brief Destructor of Divide Layer + */ + ~DivideLayer(){}; + + /** + * @brief Move constructor of Divide Layer. 
+ * @param[in] DivideLayer && + */ + DivideLayer(DivideLayer &&rhs) noexcept = default; + + /** + * @brief Move assignment operator. + * @param[in] rhs DivideLayer to be moved. + */ + DivideLayer &operator=(DivideLayer &&rhs) = default; + + /** + * @copydoc Layer::finalize(InitLayerContext &context) + */ + void finalize(InitLayerContext &context) final; + + /** + * @brief forwarding operation for divide + * + * @param input0 input tensor 0 + * @param input1 input tensor 1 + * @param hidden tensor to store the result of division + */ + void forwarding_operation(const Tensor &input0, const Tensor &input1, + Tensor &hidden) final; + + /** + * @copydoc Layer::calcDerivative(RunLayerContext &context) + */ + void calcDerivative(RunLayerContext &context) final; + + /** + * @copydoc bool supportBackwarding() const + */ + bool supportBackwarding() const final { return true; }; + + /** + * @copydoc Layer::exportTo(Exporter &exporter, ml::train::ExportMethods + * method) + */ + void exportTo(Exporter &exporter, + const ml::train::ExportMethods &method) const final {} + + /** + * @copydoc Layer::setProperty(const std::vector &values) + */ + void setProperty(const std::vector &values) final; + + /** + * @copydoc Layer::getType() + */ + const std::string getType() const final { return DivideLayer::type; }; + + std::tuple divide_props; + + inline static const std::string type = "divide"; +}; + +} // namespace nntrainer + +#endif /* __cplusplus */ +#endif /* __DIVIDE_LAYER_H__ */ diff --git a/nntrainer/layers/meson.build b/nntrainer/layers/meson.build index 91fc66b95..1218aa9b0 100644 --- a/nntrainer/layers/meson.build +++ b/nntrainer/layers/meson.build @@ -8,6 +8,7 @@ layer_sources = [ 'add_layer.cpp', 'subtract_layer.cpp', 'multiply_layer.cpp', + 'divide_layer.cpp', 'addition_layer.cpp', 'attention_layer.cpp', 'mol_attention_layer.cpp', diff --git a/test/input_gen/genModelTests_v2.py b/test/input_gen/genModelTests_v2.py index 768fddd6d..b9b03cebe 100644 ---
a/test/input_gen/genModelTests_v2.py +++ b/test/input_gen/genModelTests_v2.py @@ -505,6 +505,19 @@ def forward(self, inputs, labels): return out, loss +class DivideOperation(torch.nn.Module): + def __init__(self): + super().__init__() + self.fc = torch.nn.Linear(2, 2) + self.loss = torch.nn.MSELoss() + + def forward(self, inputs, labels): + out = self.fc(inputs[0]) + out = inputs[0] / out + loss = self.loss(out, labels[0]) + return out, loss + + if __name__ == "__main__": record_v2( ReduceMeanLast(), diff --git a/test/unittest/layers/meson.build b/test/unittest/layers/meson.build index e86590e68..7d6bb3b49 100644 --- a/test/unittest/layers/meson.build +++ b/test/unittest/layers/meson.build @@ -50,6 +50,7 @@ test_target = [ 'unittest_layers_add.cpp', 'unittest_layers_subtract.cpp', 'unittest_layers_multiply.cpp', + 'unittest_layers_divide.cpp', 'unittest_layers_multiout.cpp', 'unittest_layers_rnn.cpp', 'unittest_layers_rnncell.cpp', diff --git a/test/unittest/layers/unittest_layers_divide.cpp b/test/unittest/layers/unittest_layers_divide.cpp new file mode 100644 index 000000000..64b926697 --- /dev/null +++ b/test/unittest/layers/unittest_layers_divide.cpp @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: Apache-2.0 +/** + * Copyright (C) 2024 SeungBaek Hong + * + * @file unittest_layers_divide.cpp + * @date 30 August 2024 + * @brief Divide Layer Test + * @see https://github.com/nnstreamer/nntrainer + * @author SeungBaek Hong + * @bug No known bugs except for NYI items + */ +#include + +#include + +#include +#include + +auto semantic_divide = LayerSemanticsParamType( + nntrainer::createLayer, nntrainer::DivideLayer::type, + {}, LayerCreateSetPropertyOptions::AVAILABLE_FROM_APP_CONTEXT, false, 1); + +auto semantic_divide_multi = LayerSemanticsParamType( + nntrainer::createLayer, nntrainer::DivideLayer::type, + {}, LayerCreateSetPropertyOptions::AVAILABLE_FROM_APP_CONTEXT, false, 2); + +GTEST_PARAMETER_TEST(Divide, LayerSemantics, + ::testing::Values(semantic_divide, 
semantic_divide_multi)); diff --git a/test/unittest/models/unittest_models.cpp b/test/unittest/models/unittest_models.cpp index 0f7a931a0..8de98a044 100644 --- a/test/unittest/models/unittest_models.cpp +++ b/test/unittest/models/unittest_models.cpp @@ -929,6 +929,25 @@ static std::unique_ptr makeMultiplyOperation() { return nn; } +static std::unique_ptr makeDivideOperation() { + std::unique_ptr nn(new NeuralNetwork()); + + auto outer_graph = + makeGraph({{"input", {"name=in", "input_shape=1:1:2"}}, + {"fully_connected", {"name=fc", "unit=2", "input_layers=in"}}, + {"divide", {"name=divide_layer", "input_layers=in,fc"}}, + {"mse", {"name=loss", "input_layers=divide_layer"}}}); + + for (auto &node : outer_graph) { + nn->addLayer(node); + } + + nn->setProperty({"batch_size=1"}); + nn->setOptimizer(ml::train::createOptimizer("sgd", {"learning_rate=0.1"})); + + return nn; +} + GTEST_PARAMETER_TEST( model, nntrainerModelTest, ::testing::ValuesIn({ @@ -1005,6 +1024,8 @@ GTEST_PARAMETER_TEST( ModelTestOption::ALL_V2), mkModelTc_V2(makeMultiplyOperation, "multiply_operation", ModelTestOption::ALL_V2), + mkModelTc_V2(makeDivideOperation, "divide_operation", + ModelTestOption::ALL_V2), }), [](const testing::TestParamInfo &info) -> const auto & { return std::get<1>(info.param); });