Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[luci-interpreter] Support RoPE operation #14104

Merged
merged 4 commits into from
Oct 1, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions compiler/luci-interpreter/pal/linux/KernelsToBuild.lst
Original file line number Diff line number Diff line change
Expand Up @@ -67,6 +67,7 @@ REGISTER_KERNEL(ResizeBilinear)
REGISTER_KERNEL(ResizeNearestNeighbor)
REGISTER_KERNEL(ReverseV2)
REGISTER_KERNEL(RmsNorm)
REGISTER_KERNEL(RoPE)
REGISTER_KERNEL(Rsqrt)
REGISTER_KERNEL(Select)
REGISTER_KERNEL(SelectV2)
Expand Down
7 changes: 7 additions & 0 deletions compiler/luci-interpreter/src/core/KernelParams.h
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@
#include <luci/IR/AttrPadding.h>
#include <luci/IR/AttrFusedActFunc.h>
#include <luci/IR/AttrMirrorPadMode.h>
#include <luci/IR/AttrRoPEMode.h>
#include <luci_interpreter/core/DataType.h>

#include <cstdint>
Expand All @@ -32,6 +33,7 @@ namespace luci_interpreter
using Activation = luci::FusedActFunc;
using Padding = luci::Padding;
using MirrorPadMode = luci::MirrorPadMode;
using RoPEMode = luci::RoPEMode;

struct AddParams
{
Expand Down Expand Up @@ -186,6 +188,11 @@ struct RmsNormParams
float epsilon;
};

// Parameters for the RoPE (Rotary Position Embedding) kernel.
struct RoPEParams
{
  // Rotation layout to apply; values come from luci::RoPEMode
  // (only GPT_NEOX is accepted by the kernel's configure()).
  RoPEMode mode;
};

struct ShapeParams
{
loco::DataType out_type;
Expand Down
99 changes: 99 additions & 0 deletions compiler/luci-interpreter/src/kernels/RoPE.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,99 @@
/*
* Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "kernels/RoPE.h"

#include "kernels/Utils.h"

namespace luci_interpreter
{
namespace kernels
{

// Wires the three inputs (activation, sin table, cos table), the single
// output, and the mode parameter into the generic kernel base class.
RoPE::RoPE(const Tensor *input, const Tensor *sin_table, const Tensor *cos_table, Tensor *output,
           const RoPEParams &params)
  : KernelWithParams<RoPEParams>({input, sin_table, cos_table}, {output}, params)
{
}

void RoPE::configure()
{
LUCI_INTERPRETER_CHECK(input()->shape().num_dims() == 4);
LUCI_INTERPRETER_CHECK(sin_table()->shape().dim(3) == input()->shape().dim(3));
LUCI_INTERPRETER_CHECK(cos_table()->shape().dim(3) == input()->shape().dim(3));

LUCI_INTERPRETER_CHECK(params().mode == RoPEMode::GPT_NEOX);

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

from seanshpark,
the head size (dim(3)) of input and the size of sin/cos should be the same.
so, it is necessary to check the condition here.

output()->resize(input()->shape());
}

// Dispatches on the input element type; only FLOAT32 is implemented.
void RoPE::execute() const
{
  const auto element_type = input()->element_type();
  if (element_type == DataType::FLOAT32)
  {
    evalFloat();
    return;
  }
  throw std::runtime_error("luci-rope Unsupported data type.");
}

// Applies GPT-NeoX-style rotary position embedding to a rank-4 FLOAT32
// input: each element at last-dim index i3 is rotated together with its
// partner at i3 + head_size / 2 using the sin/cos lookup tables.
void RoPE::evalFloat() const
{
  const auto input_shape = getTensorShape(input());

  const float *input_data = getTensorData<float>(input());
  const float *sin_table_data = getTensorData<float>(sin_table());
  const float *cos_table_data = getTensorData<float>(cos_table());
  float *output_data = getTensorData<float>(output());

  if (params().mode == RoPEMode::GPT_NEOX)
  {
    const int32_t i0_n = input_shape.Dims(0);
    const int32_t i1_n = input_shape.Dims(1); // multihead
    const int32_t i2_n = input_shape.Dims(2);
    const int32_t i3_n = input_shape.Dims(3); // head

    for (int32_t i0 = 0; i0 < i0_n; ++i0)
    {
      for (int32_t i1 = 0; i1 < i1_n; ++i1)
      {
        for (int32_t i2 = 0; i2 < i2_n; ++i2)
        {
          // Rotate each pair (i3, i3 + i3_n / 2) of the last dimension.
          for (int32_t i3 = 0; i3 < i3_n / 2; ++i3)
          {
            const int32_t offset = tflite::Offset(input_shape, i0, i1, i2, i3);
            const float x0 = input_data[offset];
            const float x1 = input_data[offset + i3_n / 2];

            // NOTE the tables are indexed only along the last dimension;
            // configure() guarantees their dim(3) matches the input's.
            output_data[offset] = x0 * cos_table_data[i3] - x1 * sin_table_data[i3];
            output_data[offset + i3_n / 2] =
              x0 * sin_table_data[i3 + i3_n / 2] + x1 * cos_table_data[i3 + i3_n / 2];
          }
        }
      }
    }
  }
  else
    throw std::runtime_error("luci-intp RoPE unsupported mode.");
}

} // namespace kernels
} // namespace luci_interpreter
49 changes: 49 additions & 0 deletions compiler/luci-interpreter/src/kernels/RoPE.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
/*
* Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef LUCI_INTERPRETER_KERNELS_ROPE_H
#define LUCI_INTERPRETER_KERNELS_ROPE_H

#include "core/Kernel.h"
#include "core/KernelParams.h"

namespace luci_interpreter
{
namespace kernels
{

// Kernel implementing the RoPE (Rotary Position Embedding) operation.
class RoPE : public KernelWithParams<RoPEParams>
{
public:
  // input: rank-4 activation tensor; sin_table / cos_table: rotation
  // tables whose last dimension must match the input's (checked in
  // configure()); output: result tensor, resized to the input's shape.
  RoPE(const Tensor *input, const Tensor *sin_table, const Tensor *cos_table, Tensor *output,
       const RoPEParams &params);

  const Tensor *input() const { return _inputs[0]; }
  const Tensor *sin_table() const { return _inputs[1]; }
  const Tensor *cos_table() const { return _inputs[2]; }
  Tensor *output() const { return _outputs[0]; }

  void configure() override;
  void execute() const override;

private:
  // FLOAT32 implementation; the only type execute() dispatches to.
  void evalFloat() const;
};

} // namespace kernels
} // namespace luci_interpreter

#endif // LUCI_INTERPRETER_KERNELS_ROPE_H
186 changes: 186 additions & 0 deletions compiler/luci-interpreter/src/kernels/RoPE.test.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,186 @@
/*
* Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "kernels/RoPE.h"
#include "kernels/TestUtils.h"
#include "luci_interpreter/TestMemoryManager.h"

namespace luci_interpreter
{
namespace kernels
{
namespace
{

using namespace testing;

// Test fixture providing a fresh TestMemoryManager for every test case.
class RoPETest : public ::testing::Test
{
protected:
  void SetUp() override { _memory_manager = std::make_unique<TestMemoryManager>(); }

  std::unique_ptr<IMemoryManager> _memory_manager;
};

// Positive path: GPT-NeoX rotation on a 1x1x1x4 input with head size 4.
// For half = 2: out[i] = x[i]*cos[i] - x[i+half]*sin[i] and
// out[i+half] = x[i]*sin[i+half] + x[i+half]*cos[i+half].
TEST_F(RoPETest, floatTest)
{
  Shape input_shape{1, 1, 1, 4};
  std::vector<float> input_data{0, 1.0, 2.0, 3.0};

  Shape sin_shape{1, 1, 1, 4};
  std::vector<float> sin_data{0.5, 1.0, 1.0, 0.5};

  Shape cos_shape{1, 1, 1, 4};
  std::vector<float> cos_data{1.0, 0.5, 0.5, 1.0};

  // Hand-computed with the formulas above.
  std::vector<float> ref_output_data{-1.0, -2.5, 1.0, 3.5};

  Tensor input_tensor =
    makeInputTensor<DataType::FLOAT32>(input_shape, input_data, _memory_manager.get());
  Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);

  Tensor sin_table = makeInputTensor<DataType::FLOAT32>(sin_shape, sin_data, _memory_manager.get());
  Tensor cos_table = makeInputTensor<DataType::FLOAT32>(cos_shape, cos_data, _memory_manager.get());

  RoPEParams params{};
  params.mode = RoPEMode::GPT_NEOX;

  RoPE kernel(&input_tensor, &sin_table, &cos_table, &output_tensor, params);
  kernel.configure();
  _memory_manager->allocate_memory(output_tensor);
  kernel.execute();

  EXPECT_THAT(extractTensorData<float>(output_tensor),
              ::testing::ElementsAreArray(ref_output_data));
  // Output shape must equal the input shape.
  EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray({1, 1, 1, 4}));
}

// Negative path: configure() must reject inputs that are not rank-4.
TEST_F(RoPETest, Unsupported_dims_NEG)
{
  Shape input_shape{1, 1, 3};
  std::vector<float> input_data{0, 1.0, 2.0};

  Shape sin_shape{1, 1, 3};
  std::vector<float> sin_data{0.5, 1.0, 1.0};

  Shape cos_shape{1, 1, 3};
  std::vector<float> cos_data{1.0, 0.5, 0.5};

  Tensor input_tensor =
    makeInputTensor<DataType::FLOAT32>(input_shape, input_data, _memory_manager.get());
  Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);

  Tensor sin_table = makeInputTensor<DataType::FLOAT32>(sin_shape, sin_data, _memory_manager.get());
  Tensor cos_table = makeInputTensor<DataType::FLOAT32>(cos_shape, cos_data, _memory_manager.get());

  RoPEParams params{};
  params.mode = RoPEMode::GPT_NEOX;

  RoPE kernel(&input_tensor, &sin_table, &cos_table, &output_tensor, params);
  EXPECT_ANY_THROW(kernel.configure());
}

// Negative path: configure() must reject any mode other than GPT_NEOX.
TEST_F(RoPETest, Unsupported_mode_NEG)
{
  Shape input_shape{1, 1, 1, 4};
  std::vector<float> input_data{0, 1.0, 2.0, 3.0};

  Shape sin_shape{1, 1, 1, 4};
  std::vector<float> sin_data{0.5, 1.0, 1.0, 0.5};

  Shape cos_shape{1, 1, 1, 4};
  std::vector<float> cos_data{1.0, 0.5, 0.5, 1.0};

  Tensor input_tensor =
    makeInputTensor<DataType::FLOAT32>(input_shape, input_data, _memory_manager.get());
  Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);

  Tensor sin_table = makeInputTensor<DataType::FLOAT32>(sin_shape, sin_data, _memory_manager.get());
  Tensor cos_table = makeInputTensor<DataType::FLOAT32>(cos_shape, cos_data, _memory_manager.get());

  RoPEParams params{};
  params.mode = RoPEMode::GPT_J; // unsupported mode

  RoPE kernel(&input_tensor, &sin_table, &cos_table, &output_tensor, params);
  EXPECT_ANY_THROW(kernel.configure());
}

// Negative path: sin table's last dimension must equal the input's dim(3).
TEST_F(RoPETest, Invalid_input_sin_table_NEG)
{
  Shape input_shape{1, 1, 1, 4};
  std::vector<float> input_data{0, 1.0, 2.0, 3.0};

  Shape sin_shape{1, 1, 1, 3}; // mismatched head size
  std::vector<float> sin_data{0.5, 1.0, 1.0};

  Shape cos_shape{1, 1, 1, 4};
  std::vector<float> cos_data{1.0, 0.5, 0.5, 1.0};

  Tensor input_tensor =
    makeInputTensor<DataType::FLOAT32>(input_shape, input_data, _memory_manager.get());
  Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);

  Tensor sin_table = makeInputTensor<DataType::FLOAT32>(sin_shape, sin_data, _memory_manager.get());
  Tensor cos_table = makeInputTensor<DataType::FLOAT32>(cos_shape, cos_data, _memory_manager.get());

  RoPEParams params{};
  params.mode = RoPEMode::GPT_NEOX;

  RoPE kernel(&input_tensor, &sin_table, &cos_table, &output_tensor, params);
  EXPECT_ANY_THROW(kernel.configure());
}

// Negative path: cos table's last dimension must equal the input's dim(3).
TEST_F(RoPETest, Invalid_input_cos_table_NEG)
{
  Shape input_shape{1, 1, 1, 4};
  std::vector<float> input_data{0, 1.0, 2.0, 3.0};

  Shape sin_shape{1, 1, 1, 4};
  std::vector<float> sin_data{0.5, 1.0, 1.0, 0.5};

  Shape cos_shape{1, 1, 1, 3}; // mismatched head size
  std::vector<float> cos_data{1.0, 0.5, 0.5};

  Tensor input_tensor =
    makeInputTensor<DataType::FLOAT32>(input_shape, input_data, _memory_manager.get());
  Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);

  Tensor sin_table = makeInputTensor<DataType::FLOAT32>(sin_shape, sin_data, _memory_manager.get());
  Tensor cos_table = makeInputTensor<DataType::FLOAT32>(cos_shape, cos_data, _memory_manager.get());

  RoPEParams params{};
  params.mode = RoPEMode::GPT_NEOX;

  RoPE kernel(&input_tensor, &sin_table, &cos_table, &output_tensor, params);
  EXPECT_ANY_THROW(kernel.configure());
}

} // namespace
} // namespace kernels
} // namespace luci_interpreter
Loading