
[MLU] uniform_random op for mlu #39450

Merged 1 commit on Feb 11, 2022
paddle/fluid/operators/mlu/mlu_baseop.cc (5 changes: 3 additions & 2 deletions)

@@ -921,11 +921,12 @@ MLUCnnlTrigonDesc::~MLUCnnlTrigonDesc() {

 /* static */ void MLUCnnl::RandomUniform(
     const ExecutionContext& ctx, const int num, const cnnlDataType_t data_type,
-    const cnnlRandGenerator_t mlu_generator, void* output) {
+    const cnnlRandGenerator_t mlu_generator, const float min, const float max,
+    void* output) {
   cnnlHandle_t handle = GetHandleFromCTX(ctx);

   PADDLE_ENFORCE_MLU_SUCCESS(cnnlRandGenerateUniform(
-      handle, mlu_generator, data_type, nullptr, num, 0, 1, output));
+      handle, mlu_generator, data_type, nullptr, num, min, max, output));
 }

 /* static */ void MLUCnnl::TopK(
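In effect, the wrapper previously always sampled from [0, 1) because the range arguments to cnnlRandGenerateUniform were hard-coded; the two new parameters let callers forward the op's min/max attributes. A minimal NumPy sketch of the behavioral difference (illustrative only, not Paddle or CNNL code):

import numpy as np

rng = np.random.default_rng(10)

# Before this change: the sampling range was hard-coded to (0, 1).
before = rng.uniform(0.0, 1.0, size=1000)

# After this change: the caller-supplied (min, max) is forwarded,
# e.g. the (-5.0, 10.0) used by the tests below.
after = rng.uniform(-5.0, 10.0, size=1000)

assert 0.0 <= before.min() and before.max() < 1.0
assert -5.0 <= after.min() and after.max() < 10.0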
paddle/fluid/operators/mlu/mlu_baseop.h (2 changes: 1 addition & 1 deletion)

@@ -512,7 +512,7 @@ class MLUCnnl {
   static void RandomUniform(const ExecutionContext& ctx, const int num,
                             const cnnlDataType_t data_type,
                             const cnnlRandGenerator_t mlu_generator,
-                            void* output);
+                            const float min, const float max, void* output);

   static void Cumsum(const ExecutionContext& ctx, const int axis,
                      const bool exclusive, const bool reverse,
paddle/fluid/operators/uniform_random_op_mlu.cc (75 changes: 75 additions & 0 deletions, new file)

@@ -0,0 +1,75 @@
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/uniform_random_op.h"
#include "paddle/fluid/operators/mlu/mlu_baseop.h"

namespace paddle {
namespace operators {

template <typename T>
class MLUUniformRandomKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &ctx) const override {
    framework::Tensor *tensor = nullptr;
    auto out_var = ctx.OutputVar("Out");

    // The output shape may come from a ShapeTensor input, a list of shape
    // tensors, or the "shape" attribute, in that order of precedence.
    std::vector<int64_t> new_shape;
    auto list_new_shape_tensor =
        ctx.MultiInput<framework::Tensor>("ShapeTensorList");
    if (list_new_shape_tensor.size() > 0 || ctx.HasInput("ShapeTensor")) {
      if (ctx.HasInput("ShapeTensor")) {
        auto *shape_tensor = ctx.Input<framework::Tensor>("ShapeTensor");
        new_shape = GetNewDataFromShapeTensor(shape_tensor);
      } else if (list_new_shape_tensor.size() > 0) {
        new_shape = GetNewDataFromShapeTensorList(list_new_shape_tensor);
      }
    }

    if (out_var->IsType<pten::SelectedRows>()) {
      auto *selected_rows = out_var->GetMutable<pten::SelectedRows>();
      tensor = selected_rows->mutable_value();
      auto shape = ctx.Attr<std::vector<int64_t>>("shape");
      if (!new_shape.empty()) shape = new_shape;
      tensor->Resize(framework::make_ddim(shape));
      selected_rows->mutable_rows()->reserve(shape[0]);
    } else if (out_var->IsType<framework::LoDTensor>()) {
      tensor = out_var->GetMutable<framework::LoDTensor>();
      if (!new_shape.empty()) tensor->Resize(framework::make_ddim(new_shape));
    } else {
      PADDLE_THROW(platform::errors::InvalidArgument(
          "Expected type of Output(out) in uniform_random_op to be LoDTensor "
          "or SelectedRows, but got unsupported type: %s.",
          framework::ToTypeName(out_var->Type())));
    }

    tensor->mutable_data<T>(ctx.GetPlace());
    int64_t size = tensor->numel();
    const float min = static_cast<float>(ctx.Attr<float>("min"));
    const float max = static_cast<float>(ctx.Attr<float>("max"));
    unsigned int seed = static_cast<unsigned int>(ctx.Attr<int>("seed"));
    // Build the MLU random generator descriptor with the given seed.
    MLUCnnlRandomGeneratorDesc random_desc(/*is_mlu200=*/false, seed);
    cnnlDataType_t data_type = ToCnnlDataType(tensor->type());
    MLUCnnl::RandomUniform(ctx, size, /*data_type=*/data_type,
                           random_desc.get(), min, max, GetBasePtr(tensor));
  }
};

}  // namespace operators
}  // namespace paddle

REGISTER_OP_MLU_KERNEL(uniform_random,
                       paddle::operators::MLUUniformRandomKernel<float>);
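A minimal sketch of exercising the new kernel from Python, assuming a Paddle build with MLU support and an MLU device 0 (not part of this PR):

import paddle

paddle.enable_static()

main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    # paddle.uniform lowers to the uniform_random op registered above.
    x = paddle.uniform(shape=[1000, 784], min=-5.0, max=10.0, seed=10)

exe = paddle.static.Executor(paddle.MLUPlace(0))
out, = exe.run(main_prog, fetch_list=[x])
print(out.min(), out.max())  # samples should lie within [-5, 10)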
python/paddle/fluid/tests/unittests/mlu/test_uniform_random_op_mlu.py (108 changes: 108 additions & 0 deletions, new file)

@@ -0,0 +1,108 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import sys
import unittest
import numpy as np
sys.path.append("..")
from op_test import OpTest
import paddle
import paddle.fluid.core as core
from paddle.fluid.op import Operator
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
from test_uniform_random_op import TestUniformRandomOp, TestUniformRandomOpSelectedRows

paddle.enable_static()


def output_hist(out):
    # Bucket the samples into 10 equal-width bins over [-5, 10]; a uniform
    # distribution should put ~10% of the mass in each bin.
    hist, _ = np.histogram(out, range=(-5, 10))
    hist = hist.astype("float32")
    hist /= float(out.size)
    prob = 0.1 * np.ones((10))
    return hist, prob


class TestMLUUniformRandomOp(OpTest):
    def setUp(self):
        self.set_mlu()
        self.op_type = "uniform_random"
        self.init_dtype()
        self.inputs = {}
        self.init_attrs()
        self.outputs = {"Out": np.zeros((1000, 784)).astype(self.dtype)}

    def init_attrs(self):
        self.attrs = {
            "shape": [1000, 784],
            "min": -5.0,
            "max": 10.0,
            "seed": 10
        }
        self.output_hist = output_hist

    def set_mlu(self):
        self.__class__.use_mlu = True
        self.place = paddle.MLUPlace(0)

    def init_dtype(self):
        self.dtype = np.float32

    def test_check_output(self):
        self.check_output_customized(self.verify_output, self.place)

    def verify_output(self, outs):
        hist, prob = self.output_hist(np.array(outs[0]))
        self.assertTrue(
            np.allclose(
                hist, prob, rtol=0, atol=0.01), "hist: " + str(hist))


class TestMLUUniformRandomOpSelectedRows(unittest.TestCase):
    def get_places(self):
        places = [core.CPUPlace()]
        if core.is_compiled_with_mlu():
            places.append(core.MLUPlace(0))
        return places

    def test_check_output(self):
        for place in self.get_places():
            self.check_with_place(place)

    def check_with_place(self, place):
        scope = core.Scope()
        out = scope.var("X").get_selected_rows()
        paddle.seed(10)
        op = Operator(
            "uniform_random",
            Out="X",
            shape=[1000, 784],
            min=-5.0,
            max=10.0,
            seed=10)
        op.run(scope, place)
        self.assertEqual(out.get_tensor().shape(), [1000, 784])
        hist, prob = output_hist(np.array(out.get_tensor()))
        self.assertTrue(
            np.allclose(
                hist, prob, rtol=0, atol=0.01), "hist: " + str(hist))


if __name__ == "__main__":
    unittest.main()
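The histogram check used by both tests flags any gross deviation from uniformity without needing golden outputs. A standalone sketch of the same assertion against NumPy's sampler (illustrative only, no MLU required):

import numpy as np

def output_hist(out):
    # 10 equal-width bins over [-5, 10]; a uniform sample puts ~10% in each.
    hist, _ = np.histogram(out, range=(-5, 10))
    hist = hist.astype("float32")
    hist /= float(out.size)
    prob = 0.1 * np.ones((10))
    return hist, prob

# With 1000 * 784 samples, every bin frequency lands well within atol=0.01.
samples = np.random.default_rng(10).uniform(-5.0, 10.0, size=(1000, 784))
hist, prob = output_hist(samples)
assert np.allclose(hist, prob, rtol=0, atol=0.01), "hist: " + str(hist)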