[Cherry-Pick] Fix expand_sig infershape BUG under static graph mode and NeedTransformPlace behavior if skip_transform is set in yaml #41973

Merged · 2 commits · Apr 20, 2022
16 changes: 11 additions & 5 deletions paddle/phi/api/lib/data_transform.cc
@@ -37,11 +37,17 @@ inline bool NeedTransformDataType(const DataType& input,
inline bool NeedTransformPlace(const paddle::platform::Place& input,
const Backend& target,
const TransformFlag& transform_flag) {
bool ret =
input.GetType() == AllocationType::GPUPINNED ||
(transform_flag.need_trans_backend() && target != Backend::ALL_BACKEND &&
phi::TransToPhiBackend(input) !=
(target != Backend::GPUDNN ? target : Backend::GPU));
// NOTE(dev): The default value of TransformFlag is True. If it is set to
// False somewhere, such as api.yaml or backward.yaml, it means we should
// skip data transform, because "stop_transform_" has the highest priority.
if (!transform_flag.need_trans_backend()) {
return false;
}
bool ret = input.GetType() == AllocationType::GPUPINNED ||
(target != Backend::ALL_BACKEND &&
phi::TransToPhiBackend(input) !=
(target != Backend::GPUDNN ? target : Backend::GPU));
return ret;
}

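The reordering above is the core of the NeedTransformPlace fix: the need_trans_backend() flag is now checked before anything else, so an input marked with skip_transform in api.yaml is left alone even when it lives in CUDA pinned memory, whereas the old code forced a transform for any GPUPINNED allocation regardless of the flag. Below is a minimal Python sketch of the resulting user-visible behavior, mirroring the full_like unit test added later in this diff; it assumes a CUDA build of Paddle with this patch applied.

import numpy as np
import paddle

if paddle.is_compiled_with_cuda():
    # x lives in CUDA pinned memory; with skip_transform set for x in
    # api.yaml, full_like runs without first copying x to the GPU.
    x = paddle.to_tensor([1., 2., 3., 4.], place=paddle.CUDAPinnedPlace())
    out = paddle.full_like(x, 1.)
    assert (out.numpy() == np.ones([4], dtype=np.float32)).all()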
11 changes: 11 additions & 0 deletions paddle/phi/ops/compat/expand_sig.cc
@@ -17,6 +17,11 @@
namespace phi {

KernelSignature ExpandOpArgumentMapping(const ArgumentMappingContext& ctx) {
const auto& shape = paddle::any_cast<std::vector<int>>(ctx.Attr("shape"));
// Infer output shape by Attr("shape") in CompileTime if it is specified.
if (!ctx.IsRuntime() && !shape.empty()) {
return KernelSignature("expand", {"X"}, {"shape"}, {"Out"});
}
if (ctx.HasInput("Shape")) {
return KernelSignature("expand", {"X"}, {"Shape"}, {"Out"});
} else if (ctx.InputSize("expand_shapes_tensor") > 0) {
@@ -27,6 +32,12 @@ KernelSignature ExpandOpArgumentMapping(const ArgumentMappingContext& ctx) {
}

KernelSignature ExpandGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
const auto& shape = paddle::any_cast<std::vector<int>>(ctx.Attr("shape"));
// Infer output shape by Attr("shape") in CompileTime if it is specified.
if (!ctx.IsRuntime() && !shape.empty()) {
return KernelSignature(
"expand_grad", {"X", "Out@GRAD"}, {"shape"}, {"X@GRAD"});
}
if (ctx.HasInput("Shape")) {
return KernelSignature("expand_grad",
{"X", GradVarName("Out")},
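Both argument mappings now short-circuit at compile time: if the program is only being built (ctx.IsRuntime() is false) and the "shape" attribute is non-empty, the signature binds the attribute rather than the Shape input or the expand_shapes_tensor list, so infer-shape can work from the attribute alone. A minimal static-graph Python sketch of the case exercised by the unit test added below (assuming this patch; Tensor entries in the target shape are recorded as -1 placeholders in the attribute, hence the expected -1 dims):

import paddle
from paddle.static import Program, program_guard

paddle.enable_static()
with program_guard(Program(), Program()):
    x = paddle.static.data(shape=[-1, 1, 3], name='x')
    fake_var = paddle.randn([2, 3])
    # Target shape mixes a literal -1 with Tensor-valued dimensions.
    target_shape = [-1, paddle.shape(fake_var)[0], paddle.shape(fake_var)[1]]
    out = paddle.expand(x, shape=target_shape)
    print(list(out.shape))  # expected: [-1, -1, -1]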
12 changes: 12 additions & 0 deletions python/paddle/fluid/tests/unittests/test_expand_v2_op.py
@@ -231,6 +231,18 @@ def test_api(self):
assert np.array_equal(res_3, np.tile(input, (1, 1)))


class TestExpandInferShape(unittest.TestCase):
def test_shape_with_var(self):
with program_guard(Program(), Program()):
x = paddle.static.data(shape=[-1, 1, 3], name='x')
fake_var = paddle.randn([2, 3])
target_shape = [
-1, paddle.shape(fake_var)[0], paddle.shape(fake_var)[1]
]
out = paddle.expand(x, shape=target_shape)
self.assertListEqual(list(out.shape), [-1, -1, -1])


if __name__ == "__main__":
paddle.enable_static()
unittest.main()
15 changes: 15 additions & 0 deletions python/paddle/fluid/tests/unittests/test_full_like_op.py
@@ -22,6 +22,7 @@
import numpy as np
from op_test import OpTest
from paddle.fluid.framework import convert_np_dtype_to_dtype_
from paddle.fluid.framework import _test_eager_guard


class TestFullOp(unittest.TestCase):
@@ -133,5 +134,19 @@ def init_data(self):
self.dtype = np.int64


@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFullLikeOp4(unittest.TestCase):
def test_skip_data_transform(self):
paddle.disable_static()
with _test_eager_guard():
x = paddle.to_tensor(
[1., 2., 3., 4.], place=paddle.CUDAPinnedPlace())
out = paddle.full_like(x, 1.)
self.assertTrue(
(out.numpy() == np.ones([4]).astype(np.float32)).all(), True)
paddle.enable_static()


if __name__ == "__main__":
unittest.main()
4 changes: 3 additions & 1 deletion python/paddle/utils/code_gen/api.yaml
@@ -448,7 +448,7 @@
- api : deformable_conv
args : (Tensor x, Tensor offset, Tensor filter, Tensor mask, int[] strides, int[] paddings, int[] dilations, int deformable_groups, int groups, int im2col_step)
output : Tensor(out)
infer_meta :
infer_meta :
func : DeformableConvInferMeta
kernel :
func : deformable_conv
@@ -763,6 +763,8 @@
param : [x, value, dtype]
data_type : dtype > x
backend : place > x
data_transform :
skip_transform : x

- api : gather
args : (Tensor x, Tensor index, Scalar axis=0)