Merge pull request apache#35 from wjj19950828/paddle_frontend
Add greater_equal, isfinite and more ops
jiangjiajun committed Sep 16, 2021
2 parents 2ae2826 + 4f96c08 commit 0167b34
Showing 2 changed files with 54 additions and 0 deletions.
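For context, the converters added here are reached through the frontend's entry point, relay.frontend.from_paddle. Below is a minimal sketch exercising the new isfinite mapping end to end; the paddle.jit.save/load round-trip, the input name "x", and the shape_dict argument are assumptions about typical usage rather than part of this diff (the test suite wraps the equivalent call inside its verify_model helper):

import paddle
from tvm import relay

@paddle.jit.to_static
def isfinite(x):
    # Lowers to Paddle's "isfinite_v2" op, newly mapped to _op.isfinite below.
    return paddle.cast(paddle.isfinite(x), "int32")

x = paddle.rand([5, 5], dtype="float32")
isfinite(x)  # run once so to_static traces a concrete program

# Save and reload to obtain a TranslatedLayer, one of the inputs from_paddle accepts.
paddle.jit.save(isfinite, "./isfinite_model",
                input_spec=[paddle.static.InputSpec([5, 5], "float32", name="x")])
layer = paddle.jit.load("./isfinite_model")
mod, params = relay.frontend.from_paddle(layer, shape_dict={"x": [5, 5]})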
python/tvm/relay/frontend/paddlepaddle.py (19 additions, 0 deletions)
@@ -105,6 +105,8 @@ def convert_unary_op(g, op, block):

    op_map = {
        "isinf_v2": _op.isinf,
        "isfinite_v2": _op.isfinite,
        "isnan_v2": _op.isnan,
    }
    if op.type in op_map:
        unary_func = op_map[op.type]
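The hunk is truncated after the dispatch check. A plausible completion of convert_unary_op, inferred from the sibling converters in this file (the fallback branch and the output wiring are assumptions, not part of this diff):

    else:
        # Paddle op names such as "isinf" already match Relay's registry.
        unary_func = get_relay_op(op.type)
    out = unary_func(g.get_node(op.input("X")[0]))
    g.add_node(op.output("Out")[0], out)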
@@ -610,6 +612,7 @@ def convert_elementwise_op(g, op, block):
"elementwise_floordiv": "floor_divide",
"floor_mod": "floor_mod",
"equal": "equal",
"greater_equal": "greater_equal",
"greater_than": "greater",
"less_equal": "less_equal",
"less_than": "less",
@@ -978,6 +981,15 @@ def convert_logical_op(g, op, block):
    g.add_node(op.output("Out")[0], out)


def convert_logical_not(g, op, block):
    """Operator converter for logical_not op."""

    ipt0 = g.get_node(op.input("X")[0])
    op_func = get_relay_op(op.type)
    out = op_func(ipt0)
    g.add_node(op.output("Out")[0], out)
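logical_not gets a dedicated converter because it is unary; the binary logical ops (logical_and, logical_or, logical_xor) share convert_logical_op, whose body lies mostly outside this hunk. For contrast, a sketch of that binary counterpart reconstructed from the file's conventions (treat the exact body as an assumption):

def convert_logical_op(g, op, block):
    """Operator converter for binary logical ops."""

    ipt0 = g.get_node(op.input("X")[0])
    ipt1 = g.get_node(op.input("Y")[0])  # second operand; logical_not has none
    op_func = get_relay_op(op.type)
    out = op_func(ipt0, ipt1)
    g.add_node(op.output("Out")[0], out)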


def convert_logsumexp(g, op, block):
"""Operator converter for logsumexp."""

@@ -1859,15 +1871,20 @@ def convert_where(g, op, block):
"gather": convert_gather,
"gather_nd": convert_gather_nd,
"gelu": convert_gelu,
"greater_equal": convert_elementwise_op,
"greater_than": convert_elementwise_op,
"group_norm": convert_group_norm,
"hard_shrink": convert_hard_shrink,
"hard_sigmoid": convert_hard_sigmoid,
"hard_swish": convert_hard_swish,
"index_select": convert_index_select,
"isfinite": convert_unary_op,
"isfinite_v2": convert_unary_op,
"instance_norm": convert_instance_norm,
"isinf": convert_unary_op,
"isinf_v2": convert_unary_op,
"isnan": convert_unary_op,
"isnan_v2": convert_unary_op,
"layer_norm": convert_layer_norm,
"leaky_relu": convert_leaky_relu,
"less_equal": convert_elementwise_op,
@@ -1879,7 +1896,9 @@ def convert_where(g, op, block):
"log10": convert_unary_op,
"log1p": convert_log1p,
"logical_and": convert_logical_op,
"logical_not": convert_logical_not,
"logical_or": convert_logical_op,
"logical_xor": convert_logical_op,
"logsumexp": convert_logsumexp,
"matmul": convert_matmul,
"matmul_v2": convert_matmul,
tests/python/frontend/paddlepaddle/test_forward.py (35 additions, 0 deletions)
@@ -771,6 +771,7 @@ def forward(self, input1, input2):
"maximum",
"minimum",
"equal",
"greater_equal",
"greater_than",
"less_equal",
"less_than",
@@ -894,6 +895,16 @@ def index_select2(x, index):


@tvm.testing.uses_gpu
def test_forward_isfinite():
    @paddle.jit.to_static
    def isfinite(inputs):
        return paddle.cast(paddle.isfinite(inputs), "int32")

    input_shape = [5, 5]
    input_data = paddle.rand(input_shape, dtype="float32")
    verify_model(isfinite, input_data=input_data)


def test_forward_instance_norm():
    class InstanceNorm(nn.Layer):
        def __init__(self):
@@ -919,6 +930,17 @@ def isinf(inputs):
    verify_model(isinf, input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_isnan():
    @paddle.jit.to_static
    def isnan(inputs):
        return paddle.cast(paddle.isnan(inputs), "int32")

    input_shape = [5, 5]
    input_data = paddle.rand(input_shape, dtype="float32")
    verify_model(isnan, input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_interpolate():
    class TestBilinear(nn.Layer):
@@ -1014,15 +1036,28 @@ def forward(self, x, y):
            z = self.func(x, y)
        return paddle.cast(z, "int32")

    class LogicalOp_not(LogicalOp):
        @paddle.jit.to_static
        def forward(self, x):
            if self.out:
                out = paddle.to_tensor([True, True, True])
                z = self.func(x, out=out)
            else:
                z = self.func(x)
            return paddle.cast(z, "int32")

    op_list = [
        "logical_or",
        "logical_xor",
        "logical_and",
    ]
    x = paddle.to_tensor([True])
    y = paddle.to_tensor([True, False, True, False])
    for op_name in op_list:
        verify_model(LogicalOp(op_name, False), [x, y])
        verify_model(LogicalOp(op_name, True), [x, y])
    verify_model(LogicalOp_not("logical_not", False), [y])
    verify_model(LogicalOp_not("logical_not", True), [y])


@tvm.testing.uses_gpu