diff --git a/ivy/functional/frontends/paddle/nn/functional/activation.py b/ivy/functional/frontends/paddle/nn/functional/activation.py
deleted file mode 100644
index 6826012fb0b93..0000000000000
--- a/ivy/functional/frontends/paddle/nn/functional/activation.py
+++ /dev/null
@@ -1,211 +0,0 @@
-# local
-import ivy
-from ivy.func_wrapper import with_supported_dtypes
-from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back
-from ivy.functional.frontends.paddle.tensor.math import tanh as paddle_tanh
-
-
-@with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle")
-@to_ivy_arrays_and_back
-def selu(
-    x,
-    /,
-    *,
-    alpha=1.6732632423543772848170429916717,
-    scale=1.0507009873554804934193349852946,
-    name=None,
-):
-    if scale <= 1.0:
-        raise ValueError(f"The scale must be greater than 1.0. Received: {scale}.")
-
-    if alpha < 0:
-        raise ValueError(f"The alpha must be no less than zero. Received: {alpha}.")
-
-    ret = ivy.where(x > 0, x, alpha * ivy.expm1(x))
-    arr = scale * ret
-    return ivy.astype(arr, x.dtype)
-
-
-tanh = paddle_tanh
-
-
-@with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle")
-@to_ivy_arrays_and_back
-def hardshrink(x, threshold=0.5, name=None):
-    mask = ivy.logical_or(ivy.greater(x, threshold), ivy.less(x, -threshold))
-    return ivy.where(mask, x, 0.0)
-
-
-@with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle")
-@to_ivy_arrays_and_back
-def hardswish(x, name=None):
-    relu6_val = ivy.relu6(ivy.add(x, 3))
-    ret = ivy.multiply(x, ivy.divide(relu6_val, 6))
-    return ret
-
-
-@with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle")
-@to_ivy_arrays_and_back
-def hardtanh(
-    x,
-    /,
-    *,
-    min=-1.0,
-    max=1.0,
-    name=None,
-):
-    less = ivy.where(ivy.less(x, min), min, x)
-    ret = ivy.where(ivy.greater(x, max), max, less).astype(x.dtype)
-    return ret
-
-
-@with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle")
-@to_ivy_arrays_and_back
-def gelu(x, approximate=False, name=None):
-    return ivy.gelu(x, approximate=approximate)
-
-
-@with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle")
-@to_ivy_arrays_and_back
-def hardsigmoid(x, slope=0.1666667, offset=0.5, name=None):
-    ret = ivy.minimum(ivy.maximum(ivy.add(ivy.multiply(x, slope), offset), 0), 1)
-    return ret
-
-
-@with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle")
-@to_ivy_arrays_and_back
-def relu6(x, name=None):
-    return ivy.relu6(x)
-
-
-@with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle")
-@to_ivy_arrays_and_back
-def softshrink(
-    x,
-    /,
-    *,
-    threshold=0.5,
-    name=None,
-):
-    low = ivy.where(ivy.less(x, -threshold), ivy.add(x, threshold), 0)
-    up = ivy.where(ivy.greater(x, threshold), ivy.subtract(x, threshold), 0)
-    add = ivy.add(low, up)
-    return ivy.astype(add, x.dtype)
-
-
-@with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle")
-@to_ivy_arrays_and_back
-def softsign(
-    x,
-    /,
-    *,
-    name=None,
-):
-    return ivy.divide(x, ivy.add(1, ivy.abs(x)))
-
-
-@with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle")
-@to_ivy_arrays_and_back
-def log_softmax(x, axis=-1, dtype=None, name=None):
-    x = ivy.astype(x, dtype) if dtype else x
-    ret = ivy.log_softmax(x, axis=axis)
-    ret = ivy.astype(ret, dtype) if dtype else ret
-    return ret
-
-
-@with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle")
-@to_ivy_arrays_and_back
-def prelu(x, weight, data_format="NCHW", name=None):
-    return ivy.add(ivy.maximum(0, x), ivy.multiply(weight, ivy.minimum(0, x)))
-
-
-@with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle")
-@to_ivy_arrays_and_back
-def celu(
-    x,
-    /,
-    *,
-    alpha=1.0,
-    name=None,
-):
-    prod = alpha * (ivy.exp(x / alpha) - 1)
-    ret = ivy.maximum(0, x) + ivy.minimum(0, prod)
-    return ret
-
-
-@with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle")
-@to_ivy_arrays_and_back
-def rrelu(
-    x,
-    /,
-    *,
-    lower=0.125,
-    upper=0.3333333333333333,
-    training=False,
-    name=None,
-):
-    if lower < 0 or lower > 1:
-        raise ValueError(
-            "The lower value must be no less than zero or greater than one. Received:"
-            f" {lower}."
-        )
-
-    if upper < lower:
-        raise ValueError(
-            "The upper value must be greater than lower value. Received: lower"
-            f" {lower}, upper {upper}."
-        )
-
-    if upper > 1:
-        raise ValueError(
-            f"The upper value must be no greater than one. Received: {upper}."
-        )
-
-    is_test = not training
-    if is_test:
-        add = lower + upper
-        ret = add * x * 0.5
-        out = ivy.where(x >= 0, x, ret)
-        return out.astype(x.dtype)
-    # else:
-    # ToDo implement a correctly after fixing ivy.random_uniform
-    # a = ivy.random_normal(low=lower, high=upper)
-    # ret = ivy.where(x >= 0, x, ivy.multiply(a, x))
-    # return ret.astype(x.dtype)
-
-
-@with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle")
-@to_ivy_arrays_and_back
-def tanhshrink(
-    x,
-    /,
-    *,
-    name=None,
-):
-    return ivy.subtract(x, ivy.tanh(x))
-
-
-@with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle")
-@to_ivy_arrays_and_back
-def relu_(x, name=None):
-    ret = ivy.relu(x)
-    ivy.inplace_update(x, ret)
-    return x
-
-
-@with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle")
-@to_ivy_arrays_and_back
-def elu(
-    x,
-    /,
-    *,
-    alpha=1.0,
-    name=None,
-):
-    return ivy.elu(x, alpha=alpha)
-
-
-@with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle")
-@to_ivy_arrays_and_back
-def mish(x, name=None):
-    return ivy.mish(x)
diff --git a/ivy/functional/frontends/paddle/nn/functional/loss.py b/ivy/functional/frontends/paddle/nn/functional/loss.py
index 3572e24a9f5f2..4fd57f81be308 100644
--- a/ivy/functional/frontends/paddle/nn/functional/loss.py
+++ b/ivy/functional/frontends/paddle/nn/functional/loss.py
@@ -101,3 +101,24 @@ def cosine_embedding_loss(
         out = ivy.sum(out)
 
     return out
+
+
+@with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle")
+@to_ivy_arrays_and_back
+def smooth_l1_loss(
+    input,
+    label,
+    reduction="mean",
+    delta=1.0,
+    name=None,
+):
+    # Huber-style loss: quadratic for residuals within delta, linear beyond it.
+    diff = ivy.abs(input - label).astype(label.dtype)
+    quadratic = 0.5 * ivy.pow(diff, 2)
+    linear = delta * diff - 0.5 * ivy.pow(delta, 2)
+    out = ivy.where(diff <= delta, quadratic, linear)
+    if reduction == "mean":
+        out = ivy.mean(out)
+    elif reduction == "sum":
+        out = ivy.sum(out)
+    return out.astype(label.dtype)
diff --git a/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_paddle_activation.py b/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_paddle_activation.py
deleted file mode 100644
index 7b55199f1e5b6..0000000000000
--- a/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_paddle_activation.py
+++ /dev/null
@@ -1,516 +0,0 @@
-# global
-from hypothesis import strategies as st
-
-# local
-import ivy
-import ivy_tests.test_ivy.helpers as helpers
-from ivy_tests.test_ivy.helpers import handle_frontend_test
-
-
-# selu
-@handle_frontend_test(
-    fn_tree="paddle.nn.functional.selu",
-    dtype_and_x=helpers.dtype_and_values(
-        available_dtypes=helpers.get_dtypes("valid"),
-        safety_factor_scale="log",
-        small_abs_safety_factor=20,
-    ),
-    scale=helpers.ints(min_value=2, max_value=10),
-    alpha=helpers.ints(min_value=1, max_value=10),
-)
-def test_paddle_selu(
-    *,
-    dtype_and_x,
-    scale,
-    alpha,
-    on_device,
-    fn_tree,
-    frontend,
-    test_flags,
-):
-    input_dtype, x = dtype_and_x
-    helpers.test_frontend_function(
-        input_dtypes=input_dtype,
-        frontend=frontend,
-        test_flags=test_flags,
-        fn_tree=fn_tree,
-        on_device=on_device,
-        x=x[0],
-        alpha=alpha,
-        scale=scale,
-    )
-
-
-# hardshrink
-@handle_frontend_test(
-    fn_tree="paddle.nn.functional.hardshrink",
-    dtype_and_x=helpers.dtype_and_values(
-        available_dtypes=helpers.get_dtypes("valid"),
-    ),
-    threshold=helpers.floats(min_value=0, max_value=1, exclude_min=True),
-)
-def test_paddle_hardshrink(
-    *,
-    dtype_and_x,
-    threshold,
-    on_device,
-    fn_tree,
-    frontend,
-    test_flags,
-):
-    input_dtype, x = dtype_and_x
-    helpers.test_frontend_function(
-        input_dtypes=input_dtype,
-        frontend=frontend,
-        test_flags=test_flags,
-        fn_tree=fn_tree,
-        on_device=on_device,
-        x=x[0],
-        threshold=threshold,
-    )
-
-
-# hardswish
-@handle_frontend_test(
-    fn_tree="paddle.nn.functional.hardswish",
-    dtype_and_x=helpers.dtype_and_values(
-        available_dtypes=helpers.get_dtypes("float"),
-        safety_factor_scale="log",
-    ),
-)
-def test_paddle_hardswish(
-    *,
-    dtype_and_x,
-    on_device,
-    fn_tree,
-    frontend,
-    test_flags,
-):
-    input_dtype, x = dtype_and_x
-    helpers.test_frontend_function(
-        input_dtypes=input_dtype,
-        frontend=frontend,
-        test_flags=test_flags,
-        fn_tree=fn_tree,
-        on_device=on_device,
-        x=x[0],
-    )
-
-
-# hardtanh
-@handle_frontend_test(
-    fn_tree="paddle.nn.functional.hardtanh",
-    dtype_and_x=helpers.dtype_and_values(
-        available_dtypes=helpers.get_dtypes("valid"),
-    ),
-    max_val=helpers.floats(min_value=0, max_value=1, exclude_min=True),
-)
-def test_paddle_hardtanh(
-    *,
-    dtype_and_x,
-    max_val,
-    on_device,
-    fn_tree,
-    frontend,
-    test_flags,
-):
-    input_dtype, x = dtype_and_x
-    max_min = max_val, -max_val
-    helpers.test_frontend_function(
-        input_dtypes=input_dtype,
-        frontend=frontend,
-        test_flags=test_flags,
-        fn_tree=fn_tree,
-        on_device=on_device,
-        x=x[0],
-        min=max_min[1],
-        max=max_min[0],
-    )
-
-
-# gelu
-@handle_frontend_test(
-    fn_tree="paddle.nn.functional.gelu",
-    dtype_and_x=helpers.dtype_and_values(
-        available_dtypes=helpers.get_dtypes("valid"),
-        safety_factor_scale="log",
-        small_abs_safety_factor=20,
-    ),
-    approximate=st.booleans(),
-)
-def test_paddle_gelu(
-    *,
-    dtype_and_x,
-    approximate,
-    on_device,
-    fn_tree,
-    frontend,
-    test_flags,
-):
-    input_dtype, x = dtype_and_x
-    helpers.test_frontend_function(
-        input_dtypes=input_dtype,
-        frontend=frontend,
-        test_flags=test_flags,
-        fn_tree=fn_tree,
-        on_device=on_device,
-        rtol=1e-2,
-        atol=1e-2,
-        x=x[0],
-        approximate=approximate,
-    )
-
-
-# hardsigmoid
-@handle_frontend_test(
-    fn_tree="paddle.nn.functional.hardsigmoid",
-    dtype_and_x=helpers.dtype_and_values(
-        available_dtypes=helpers.get_dtypes("valid"),
-    ),
-    slope=helpers.ints(min_value=0, max_value=10),
-    offset=helpers.ints(min_value=0, max_value=10),
-)
-def test_paddle_hardsigmoid(
-    *,
-    dtype_and_x,
-    slope,
-    offset,
-    on_device,
-    fn_tree,
-    frontend,
-    test_flags,
-):
-    input_dtype, x = dtype_and_x
-    helpers.test_frontend_function(
-        input_dtypes=input_dtype,
-        frontend=frontend,
-        test_flags=test_flags,
-        fn_tree=fn_tree,
-        on_device=on_device,
-        x=x[0],
-        slope=slope,
-        offset=offset,
-    )
-
-
-# relu6
-@handle_frontend_test(
-    fn_tree="paddle.nn.functional.relu6",
-    dtype_and_x=helpers.dtype_and_values(
-        available_dtypes=helpers.get_dtypes("valid"),
-    ),
-)
-def test_paddle_relu6(
-    *,
-    dtype_and_x,
-    on_device,
-    fn_tree,
-    frontend,
-    test_flags,
-):
-    input_dtype, x = dtype_and_x
-    helpers.test_frontend_function(
-        input_dtypes=input_dtype,
-        frontend=frontend,
-        test_flags=test_flags,
-        fn_tree=fn_tree,
-        on_device=on_device,
-        x=x[0],
-    )
-
-
-# softshrink
-@handle_frontend_test(
-    fn_tree="paddle.nn.functional.softshrink",
-    dtype_and_input=helpers.dtype_and_values(
-        available_dtypes=helpers.get_dtypes("valid"),
-    ),
-    threshold=helpers.floats(min_value=0, max_value=1, exclude_min=True),
-)
-def test_paddle_softshrink(
-    *,
-    dtype_and_input,
-    threshold,
-    on_device,
-    fn_tree,
-    frontend,
-    test_flags,
-):
-    input_dtype, x = dtype_and_input
-    helpers.test_frontend_function(
-        input_dtypes=input_dtype,
-        frontend=frontend,
-        test_flags=test_flags,
-        fn_tree=fn_tree,
-        on_device=on_device,
-        x=x[0],
-        threshold=threshold,
-    )
-
-
-# softsign
-@handle_frontend_test(
-    fn_tree="paddle.nn.functional.softsign",
-    dtype_and_x=helpers.dtype_and_values(
-        available_dtypes=helpers.get_dtypes("valid"),
-        safety_factor_scale="log",
-        small_abs_safety_factor=20,
-    ),
-)
-def test_paddle_softsign(
-    *,
-    dtype_and_x,
-    on_device,
-    fn_tree,
-    frontend,
-    test_flags,
-):
-    input_dtype, x = dtype_and_x
-    helpers.test_frontend_function(
-        input_dtypes=input_dtype,
-        frontend=frontend,
-        test_flags=test_flags,
-        fn_tree=fn_tree,
-        on_device=on_device,
-        x=x[0],
-    )
-
-
-# log_softmax
-@handle_frontend_test(
-    fn_tree="paddle.nn.functional.log_softmax",
-    dtype_x_and_axis=helpers.dtype_values_axis(
-        available_dtypes=helpers.get_dtypes("float"),
-        min_num_dims=1,
-        max_axes_size=1,
-        force_int_axis=True,
-        valid_axis=True,
-        min_value=-30.0,
-        max_value=30.0,
-    ),
-    dtypes=helpers.get_dtypes("float", none=False, full=False),
-)
-def test_paddle_log_softmax(
-    *,
-    dtype_x_and_axis,
-    dtypes,
-    on_device,
-    fn_tree,
-    frontend,
-    test_flags,
-):
-    input_dtype, x, axis = dtype_x_and_axis
-    helpers.test_frontend_function(
-        input_dtypes=input_dtype,
-        frontend=frontend,
-        test_flags=test_flags,
-        fn_tree=fn_tree,
-        on_device=on_device,
-        atol=1e-2,
-        x=x[0],
-        axis=axis,
-        dtype=ivy.as_ivy_dtype(dtypes[0]),
-    )
-
-
-@st.composite
-def _generate_prelu_arrays(draw):
-    arr_size = draw(helpers.ints(min_value=2, max_value=5))
-    dtype = draw(helpers.get_dtypes("float", index=1, full=False))
-    input = draw(
-        helpers.array_values(
-            dtype=dtype[0], shape=(arr_size), min_value=0, max_value=10
-        )
-    )
-    weight = draw(
-        helpers.array_values(dtype=dtype[0], shape=(1,), min_value=0, max_value=1.0)
-    )
-    input_weight = input, weight
-    return dtype, input_weight
-
-
-# prelu
-@handle_frontend_test(
-    fn_tree="paddle.nn.functional.prelu",
-    dtype_input_and_weight=_generate_prelu_arrays(),
-)
-def test_paddle_prelu(
-    *,
-    dtype_input_and_weight,
-    on_device,
-    fn_tree,
-    frontend,
-    test_flags,
-):
-    dtype, x = dtype_input_and_weight
-    helpers.test_frontend_function(
-        input_dtypes=dtype,
-        frontend=frontend,
-        test_flags=test_flags,
-        fn_tree=fn_tree,
-        on_device=on_device,
-        x=x[0],
-        weight=x[1],
-    )
-
-
-# celu
-@handle_frontend_test(
-    fn_tree="paddle.nn.functional.celu",
-    dtype_and_x=helpers.dtype_and_values(
-        available_dtypes=helpers.get_dtypes("valid"),
-    ),
-    alpha=helpers.ints(min_value=1, max_value=10),
-)
-def test_paddle_celu(
-    *,
-    dtype_and_x,
-    alpha,
-    on_device,
-    fn_tree,
-    frontend,
-    test_flags,
-):
-    input_dtype, x = dtype_and_x
-    helpers.test_frontend_function(
-        input_dtypes=input_dtype,
-        frontend=frontend,
-        test_flags=test_flags,
-        fn_tree=fn_tree,
-        on_device=on_device,
-        x=x[0],
-        alpha=alpha,
-    )
-
-
-@handle_frontend_test(
-    fn_tree="paddle.nn.functional.rrelu",
-    dtype_and_x=helpers.dtype_and_values(
-        available_dtypes=helpers.get_dtypes("float"),
-    ),
-)
-def test_paddle_rrelu(
-    *,
-    dtype_and_x,
-    on_device,
-    fn_tree,
-    frontend,
-    test_flags,
-):
-    input_dtype, x = dtype_and_x
-    helpers.test_frontend_function(
-        input_dtypes=input_dtype,
-        frontend=frontend,
-        test_flags=test_flags,
-        fn_tree=fn_tree,
-        on_device=on_device,
-        test_values=False,
-        x=x[0],
-    )
-
-
-# tanhshrink
-@handle_frontend_test(
-    fn_tree="paddle.nn.functional.tanhshrink",
-    dtype_and_x=helpers.dtype_and_values(
-        available_dtypes=helpers.get_dtypes("float"),
-    ),
-)
-def test_paddle_tanhshrink(
-    *,
-    dtype_and_x,
-    on_device,
-    fn_tree,
-    frontend,
-    test_flags,
-):
-    input_dtype, x = dtype_and_x
-    helpers.test_frontend_function(
-        input_dtypes=input_dtype,
-        frontend=frontend,
-        test_flags=test_flags,
-        fn_tree=fn_tree,
-        on_device=on_device,
-        test_values=False,
-        x=x[0],
-    )
-
-
-# relu_
-@handle_frontend_test(
-    fn_tree="paddle.nn.functional.relu_",
-    dtype_and_x=helpers.dtype_and_values(
-        available_dtypes=helpers.get_dtypes("valid"),
-    ),
-)
-def test_paddle_relu_(
-    dtype_and_x,
-    frontend,
-    test_flags,
-    fn_tree,
-):
-    input_dtype, x = dtype_and_x
-    helpers.test_frontend_function(
-        input_dtypes=input_dtype,
-        frontend=frontend,
-        test_flags=test_flags,
-        fn_tree=fn_tree,
-        x=x[0],
-    )
-
-
-# elu
-@handle_frontend_test(
-    fn_tree="paddle.nn.functional.elu",
-    dtype_and_input=helpers.dtype_and_values(
-        available_dtypes=helpers.get_dtypes("valid"),
-    ),
-    alpha=helpers.floats(min_value=0, max_value=1, exclude_min=True),
-)
-def test_paddle_elu(
-    *,
-    dtype_and_input,
-    alpha,
-    on_device,
-    fn_tree,
-    frontend,
-    test_flags,
-):
-    input_dtype, x = dtype_and_input
-    helpers.test_frontend_function(
-        input_dtypes=input_dtype,
-        frontend=frontend,
-        test_flags=test_flags,
-        fn_tree=fn_tree,
-        on_device=on_device,
-        x=x[0],
-        alpha=alpha,
-    )
-
-
-# mish
-@handle_frontend_test(
-    fn_tree="paddle.nn.functional.mish",
-    dtype_and_input=helpers.dtype_and_values(
-        available_dtypes=helpers.get_dtypes("valid"),
-        safety_factor_scale="log",
-        small_abs_safety_factor=20,
-    ),
-)
-def test_paddle_mish(
-    *,
-    dtype_and_input,
-    on_device,
-    fn_tree,
-    frontend,
-    test_flags,
-):
-    input_dtype, x = dtype_and_input
-    helpers.test_frontend_function(
-        input_dtypes=input_dtype,
-        frontend=frontend,
-        test_flags=test_flags,
-        fn_tree=fn_tree,
-        on_device=on_device,
-        x=x[0],
-    )
diff --git a/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_paddle_loss.py b/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_paddle_loss.py
index 4782fbf0d132b..0c588a4f5820d 100644
--- a/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_paddle_loss.py
+++ b/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_paddle_loss.py
@@ -155,3 +155,45 @@ def test_paddle_cosine_embedding_loss(
         margin=margin,
         reduction=reduction,
     )
+
+
+# smooth_l1_loss
+@handle_frontend_test(
fn_tree="paddle.nn.functional.smooth_l1_loss", + dtype_and_x = helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("valid"), + num_arrays=2, + shared_dtype=True, + min_num_dims=2, + max_num_dims=5, + min_dim_size=1, + max_dim_size=10, + ), + delta=st.floats( + min_value=0.1, + max_value=1.0, + ), + reduction=st.sampled_from(["mean", "sum", "none"]), +) +def test_paddle_smooth_l1_loss( + dtype_and_x, + delta, + reduction, + on_device, + fn_tree, + frontend, + test_flags +): + input_dtype, x = dtype_and_x + helpers.test_frontend_function( + input_dtypes=input_dtype, + frontend=frontend, + test_flags=test_flags, + fn_tree=fn_tree, + on_device=on_device, + input=x[0], + label=x[1], + reduction=reduction, + delta=delta, + ) + +