diff --git a/ivy/functional/frontends/paddle/tensor/tensor.py b/ivy/functional/frontends/paddle/tensor/tensor.py
index 8b9a505d20991..51ea6f197c3ed 100644
--- a/ivy/functional/frontends/paddle/tensor/tensor.py
+++ b/ivy/functional/frontends/paddle/tensor/tensor.py
@@ -194,6 +194,13 @@ def __rmul__(self, y, /, name=None):
     def __xor__(self, y, /, name=None):
         return paddle_frontend.logic.bitwise_xor(self, y)
 
+    @with_unsupported_dtypes(
+        {"2.5.2 and below": ("bool", "unsigned", "int8", "float16", "bfloat16")},
+        "paddle",
+    )
+    def __rpow__(self, y, /, name=None):
+        return paddle_frontend.pow(y, self)
+
     # Instance Methods #
     # ---------------- #
 
diff --git a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_tensor.py b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_tensor.py
index 21ef6eb8a529f..9c6846a18b212 100644
--- a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_tensor.py
+++ b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_tensor.py
@@ -794,6 +794,48 @@ def test_paddle__rmul__(
     )
 
 
+# __rpow__
+@handle_frontend_method(
+    class_tree=CLASS_TREE,
+    init_tree="paddle.to_tensor",
+    method_name="__rpow__",
+    dtype_and_x=helpers.dtype_and_values(
+        available_dtypes=helpers.get_dtypes("numeric"),
+        num_arrays=2,
+        min_value=1,
+    ),
+)
+def test_paddle__rpow__(
+    dtype_and_x,
+    frontend_method_data,
+    init_flags,
+    method_flags,
+    frontend,
+    on_device,
+    backend_fw,
+):
+    input_dtype, x = dtype_and_x
+    dtype = input_dtype[0]
+    if "int" in dtype:
+        x[0] = ivy.abs(x[0])
+    helpers.test_frontend_method(
+        init_input_dtypes=input_dtype,
+        backend_to_test=backend_fw,
+        init_all_as_kwargs_np={
+            "data": x[0],
+        },
+        method_input_dtypes=input_dtype,
+        method_all_as_kwargs_np={
+            "other": x[1],
+        },
+        frontend_method_data=frontend_method_data,
+        init_flags=init_flags,
+        method_flags=method_flags,
+        frontend=frontend,
+        on_device=on_device,
+    )
+
+
 # __rsub__
 @handle_frontend_method(
     class_tree=CLASS_TREE,
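
Usage sketch (not part of the diff, added for context): Python falls back to the right operand's __rpow__ when the left operand of ** cannot handle a frontend Tensor, and the patch routes that call to paddle_frontend.pow(y, self); the decorator marks bool, unsigned, int8, float16 and bfloat16 as unsupported for Paddle 2.5.2 and below. The snippet below is a minimal, hypothetical illustration, assuming ivy is installed with a NumPy backend available and that the Paddle frontend exposes to_tensor and pow as used in the patched module.

# Minimal illustration only; not part of the PR.
import ivy
import ivy.functional.frontends.paddle as paddle_frontend

ivy.set_backend("numpy")  # assumption: any installed backend would do for this sketch

x = paddle_frontend.to_tensor([1.0, 2.0, 3.0])

# float.__pow__ returns NotImplemented for a frontend Tensor, so Python
# dispatches to x.__rpow__(2.0), which the new method forwards to
# paddle_frontend.pow(2.0, x).
y = 2.0 ** x  # elementwise 2 ** x -> [2., 4., 8.]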