Skip to content

Commit

Permalink
Merge pull request #1498 from IntelPython/iris_xe_test1
Browse files Browse the repository at this point in the history
Update tests to run on Iris Xe
  • Loading branch information
npolina4 authored Jul 27, 2023
2 parents 713302a + f510e16 commit 85ea6f9
Show file tree
Hide file tree
Showing 7 changed files with 148 additions and 71 deletions.
12 changes: 12 additions & 0 deletions dpnp/backend/kernels/dpnp_krnl_random.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2988,9 +2988,13 @@ void func_map_init_random(func_map_t &fmap)

fmap[DPNPFuncName::DPNP_FN_RNG_CHISQUARE][eft_DBL][eft_DBL] = {
eft_DBL, (void *)dpnp_rng_chisquare_default_c<double>};
fmap[DPNPFuncName::DPNP_FN_RNG_CHISQUARE][eft_FLT][eft_FLT] = {
eft_FLT, (void *)dpnp_rng_chisquare_default_c<float>};

fmap[DPNPFuncName::DPNP_FN_RNG_CHISQUARE_EXT][eft_DBL][eft_DBL] = {
eft_DBL, (void *)dpnp_rng_chisquare_ext_c<double>};
fmap[DPNPFuncName::DPNP_FN_RNG_CHISQUARE_EXT][eft_FLT][eft_FLT] = {
eft_FLT, (void *)dpnp_rng_chisquare_ext_c<float>};

fmap[DPNPFuncName::DPNP_FN_RNG_EXPONENTIAL][eft_DBL][eft_DBL] = {
eft_DBL, (void *)dpnp_rng_exponential_default_c<double>};
Expand Down Expand Up @@ -3136,15 +3140,23 @@ void func_map_init_random(func_map_t &fmap)

fmap[DPNPFuncName::DPNP_FN_RNG_STANDARD_CAUCHY][eft_DBL][eft_DBL] = {
eft_DBL, (void *)dpnp_rng_standard_cauchy_default_c<double>};
fmap[DPNPFuncName::DPNP_FN_RNG_STANDARD_CAUCHY][eft_FLT][eft_FLT] = {
eft_FLT, (void *)dpnp_rng_standard_cauchy_default_c<float>};

fmap[DPNPFuncName::DPNP_FN_RNG_STANDARD_CAUCHY_EXT][eft_DBL][eft_DBL] = {
eft_DBL, (void *)dpnp_rng_standard_cauchy_ext_c<double>};
fmap[DPNPFuncName::DPNP_FN_RNG_STANDARD_CAUCHY_EXT][eft_FLT][eft_FLT] = {
eft_FLT, (void *)dpnp_rng_standard_cauchy_ext_c<float>};

fmap[DPNPFuncName::DPNP_FN_RNG_STANDARD_EXPONENTIAL][eft_DBL][eft_DBL] = {
eft_DBL, (void *)dpnp_rng_standard_exponential_default_c<double>};
fmap[DPNPFuncName::DPNP_FN_RNG_STANDARD_EXPONENTIAL][eft_FLT][eft_FLT] = {
eft_FLT, (void *)dpnp_rng_standard_exponential_default_c<float>};

fmap[DPNPFuncName::DPNP_FN_RNG_STANDARD_EXPONENTIAL_EXT][eft_DBL][eft_DBL] =
{eft_DBL, (void *)dpnp_rng_standard_exponential_ext_c<double>};
fmap[DPNPFuncName::DPNP_FN_RNG_STANDARD_EXPONENTIAL_EXT][eft_FLT][eft_FLT] =
{eft_FLT, (void *)dpnp_rng_standard_exponential_ext_c<float>};

fmap[DPNPFuncName::DPNP_FN_RNG_STANDARD_GAMMA][eft_DBL][eft_DBL] = {
eft_DBL, (void *)dpnp_rng_standard_gamma_default_c<double>};
Expand Down
9 changes: 5 additions & 4 deletions dpnp/random/dpnp_algo_random.pyx
Original file line number Diff line number Diff line change
Expand Up @@ -96,7 +96,8 @@ ctypedef c_dpctl.DPCTLSyclEventRef(*fptr_dpnp_rng_beta_c_1out_t)(c_dpctl.DPCTLSy
const c_dpctl.DPCTLEventVectorRef) except +
ctypedef c_dpctl.DPCTLSyclEventRef(*fptr_dpnp_rng_binomial_c_1out_t)(c_dpctl.DPCTLSyclQueueRef,
void * ,
const int, const double,
const int,
const double,
const size_t,
const c_dpctl.DPCTLEventVectorRef) except +
ctypedef c_dpctl.DPCTLSyclEventRef(*fptr_dpnp_rng_chisquare_c_1out_t)(c_dpctl.DPCTLSyclQueueRef,
Expand Down Expand Up @@ -585,7 +586,7 @@ cpdef utils.dpnp_descriptor dpnp_rng_chisquare(int df, size):
"""

# convert string type names (array.dtype) to C enum DPNPFuncType
cdef DPNPFuncType param1_type = dpnp_dtype_to_DPNPFuncType(dpnp.float64)
cdef DPNPFuncType param1_type = dpnp_dtype_to_DPNPFuncType(dpnp.default_float_type())

# get the FPTR data structure
cdef DPNPFuncData kernel_data = get_dpnp_function_ptr(DPNP_FN_RNG_CHISQUARE_EXT, param1_type, param1_type)
Expand Down Expand Up @@ -1329,7 +1330,7 @@ cpdef utils.dpnp_descriptor dpnp_rng_standard_cauchy(size):
"""

# convert string type names (array.dtype) to C enum DPNPFuncType
cdef DPNPFuncType param1_type = dpnp_dtype_to_DPNPFuncType(dpnp.float64)
cdef DPNPFuncType param1_type = dpnp_dtype_to_DPNPFuncType(dpnp.default_float_type())

# get the FPTR data structure
cdef DPNPFuncData kernel_data = get_dpnp_function_ptr(DPNP_FN_RNG_STANDARD_CAUCHY_EXT, param1_type, param1_type)
Expand Down Expand Up @@ -1364,7 +1365,7 @@ cpdef utils.dpnp_descriptor dpnp_rng_standard_exponential(size):
cdef fptr_dpnp_rng_standard_exponential_c_1out_t func

# convert string type names (array.dtype) to C enum DPNPFuncType
cdef DPNPFuncType param1_type = dpnp_dtype_to_DPNPFuncType(dpnp.float64)
cdef DPNPFuncType param1_type = dpnp_dtype_to_DPNPFuncType(dpnp.default_float_type())

# get the FPTR data structure
cdef DPNPFuncData kernel_data = get_dpnp_function_ptr(DPNP_FN_RNG_STANDARD_EXPONENTIAL_EXT, param1_type, param1_type)
Expand Down
6 changes: 3 additions & 3 deletions dpnp/random/dpnp_iface_random.py
Original file line number Diff line number Diff line change
Expand Up @@ -230,7 +230,7 @@ def chisquare(df, size=None):
-----------
Parameter ``df`` is supported as a scalar.
Otherwise, :obj:`numpy.random.chisquare(df, size)` samples are drawn.
Output array data type is :obj:`dpnp.float64`.
Output array data type is the default floating point type.
Examples
--------
Expand Down Expand Up @@ -1533,7 +1533,7 @@ def standard_cauchy(size=None):
Limitations
-----------
Output array data type is :obj:`dpnp.float64`.
Output array data type is the default floating point type.
Examples
--------
Expand Down Expand Up @@ -1562,7 +1562,7 @@ def standard_exponential(size=None):
Limitations
-----------
Output array data type is :obj:`dpnp.float64`.
Output array data type is the default floating point type.
Examples
--------
Expand Down
5 changes: 4 additions & 1 deletion tests/test_logic.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@

import dpnp

from .helper import get_all_dtypes
from .helper import get_all_dtypes, has_support_aspect64


@pytest.mark.parametrize("type", get_all_dtypes())
Expand Down Expand Up @@ -40,6 +40,9 @@ def test_all(type, shape):
assert_allclose(dpnp_res, np_res)


@pytest.mark.skipif(
not has_support_aspect64(), reason="Aborted on Iris Xe: SAT-5988"
)
@pytest.mark.parametrize("type", get_all_dtypes(no_bool=True, no_complex=True))
def test_allclose(type):
a = numpy.random.rand(10)
Expand Down
19 changes: 14 additions & 5 deletions tests/test_manipulation.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,20 +4,29 @@

import dpnp

from .helper import (
get_all_dtypes,
get_complex_dtypes,
get_float_dtypes,
)

testdata = []
testdata += [
([True, False, True], dtype)
for dtype in ["float32", "float64", "int32", "int64", "bool"]
for dtype in get_all_dtypes(no_none=True, no_complex=True)
]
testdata += [
([1, -1, 0], dtype) for dtype in ["float32", "float64", "int32", "int64"]
([1, -1, 0], dtype)
for dtype in get_all_dtypes(no_none=True, no_bool=True, no_complex=True)
]
testdata += [([0.1, 0.0, -0.1], dtype) for dtype in ["float32", "float64"]]
testdata += [([1j, -1j, 1 - 2j], dtype) for dtype in ["complex128"]]
testdata += [([0.1, 0.0, -0.1], dtype) for dtype in get_float_dtypes()]
testdata += [([1j, -1j, 1 - 2j], dtype) for dtype in get_complex_dtypes()]


@pytest.mark.parametrize("in_obj,out_dtype", testdata)
@pytest.mark.parametrize("in_obj, out_dtype", testdata)
def test_copyto_dtype(in_obj, out_dtype):
if out_dtype == dpnp.complex64:
pytest.skip("SAT-6016: dpnp.copyto() do not work with complex64 dtype")
ndarr = numpy.array(in_obj)
expected = numpy.empty(ndarr.size, dtype=out_dtype)
numpy.copyto(expected, ndarr)
Expand Down
85 changes: 27 additions & 58 deletions tests/test_mixins.py
Original file line number Diff line number Diff line change
@@ -1,75 +1,44 @@
import unittest

import numpy

import dpnp as inp
from tests.third_party.cupy import testing


class TestMatMul(unittest.TestCase):
def test_matmul(self):
array_data = [1.0, 2.0, 3.0, 4.0]
size = 2

# DPNP
array1 = inp.reshape(
inp.array(array_data, dtype=inp.float64), (size, size)
)
array2 = inp.reshape(
inp.array(array_data, dtype=inp.float64), (size, size)
)
result = inp.matmul(array1, array2)
# print(result)

# original
array_1 = numpy.array(array_data, dtype=numpy.float64).reshape(
(size, size)
)
array_2 = numpy.array(array_data, dtype=numpy.float64).reshape(
(size, size)
)
expected = numpy.matmul(array_1, array_2)
# print(expected)
@testing.for_float_dtypes()
@testing.numpy_cupy_allclose()
def test_matmul(self, xp, dtype):
data = [1.0, 2.0, 3.0, 4.0]
shape = (2, 2)

# passed
numpy.testing.assert_array_equal(expected, result)
# still failed
# self.assertEqual(expected, result)
a = xp.array(data, dtype=dtype).reshape(shape)
b = xp.array(data, dtype=dtype).reshape(shape)

def test_matmul2(self):
array_data1 = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
array_data2 = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]
return xp.matmul(a, b)

# DPNP
array1 = inp.reshape(inp.array(array_data1, dtype=inp.float64), (3, 2))
array2 = inp.reshape(inp.array(array_data2, dtype=inp.float64), (2, 4))
result = inp.matmul(array1, array2)
# print(result)
@testing.for_float_dtypes()
@testing.numpy_cupy_allclose()
def test_matmul2(self, xp, dtype):
data1 = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
data2 = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]

# original
array_1 = numpy.array(array_data1, dtype=numpy.float64).reshape((3, 2))
array_2 = numpy.array(array_data2, dtype=numpy.float64).reshape((2, 4))
expected = numpy.matmul(array_1, array_2)
# print(expected)
a = xp.array(data1, dtype=dtype).reshape(3, 2)
b = xp.array(data2, dtype=dtype).reshape(2, 4)

numpy.testing.assert_array_equal(expected, result)
return xp.matmul(a, b)

def test_matmul3(self):
array_data1 = numpy.full((513, 513), 5)
array_data2 = numpy.full((513, 513), 2)
out = numpy.empty((513, 513), dtype=numpy.float64)
@testing.for_float_dtypes()
@testing.numpy_cupy_allclose()
def test_matmul3(self, xp, dtype):
data1 = xp.full((513, 513), 5)
data2 = xp.full((513, 513), 2)
out = xp.empty((513, 513), dtype=dtype)

# DPNP
array1 = inp.array(array_data1, dtype=inp.float64)
array2 = inp.array(array_data2, dtype=inp.float64)
out1 = inp.array(out, dtype=inp.float64)
result = inp.matmul(array1, array2, out=out1)
a = xp.array(data1, dtype=dtype)
b = xp.array(data2, dtype=dtype)

# original
array_1 = numpy.array(array_data1, dtype=numpy.float64)
array_2 = numpy.array(array_data2, dtype=numpy.float64)
expected = numpy.matmul(array_1, array_2, out=out)
xp.matmul(a, b, out=out)

numpy.testing.assert_array_equal(expected, result)
return out


if __name__ == "__main__":
Expand Down
Loading

0 comments on commit 85ea6f9

Please sign in to comment.