diff --git a/dpnp/backend/include/dpnp_iface_fptr.hpp b/dpnp/backend/include/dpnp_iface_fptr.hpp index 8b1f4c48a11..aaef5c78627 100644 --- a/dpnp/backend/include/dpnp_iface_fptr.hpp +++ b/dpnp/backend/include/dpnp_iface_fptr.hpp @@ -376,29 +376,28 @@ enum class DPNPFuncName : size_t DPNP_FN_SUBTRACT_EXT, /**< Used in numpy.subtract() impl, requires extra parameters */ DPNP_FN_SUM, /**< Used in numpy.sum() impl */ - DPNP_FN_SUM_EXT, /**< Used in numpy.sum() impl, requires extra parameters */ - DPNP_FN_SVD, /**< Used in numpy.linalg.svd() impl */ - DPNP_FN_SVD_EXT, /**< Used in numpy.linalg.svd() impl, requires extra - parameters */ - DPNP_FN_TAKE, /**< Used in numpy.take() impl */ - DPNP_FN_TAN, /**< Used in numpy.tan() impl */ - DPNP_FN_TANH, /**< Used in numpy.tanh() impl */ - DPNP_FN_TRANSPOSE, /**< Used in numpy.transpose() impl */ - DPNP_FN_TRACE, /**< Used in numpy.trace() impl */ - DPNP_FN_TRACE_EXT, /**< Used in numpy.trace() impl, requires extra - parameters */ - DPNP_FN_TRAPZ, /**< Used in numpy.trapz() impl */ - DPNP_FN_TRAPZ_EXT, /**< Used in numpy.trapz() impl, requires extra - parameters */ - DPNP_FN_TRI, /**< Used in numpy.tri() impl */ - DPNP_FN_TRIL, /**< Used in numpy.tril() impl */ - DPNP_FN_TRIU, /**< Used in numpy.triu() impl */ - DPNP_FN_TRUNC, /**< Used in numpy.trunc() impl */ - DPNP_FN_VANDER, /**< Used in numpy.vander() impl */ - DPNP_FN_VAR, /**< Used in numpy.var() impl */ - DPNP_FN_ZEROS, /**< Used in numpy.zeros() impl */ - DPNP_FN_ZEROS_LIKE, /**< Used in numpy.zeros_like() impl */ - DPNP_FN_LAST, /**< The latest element of the enumeration */ + DPNP_FN_SVD, /**< Used in numpy.linalg.svd() impl */ + DPNP_FN_SVD_EXT, /**< Used in numpy.linalg.svd() impl, requires extra + parameters */ + DPNP_FN_TAKE, /**< Used in numpy.take() impl */ + DPNP_FN_TAN, /**< Used in numpy.tan() impl */ + DPNP_FN_TANH, /**< Used in numpy.tanh() impl */ + DPNP_FN_TRANSPOSE, /**< Used in numpy.transpose() impl */ + DPNP_FN_TRACE, /**< Used in numpy.trace() impl */ + DPNP_FN_TRACE_EXT, /**< Used in numpy.trace() impl, requires extra + parameters */ + DPNP_FN_TRAPZ, /**< Used in numpy.trapz() impl */ + DPNP_FN_TRAPZ_EXT, /**< Used in numpy.trapz() impl, requires extra + parameters */ + DPNP_FN_TRI, /**< Used in numpy.tri() impl */ + DPNP_FN_TRIL, /**< Used in numpy.tril() impl */ + DPNP_FN_TRIU, /**< Used in numpy.triu() impl */ + DPNP_FN_TRUNC, /**< Used in numpy.trunc() impl */ + DPNP_FN_VANDER, /**< Used in numpy.vander() impl */ + DPNP_FN_VAR, /**< Used in numpy.var() impl */ + DPNP_FN_ZEROS, /**< Used in numpy.zeros() impl */ + DPNP_FN_ZEROS_LIKE, /**< Used in numpy.zeros_like() impl */ + DPNP_FN_LAST, /**< The latest element of the enumeration */ }; /** diff --git a/dpnp/backend/kernels/dpnp_krnl_reduction.cpp b/dpnp/backend/kernels/dpnp_krnl_reduction.cpp index d9534379102..421026c6fba 100644 --- a/dpnp/backend/kernels/dpnp_krnl_reduction.cpp +++ b/dpnp/backend/kernels/dpnp_krnl_reduction.cpp @@ -177,19 +177,6 @@ void (*dpnp_sum_default_c)(void *, const long *) = dpnp_sum_c<_DataType_output, _DataType_input>; -template -DPCTLSyclEventRef (*dpnp_sum_ext_c)(DPCTLSyclQueueRef, - void *, - const void *, - const shape_elem_type *, - const size_t, - const shape_elem_type *, - const size_t, - const void *, - const long *, - const DPCTLEventVectorRef) = - dpnp_sum_c<_DataType_output, _DataType_input>; - template class dpnp_prod_c_kernel; @@ -372,41 +359,5 @@ void func_map_init_reduction(func_map_t &fmap) fmap[DPNPFuncName::DPNP_FN_SUM][eft_DBL][eft_DBL] = { eft_DBL, (void 
*)dpnp_sum_default_c}; - fmap[DPNPFuncName::DPNP_FN_SUM_EXT][eft_INT][eft_INT] = { - eft_LNG, (void *)dpnp_sum_ext_c}; - fmap[DPNPFuncName::DPNP_FN_SUM_EXT][eft_INT][eft_LNG] = { - eft_LNG, (void *)dpnp_sum_ext_c}; - fmap[DPNPFuncName::DPNP_FN_SUM_EXT][eft_INT][eft_FLT] = { - eft_FLT, (void *)dpnp_sum_ext_c}; - fmap[DPNPFuncName::DPNP_FN_SUM_EXT][eft_INT][eft_DBL] = { - eft_DBL, (void *)dpnp_sum_ext_c}; - - fmap[DPNPFuncName::DPNP_FN_SUM_EXT][eft_LNG][eft_INT] = { - eft_INT, (void *)dpnp_sum_ext_c}; - fmap[DPNPFuncName::DPNP_FN_SUM_EXT][eft_LNG][eft_LNG] = { - eft_LNG, (void *)dpnp_sum_ext_c}; - fmap[DPNPFuncName::DPNP_FN_SUM_EXT][eft_LNG][eft_FLT] = { - eft_FLT, (void *)dpnp_sum_ext_c}; - fmap[DPNPFuncName::DPNP_FN_SUM_EXT][eft_LNG][eft_DBL] = { - eft_DBL, (void *)dpnp_sum_ext_c}; - - fmap[DPNPFuncName::DPNP_FN_SUM_EXT][eft_FLT][eft_INT] = { - eft_INT, (void *)dpnp_sum_ext_c}; - fmap[DPNPFuncName::DPNP_FN_SUM_EXT][eft_FLT][eft_LNG] = { - eft_LNG, (void *)dpnp_sum_ext_c}; - fmap[DPNPFuncName::DPNP_FN_SUM_EXT][eft_FLT][eft_FLT] = { - eft_FLT, (void *)dpnp_sum_ext_c}; - fmap[DPNPFuncName::DPNP_FN_SUM_EXT][eft_FLT][eft_DBL] = { - eft_DBL, (void *)dpnp_sum_ext_c}; - - fmap[DPNPFuncName::DPNP_FN_SUM_EXT][eft_DBL][eft_INT] = { - eft_INT, (void *)dpnp_sum_ext_c}; - fmap[DPNPFuncName::DPNP_FN_SUM_EXT][eft_DBL][eft_LNG] = { - eft_LNG, (void *)dpnp_sum_ext_c}; - fmap[DPNPFuncName::DPNP_FN_SUM_EXT][eft_DBL][eft_FLT] = { - eft_FLT, (void *)dpnp_sum_ext_c}; - fmap[DPNPFuncName::DPNP_FN_SUM_EXT][eft_DBL][eft_DBL] = { - eft_DBL, (void *)dpnp_sum_ext_c}; - return; } diff --git a/dpnp/dpnp_algo/dpnp_algo.pxd b/dpnp/dpnp_algo/dpnp_algo.pxd index 18813e3e04c..69aa29bf717 100644 --- a/dpnp/dpnp_algo/dpnp_algo.pxd +++ b/dpnp/dpnp_algo/dpnp_algo.pxd @@ -185,8 +185,6 @@ cdef extern from "dpnp_iface_fptr.hpp" namespace "DPNPFuncName": # need this na DPNP_FN_SEARCHSORTED_EXT DPNP_FN_SORT DPNP_FN_SORT_EXT - DPNP_FN_SUM - DPNP_FN_SUM_EXT DPNP_FN_SVD DPNP_FN_SVD_EXT DPNP_FN_TRACE @@ -284,16 +282,6 @@ ctypedef c_dpctl.DPCTLSyclEventRef(*fptr_2in_1out_strides_t)(c_dpctl.DPCTLSyclQu const long * , const c_dpctl.DPCTLEventVectorRef) except + ctypedef void(*fptr_blas_gemm_2in_1out_t)(void *, void * , void * , size_t, size_t, size_t) -ctypedef c_dpctl.DPCTLSyclEventRef(*dpnp_reduction_c_t)(c_dpctl.DPCTLSyclQueueRef, - void *, - const void * , - const shape_elem_type*, - const size_t, - const shape_elem_type*, - const size_t, - const void * , - const long*, - const c_dpctl.DPCTLEventVectorRef) """ diff --git a/dpnp/dpnp_algo/dpnp_algo_mathematical.pxi b/dpnp/dpnp_algo/dpnp_algo_mathematical.pxi index ce1b0c5f894..c1a6917dffa 100644 --- a/dpnp/dpnp_algo/dpnp_algo_mathematical.pxi +++ b/dpnp/dpnp_algo/dpnp_algo_mathematical.pxi @@ -48,8 +48,6 @@ __all__ += [ "dpnp_modf", "dpnp_nancumprod", "dpnp_nancumsum", - "dpnp_nansum", - "dpnp_sum", "dpnp_trapz", ] @@ -278,82 +276,6 @@ cpdef utils.dpnp_descriptor dpnp_nancumsum(utils.dpnp_descriptor x1): return dpnp_cumsum(x1_desc) -cpdef utils.dpnp_descriptor dpnp_nansum(utils.dpnp_descriptor x1): - x1_obj = x1.get_array() - cdef utils.dpnp_descriptor result = utils_py.create_output_descriptor_py(x1.shape, - x1.dtype, - None, - device=x1_obj.sycl_device, - usm_type=x1_obj.usm_type, - sycl_queue=x1_obj.sycl_queue) - - for i in range(result.size): - input_elem = x1.get_pyobj().flat[i] - - if dpnp.isnan(input_elem): - result.get_pyobj().flat[i] = 0 - else: - result.get_pyobj().flat[i] = input_elem - - return dpnp_sum(result) - - -cpdef utils.dpnp_descriptor dpnp_sum(utils.dpnp_descriptor 
x1, - object axis=None, - object dtype=None, - utils.dpnp_descriptor out=None, - cpp_bool keepdims=False, - object initial=None, - object where=True): - - cdef shape_type_c x1_shape = x1.shape - cdef DPNPFuncType x1_c_type = dpnp_dtype_to_DPNPFuncType(x1.dtype) - - cdef shape_type_c axis_shape = utils._object_to_tuple(axis) - - cdef shape_type_c result_shape = utils.get_reduction_output_shape(x1_shape, axis, keepdims) - cdef DPNPFuncType result_c_type = utils.get_output_c_type(DPNP_FN_SUM_EXT, x1_c_type, out, dtype) - - """ select kernel """ - cdef DPNPFuncData kernel_data = get_dpnp_function_ptr(DPNP_FN_SUM_EXT, x1_c_type, result_c_type) - - x1_obj = x1.get_array() - - """ Create result array """ - cdef utils.dpnp_descriptor result = utils.create_output_descriptor(result_shape, - result_c_type, - out, - device=x1_obj.sycl_device, - usm_type=x1_obj.usm_type, - sycl_queue=x1_obj.sycl_queue) - - if x1.size == 0 and axis is None: - return result - - result_sycl_queue = result.get_array().sycl_queue - - cdef c_dpctl.SyclQueue q = result_sycl_queue - cdef c_dpctl.DPCTLSyclQueueRef q_ref = q.get_queue_ref() - - """ Call FPTR interface function """ - cdef dpnp_reduction_c_t func = kernel_data.ptr - cdef c_dpctl.DPCTLSyclEventRef event_ref = func(q_ref, - result.get_data(), - x1.get_data(), - x1_shape.data(), - x1_shape.size(), - axis_shape.data(), - axis_shape.size(), - NULL, - NULL, - NULL) # dep_events_ref - - with nogil: c_dpctl.DPCTLEvent_WaitAndThrow(event_ref) - c_dpctl.DPCTLEvent_Delete(event_ref) - - return result - - cpdef utils.dpnp_descriptor dpnp_trapz(utils.dpnp_descriptor y1, utils.dpnp_descriptor x1, double dx): cdef DPNPFuncType param1_type = dpnp_dtype_to_DPNPFuncType(y1.dtype) diff --git a/dpnp/dpnp_algo/dpnp_algo_statistics.pxi b/dpnp/dpnp_algo/dpnp_algo_statistics.pxi index 37d51d131ff..716c4d1afbd 100644 --- a/dpnp/dpnp_algo/dpnp_algo_statistics.pxi +++ b/dpnp/dpnp_algo/dpnp_algo_statistics.pxi @@ -36,7 +36,6 @@ and the rest of the library # NO IMPORTs here. All imports must be placed into main "dpnp_algo.pyx" file __all__ += [ - "dpnp_average", "dpnp_correlate", "dpnp_median", ] @@ -49,15 +48,6 @@ ctypedef c_dpctl.DPCTLSyclEventRef(*custom_statistic_1in_1out_func_ptr_t)(c_dpct const c_dpctl.DPCTLEventVectorRef) -cpdef dpnp_average(utils.dpnp_descriptor x1): - array_sum = dpnp_sum(x1).get_pyobj() - - """ Numpy interface inconsistency """ - return_type = dpnp.float32 if (x1.dtype == dpnp.float32) else dpnp.float64 - - return (return_type(array_sum / x1.size)) - - cpdef utils.dpnp_descriptor dpnp_correlate(utils.dpnp_descriptor x1, utils.dpnp_descriptor x2): cdef DPNPFuncType param1_type = dpnp_dtype_to_DPNPFuncType(x1.dtype) cdef DPNPFuncType param2_type = dpnp_dtype_to_DPNPFuncType(x2.dtype) diff --git a/dpnp/dpnp_iface_mathematical.py b/dpnp/dpnp_iface_mathematical.py index 20c5c922dbe..a06075c7f31 100644 --- a/dpnp/dpnp_iface_mathematical.py +++ b/dpnp/dpnp_iface_mathematical.py @@ -2643,7 +2643,7 @@ def subtract( def sum( - x, + a, /, *, axis=None, @@ -2658,31 +2658,86 @@ def sum( For full documentation refer to :obj:`numpy.sum`. + Parameters + ---------- + a : {dpnp.ndarray, usm_ndarray}: + Input array. + axis : int or tuple of ints, optional + Axis or axes along which sums must be computed. If a tuple + of unique integers, sums are computed over multiple axes. + If ``None``, the sum is computed over the entire array. + Default: ``None``. + dtype : dtype, optional + Data type of the returned array. 
If ``None``, the default data + type is inferred from the "kind" of the input array data type. + * If `a` has a real-valued floating-point data type, + the returned array will have the default real-valued + floating-point data type for the device where input + array `a` is allocated. + * If `a` has signed integral data type, the returned array + will have the default signed integral type for the device + where input array `a` is allocated. + * If `a` has unsigned integral data type, the returned array + will have the default unsigned integral type for the device + where input array `a` is allocated. + * If `a` has a complex-valued floating-point data type, + the returned array will have the default complex-valued + floating-pointer data type for the device where input + array `a` is allocated. + * If `a` has a boolean data type, the returned array will + have the default signed integral type for the device + where input array `a` is allocated. + If the data type (either specified or resolved) differs from the + data type of `a`, the input array elements are cast to the + specified data type before computing the sum. + Default: ``None``. + out : {dpnp.ndarray, usm_ndarray}, optional + Alternative output array in which to place the result. It must + have the same shape as the expected output, but the type of + the output values will be cast if necessary. + Default: ``None``. + keepdims : bool, optional + If ``True``, the reduced axes (dimensions) are included in the result + as singleton dimensions, so that the returned array remains + compatible with the input array according to Array Broadcasting + rules. Otherwise, if ``False``, the reduced axes are not included in + the returned array. Default: ``False``. + Returns ------- out : dpnp.ndarray - an array containing the sums. If the sum was computed over the + An array containing the sums. If the sum is computed over the entire array, a zero-dimensional array is returned. The returned array has the data type as described in the `dtype` parameter - of the Python Array API standard for the `sum` function. + description above. Limitations ----------- - Parameters `x` is supported as either :class:`dpnp.ndarray` - or :class:`dpctl.tensor.usm_ndarray`. Parameters `initial` and `where` are supported with their default values. Otherwise ``NotImplementedError`` exception will be raised. - Input array data types are limited by supported DPNP :ref:`Data types`. + + See Also + -------- + :obj:`dpnp.ndarray.sum` : Equivalent method. + :obj:`dpnp.cumsum` : Cumulative sum of array elements. + :obj:`dpnp.trapz` : Integration of array values using the composite trapezoidal rule. + :obj:`dpnp.mean` : Compute the arithmetic mean. + :obj:`dpnp.average` : Compute the weighted average. Examples -------- >>> import dpnp as np - >>> np.sum(np.array([1, 2, 3, 4, 5])) - array(15) - >>> np.sum(np.array(5)) - array(5) - >>> result = np.sum(np.array([[0, 1], [0, 5]]), axis=0) + >>> np.sum(np.array([0.5, 1.5])) + array(2.) 
+ >>> np.sum(np.array([0.5, 0.7, 0.2, 1.5]), dtype=np.int32) + array(1) + >>> a = np.array([[0, 1], [0, 5]]) + >>> np.sum(a) + array(6) + >>> np.sum(a, axis=0) array([0, 6]) + >>> np.sum(a, axis=1) + array([1, 5]) """ @@ -2690,7 +2745,7 @@ def sum( if not isinstance(axis, (tuple, list)): axis = (axis,) - axis = normalize_axis_tuple(axis, x.ndim, "axis") + axis = normalize_axis_tuple(axis, a.ndim, "axis") if initial != 0: raise NotImplementedError( @@ -2702,20 +2757,20 @@ def sum( ) else: if ( - len(x.shape) == 2 - and x.itemsize == 4 + len(a.shape) == 2 + and a.itemsize == 4 and ( ( axis == (0,) - and x.flags.c_contiguous - and 32 <= x.shape[1] <= 1024 - and x.shape[0] > x.shape[1] + and a.flags.c_contiguous + and 32 <= a.shape[1] <= 1024 + and a.shape[0] > a.shape[1] ) or ( axis == (1,) - and x.flags.f_contiguous - and 32 <= x.shape[0] <= 1024 - and x.shape[1] > x.shape[0] + and a.flags.f_contiguous + and 32 <= a.shape[0] <= 1024 + and a.shape[1] > a.shape[0] ) ) ): @@ -2723,7 +2778,7 @@ def sum( from dpnp.backend.extensions.sycl_ext import _sycl_ext_impl - input = x + input = a if axis == (1,): input = input.T input = dpnp.get_usm_ndarray(input) @@ -2755,7 +2810,7 @@ def sum( return result y = dpt.sum( - dpnp.get_usm_ndarray(x), axis=axis, dtype=dtype, keepdims=keepdims + dpnp.get_usm_ndarray(a), axis=axis, dtype=dtype, keepdims=keepdims ) result = dpnp_array._create_from_usm_ndarray(y) return dpnp.get_result_array(result, out, casting="same_kind") diff --git a/dpnp/dpnp_iface_nanfunctions.py b/dpnp/dpnp_iface_nanfunctions.py index a16583fa0c9..ab12eacf4c3 100644 --- a/dpnp/dpnp_iface_nanfunctions.py +++ b/dpnp/dpnp_iface_nanfunctions.py @@ -52,8 +52,10 @@ "nancumprod", "nancumsum", "nanmax", + "nanmean", "nanmin", "nanprod", + "nanstd", "nansum", "nanvar", ] @@ -405,6 +407,122 @@ def nanmax(a, axis=None, out=None, keepdims=False, initial=None, where=True): return res +def nanmean(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True): + """ + Compute the arithmetic mean along the specified axis, ignoring NaNs. + + For full documentation refer to :obj:`numpy.nanmean`. + + Parameters + ---------- + a : {dpnp.ndarray, usm_ndarray}: + Input array. + axis : int or tuple of ints, optional + Axis or axes along which the arithmetic means must be computed. If + a tuple of unique integers, the means are computed over multiple + axes. If ``None``, the mean is computed over the entire array. + Default: ``None``. + dtype : dtype, optional + Type to use in computing the mean. By default, if `a` has a + floating-point data type, the returned array will have + the same data type as `a`. + If `a` has a boolean or integral data type, the returned array + will have the default floating point data type for the device + where input array `a` is allocated. + out : {dpnp.ndarray, usm_ndarray}, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output but the type (of the calculated + values) will be cast if necessary. Default: ``None``. + keepdims : bool, optional + If ``True``, the reduced axes (dimensions) are included in the result + as singleton dimensions, so that the returned array remains + compatible with the input array according to Array Broadcasting + rules. Otherwise, if ``False``, the reduced axes are not included in + the returned array. Default: ``False``. + + Returns + ------- + out : dpnp.ndarray + An array containing the arithmetic means along the specified axis(axes). 
+ If the input is a zero-size array, an array containing NaN values is returned. + In addition, NaN is returned for slices that contain only NaNs. + + Limitations + ----------- + Parameter `where` is only supported with its default value. + Otherwise ``NotImplementedError`` exception will be raised. + + See Also + -------- + :obj:`dpnp.average` : Weighted average. + :obj:`dpnp.mean` : Compute the arithmetic mean along the specified axis. + :obj:`dpnp.var` : Compute the variance along the specified axis. + :obj:`dpnp.nanvar` : Compute the variance along the specified axis, + while ignoring NaNs. + :obj:`dpnp.std` : Compute the standard deviation along the specified axis. + :obj:`dpnp.nanstd` : Compute the standard deviation along the specified axis, + while ignoring NaNs. + + Examples + -------- + >>> import dpnp as np + >>> a = np.array([[1, np.nan], [3, 4]]) + >>> np.nanmean(a) + array(2.6666666666666665) + >>> np.nanmean(a, axis=0) + array([2., 4.]) + >>> np.nanmean(a, axis=1) + array([1., 3.5]) # may vary + + """ + + if where is not True: + raise NotImplementedError( + "where keyword argument is only supported with its default value." + ) + else: + arr, mask = _replace_nan(a, 0) + if mask is None: + return dpnp.mean( + arr, + axis=axis, + dtype=dtype, + out=out, + keepdims=keepdims, + where=where, + ) + + if dtype is not None: + dtype = dpnp.dtype(dtype) + if not dpnp.issubdtype(dtype, dpnp.inexact): + raise TypeError( + "If input is inexact, then dtype must be inexact." + ) + if out is not None: + dpnp.check_supported_arrays_type(out) + if not dpnp.issubdtype(out.dtype, dpnp.inexact): + raise TypeError( + "If input is inexact, then out must be inexact." + ) + + cnt_dtype = a.real.dtype if dtype is None else dtype + cnt = dpnp.sum( + ~mask, axis=axis, dtype=cnt_dtype, keepdims=keepdims, where=where + ) + var_dtype = a.dtype if dtype is None else dtype + avg = dpnp.sum( + arr, + axis=axis, + dtype=var_dtype, + out=out, + keepdims=keepdims, + where=where, + ) + dpnp.divide(avg, cnt, out=avg) + + return avg + + def nanmin(a, axis=None, out=None, keepdims=False, initial=None, where=True): """ Return the minimum of an array or minimum along an axis, ignoring any NaNs. @@ -548,8 +666,7 @@ def nanprod( """ - a, mask = _replace_nan(a, 1) - + a, _ = _replace_nan(a, 1) return dpnp.prod( a, axis=axis, @@ -561,36 +678,224 @@ def nanprod( ) -def nansum(x1, **kwargs): +def nansum( + a, + /, + *, + axis=None, + dtype=None, + keepdims=False, + out=None, + initial=0, + where=True, +): """ - Calculate sum() function treating 'Not a Numbers' (NaN) as zero. + Return the sum of array elements over a given axis treating Not a Numbers (NaNs) as zero. For full documentation refer to :obj:`numpy.nansum`. + Parameters + ---------- + a : {dpnp.ndarray, usm_ndarray}: + Input array. + axis : int or tuple of ints, optional + Axis or axes along which sums must be computed. If a tuple + of unique integers, sums are computed over multiple axes. + If ``None``, the sum is computed over the entire array. + Default: ``None``. + dtype : dtype, optional + Data type of the returned array. If ``None``, the default data + type is inferred from the "kind" of the input array data type. + * If `a` has a real-valued floating-point data type, + the returned array will have the default real-valued + floating-point data type for the device where input + array `a` is allocated. + * If `a` has signed integral data type, the returned array + will have the default signed integral type for the device + where input array `a` is allocated. 
+ * If `a` has unsigned integral data type, the returned array + will have the default unsigned integral type for the device + where input array `a` is allocated. + * If `a` has a complex-valued floating-point data type, + the returned array will have the default complex-valued + floating-pointer data type for the device where input + array `a` is allocated. + * If `a` has a boolean data type, the returned array will + have the default signed integral type for the device + where input array `a` is allocated. + If the data type (either specified or resolved) differs from the + data type of `a`, the input array elements are cast to the + specified data type before computing the sum. + Default: ``None``. + out : {dpnp.ndarray, usm_ndarray}, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output but the type (of the calculated + values) will be cast if necessary. Default: ``None``. + keepdims : bool, optional + If ``True``, the reduced axes (dimensions) are included in the result + as singleton dimensions, so that the returned array remains + compatible with the input array according to Array Broadcasting + rules. Otherwise, if ``False``, the reduced axes are not included in + the returned array. Default: ``False``. + + Returns + ------- + out : dpnp.ndarray + An array containing the sums. If the sum is computed over the + entire array, a zero-dimensional array is returned. The returned + array has the data type as described in the `dtype` parameter + description above. Zero is returned for slices that are all-NaN + or empty. + Limitations ----------- - Parameter `x1` is supported as :class:`dpnp.ndarray`. - Keyword argument `kwargs` is currently unsupported. - Otherwise the function will be executed sequentially on CPU. - Input array data types are limited by supported DPNP :ref:`Data types`. + Parameters `initial` and `where` are supported with their default values. + Otherwise ``NotImplementedError`` exception will be raised. + + See Also + -------- + :obj:`dpnp.sum` : Sum across array propagating NaNs. + :obj:`dpnp.isnan` : Show which elements are NaN. + :obj:`dpnp.isfinite` : Show which elements are not NaN or +/-inf. + + Notes + ----- + If both positive and negative infinity are present, the sum will be Not + A Number (NaN). Examples -------- >>> import dpnp as np - >>> np.nansum(np.array([1, 2])) - 3 - >>> np.nansum(np.array([[1, 2], [3, 4]])) - 10 + >>> np.nansum(np.array([1])) + array(1) + >>> np.nansum(np.array([1, np.nan])) + array(1.) + >>> a = np.array([[1, 1], [1, np.nan]]) + >>> np.nansum(a) + array(3.) + >>> np.nansum(a, axis=0) + array([2., 1.]) + >>> np.nansum(np.array([1, np.nan, np.inf])) + array(inf) + >>> np.nansum(np.array([1, np.nan, np.NINF])) + array(-inf) + >>> np.nansum(np.array([1, np.nan, np.inf, -np.inf])) # both +/- infinity present + array(nan) """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) - if x1_desc and not kwargs: - result_obj = dpnp_nansum(x1_desc).get_pyobj() - result = dpnp.convert_single_elem_array_to_scalar(result_obj) - return result + a, _ = _replace_nan(a, 0) + return dpnp.sum( + a, + axis=axis, + dtype=dtype, + out=out, + keepdims=keepdims, + initial=initial, + where=where, + ) - return call_origin(numpy.nansum, x1, **kwargs) + +def nanstd( + a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True +): + """ + Compute the standard deviation along the specified axis, while ignoring NaNs. 
+ + For full documentation refer to :obj:`numpy.nanstd`. + + Parameters + ---------- + a : {dpnp.ndarray, usm_ndarray}: + Input array. + axis : int or tuple of ints, optional + Axis or axes along which the standard deviations must be computed. + If a tuple of unique integers is given, the standard deviations + are computed over multiple axes. If ``None``, the standard deviation + is computed over the entire array. + Default: ``None``. + dtype : dtype, optional + Type to use in computing the standard deviation. By default, + if `a` has a floating-point data type, the returned array + will have the same data type as `a`. + If `a` has a boolean or integral data type, the returned array + will have the default floating point data type for the device + where input array `a` is allocated. + out : {dpnp.ndarray, usm_ndarray}, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output but the type (of the calculated + values) will be cast if necessary. + ddof : {int, float}, optional + Means Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` the number of non-NaN elements. + Default: `0.0`. + keepdims : bool, optional + If ``True``, the reduced axes (dimensions) are included in the result + as singleton dimensions, so that the returned array remains + compatible with the input array according to Array Broadcasting + rules. Otherwise, if ``False``, the reduced axes are not included in + the returned array. Default: ``False``. + + Returns + ------- + out : dpnp.ndarray + An array containing the standard deviations. If the standard + deviation was computed over the entire array, a zero-dimensional + array is returned. If ddof is >= the number of non-NaN elements + in a slice or the slice contains only NaNs, then the result for + that slice is NaN. + + Limitations + ----------- + Parameters `where` is only supported with its default value. + Otherwise ``NotImplementedError`` exception will be raised. + + Notes + ----- + Note that, for complex numbers, the absolute value is taken before squaring, + so that the result is always real and nonnegative. + + See Also + -------- + :obj:`dpnp.var` : Compute the variance along the specified axis. + :obj:`dpnp.mean` : Compute the arithmetic mean along the specified axis. + :obj:`dpnp.std` : Compute the standard deviation along the specified axis. + :obj:`dpnp.nanmean` : Compute the arithmetic mean along the specified axis, ignoring NaNs. + :obj:`dpnp.nanvar` : Compute the variance along the specified axis, while ignoring NaNs. + + Examples + -------- + >>> import dpnp as np + >>> a = np.array([[1, np.nan], [3, 4]]) + >>> np.nanstd(a) + array(1.247219128924647) + >>> np.nanstd(a, axis=0) + array([1., 0.]) + >>> np.nanstd(a, axis=1) + array([0., 0.5]) # may vary + + """ + + if where is not True: + raise NotImplementedError( + "where keyword argument is only supported with its default value." + ) + elif not isinstance(ddof, (int, float)): + raise TypeError( + "An integer or float is required, but got {}".format(type(ddof)) + ) + else: + res = nanvar( + a, + axis=axis, + dtype=dtype, + out=out, + ddof=ddof, + keepdims=keepdims, + where=where, + ) + dpnp.sqrt(res, out=res) + return res def nanvar( @@ -609,19 +914,21 @@ def nanvar( axis or axes along which the variances must be computed. If a tuple of unique integers is given, the variances are computed over multiple axes. If ``None``, the variance is computed over the entire array. - Default: `None`. 
+ Default: ``None``. dtype : dtype, optional - Type to use in computing the standard deviation. For arrays of - integer type the default real-valued floating-point data type is used, - for arrays of float types it is the same as the array type. + Type to use in computing the variance. By default, if `a` has a + floating-point data type, the returned array will have + the same data type as `a`. + If `a` has a boolean or integral data type, the returned array + will have the default floating point data type for the device + where input array `a` is allocated. out : {dpnp_array, usm_ndarray}, optional Alternative output array in which to place the result. It must have the same shape as the expected output but the type (of the calculated values) will be cast if necessary. ddof : {int, float}, optional Means Delta Degrees of Freedom. The divisor used in calculations - is ``N - ddof``, where ``N`` corresponds to the total - number of elements over which the variance is calculated. + is ``N - ddof``, where ``N`` represents the number of non-NaN elements. Default: `0.0`. keepdims : bool, optional If ``True``, the reduced axes (dimensions) are included in the result @@ -633,20 +940,20 @@ def nanvar( Returns ------- out : dpnp.ndarray - an array containing the variances. If the variance was computed + An array containing the variances. If the variance was computed over the entire array, a zero-dimensional array is returned. - - If `a` has a real-valued floating-point data type, the returned - array will have the same data type as `a`. - If `a` has a boolean or integral data type, the returned array - will have the default floating point data type for the device - where input array `a` is allocated. + If ddof is >= the number of non-NaN elements in a slice or the + slice contains only NaNs, then the result for that slice is NaN. Limitations ----------- Parameters `where` is only supported with its default value. Otherwise ``NotImplementedError`` exception will be raised. - Input array data types are limited by real valued data types. + + Notes + ----- + Note that, for complex numbers, the absolute value is taken before squaring, + so that the result is always real and nonnegative. See Also -------- @@ -670,7 +977,6 @@ def nanvar( """ - dpnp.check_supported_arrays_type(a) if where is not True: raise NotImplementedError( "where keyword argument is only supported with its default value." @@ -694,7 +1000,7 @@ def nanvar( if dtype is not None: dtype = dpnp.dtype(dtype) - if not issubclass(dtype.type, dpnp.inexact): + if not dpnp.issubdtype(dtype, dpnp.inexact): raise TypeError( "If input is inexact, then dtype must be inexact." ) @@ -710,9 +1016,7 @@ def nanvar( cnt = dpnp.sum( ~mask, axis=axis, dtype=var_dtype, keepdims=True, where=where ) - avg = dpnp.sum( - arr, axis=axis, dtype=var_dtype, keepdims=True, where=where - ) + avg = dpnp.sum(arr, axis=axis, dtype=dtype, keepdims=True, where=where) avg = dpnp.divide(avg, cnt, out=avg) # Compute squared deviation from mean. diff --git a/dpnp/dpnp_iface_statistics.py b/dpnp/dpnp_iface_statistics.py index 4e4201c97cd..24d652ebf51 100644 --- a/dpnp/dpnp_iface_statistics.py +++ b/dpnp/dpnp_iface_statistics.py @@ -143,51 +143,160 @@ def amin(a, axis=None, out=None, keepdims=False, initial=None, where=True): ) -def average(x1, axis=None, weights=None, returned=False): +def average(a, axis=None, weights=None, returned=False, *, keepdims=False): """ Compute the weighted average along the specified axis. For full documentation refer to :obj:`numpy.average`. 
- Limitations - ----------- - Input array is supported as :obj:`dpnp.ndarray`. - Parameter `axis` is supported only with default value ``None``. - Parameter `weights` is supported only with default value ``None``. - Parameter `returned` is supported only with default value ``False``. - Otherwise the function will be executed sequentially on CPU. - Input array data types are limited by supported DPNP :ref:`Data types`. + Parameters + ---------- + a : {dpnp.ndarray, usm_ndarray}: + Input array. + axis : int or tuple of ints, optional + Axis or axes along which the averages must be computed. If + a tuple of unique integers, the averages are computed over multiple + axes. If ``None``, the average is computed over the entire array. + Default: ``None``. + weights : array_like, optional + An array of weights associated with the values in `a`. Each value in + `a` contributes to the average according to its associated weight. + The weights array can either be 1-D (in which case its length must be + the size of `a` along the given axis) or of the same shape as `a`. + If `weights=None`, then all data in `a` are assumed to have a + weight equal to one. The 1-D calculation is:: + + avg = sum(a * weights) / sum(weights) + + The only constraint on `weights` is that `sum(weights)` must not be 0. + returned : bool, optional + Default is ``False``. If ``True``, the tuple (`average`, `sum_of_weights`) + is returned, otherwise only the average is returned. + If `weights=None`, `sum_of_weights` is equivalent to the number of + elements over which the average is taken. + keepdims : bool, optional + If ``True``, the reduced axes (dimensions) are included in the result + as singleton dimensions, so that the returned array remains + compatible with the input array according to Array Broadcasting + rules. Otherwise, if ``False``, the reduced axes are not included in + the returned array. Default: ``False``. + + Returns + ------- + out, [sum_of_weights] : dpnp.ndarray, dpnp.ndarray + Return the average along the specified axis. When `returned` is ``True``, + return a tuple with the average as the first element and the sum of the + weights as the second element. `sum_of_weights` is of the same type as + `out`. The result dtype follows a general pattern. If `weights` is + ``None``, the result dtype will be that of `a` , or default floating point + data type for the device where input array `a` is allocated. Otherwise, + if `weights` is not ``None`` and `a` is non-integral, the result type + will be the type of lowest precision capable of representing values of + both `a` and `weights`. If `a` happens to be integral, the previous rules + still applies but the result dtype will at least be default floating point + data type for the device where input array `a` is allocated. See Also -------- :obj:`dpnp.mean` : Compute the arithmetic mean along the specified axis. + :obj:`dpnp.sum` : Sum of array elements over a given axis. Examples -------- >>> import dpnp as np >>> data = np.arange(1, 5) - >>> [i for i in data] - [1, 2, 3, 4] + >>> data + array([1, 2, 3, 4]) >>> np.average(data) - 2.5 + array(2.5) + >>> np.average(np.arange(1, 11), weights=np.arange(10, 0, -1)) + array(4.0) + + >>> data = np.arange(6).reshape((3, 2)) + >>> data + array([[0, 1], + [2, 3], + [4, 5]]) + >>> np.average(data, axis=1, weights=[1./4, 3./4]) + array([0.75, 2.75, 4.75]) + >>> np.average(data, weights=[1./4, 3./4]) + TypeError: Axis must be specified when shapes of a and weights differ. 
+ + With ``keepdims=True``, the following result has shape (3, 1). + + >>> np.average(data, axis=1, keepdims=True) + array([[0.5], + [2.5], + [4.5]]) + + >>> a = np.ones(5, dtype=np.float64) + >>> w = np.ones(5, dtype=np.complex64) + >>> avg = np.average(a, weights=w) + >>> print(avg.dtype) + complex128 """ - x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False) - if x1_desc: - if axis is not None: - pass - elif weights is not None: - pass - elif returned: - pass + dpnp.check_supported_arrays_type(a) + if weights is None: + avg = dpnp.mean(a, axis=axis, keepdims=keepdims) + scl = dpnp.asanyarray( + avg.dtype.type(a.size / avg.size), + usm_type=a.usm_type, + sycl_queue=a.sycl_queue, + ) + else: + if not isinstance(weights, (dpnp_array, dpt.usm_ndarray)): + wgt = dpnp.asanyarray( + weights, usm_type=a.usm_type, sycl_queue=a.sycl_queue + ) else: - result_obj = dpnp_average(x1_desc) - result = dpnp.convert_single_elem_array_to_scalar(result_obj) + get_usm_allocations([a, weights]) + wgt = weights - return result + if not dpnp.issubdtype(a.dtype, dpnp.inexact): + default_dtype = dpnp.default_float_type(a.device) + result_dtype = dpnp.result_type(a.dtype, wgt.dtype, default_dtype) + else: + result_dtype = dpnp.result_type(a.dtype, wgt.dtype) - return call_origin(numpy.average, x1, axis, weights, returned) + # Sanity checks + if a.shape != wgt.shape: + if axis is None: + raise TypeError( + "Axis must be specified when shapes of input array and weights differ." + ) + if wgt.ndim != 1: + raise TypeError( + "1D weights expected when shapes of input array and weights differ." + ) + if wgt.shape[0] != a.shape[axis]: + raise ValueError( + "Length of weights not compatible with specified axis." + ) + + # setup wgt to broadcast along axis + wgt = dpnp.broadcast_to(wgt, (a.ndim - 1) * (1,) + wgt.shape) + wgt = wgt.swapaxes(-1, axis) + + scl = wgt.sum(axis=axis, dtype=result_dtype, keepdims=keepdims) + if dpnp.any(scl == 0.0): + raise ZeroDivisionError("Weights sum to zero, can't be normalized") + + # result_datatype + avg = ( + dpnp.multiply(a, wgt).sum( + axis=axis, dtype=result_dtype, keepdims=keepdims + ) + / scl + ) + + if returned: + if scl.shape != avg.shape: + scl = dpnp.broadcast_to(scl, avg.shape).copy() + return avg, scl + else: + return avg def bincount(x1, weights=None, minlength=0): @@ -462,19 +571,43 @@ def mean(a, /, axis=None, dtype=None, out=None, keepdims=False, *, where=True): For full documentation refer to :obj:`numpy.mean`. + Parameters + ---------- + a : {dpnp.ndarray, usm_ndarray}: + Input array. + axis : int or tuple of ints, optional + Axis or axes along which the arithmetic means must be computed. If + a tuple of unique integers, the means are computed over multiple + axes. If ``None``, the mean is computed over the entire array. + Default: ``None``. + dtype : dtype, optional + Type to use in computing the mean. By default, if `a` has a + floating-point data type, the returned array will have + the same data type as `a`. + If `a` has a boolean or integral data type, the returned array + will have the default floating point data type for the device + where input array `a` is allocated. + out : {dpnp.ndarray, usm_ndarray}, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output but the type (of the calculated + values) will be cast if necessary. Default: ``None``. 
+ keepdims : bool, optional + If ``True``, the reduced axes (dimensions) are included in the result + as singleton dimensions, so that the returned array remains + compatible with the input array according to Array Broadcasting + rules. Otherwise, if ``False``, the reduced axes are not included in + the returned array. Default: ``False``. + Returns ------- out : dpnp.ndarray - an array containing the mean values of the elements along the specified axis(axes). + An array containing the arithmetic means along the specified axis(axes). If the input is a zero-size array, an array containing NaN values is returned. Limitations ----------- - Parameters `a` is supported as either :class:`dpnp.ndarray` - or :class:`dpctl.tensor.usm_ndarray`. Parameter `where` is only supported with its default value. Otherwise ``NotImplementedError`` exception will be raised. - Input array data types are limited by supported DPNP :ref:`Data types`. See Also -------- @@ -512,7 +645,7 @@ def mean(a, /, axis=None, dtype=None, out=None, keepdims=False, *, where=True): ) result = result.astype(dtype) if dtype is not None else result - return dpnp.get_result_array(result, out) + return dpnp.get_result_array(result, out, casting="same_kind") def median(x1, axis=None, out=None, overwrite_input=False, keepdims=False): @@ -707,16 +840,20 @@ def std( Parameters ---------- a : {dpnp_array, usm_ndarray}: - nput array. + Input array. axis : int or tuple of ints, optional - Axis or axes along which the variances must be computed. If a tuple - of unique integers is given, the variances are computed over multiple axes. - If ``None``, the variance is computed over the entire array. - Default: `None`. + Axis or axes along which the standard deviations must be computed. + If a tuple of unique integers is given, the standard deviations + are computed over multiple axes. If ``None``, the standard deviation + is computed over the entire array. + Default: ``None``. dtype : dtype, optional - Type to use in computing the standard deviation. For arrays of - integer type the default real-valued floating-point data type is used, - for arrays of float types it is the same as the array type. + Type to use in computing the standard deviation. By default, + if `a` has a floating-point data type, the returned array + will have the same data type as `a`. + If `a` has a boolean or integral data type, the returned array + will have the default floating point data type for the device + where input array `a` is allocated. out : {dpnp_array, usm_ndarray}, optional Alternative output array in which to place the result. It must have the same shape as the expected output but the type (of the calculated @@ -724,7 +861,7 @@ def std( ddof : {int, float}, optional Means Delta Degrees of Freedom. The divisor used in calculations is ``N - ddof``, where ``N`` corresponds to the total - number of elements over which the variance is calculated. + number of elements over which the standard deviation is calculated. Default: `0.0`. keepdims : bool, optional If ``True``, the reduced axes (dimensions) are included in the result @@ -736,21 +873,14 @@ def std( Returns ------- out : dpnp.ndarray - an array containing the standard deviations. If the standard + An array containing the standard deviations. If the standard deviation was computed over the entire array, a zero-dimensional array is returned. - If `a` has a real-valued floating-point data type, the returned - array will have the same data type as `a`. 
- If `a` has a boolean or integral data type, the returned array - will have the default floating point data type for the device - where input array `a` is allocated. - Limitations ----------- Parameters `where` is only supported with its default value. Otherwise ``NotImplementedError`` exception will be raised. - Input array data types are limited by supported DPNP :ref:`Data types`. Notes ----- @@ -834,11 +964,14 @@ def var( axis or axes along which the variances must be computed. If a tuple of unique integers is given, the variances are computed over multiple axes. If ``None``, the variance is computed over the entire array. - Default: `None`. + Default: ``None``. dtype : dtype, optional - Type to use in computing the variance. For arrays of integer type - the default real-valued floating-point data type is used, - for arrays of float types it is the same as the array type. + Type to use in computing the variance. By default, if `a` has a + floating-point data type, the returned array will have + the same data type as `a`. + If `a` has a boolean or integral data type, the returned array + will have the default floating point data type for the device + where input array `a` is allocated. out : {dpnp_array, usm_ndarray}, optional Alternative output array in which to place the result. It must have the same shape as the expected output but the type (of the calculated @@ -858,20 +991,13 @@ def var( Returns ------- out : dpnp.ndarray - an array containing the variances. If the variance was computed + An array containing the variances. If the variance was computed over the entire array, a zero-dimensional array is returned. - If `a` has a real-valued floating-point data type, the returned - array will have the same data type as `a`. - If `a` has a boolean or integral data type, the returned array - will have the default floating point data type for the device - where input array `a` is allocated. - Limitations ----------- Parameters `where` is only supported with its default value. Otherwise ``NotImplementedError`` exception will be raised. - Input array data types are limited by supported DPNP :ref:`Data types`. 
Notes ----- diff --git a/tests/skipped_tests.tbl b/tests/skipped_tests.tbl index 15572947eee..e4c88a7298a 100644 --- a/tests/skipped_tests.tbl +++ b/tests/skipped_tests.tbl @@ -561,13 +561,6 @@ tests/third_party/cupy/math_tests/test_sumprod.py::TestCumsum_param_1_{axis=1}:: tests/third_party/cupy/math_tests/test_sumprod.py::TestCumsum_param_2_{axis=2}::test_cumsum_arraylike tests/third_party/cupy/math_tests/test_sumprod.py::TestCumsum_param_2_{axis=2}::test_cumsum_numpy_array -tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodAxes_param_0_{axis=(1, 3), shape=(2, 3, 4, 5)}::test_nansum_axes -tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodAxes_param_1_{axis=(1, 3), shape=(20, 30, 40, 50)}::test_nansum_axes -tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodAxes_param_2_{axis=(0, 2, 3), shape=(2, 3, 4, 5)}::test_nansum_axes -tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodAxes_param_3_{axis=(0, 2, 3), shape=(20, 30, 40, 50)}::test_nansum_axes -tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodExtra_param_0_{shape=(2, 3, 4)}::test_nansum_out -tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodExtra_param_1_{shape=(20, 30, 40)}::test_nansum_out - tests/third_party/cupy/math_tests/test_trigonometric.py::TestUnwrap::test_unwrap_1dim tests/third_party/cupy/math_tests/test_trigonometric.py::TestUnwrap::test_unwrap_1dim_with_discont tests/third_party/cupy/math_tests/test_trigonometric.py::TestUnwrap::test_unwrap_1dim_with_period @@ -953,63 +946,7 @@ tests/third_party/cupy/statistics_tests/test_histogram.py::TestHistogram::test_h tests/third_party/cupy/statistics_tests/test_histogram.py::TestHistogram::test_histogram_range_with_density tests/third_party/cupy/statistics_tests/test_histogram.py::TestHistogram::test_histogram_range_with_weights_and_density tests/third_party/cupy/statistics_tests/test_histogram.py::TestHistogram::test_histogram_same_value - tests/third_party/cupy/statistics_tests/test_histogram.py::TestHistogram::test_histogram_weights_mismatch -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMeanAdditional::test_nanmean_all_nan -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMeanAdditional::test_nanmean_float16 -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMeanAdditional::test_nanmean_huge -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMeanAdditional::test_nanmean_out -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_0_{axis=None, keepdims=True, shape=(3, 4)}::test_nanmean_with_nan_float -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_0_{axis=None, keepdims=True, shape=(3, 4)}::test_nanmean_without_nan -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_10_{axis=1, keepdims=False, shape=(3, 4)}::test_nanmean_with_nan_float -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_10_{axis=1, keepdims=False, shape=(3, 4)}::test_nanmean_without_nan -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_11_{axis=1, keepdims=False, shape=(30, 40, 50)}::test_nanmean_with_nan_float -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_11_{axis=1, keepdims=False, shape=(30, 40, 50)}::test_nanmean_without_nan -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_1_{axis=None, keepdims=True, shape=(30, 40, 50)}::test_nanmean_with_nan_float 
-tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_1_{axis=None, keepdims=True, shape=(30, 40, 50)}::test_nanmean_without_nan -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_2_{axis=None, keepdims=False, shape=(3, 4)}::test_nanmean_with_nan_float -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_2_{axis=None, keepdims=False, shape=(3, 4)}::test_nanmean_without_nan -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_3_{axis=None, keepdims=False, shape=(30, 40, 50)}::test_nanmean_with_nan_float -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_3_{axis=None, keepdims=False, shape=(30, 40, 50)}::test_nanmean_without_nan -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_4_{axis=0, keepdims=True, shape=(3, 4)}::test_nanmean_with_nan_float -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_4_{axis=0, keepdims=True, shape=(3, 4)}::test_nanmean_without_nan -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_5_{axis=0, keepdims=True, shape=(30, 40, 50)}::test_nanmean_with_nan_float -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_5_{axis=0, keepdims=True, shape=(30, 40, 50)}::test_nanmean_without_nan -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_6_{axis=0, keepdims=False, shape=(3, 4)}::test_nanmean_with_nan_float -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_6_{axis=0, keepdims=False, shape=(3, 4)}::test_nanmean_without_nan -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_7_{axis=0, keepdims=False, shape=(30, 40, 50)}::test_nanmean_with_nan_float -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_7_{axis=0, keepdims=False, shape=(30, 40, 50)}::test_nanmean_without_nan -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_8_{axis=1, keepdims=True, shape=(3, 4)}::test_nanmean_with_nan_float -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_8_{axis=1, keepdims=True, shape=(3, 4)}::test_nanmean_without_nan -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_9_{axis=1, keepdims=True, shape=(30, 40, 50)}::test_nanmean_with_nan_float -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_9_{axis=1, keepdims=True, shape=(30, 40, 50)}::test_nanmean_without_nan -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStdAdditional::test_nanstd_float16 -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStdAdditional::test_nanstd_huge -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStdAdditional::test_nanstd_out -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_0_{axis=None, ddof=0, keepdims=True, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_10_{axis=0, ddof=0, keepdims=False, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_11_{axis=0, ddof=0, keepdims=False, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_12_{axis=0, ddof=1, keepdims=True, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_13_{axis=0, ddof=1, keepdims=True, shape=(4, 3, 5)}::test_nanstd 
-tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_14_{axis=0, ddof=1, keepdims=False, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_15_{axis=0, ddof=1, keepdims=False, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_16_{axis=1, ddof=0, keepdims=True, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_17_{axis=1, ddof=0, keepdims=True, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_18_{axis=1, ddof=0, keepdims=False, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_19_{axis=1, ddof=0, keepdims=False, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_1_{axis=None, ddof=0, keepdims=True, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_20_{axis=1, ddof=1, keepdims=True, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_21_{axis=1, ddof=1, keepdims=True, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_22_{axis=1, ddof=1, keepdims=False, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_23_{axis=1, ddof=1, keepdims=False, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_2_{axis=None, ddof=0, keepdims=False, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_3_{axis=None, ddof=0, keepdims=False, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_4_{axis=None, ddof=1, keepdims=True, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_5_{axis=None, ddof=1, keepdims=True, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_6_{axis=None, ddof=1, keepdims=False, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_7_{axis=None, ddof=1, keepdims=False, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_8_{axis=0, ddof=0, keepdims=True, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_9_{axis=0, ddof=0, keepdims=True, shape=(4, 3, 5)}::test_nanstd tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_bad_q[linear] tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_bad_q[lower] diff --git a/tests/skipped_tests_gpu.tbl b/tests/skipped_tests_gpu.tbl index dae91d41ac3..1ef5b613243 100644 --- a/tests/skipped_tests_gpu.tbl +++ b/tests/skipped_tests_gpu.tbl @@ -665,13 +665,6 @@ tests/third_party/cupy/math_tests/test_sumprod.py::TestCumsum_param_1_{axis=1}:: tests/third_party/cupy/math_tests/test_sumprod.py::TestCumsum_param_2_{axis=2}::test_cumsum_arraylike tests/third_party/cupy/math_tests/test_sumprod.py::TestCumsum_param_2_{axis=2}::test_cumsum_numpy_array -tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodAxes_param_0_{axis=(1, 3), shape=(2, 3, 4, 5)}::test_nansum_axes 
-tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodAxes_param_1_{axis=(1, 3), shape=(20, 30, 40, 50)}::test_nansum_axes -tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodAxes_param_2_{axis=(0, 2, 3), shape=(2, 3, 4, 5)}::test_nansum_axes -tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodAxes_param_3_{axis=(0, 2, 3), shape=(20, 30, 40, 50)}::test_nansum_axes -tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodExtra_param_0_{shape=(2, 3, 4)}::test_nansum_out -tests/third_party/cupy/math_tests/test_sumprod.py::TestNansumNanprodExtra_param_1_{shape=(20, 30, 40)}::test_nansum_out - tests/third_party/cupy/math_tests/test_trigonometric.py::TestUnwrap::test_unwrap_1dim tests/third_party/cupy/math_tests/test_trigonometric.py::TestUnwrap::test_unwrap_1dim_with_discont tests/third_party/cupy/math_tests/test_trigonometric.py::TestUnwrap::test_unwrap_1dim_with_period @@ -1015,63 +1008,7 @@ tests/third_party/cupy/statistics_tests/test_histogram.py::TestHistogram::test_h tests/third_party/cupy/statistics_tests/test_histogram.py::TestHistogram::test_histogram_range_with_density tests/third_party/cupy/statistics_tests/test_histogram.py::TestHistogram::test_histogram_range_with_weights_and_density tests/third_party/cupy/statistics_tests/test_histogram.py::TestHistogram::test_histogram_same_value - tests/third_party/cupy/statistics_tests/test_histogram.py::TestHistogram::test_histogram_weights_mismatch -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMeanAdditional::test_nanmean_all_nan -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMeanAdditional::test_nanmean_float16 -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMeanAdditional::test_nanmean_huge -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMeanAdditional::test_nanmean_out -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_0_{axis=None, keepdims=True, shape=(3, 4)}::test_nanmean_with_nan_float -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_0_{axis=None, keepdims=True, shape=(3, 4)}::test_nanmean_without_nan -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_10_{axis=1, keepdims=False, shape=(3, 4)}::test_nanmean_with_nan_float -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_10_{axis=1, keepdims=False, shape=(3, 4)}::test_nanmean_without_nan -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_11_{axis=1, keepdims=False, shape=(30, 40, 50)}::test_nanmean_with_nan_float -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_11_{axis=1, keepdims=False, shape=(30, 40, 50)}::test_nanmean_without_nan -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_1_{axis=None, keepdims=True, shape=(30, 40, 50)}::test_nanmean_with_nan_float -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_1_{axis=None, keepdims=True, shape=(30, 40, 50)}::test_nanmean_without_nan -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_2_{axis=None, keepdims=False, shape=(3, 4)}::test_nanmean_with_nan_float -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_2_{axis=None, keepdims=False, shape=(3, 4)}::test_nanmean_without_nan -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_3_{axis=None, keepdims=False, shape=(30, 40, 50)}::test_nanmean_with_nan_float 
-tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_3_{axis=None, keepdims=False, shape=(30, 40, 50)}::test_nanmean_without_nan -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_4_{axis=0, keepdims=True, shape=(3, 4)}::test_nanmean_with_nan_float -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_4_{axis=0, keepdims=True, shape=(3, 4)}::test_nanmean_without_nan -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_5_{axis=0, keepdims=True, shape=(30, 40, 50)}::test_nanmean_with_nan_float -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_5_{axis=0, keepdims=True, shape=(30, 40, 50)}::test_nanmean_without_nan -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_6_{axis=0, keepdims=False, shape=(3, 4)}::test_nanmean_with_nan_float -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_6_{axis=0, keepdims=False, shape=(3, 4)}::test_nanmean_without_nan -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_7_{axis=0, keepdims=False, shape=(30, 40, 50)}::test_nanmean_with_nan_float -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_7_{axis=0, keepdims=False, shape=(30, 40, 50)}::test_nanmean_without_nan -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_8_{axis=1, keepdims=True, shape=(3, 4)}::test_nanmean_with_nan_float -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_8_{axis=1, keepdims=True, shape=(3, 4)}::test_nanmean_without_nan -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_9_{axis=1, keepdims=True, shape=(30, 40, 50)}::test_nanmean_with_nan_float -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanMean_param_9_{axis=1, keepdims=True, shape=(30, 40, 50)}::test_nanmean_without_nan -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStdAdditional::test_nanstd_float16 -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStdAdditional::test_nanstd_huge -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStdAdditional::test_nanstd_out -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_0_{axis=None, ddof=0, keepdims=True, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_10_{axis=0, ddof=0, keepdims=False, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_11_{axis=0, ddof=0, keepdims=False, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_12_{axis=0, ddof=1, keepdims=True, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_13_{axis=0, ddof=1, keepdims=True, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_14_{axis=0, ddof=1, keepdims=False, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_15_{axis=0, ddof=1, keepdims=False, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_16_{axis=1, ddof=0, keepdims=True, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_17_{axis=1, ddof=0, keepdims=True, shape=(4, 3, 5)}::test_nanstd 
-tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_18_{axis=1, ddof=0, keepdims=False, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_19_{axis=1, ddof=0, keepdims=False, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_1_{axis=None, ddof=0, keepdims=True, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_20_{axis=1, ddof=1, keepdims=True, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_21_{axis=1, ddof=1, keepdims=True, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_22_{axis=1, ddof=1, keepdims=False, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_23_{axis=1, ddof=1, keepdims=False, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_2_{axis=None, ddof=0, keepdims=False, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_3_{axis=None, ddof=0, keepdims=False, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_4_{axis=None, ddof=1, keepdims=True, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_5_{axis=None, ddof=1, keepdims=True, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_6_{axis=None, ddof=1, keepdims=False, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_7_{axis=None, ddof=1, keepdims=False, shape=(4, 3, 5)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_8_{axis=0, ddof=0, keepdims=True, shape=(3, 4)}::test_nanstd -tests/third_party/cupy/statistics_tests/test_meanvar.py::TestNanVarStd_param_9_{axis=0, ddof=0, keepdims=True, shape=(4, 3, 5)}::test_nanstd tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_bad_q[linear] tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_bad_q[lower] diff --git a/tests/test_arithmetic.py b/tests/test_arithmetic.py index 60dc7a1c9af..6ec18a545f7 100644 --- a/tests/test_arithmetic.py +++ b/tests/test_arithmetic.py @@ -1,10 +1,20 @@ import unittest -import pytest +import numpy +from tests.helper import has_support_aspect64 from tests.third_party.cupy import testing +# Note: numpy.sum() always upcast integers to (u)int64 and float32 to +# float64 for dtype=None. 
`np.sum` does that too for integers, but not for +# float32, so we need to special-case it for these tests +def _get_dtype_kwargs(xp, dtype): + if xp is numpy and dtype == numpy.float32 and has_support_aspect64(): + return {"dtype": numpy.float64} + return {} + + class TestArithmetic(unittest.TestCase): @testing.for_float_dtypes() @testing.numpy_cupy_allclose() @@ -32,7 +42,7 @@ def test_nanprod(self, xp, dtype): @testing.numpy_cupy_allclose() def test_nansum(self, xp, dtype): a = xp.array([-2.5, -1.5, xp.nan, 10.5, 1.5, xp.nan], dtype=dtype) - return xp.nansum(a) + return xp.nansum(a, **_get_dtype_kwargs(xp, a.dtype)) @testing.for_float_dtypes() @testing.numpy_cupy_allclose() diff --git a/tests/test_mathematical.py b/tests/test_mathematical.py index 15ca6090868..c521da5e0cb 100644 --- a/tests/test_mathematical.py +++ b/tests/test_mathematical.py @@ -693,110 +693,108 @@ def test_positive_boolean(): dpnp.positive(dpnp_a) -@pytest.mark.usefixtures("allow_fall_back_on_numpy") -@pytest.mark.parametrize("func", ["prod", "nanprod"]) -@pytest.mark.parametrize("axis", [None, 0, 1, -1, 2, -2, (1, 2), (0, -2)]) -@pytest.mark.parametrize("keepdims", [False, True]) -@pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) -def test_prod_nanprod(func, axis, keepdims, dtype): - a = numpy.arange(1, 13, dtype=dtype).reshape((2, 2, 3)) - if func == "nanprod" and dpnp.issubdtype(a.dtype, dpnp.inexact): - a[:, :, 2] = numpy.nan - ia = dpnp.array(a) - - np_res = getattr(numpy, func)(a, axis=axis, keepdims=keepdims) - dpnp_res = getattr(dpnp, func)(ia, axis=axis, keepdims=keepdims) - - assert dpnp_res.shape == np_res.shape - assert_allclose(dpnp_res, np_res) - - -@pytest.mark.parametrize("axis", [None, 0, 1, -1, 2, -2, (1, 2), (0, -2)]) -def test_prod_zero_size(axis): - a = numpy.empty((2, 3, 0)) - ia = dpnp.array(a) - - np_res = numpy.prod(a, axis=axis) - dpnp_res = dpnp.prod(ia, axis=axis) - assert_dtype_allclose(dpnp_res, np_res) - - -@pytest.mark.parametrize("func", ["prod", "nanprod"]) -@pytest.mark.parametrize("axis", [None, 0, 1, -1]) -@pytest.mark.parametrize("keepdims", [False, True]) -def test_prod_nanprod_bool(func, axis, keepdims): - a = numpy.arange(2, dtype=numpy.bool_) - a = numpy.tile(a, (2, 2)) - ia = dpnp.array(a) - - np_res = getattr(numpy, func)(a, axis=axis, keepdims=keepdims) - dpnp_res = getattr(dpnp, func)(ia, axis=axis, keepdims=keepdims) - assert_dtype_allclose(dpnp_res, np_res) - - -@pytest.mark.usefixtures("allow_fall_back_on_numpy") -@pytest.mark.usefixtures("suppress_complex_warning") -@pytest.mark.usefixtures("suppress_invalid_numpy_warnings") -@pytest.mark.parametrize("func", ["prod", "nanprod"]) -@pytest.mark.parametrize("in_dtype", get_all_dtypes(no_bool=True)) -@pytest.mark.parametrize( - "out_dtype", get_all_dtypes(no_bool=True, no_none=True) -) -def test_prod_nanprod_dtype(func, in_dtype, out_dtype): - a = numpy.arange(1, 13, dtype=in_dtype).reshape((2, 2, 3)) - if func == "nanprod" and dpnp.issubdtype(a.dtype, dpnp.inexact): - a[:, :, 2] = numpy.nan - ia = dpnp.array(a) - - np_res = getattr(numpy, func)(a, dtype=out_dtype) - dpnp_res = getattr(dpnp, func)(ia, dtype=out_dtype) - assert_dtype_allclose(dpnp_res, np_res) - - -@pytest.mark.usefixtures("suppress_overflow_encountered_in_cast_numpy_warnings") -@pytest.mark.parametrize("func", ["prod", "nanprod"]) -def test_prod_nanprod_out(func): - ia = dpnp.arange(1, 7).reshape((2, 3)) - ia = ia.astype(dpnp.default_float_type(ia.device)) - if func == "nanprod": - ia[:, 1] = dpnp.nan - a = dpnp.asnumpy(ia) - - # output is 
dpnp_array - np_res = getattr(numpy, func)(a, axis=0) - dpnp_out = dpnp.empty(np_res.shape, dtype=np_res.dtype) - dpnp_res = getattr(dpnp, func)(ia, axis=0, out=dpnp_out) - assert dpnp_out is dpnp_res - assert_allclose(dpnp_res, np_res) - - # output is usm_ndarray - dpt_out = dpt.empty(np_res.shape, dtype=np_res.dtype) - dpnp_res = getattr(dpnp, func)(ia, axis=0, out=dpt_out) - assert dpt_out is dpnp_res.get_array() - assert_allclose(dpnp_res, np_res) - - # out is a numpy array -> TypeError - dpnp_res = numpy.empty_like(np_res) - with pytest.raises(TypeError): - getattr(dpnp, func)(ia, axis=0, out=dpnp_res) +class TestProd: + @pytest.mark.usefixtures("allow_fall_back_on_numpy") + @pytest.mark.parametrize("func", ["prod", "nanprod"]) + @pytest.mark.parametrize("axis", [None, 0, 1, -1, 2, -2, (1, 2), (0, -2)]) + @pytest.mark.parametrize("keepdims", [False, True]) + @pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) + def test_prod_nanprod(self, func, axis, keepdims, dtype): + a = numpy.arange(1, 13, dtype=dtype).reshape((2, 2, 3)) + if func == "nanprod" and dpnp.issubdtype(a.dtype, dpnp.inexact): + a[:, :, 2] = numpy.nan + ia = dpnp.array(a) + + np_res = getattr(numpy, func)(a, axis=axis, keepdims=keepdims) + dpnp_res = getattr(dpnp, func)(ia, axis=axis, keepdims=keepdims) + + assert dpnp_res.shape == np_res.shape + assert_allclose(dpnp_res, np_res) + + @pytest.mark.parametrize("axis", [None, 0, 1, -1, 2, -2, (1, 2), (0, -2)]) + def test_prod_zero_size(self, axis): + a = numpy.empty((2, 3, 0)) + ia = dpnp.array(a) + + np_res = numpy.prod(a, axis=axis) + dpnp_res = dpnp.prod(ia, axis=axis) + assert_dtype_allclose(dpnp_res, np_res) + + @pytest.mark.parametrize("func", ["prod", "nanprod"]) + @pytest.mark.parametrize("axis", [None, 0, 1, -1]) + @pytest.mark.parametrize("keepdims", [False, True]) + def test_prod_nanprod_bool(self, func, axis, keepdims): + a = numpy.arange(2, dtype=numpy.bool_) + a = numpy.tile(a, (2, 2)) + ia = dpnp.array(a) + + np_res = getattr(numpy, func)(a, axis=axis, keepdims=keepdims) + dpnp_res = getattr(dpnp, func)(ia, axis=axis, keepdims=keepdims) + assert_dtype_allclose(dpnp_res, np_res) - # incorrect shape for out - dpnp_res = dpnp.array(numpy.empty((2, 3))) - with pytest.raises(ValueError): - getattr(dpnp, func)(ia, axis=0, out=dpnp_res) + @pytest.mark.usefixtures("allow_fall_back_on_numpy") + @pytest.mark.usefixtures("suppress_complex_warning") + @pytest.mark.usefixtures("suppress_invalid_numpy_warnings") + @pytest.mark.parametrize("func", ["prod", "nanprod"]) + @pytest.mark.parametrize("in_dtype", get_all_dtypes(no_bool=True)) + @pytest.mark.parametrize( + "out_dtype", get_all_dtypes(no_bool=True, no_none=True) + ) + def test_prod_nanprod_dtype(self, func, in_dtype, out_dtype): + a = numpy.arange(1, 13, dtype=in_dtype).reshape((2, 2, 3)) + if func == "nanprod" and dpnp.issubdtype(a.dtype, dpnp.inexact): + a[:, :, 2] = numpy.nan + ia = dpnp.array(a) + + np_res = getattr(numpy, func)(a, dtype=out_dtype) + dpnp_res = getattr(dpnp, func)(ia, dtype=out_dtype) + assert_dtype_allclose(dpnp_res, np_res) + + @pytest.mark.usefixtures( + "suppress_overflow_encountered_in_cast_numpy_warnings" + ) + @pytest.mark.parametrize("func", ["prod", "nanprod"]) + def test_prod_nanprod_out(self, func): + ia = dpnp.arange(1, 7).reshape((2, 3)) + ia = ia.astype(dpnp.default_float_type(ia.device)) + if func == "nanprod": + ia[:, 1] = dpnp.nan + a = dpnp.asnumpy(ia) + + # output is dpnp_array + np_res = getattr(numpy, func)(a, axis=0) + dpnp_out = dpnp.empty(np_res.shape, 
dtype=np_res.dtype) + dpnp_res = getattr(dpnp, func)(ia, axis=0, out=dpnp_out) + assert dpnp_out is dpnp_res + assert_allclose(dpnp_res, np_res) + + # output is usm_ndarray + dpt_out = dpt.empty(np_res.shape, dtype=np_res.dtype) + dpnp_res = getattr(dpnp, func)(ia, axis=0, out=dpt_out) + assert dpt_out is dpnp_res.get_array() + assert_allclose(dpnp_res, np_res) + + # out is a numpy array -> TypeError + dpnp_res = numpy.empty_like(np_res) + with pytest.raises(TypeError): + getattr(dpnp, func)(ia, axis=0, out=dpnp_res) + # incorrect shape for out + dpnp_res = dpnp.array(numpy.empty((2, 3))) + with pytest.raises(ValueError): + getattr(dpnp, func)(ia, axis=0, out=dpnp_res) -def test_prod_nanprod_Error(): - ia = dpnp.arange(5) + def test_prod_nanprod_Error(self): + ia = dpnp.arange(5) - with pytest.raises(TypeError): - dpnp.prod(dpnp.asnumpy(ia)) - with pytest.raises(TypeError): - dpnp.nanprod(dpnp.asnumpy(ia)) - with pytest.raises(NotImplementedError): - dpnp.prod(ia, where=False) - with pytest.raises(NotImplementedError): - dpnp.prod(ia, initial=6) + with pytest.raises(TypeError): + dpnp.prod(dpnp.asnumpy(ia)) + with pytest.raises(TypeError): + dpnp.nanprod(dpnp.asnumpy(ia)) + with pytest.raises(NotImplementedError): + dpnp.prod(ia, where=False) + with pytest.raises(NotImplementedError): + dpnp.prod(ia, initial=6) @pytest.mark.parametrize( @@ -2314,6 +2312,71 @@ def test_sum(shape, dtype_in, dtype_out, transpose, keepdims, order): assert_array_equal(numpy_res, dpnp_res.asnumpy()) +class TestNanSum: + @pytest.mark.parametrize("dtype", get_float_complex_dtypes()) + @pytest.mark.parametrize("axis", [None, 0, 1, (0, 1)]) + @pytest.mark.parametrize("keepdims", [True, False]) + def test_nansum(self, dtype, axis, keepdims): + dp_array = dpnp.array([[dpnp.nan, 1, 2], [3, dpnp.nan, 0]], dtype=dtype) + np_array = dpnp.asnumpy(dp_array) + + expected = numpy.nansum(np_array, axis=axis, keepdims=keepdims) + result = dpnp.nansum(dp_array, axis=axis, keepdims=keepdims) + assert_allclose(result, expected) + + @pytest.mark.parametrize("dtype", get_complex_dtypes()) + def test_nansum_complex(self, dtype): + x1 = numpy.random.rand(10) + x2 = numpy.random.rand(10) + a = numpy.array(x1 + 1j * x2, dtype=dtype) + a[::3] = numpy.nan + ia = dpnp.array(a) + + expected = numpy.nansum(a) + result = dpnp.nansum(ia) + + # use only type kinds check when dpnp handles complex64 arrays + # since `dpnp.sum()` and `numpy.sum()` return different dtypes + assert_dtype_allclose( + result, expected, check_only_type_kind=(dtype == dpnp.complex64) + ) + + @pytest.mark.parametrize("dtype", get_float_complex_dtypes()) + @pytest.mark.parametrize("axis", [0, 1]) + def test_nansum_out(self, dtype, axis): + dp_array = dpnp.array([[dpnp.nan, 1, 2], [3, dpnp.nan, 0]], dtype=dtype) + np_array = dpnp.asnumpy(dp_array) + + expected = numpy.nansum(np_array, axis=axis) + out = dpnp.empty_like(dpnp.asarray(expected)) + result = dpnp.nansum(dp_array, axis=axis, out=out) + assert out is result + assert_dtype_allclose(result, expected) + + @pytest.mark.parametrize("dtype", get_float_complex_dtypes()) + def test_nansum_dtype(self, dtype): + dp_array = dpnp.array([[dpnp.nan, 1, 2], [3, dpnp.nan, 0]]) + np_array = dpnp.asnumpy(dp_array) + + expected = numpy.nansum(np_array, dtype=dtype) + result = dpnp.nansum(dp_array, dtype=dtype) + assert_dtype_allclose(result, expected) + + @pytest.mark.parametrize("dtype", get_float_complex_dtypes()) + def test_nansum_strided(self, dtype): + dp_array = dpnp.arange(20, dtype=dtype) + dp_array[::3] = dpnp.nan + 
np_array = dpnp.asnumpy(dp_array) + + result = dpnp.nansum(dp_array[::-1]) + expected = numpy.nansum(np_array[::-1]) + assert_allclose(result, expected) + + result = dpnp.nansum(dp_array[::2]) + expected = numpy.nansum(np_array[::2]) + assert_allclose(result, expected) + + @pytest.mark.parametrize( "dtype", get_all_dtypes(no_bool=True, no_none=True, no_complex=True) ) diff --git a/tests/test_statistics.py b/tests/test_statistics.py index f3866b3c27e..1f340ec4490 100644 --- a/tests/test_statistics.py +++ b/tests/test_statistics.py @@ -1,5 +1,4 @@ -import warnings - +import dpctl import dpctl.tensor as dpt import numpy import pytest @@ -13,6 +12,7 @@ from .helper import ( assert_dtype_allclose, get_all_dtypes, + get_complex_dtypes, get_float_complex_dtypes, has_support_aspect64, ) @@ -32,127 +32,280 @@ def test_median(dtype, size): assert_allclose(dpnp_res, np_res) -@pytest.mark.parametrize("func", ["max", "min", "nanmax", "nanmin"]) -@pytest.mark.parametrize("axis", [None, 0, 1, -1, 2, -2, (1, 2), (0, -2)]) -@pytest.mark.parametrize("keepdims", [False, True]) -@pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) -def test_max_min(func, axis, keepdims, dtype): - a = numpy.arange(768, dtype=dtype).reshape((4, 4, 6, 8)) - if func in ["nanmax", "nanmin"] and dpnp.issubdtype(a.dtype, dpnp.inexact): - a[2:3, 2, 3:4, 4] = numpy.nan - ia = dpnp.array(a) +class TestMaxMin: + @pytest.mark.parametrize("func", ["max", "min", "nanmax", "nanmin"]) + @pytest.mark.parametrize("axis", [None, 0, 1, -1, 2, -2, (1, 2), (0, -2)]) + @pytest.mark.parametrize("keepdims", [False, True]) + @pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) + def test_max_min(self, func, axis, keepdims, dtype): + a = numpy.arange(768, dtype=dtype).reshape((4, 4, 6, 8)) + if func in ["nanmax", "nanmin"] and dpnp.issubdtype( + a.dtype, dpnp.inexact + ): + a[2:3, 2, 3:4, 4] = numpy.nan + ia = dpnp.array(a) - np_res = getattr(numpy, func)(a, axis=axis, keepdims=keepdims) - dpnp_res = getattr(dpnp, func)(ia, axis=axis, keepdims=keepdims) - assert_dtype_allclose(dpnp_res, np_res) + np_res = getattr(numpy, func)(a, axis=axis, keepdims=keepdims) + dpnp_res = getattr(dpnp, func)(ia, axis=axis, keepdims=keepdims) + assert_dtype_allclose(dpnp_res, np_res) + + @pytest.mark.parametrize("func", ["max", "min", "nanmax", "nanmin"]) + @pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) + def test_max_min_strided(self, func, dtype): + a = numpy.arange(20, dtype=dtype) + if func in ["nanmax", "nanmin"] and dpnp.issubdtype( + a.dtype, dpnp.inexact + ): + a[::3] = numpy.nan + ia = dpnp.array(a) + np_res = getattr(numpy, func)(a[::-1]) + dpnp_res = getattr(dpnp, func)(ia[::-1]) + assert_dtype_allclose(dpnp_res, np_res) -@pytest.mark.parametrize("func", ["max", "min"]) -@pytest.mark.parametrize("axis", [None, 0, 1, -1]) -@pytest.mark.parametrize("keepdims", [False, True]) -def test_max_min_bool(func, axis, keepdims): - a = numpy.arange(2, dtype=numpy.bool_) - a = numpy.tile(a, (2, 2)) - ia = dpnp.array(a) + np_res = getattr(numpy, func)(a[::2]) + dpnp_res = getattr(dpnp, func)(ia[::2]) + assert_dtype_allclose(dpnp_res, np_res) - np_res = getattr(numpy, func)(a, axis=axis, keepdims=keepdims) - dpnp_res = getattr(dpnp, func)(ia, axis=axis, keepdims=keepdims) - assert_dtype_allclose(dpnp_res, np_res) + @pytest.mark.parametrize("func", ["max", "min"]) + @pytest.mark.parametrize("axis", [None, 0, 1, -1]) + @pytest.mark.parametrize("keepdims", [False, True]) + def test_max_min_bool(self, func, axis, keepdims): + a = 
numpy.arange(2, dtype=numpy.bool_) + a = numpy.tile(a, (2, 2)) + ia = dpnp.array(a) + np_res = getattr(numpy, func)(a, axis=axis, keepdims=keepdims) + dpnp_res = getattr(dpnp, func)(ia, axis=axis, keepdims=keepdims) + assert_dtype_allclose(dpnp_res, np_res) -@pytest.mark.parametrize("func", ["max", "min", "nanmax", "nanmin"]) -def test_max_min_out(func): - a = numpy.arange(12, dtype=numpy.float32).reshape((2, 2, 3)) - if func in ["nanmax", "nanmin"]: - a[1, 0, 2] = numpy.nan - ia = dpnp.array(a) + @pytest.mark.parametrize("func", ["max", "min", "nanmax", "nanmin"]) + def test_max_min_out(self, func): + a = numpy.arange(12, dtype=numpy.float32).reshape((2, 2, 3)) + if func in ["nanmax", "nanmin"]: + a[1, 0, 2] = numpy.nan + ia = dpnp.array(a) - # out is dpnp_array - np_res = getattr(numpy, func)(a, axis=0) - dpnp_out = dpnp.empty(np_res.shape, dtype=np_res.dtype) - dpnp_res = getattr(dpnp, func)(ia, axis=0, out=dpnp_out) - assert dpnp_out is dpnp_res - assert_allclose(dpnp_res, np_res) + # out is dpnp_array + np_res = getattr(numpy, func)(a, axis=0) + dpnp_out = dpnp.empty(np_res.shape, dtype=np_res.dtype) + dpnp_res = getattr(dpnp, func)(ia, axis=0, out=dpnp_out) + assert dpnp_out is dpnp_res + assert_allclose(dpnp_res, np_res) + + # out is usm_ndarray + dpt_out = dpt.empty(np_res.shape, dtype=np_res.dtype) + dpnp_res = getattr(dpnp, func)(ia, axis=0, out=dpt_out) + assert dpt_out is dpnp_res.get_array() + assert_allclose(dpnp_res, np_res) + + # output is numpy array -> Error + dpnp_res = numpy.empty_like(np_res) + with pytest.raises(TypeError): + getattr(dpnp, func)(ia, axis=0, out=dpnp_res) - # out is usm_ndarray - dpt_out = dpt.empty(np_res.shape, dtype=np_res.dtype) - dpnp_res = getattr(dpnp, func)(ia, axis=0, out=dpt_out) - assert dpt_out is dpnp_res.get_array() - assert_allclose(dpnp_res, np_res) + # output has incorrect shape -> Error + dpnp_res = dpnp.array(numpy.empty((4, 2))) + with pytest.raises(ValueError): + getattr(dpnp, func)(ia, axis=0, out=dpnp_res) - # output is numpy array -> Error - dpnp_res = numpy.empty_like(np_res) - with pytest.raises(TypeError): - getattr(dpnp, func)(ia, axis=0, out=dpnp_res) + @pytest.mark.parametrize("func", ["max", "min", "nanmax", "nanmin"]) + def test_max_min_error(self, func): + ia = dpnp.arange(5) + # where is not supported + with pytest.raises(NotImplementedError): + getattr(dpnp, func)(ia, where=False) - # output has incorrect shape -> Error - dpnp_res = dpnp.array(numpy.empty((4, 2))) - with pytest.raises(ValueError): - getattr(dpnp, func)(ia, axis=0, out=dpnp_res) + # initial is not supported + with pytest.raises(NotImplementedError): + getattr(dpnp, func)(ia, initial=6) + @pytest.mark.parametrize("func", ["nanmax", "nanmin"]) + @pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) + def test_nanmax_nanmin_no_NaN(self, func, dtype): + a = numpy.arange(768, dtype=dtype).reshape((4, 4, 6, 8)) + ia = dpnp.array(a) -@pytest.mark.parametrize("func", ["max", "min", "nanmax", "nanmin"]) -def test_max_min_error(func): - ia = dpnp.arange(5) - # where is not supported - with pytest.raises(NotImplementedError): - getattr(dpnp, func)(ia, where=False) + np_res = getattr(numpy, func)(a, axis=0) + dpnp_res = getattr(dpnp, func)(ia, axis=0) + assert_dtype_allclose(dpnp_res, np_res) - # initial is not supported - with pytest.raises(NotImplementedError): - getattr(dpnp, func)(ia, initial=6) + @pytest.mark.parametrize("func", ["nanmax", "nanmin"]) + def test_nanmax_nanmin_all_NaN(self, recwarn, func): + a = numpy.arange(12, 
dtype=numpy.float32).reshape((2, 2, 3)) + a[:, :, 2] = numpy.nan + ia = dpnp.array(a) + np_res = getattr(numpy, func)(a, axis=0) + dpnp_res = getattr(dpnp, func)(ia, axis=0) + assert_dtype_allclose(dpnp_res, np_res) -@pytest.mark.parametrize("func", ["nanmax", "nanmin"]) -@pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) -def test_nanmax_nanmin_no_NaN(func, dtype): - a = numpy.arange(768, dtype=dtype).reshape((4, 4, 6, 8)) - ia = dpnp.array(a) + assert len(recwarn) == 2 + assert all( + "All-NaN slice encountered" in str(r.message) for r in recwarn + ) + assert all(r.category is RuntimeWarning for r in recwarn) - np_res = getattr(numpy, func)(a, axis=0) - dpnp_res = getattr(dpnp, func)(ia, axis=0) - assert_dtype_allclose(dpnp_res, np_res) +class TestAverage: + @pytest.mark.parametrize("dtype", get_all_dtypes()) + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("returned", [True, False]) + def test_avg_no_wgt(self, dtype, axis, returned): + dp_array = dpnp.array([[1, 1, 2], [3, 4, 5]], dtype=dtype) + np_array = dpnp.asnumpy(dp_array) -@pytest.mark.parametrize("func", ["nanmax", "nanmin"]) -def test_nanmax_nanmin_all_NaN(recwarn, func): - a = numpy.arange(12, dtype=numpy.float32).reshape((2, 2, 3)) - a[:, :, 2] = numpy.nan - ia = dpnp.array(a) + result = dpnp.average(dp_array, axis=axis, returned=returned) + expected = numpy.average(np_array, axis=axis, returned=returned) + if returned: + assert_dtype_allclose(result[0], expected[0]) + assert_dtype_allclose(result[1], expected[1]) + else: + assert_dtype_allclose(result, expected) - np_res = getattr(numpy, func)(a, axis=0) - dpnp_res = getattr(dpnp, func)(ia, axis=0) - assert_dtype_allclose(dpnp_res, np_res) + @pytest.mark.parametrize("dtype", get_all_dtypes()) + @pytest.mark.parametrize("axis", [None, 0, 1, (0, 1)]) + @pytest.mark.parametrize("returned", [True, False]) + def test_avg(self, dtype, axis, returned): + dp_array = dpnp.array([[1, 1, 2], [3, 4, 5]], dtype=dtype) + dp_wgt = dpnp.array([[3, 1, 2], [3, 4, 2]], dtype=dtype) + np_array = dpnp.asnumpy(dp_array) + np_wgt = dpnp.asnumpy(dp_wgt) + + result = dpnp.average( + dp_array, axis=axis, weights=dp_wgt, returned=returned + ) + expected = numpy.average( + np_array, axis=axis, weights=np_wgt, returned=returned + ) + + if returned: + assert_dtype_allclose(result[0], expected[0]) + assert_dtype_allclose(result[1], expected[1]) + else: + assert_dtype_allclose(result, expected) - assert len(recwarn) == 2 - assert all("All-NaN slice encountered" in str(r.message) for r in recwarn) - assert all(r.category is RuntimeWarning for r in recwarn) + @pytest.mark.parametrize("dtype", get_complex_dtypes()) + def test_avg_complex(self, dtype): + x1 = numpy.random.rand(10) + x2 = numpy.random.rand(10) + a = numpy.array(x1 + 1j * x2, dtype=dtype) + w = numpy.array(x2 + 1j * x1, dtype=dtype) + ia = dpnp.array(a) + iw = dpnp.array(w) + + expected = numpy.average(a, weights=w) + result = dpnp.average(ia, weights=iw) + assert_dtype_allclose(result, expected) + + @pytest.mark.parametrize( + "weight", + [[[3, 1, 2], [3, 4, 2]], ((3, 1, 2), (3, 4, 2))], + ids=["list", "tuple"], + ) + def test_avg_weight_array_like(self, weight): + dp_array = dpnp.array([[1, 1, 2], [3, 4, 5]]) + wgt = weight + np_array = dpnp.asnumpy(dp_array) + + res = dpnp.average(dp_array, weights=wgt) + exp = numpy.average(np_array, weights=wgt) + assert_dtype_allclose(res, exp) + + def test_avg_weight_1D(self): + dp_array = dpnp.arange(12).reshape(3, 4) + wgt = [1, 2, 3] + np_array = 
dpnp.asnumpy(dp_array) + + res = dpnp.average(dp_array, axis=0, weights=wgt) + exp = numpy.average(np_array, axis=0, weights=wgt) + assert_dtype_allclose(res, exp) + + @pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) + def test_avg_strided(self, dtype): + dp_array = dpnp.arange(20, dtype=dtype) + dp_wgt = dpnp.arange(-10, 10, dtype=dtype) + np_array = dpnp.asnumpy(dp_array) + np_wgt = dpnp.asnumpy(dp_wgt) + + result = dpnp.average(dp_array[::-1], weights=dp_wgt[::-1]) + expected = numpy.average(np_array[::-1], weights=np_wgt[::-1]) + assert_allclose(expected, result) + + result = dpnp.average(dp_array[::2], weights=dp_wgt[::2]) + expected = numpy.average(np_array[::2], weights=np_wgt[::2]) + assert_allclose(expected, result) + + def test_avg_error(self): + a = dpnp.arange(5) + w = dpnp.zeros(5) + # Weights sum to zero + with pytest.raises(ZeroDivisionError): + dpnp.average(a, weights=w) + + a = dpnp.arange(12).reshape(3, 4) + w = dpnp.ones(12) + # Axis must be specified when shapes of input array and weights differ + with pytest.raises(TypeError): + dpnp.average(a, weights=w) + + a = dpnp.arange(12).reshape(3, 4) + w = dpnp.ones(12).reshape(2, 6) + # 1D weights expected when shapes of input array and weights differ. + with pytest.raises(TypeError): + dpnp.average(a, axis=0, weights=w) + + a = dpnp.arange(12).reshape(3, 4) + w = dpnp.ones(12) + # Length of weights not compatible with specified axis. + with pytest.raises(ValueError): + dpnp.average(a, axis=0, weights=w) + + a = dpnp.arange(12, sycl_queue=dpctl.SyclQueue()) + w = dpnp.ones(12, sycl_queue=dpctl.SyclQueue()) + # Execution placement can not be unambiguously inferred + with pytest.raises(ValueError): + dpnp.average(a, axis=0, weights=w) class TestMean: @pytest.mark.parametrize("dtype", get_all_dtypes()) - def test_mean_axis_tuple(self, dtype): + @pytest.mark.parametrize("axis", [None, 0, 1, (0, 1)]) + @pytest.mark.parametrize("keepdims", [True, False]) + def test_mean(self, dtype, axis, keepdims): dp_array = dpnp.array([[0, 1, 2], [3, 4, 0]], dtype=dtype) np_array = dpnp.asnumpy(dp_array) - result = dpnp.mean(dp_array, axis=(0, 1)) - expected = numpy.mean(np_array, axis=(0, 1)) - assert_allclose(expected, result) + result = dpnp.mean(dp_array, axis=axis, keepdims=keepdims) + expected = numpy.mean(np_array, axis=axis, keepdims=keepdims) + assert_dtype_allclose(result, expected) @pytest.mark.parametrize("dtype", get_all_dtypes()) - @pytest.mark.parametrize("axis", [0, 1, (0, 1)]) + @pytest.mark.parametrize("axis", [0, 1]) def test_mean_out(self, dtype, axis): dp_array = dpnp.array([[0, 1, 2], [3, 4, 0]], dtype=dtype) np_array = dpnp.asnumpy(dp_array) expected = numpy.mean(np_array, axis=axis) - result = dpnp.empty_like(dpnp.asarray(expected)) - dpnp.mean(dp_array, axis=axis, out=result) + out = dpnp.empty_like(dpnp.asarray(expected)) + result = dpnp.mean(dp_array, axis=axis, out=out) + assert result is out + assert_dtype_allclose(result, expected) + + @pytest.mark.parametrize("dtype", get_complex_dtypes()) + def test_mean_complex(self, dtype): + x1 = numpy.random.rand(10) + x2 = numpy.random.rand(10) + a = numpy.array(x1 + 1j * x2, dtype=dtype) + ia = dpnp.array(a) + + expected = numpy.mean(a) + result = dpnp.mean(ia) assert_dtype_allclose(result, expected) @pytest.mark.parametrize("dtype", get_all_dtypes()) def test_mean_dtype(self, dtype): - dp_array = dpnp.array([[0, 1, 2], [3, 4, 0]], dtype="i4") + dp_array = dpnp.array([[0, 1, 2], [3, 4, 0]]) np_array = dpnp.asnumpy(dp_array) expected = numpy.mean(np_array, 
dtype=dtype) @@ -173,8 +326,9 @@ def test_mean_empty(self, axis, shape): expected = numpy.mean(np_array, axis=axis) assert_allclose(expected, result) - def test_mean_strided(self): - dp_array = dpnp.array([-2, -1, 0, 1, 0, 2], dtype="f4") + @pytest.mark.parametrize("dtype", get_all_dtypes()) + def test_mean_strided(self, dtype): + dp_array = dpnp.array([-2, -1, 0, 1, 0, 2], dtype=dtype) np_array = dpnp.asnumpy(dp_array) result = dpnp.mean(dp_array[::-1]) @@ -199,6 +353,91 @@ def test_mean_NotImplemented(self): dpnp.mean(ia, where=False) +class TestNanMean: + @pytest.mark.parametrize("dtype", get_float_complex_dtypes()) + @pytest.mark.parametrize("axis", [None, 0, 1, (0, 1)]) + @pytest.mark.parametrize("keepdims", [True, False]) + def test_nanmean(self, dtype, axis, keepdims): + dp_array = dpnp.array([[0, 1, 2], [3, 4, 0]], dtype=dtype) + np_array = dpnp.asnumpy(dp_array) + + result = dpnp.nanmean(dp_array, axis=axis, keepdims=keepdims) + expected = numpy.nanmean(np_array, axis=axis, keepdims=keepdims) + assert_dtype_allclose(result, expected) + + @pytest.mark.parametrize("dtype", get_float_complex_dtypes()) + @pytest.mark.parametrize("axis", [0, 1]) + def test_nanmean_out(self, dtype, axis): + dp_array = dpnp.array([[dpnp.nan, 1, 2], [3, dpnp.nan, 0]], dtype=dtype) + np_array = dpnp.asnumpy(dp_array) + + expected = numpy.nanmean(np_array, axis=axis) + out = dpnp.empty_like(dpnp.asarray(expected)) + result = dpnp.nanmean(dp_array, axis=axis, out=out) + assert out is result + assert_dtype_allclose(result, expected) + + @pytest.mark.parametrize("dtype", get_complex_dtypes()) + def test_nanmean_complex(self, dtype): + x1 = numpy.random.rand(10) + x2 = numpy.random.rand(10) + a = numpy.array(x1 + 1j * x2, dtype=dtype) + a[::3] = numpy.nan + ia = dpnp.array(a) + + expected = numpy.nanmean(a) + result = dpnp.nanmean(ia) + assert_dtype_allclose(result, expected) + + @pytest.mark.parametrize("dtype", get_float_complex_dtypes()) + def test_nanmean_dtype(self, dtype): + dp_array = dpnp.array([[dpnp.nan, 1, 2], [3, dpnp.nan, 0]]) + np_array = dpnp.asnumpy(dp_array) + + expected = numpy.nanmean(np_array, dtype=dtype) + result = dpnp.nanmean(dp_array, dtype=dtype) + assert_dtype_allclose(result, expected) + + @pytest.mark.parametrize("dtype", get_float_complex_dtypes()) + def test_nanmean_strided(self, dtype): + dp_array = dpnp.arange(20, dtype=dtype) + dp_array[::3] = dpnp.nan + np_array = dpnp.asnumpy(dp_array) + + result = dpnp.nanmean(dp_array[::-1]) + expected = numpy.nanmean(np_array[::-1]) + assert_dtype_allclose(result, expected) + + result = dpnp.nanmean(dp_array[::2]) + expected = numpy.nanmean(np_array[::2]) + assert_dtype_allclose(result, expected) + + @pytest.mark.usefixtures("suppress_mean_empty_slice_numpy_warnings") + def test_nanmean_scalar(self): + dp_array = dpnp.array(dpnp.nan) + np_array = dpnp.asnumpy(dp_array) + + result = dpnp.nanmean(dp_array) + expected = numpy.nanmean(np_array) + assert_allclose(expected, result) + + def test_nanmean_error(self): + ia = dpnp.arange(5, dtype=dpnp.float32) + ia[0] = dpnp.nan + # where keyword is not implemented + with pytest.raises(NotImplementedError): + dpnp.nanmean(ia, where=False) + + # dtype should be floating + with pytest.raises(TypeError): + dpnp.nanmean(ia, dtype=dpnp.int32) + + # out dtype should be inexact + res = dpnp.empty((1,), dtype=dpnp.int32) + with pytest.raises(TypeError): + dpnp.nanmean(ia, out=res) + + class TestVar: @pytest.mark.usefixtures( "suppress_divide_invalid_numpy_warnings", "suppress_dof_numpy_warnings" @@ -234,8 
+473,9 @@ def test_var_out(self, dtype, axis, ddof): res_dtype = expected.dtype else: res_dtype = dpnp.default_float_type(dp_array.device) - result = dpnp.empty(expected.shape, dtype=res_dtype) - dpnp.var(dp_array, axis=axis, out=result, ddof=ddof) + out = dpnp.empty(expected.shape, dtype=res_dtype) + result = dpnp.var(dp_array, axis=axis, out=out, ddof=ddof) + assert result is out assert_dtype_allclose(result, expected) @pytest.mark.usefixtures( @@ -329,8 +569,9 @@ def test_std_out(self, dtype, axis, ddof): res_dtype = expected.dtype else: res_dtype = dpnp.default_float_type(dp_array.device) - result = dpnp.empty(expected.shape, dtype=res_dtype) - dpnp.std(dp_array, axis=axis, out=result, ddof=ddof) + out = dpnp.empty(expected.shape, dtype=res_dtype) + result = dpnp.std(dp_array, axis=axis, out=out, ddof=ddof) + assert out is result assert_dtype_allclose(result, expected) @pytest.mark.usefixtures( @@ -443,6 +684,18 @@ def test_nanvar(self, array, dtype): result = dpnp.nanvar(ia, ddof=ddof) assert_dtype_allclose(result, expected) + @pytest.mark.parametrize("dtype", get_complex_dtypes()) + def test_nanvar_complex(self, dtype): + x1 = numpy.random.rand(10) + x2 = numpy.random.rand(10) + a = numpy.array(x1 + 1j * x2, dtype=dtype) + a[::3] = numpy.nan + ia = dpnp.array(a) + + expected = numpy.nanvar(a) + result = dpnp.nanvar(ia) + assert_dtype_allclose(result, expected) + @pytest.mark.usefixtures("suppress_dof_numpy_warnings") @pytest.mark.parametrize("dtype", get_float_complex_dtypes()) @pytest.mark.parametrize("axis", [None, 0, 1, 2, (0, 1), (1, 2)]) @@ -459,8 +712,25 @@ def test_nanvar_out(self, dtype, axis, keepdims, ddof): res_dtype = expected.dtype else: res_dtype = dpnp.default_float_type(ia.device) - result = dpnp.empty(expected.shape, dtype=res_dtype) - dpnp.nanvar(ia, out=result, axis=axis, ddof=ddof, keepdims=keepdims) + out = dpnp.empty(expected.shape, dtype=res_dtype) + result = dpnp.nanvar( + ia, out=out, axis=axis, ddof=ddof, keepdims=keepdims + ) + assert result is out + assert_dtype_allclose(result, expected) + + @pytest.mark.parametrize("dtype", get_float_complex_dtypes()) + def test_nanvar_strided(self, dtype): + dp_array = dpnp.arange(20, dtype=dtype) + dp_array[::3] = dpnp.nan + np_array = dpnp.asnumpy(dp_array) + + result = dpnp.nanvar(dp_array[::-1]) + expected = numpy.nanvar(np_array[::-1]) + assert_dtype_allclose(result, expected) + + result = dpnp.nanvar(dp_array[::2]) + expected = numpy.nanvar(np_array[::2]) assert_dtype_allclose(result, expected) @pytest.mark.usefixtures("suppress_complex_warning") @@ -497,6 +767,133 @@ def test_nanvar_error(self): dpnp.nanvar(ia, ddof="1") +class TestNanStd: + @pytest.mark.parametrize( + "array", + [ + [2, 0, 6, 2], + [2, 0, 6, 2, 5, 6, 7, 8], + [], + [2, 1, numpy.nan, 5, 3], + [-1, numpy.nan, 1, numpy.inf], + [3, 6, 0, 1], + [3, 6, 0, 1, 8], + [3, 2, 9, 6, numpy.nan], + [numpy.nan, numpy.nan, numpy.inf, numpy.nan], + [[2, 0], [6, 2]], + [[2, 0, 6, 2], [5, 6, 7, 8]], + [[[2, 0], [6, 2]], [[5, 6], [7, 8]]], + [[-1, numpy.nan], [1, numpy.inf]], + [[numpy.nan, numpy.nan], [numpy.inf, numpy.nan]], + ], + ids=[ + "[2, 0, 6, 2]", + "[2, 0, 6, 2, 5, 6, 7, 8]", + "[]", + "[2, 1, np.nan, 5, 3]", + "[-1, np.nan, 1, np.inf]", + "[3, 6, 0, 1]", + "[3, 6, 0, 1, 8]", + "[3, 2, 9, 6, np.nan]", + "[np.nan, np.nan, np.inf, np.nan]", + "[[2, 0], [6, 2]]", + "[[2, 0, 6, 2], [5, 6, 7, 8]]", + "[[[2, 0], [6, 2]], [[5, 6], [7, 8]]]", + "[[-1, np.nan], [1, np.inf]]", + "[[np.nan, np.nan], [np.inf, np.nan]]", + ], + ) + @pytest.mark.usefixtures( + 
"suppress_invalid_numpy_warnings", "suppress_dof_numpy_warnings" + ) + @pytest.mark.parametrize( + "dtype", get_all_dtypes(no_none=True, no_bool=True) + ) + def test_nanstd(self, array, dtype): + try: + a = numpy.array(array, dtype=dtype) + except: + pytest.skip("floating datat type is needed to store NaN") + ia = dpnp.array(a) + for ddof in range(a.ndim): + expected = numpy.nanstd(a, ddof=ddof) + result = dpnp.nanstd(ia, ddof=ddof) + assert_dtype_allclose(result, expected) + + @pytest.mark.parametrize("dtype", get_complex_dtypes()) + def test_nanstd_complex(self, dtype): + x1 = numpy.random.rand(10) + x2 = numpy.random.rand(10) + a = numpy.array(x1 + 1j * x2, dtype=dtype) + a[::3] = numpy.nan + ia = dpnp.array(a) + + expected = numpy.nanstd(a) + result = dpnp.nanstd(ia) + assert_dtype_allclose(result, expected) + + @pytest.mark.usefixtures("suppress_dof_numpy_warnings") + @pytest.mark.parametrize("dtype", get_float_complex_dtypes()) + @pytest.mark.parametrize("axis", [None, 0, 1, 2, (0, 1), (1, 2)]) + @pytest.mark.parametrize("keepdims", [True, False]) + @pytest.mark.parametrize("ddof", [0, 0.5, 1, 1.5, 2, 3]) + def test_nanstd_out(self, dtype, axis, keepdims, ddof): + a = numpy.arange(4 * 3 * 5, dtype=dtype) + a[::2] = numpy.nan + a = a.reshape(4, 3, 5) + ia = dpnp.array(a) + + expected = numpy.nanstd(a, axis=axis, ddof=ddof, keepdims=keepdims) + if has_support_aspect64(): + res_dtype = expected.dtype + else: + res_dtype = dpnp.default_float_type(ia.device) + out = dpnp.empty(expected.shape, dtype=res_dtype) + result = dpnp.nanstd( + ia, out=out, axis=axis, ddof=ddof, keepdims=keepdims + ) + assert result is out + assert_dtype_allclose(result, expected) + + @pytest.mark.parametrize("dtype", get_float_complex_dtypes()) + def test_nanstd_strided(self, dtype): + dp_array = dpnp.arange(20, dtype=dtype) + dp_array[::3] = dpnp.nan + np_array = dpnp.asnumpy(dp_array) + + result = dpnp.nanstd(dp_array[::-1]) + expected = numpy.nanstd(np_array[::-1]) + assert_dtype_allclose(result, expected) + + result = dpnp.nanstd(dp_array[::2]) + expected = numpy.nanstd(np_array[::2]) + assert_dtype_allclose(result, expected) + + @pytest.mark.usefixtures("suppress_complex_warning") + @pytest.mark.parametrize("dt_in", get_float_complex_dtypes()) + @pytest.mark.parametrize("dt_out", get_float_complex_dtypes()) + def test_nanstd_dtype(self, dt_in, dt_out): + a = numpy.arange(4 * 3 * 5, dtype=dt_in) + a[::2] = numpy.nan + a = a.reshape(4, 3, 5) + ia = dpnp.array(a) + + expected = numpy.nanstd(a, dtype=dt_out) + result = dpnp.nanstd(ia, dtype=dt_out) + assert_dtype_allclose(result, expected) + + def test_nanstd_error(self): + ia = dpnp.arange(5, dtype=dpnp.float32) + ia[0] = dpnp.nan + # where keyword is not implemented + with pytest.raises(NotImplementedError): + dpnp.nanstd(ia, where=False) + + # ddof should be an integer or float + with pytest.raises(TypeError): + dpnp.nanstd(ia, ddof="1") + + @pytest.mark.usefixtures("allow_fall_back_on_numpy") class TestBincount: @pytest.mark.parametrize( diff --git a/tests/test_sycl_queue.py b/tests/test_sycl_queue.py index fb31bd59ebf..53bc37f4eeb 100644 --- a/tests/test_sycl_queue.py +++ b/tests/test_sycl_queue.py @@ -331,6 +331,7 @@ def test_meshgrid(device_x, device_y): @pytest.mark.parametrize( "func,data", [ + pytest.param("average", [1.0, 2.0, 4.0, 7.0]), pytest.param("abs", [-1.2, 1.2]), pytest.param("arccos", [-0.5, 0.0, 0.5]), pytest.param("arccosh", [1.5, 3.5, 5.0]), @@ -374,8 +375,10 @@ def test_meshgrid(device_x, device_y): pytest.param("nancumprod", [1.0, 
dpnp.nan]), pytest.param("nancumsum", [1.0, dpnp.nan]), pytest.param("nanmax", [1.0, 2.0, 4.0, dpnp.nan]), + pytest.param("nanmean", [1.0, 2.0, 4.0, dpnp.nan]), pytest.param("nanmin", [1.0, 2.0, 4.0, dpnp.nan]), pytest.param("nanprod", [1.0, dpnp.nan]), + pytest.param("nanstd", [1.0, 2.0, 4.0, dpnp.nan]), pytest.param("nansum", [1.0, dpnp.nan]), pytest.param("nanvar", [1.0, 2.0, 4.0, dpnp.nan]), pytest.param("negative", [1.0, 0.0, -1.0]), diff --git a/tests/test_usm_type.py b/tests/test_usm_type.py index 3b5bbbbe696..144b58c9a6b 100644 --- a/tests/test_usm_type.py +++ b/tests/test_usm_type.py @@ -365,6 +365,7 @@ def test_meshgrid(usm_type_x, usm_type_y): @pytest.mark.parametrize( "func,data", [ + pytest.param("average", [1.0, 2.0, 4.0, 7.0]), pytest.param("abs", [-1.2, 1.2]), pytest.param("arccos", [-0.5, 0.0, 0.5]), pytest.param("arccosh", [1.5, 3.5, 5.0]), @@ -401,8 +402,11 @@ def test_meshgrid(usm_type_x, usm_type_y): pytest.param("nanargmax", [1.0, 2.0, 4.0, dp.nan]), pytest.param("nanargmin", [1.0, 2.0, 4.0, dp.nan]), pytest.param("nanmax", [1.0, 2.0, 4.0, dp.nan]), + pytest.param("nanmean", [1.0, 2.0, 4.0, dp.nan]), pytest.param("nanmin", [1.0, 2.0, 4.0, dp.nan]), pytest.param("nanprod", [1.0, 2.0, dp.nan]), + pytest.param("nanstd", [1.0, 2.0, 4.0, dp.nan]), + pytest.param("nansum", [1.0, 2.0, 4.0, dp.nan]), pytest.param("nanvar", [1.0, 2.0, 4.0, dp.nan]), pytest.param("negative", [1.0, 0.0, -1.0]), pytest.param("positive", [1.0, 0.0, -1.0]), diff --git a/tests/third_party/cupy/math_tests/test_sumprod.py b/tests/third_party/cupy/math_tests/test_sumprod.py index fc94b329665..e4306788885 100644 --- a/tests/third_party/cupy/math_tests/test_sumprod.py +++ b/tests/third_party/cupy/math_tests/test_sumprod.py @@ -1,5 +1,3 @@ -import unittest - import numpy import pytest @@ -8,62 +6,63 @@ from tests.third_party.cupy import testing -class TestSumprod(unittest.TestCase): +# Note: numpy.sum() always upcast integers to (u)int64 and float32 to +# float64 for dtype=None. `np.sum` does that too for integers, but not for +# float32, so we need to special-case it for these tests +def _get_dtype_kwargs(xp, dtype): + if xp is numpy and dtype == numpy.float32 and has_support_aspect64(): + return {"dtype": numpy.float64} + return {} + + +class TestSumprod: def tearDown(self): # Free huge memory for slow test # cupy.get_default_memory_pool().free_all_blocks() # cupy.get_default_pinned_memory_pool().free_all_blocks() pass - # Note: numpy.sum() always upcast integers to (u)int64 and float32 to - # float64 for dtype=None. 
`np.sum` does that too for integers, but not for - # float32, so we need to special-case it for these tests - def _get_dtype_kwargs(self, xp, dtype): - if xp is numpy and dtype == numpy.float32 and has_support_aspect64(): - return {"dtype": numpy.float64} - return {} - @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_sum_all(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) - return a.sum(**self._get_dtype_kwargs(xp, dtype)) + return a.sum(**_get_dtype_kwargs(xp, dtype)) @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_sum_all_keepdims(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) - return a.sum(**self._get_dtype_kwargs(xp, dtype), keepdims=True) + return a.sum(**_get_dtype_kwargs(xp, dtype), keepdims=True) @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_external_sum_all(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) - return xp.sum(a, **self._get_dtype_kwargs(xp, dtype)) + return xp.sum(a, **_get_dtype_kwargs(xp, dtype)) @testing.for_all_dtypes() @testing.numpy_cupy_allclose(rtol=1e-06) def test_sum_all2(self, xp, dtype): a = testing.shaped_arange((20, 30, 40), xp, dtype) - return a.sum(**self._get_dtype_kwargs(xp, dtype)) + return a.sum(**_get_dtype_kwargs(xp, dtype)) @testing.for_all_dtypes() - @testing.numpy_cupy_allclose(type_check=False) + @testing.numpy_cupy_allclose() def test_sum_all_transposed(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype).transpose(2, 0, 1) - return a.sum(**self._get_dtype_kwargs(xp, dtype)) + return a.sum(**_get_dtype_kwargs(xp, dtype)) @testing.for_all_dtypes() @testing.numpy_cupy_allclose(rtol=1e-06) def test_sum_all_transposed2(self, xp, dtype): a = testing.shaped_arange((20, 30, 40), xp, dtype).transpose(2, 0, 1) - return a.sum(**self._get_dtype_kwargs(xp, dtype)) + return a.sum(**_get_dtype_kwargs(xp, dtype)) @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_sum_axis(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) - return a.sum(**self._get_dtype_kwargs(xp, dtype), axis=1) + return a.sum(**_get_dtype_kwargs(xp, dtype), axis=1) @testing.slow @testing.numpy_cupy_allclose() @@ -75,7 +74,7 @@ def test_sum_axis_huge(self, xp): @testing.numpy_cupy_allclose() def test_external_sum_axis(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) - return xp.sum(a, **self._get_dtype_kwargs(xp, dtype), axis=1) + return xp.sum(a, **_get_dtype_kwargs(xp, dtype), axis=1) # float16 is omitted, since NumPy's sum on float16 arrays has more error # than CuPy's. 
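The hunks above and below convert every call site from the former `self._get_dtype_kwargs()` method to the module-level `_get_dtype_kwargs()` helper. A minimal, self-contained sketch of that pattern follows, assuming a float64-capable device where the dtype promotion described in the note applies; the class and test names are illustrative only, while the helper body and the decorator/import usage are taken from the hunks in this patch.

import numpy

from tests.helper import has_support_aspect64
from tests.third_party.cupy import testing


def _get_dtype_kwargs(xp, dtype):
    # For float32 input on a float64-capable device, ask the NumPy reference
    # to accumulate in float64 so its result dtype lines up with the dpnp
    # result being compared against (see the note above).
    if xp is numpy and dtype == numpy.float32 and has_support_aspect64():
        return {"dtype": numpy.float64}
    return {}


class TestSumDtypeExample:  # hypothetical class name, for illustration only
    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_sum_default_dtype(self, xp, dtype):
        a = testing.shaped_arange((2, 3), xp, dtype)
        return a.sum(**_get_dtype_kwargs(xp, dtype))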
@@ -83,49 +82,49 @@ def test_external_sum_axis(self, xp, dtype): @testing.numpy_cupy_allclose() def test_sum_axis2(self, xp, dtype): a = testing.shaped_arange((20, 30, 40), xp, dtype) - return a.sum(**self._get_dtype_kwargs(xp, dtype), axis=1) + return a.sum(**_get_dtype_kwargs(xp, dtype), axis=1) @testing.for_all_dtypes() @testing.numpy_cupy_allclose(contiguous_check=False) def test_sum_axis_transposed(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype).transpose(2, 0, 1) - return a.sum(**self._get_dtype_kwargs(xp, dtype), axis=1) + return a.sum(**_get_dtype_kwargs(xp, dtype), axis=1) @testing.for_all_dtypes() @testing.numpy_cupy_allclose(contiguous_check=False) def test_sum_axis_transposed2(self, xp, dtype): a = testing.shaped_arange((20, 30, 40), xp, dtype).transpose(2, 0, 1) - return a.sum(**self._get_dtype_kwargs(xp, dtype), axis=1) + return a.sum(**_get_dtype_kwargs(xp, dtype), axis=1) @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_sum_axes(self, xp, dtype): a = testing.shaped_arange((2, 3, 4, 5), xp, dtype) - return a.sum(**self._get_dtype_kwargs(xp, dtype), axis=(1, 3)) + return a.sum(**_get_dtype_kwargs(xp, dtype), axis=(1, 3)) @testing.for_all_dtypes() @testing.numpy_cupy_allclose(rtol=1e-4) def test_sum_axes2(self, xp, dtype): a = testing.shaped_arange((20, 30, 40, 50), xp, dtype) - return a.sum(**self._get_dtype_kwargs(xp, dtype), axis=(1, 3)) + return a.sum(**_get_dtype_kwargs(xp, dtype), axis=(1, 3)) @testing.for_all_dtypes() @testing.numpy_cupy_allclose(rtol=1e-6) def test_sum_axes3(self, xp, dtype): a = testing.shaped_arange((2, 3, 4, 5), xp, dtype) - return a.sum(**self._get_dtype_kwargs(xp, dtype), axis=(0, 2, 3)) + return a.sum(**_get_dtype_kwargs(xp, dtype), axis=(0, 2, 3)) @testing.for_all_dtypes() @testing.numpy_cupy_allclose(rtol=1e-6) def test_sum_axes4(self, xp, dtype): a = testing.shaped_arange((20, 30, 40, 50), xp, dtype) - return a.sum(**self._get_dtype_kwargs(xp, dtype), axis=(0, 2, 3)) + return a.sum(**_get_dtype_kwargs(xp, dtype), axis=(0, 2, 3)) @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_sum_empty_axis(self, xp, dtype): a = testing.shaped_arange((2, 3, 4, 5), xp, dtype) - return a.sum(**self._get_dtype_kwargs(xp, dtype), axis=()) + return a.sum(**_get_dtype_kwargs(xp, dtype), axis=()) @testing.for_all_dtypes_combination(names=["src_dtype", "dst_dtype"]) @testing.numpy_cupy_allclose() @@ -143,9 +142,7 @@ def test_sum_keepdims_and_dtype(self, xp, src_dtype, dst_dtype): @testing.numpy_cupy_allclose() def test_sum_keepdims_multiple_axes(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) - return a.sum( - **self._get_dtype_kwargs(xp, dtype), axis=(1, 2), keepdims=True - ) + return a.sum(**_get_dtype_kwargs(xp, dtype), axis=(1, 2), keepdims=True) @testing.for_all_dtypes() @testing.numpy_cupy_allclose() @@ -158,32 +155,32 @@ def test_sum_out(self, xp, dtype): def test_sum_out_wrong_shape(self): a = testing.shaped_arange((2, 3, 4)) b = cupy.empty((2, 3)) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): a.sum(axis=1, out=b) @testing.for_all_dtypes() - @testing.numpy_cupy_allclose(type_check=False) + @testing.numpy_cupy_allclose() def test_prod_all(self, xp, dtype): a = testing.shaped_arange((2, 3), xp, dtype) - return a.prod() + return a.prod(**_get_dtype_kwargs(xp, dtype)) @testing.for_all_dtypes() - @testing.numpy_cupy_allclose(type_check=False) + @testing.numpy_cupy_allclose() def test_external_prod_all(self, xp, dtype): a = testing.shaped_arange((2, 3), xp, dtype) - 
        return xp.prod(a)
+        return xp.prod(a, **_get_dtype_kwargs(xp, dtype))

     @testing.for_all_dtypes()
-    @testing.numpy_cupy_allclose(type_check=False)
+    @testing.numpy_cupy_allclose()
     def test_prod_axis(self, xp, dtype):
         a = testing.shaped_arange((2, 3, 4), xp, dtype)
-        return a.prod(axis=1)
+        return a.prod(axis=1, **_get_dtype_kwargs(xp, dtype))

     @testing.for_all_dtypes()
-    @testing.numpy_cupy_allclose(type_check=False)
+    @testing.numpy_cupy_allclose()
     def test_external_prod_axis(self, xp, dtype):
         a = testing.shaped_arange((2, 3, 4), xp, dtype)
-        return xp.prod(a, axis=1)
+        return xp.prod(a, axis=1, **_get_dtype_kwargs(xp, dtype))

     @testing.for_all_dtypes_combination(names=["src_dtype", "dst_dtype"])
     @testing.numpy_cupy_allclose()
@@ -203,8 +200,7 @@ def test_prod_dtype(self, xp, src_dtype, dst_dtype):
         }
     )
 )
-@testing.gpu
-class TestNansumNanprodLong(unittest.TestCase):
+class TestNansumNanprodLong:
     def _do_transposed_axis_test(self):
         return not self.transpose_axes and self.axis != 1
@@ -232,10 +228,15 @@ def _test(self, xp, dtype):
         if not issubclass(dtype, xp.integer):
             a[:, 1] = xp.nan
         func = getattr(xp, self.func)
-        return func(a, axis=self.axis, keepdims=self.keepdims)
+        return func(
+            a,
+            **_get_dtype_kwargs(xp, dtype),
+            axis=self.axis,
+            keepdims=self.keepdims,
+        )

     @testing.for_all_dtypes(no_bool=True, no_float16=True)
-    @testing.numpy_cupy_allclose(type_check=False)
+    @testing.numpy_cupy_allclose(type_check=has_support_aspect64())
     def test_nansum_all(self, xp, dtype):
         if (
             not self._numpy_nanprod_implemented()
@@ -245,7 +246,9 @@ def test_nansum_all(self, xp, dtype):
         return self._test(xp, dtype)

     @testing.for_all_dtypes(no_bool=True, no_float16=True)
-    @testing.numpy_cupy_allclose(contiguous_check=False, type_check=False)
+    @testing.numpy_cupy_allclose(
+        contiguous_check=False, type_check=has_support_aspect64()
+    )
     def test_nansum_axis_transposed(self, xp, dtype):
         if (
             not self._numpy_nanprod_implemented()
@@ -262,9 +265,7 @@ def test_nansum_axis_transposed(self, xp, dtype):
         }
     )
 )
-@pytest.mark.usefixtures("allow_fall_back_on_numpy")
-@testing.gpu
-class TestNansumNanprodExtra(unittest.TestCase):
+class TestNansumNanprodExtra:
     @testing.for_all_dtypes(no_bool=True, no_float16=True)
     @testing.numpy_cupy_allclose()
     def test_nansum_out(self, xp, dtype):
@@ -279,7 +280,7 @@ def test_nansum_out_wrong_shape(self):
         a = testing.shaped_arange(self.shape)
         a[:, 1] = cupy.nan
         b = cupy.empty((2, 3))
-        with self.assertRaises(ValueError):
+        with pytest.raises(ValueError):
             cupy.nansum(a, axis=1, out=b)
@@ -291,16 +292,36 @@ def test_nansum_out_wrong_shape(self):
         }
     )
 )
-@pytest.mark.usefixtures("allow_fall_back_on_numpy")
-@testing.gpu
-class TestNansumNanprodAxes(unittest.TestCase):
+class TestNansumNanprodAxes:
     @testing.for_all_dtypes(no_bool=True, no_float16=True)
     @testing.numpy_cupy_allclose(rtol=1e-6)
     def test_nansum_axes(self, xp, dtype):
         a = testing.shaped_arange(self.shape, xp, dtype)
         if not issubclass(dtype, xp.integer):
             a[:, 1] = xp.nan
-        return xp.nansum(a, axis=self.axis)
+        return xp.nansum(a, **_get_dtype_kwargs(xp, dtype), axis=self.axis)
+
+
+class TestNansumNanprodHuge:
+    def _test(self, xp, nan_slice):
+        a = testing.shaped_random((2048, 1, 1024), xp, "f")
+        a[nan_slice] = xp.nan
+        a = xp.broadcast_to(a, (2048, 256, 1024))
+        return xp.nansum(a, **_get_dtype_kwargs(xp, a.dtype), axis=2)
+
+    @testing.slow
+    @testing.numpy_cupy_allclose(atol=1e-1)
+    def test_nansum_axis_huge(self, xp):
+        return self._test(
+            xp, (slice(None, None), slice(None, None), slice(1, 2))
+        )
+
+    @testing.slow
+    @testing.numpy_cupy_allclose(atol=1e-2)
+    def test_nansum_axis_huge_halfnan(self, xp):
+        return self._test(
+            xp, (slice(None, None), slice(None, None), slice(0, 512))
+        )


 axes = [0, 1, 2]
@@ -309,7 +330,7 @@ def test_nansum_axes(self, xp, dtype):
 @testing.parameterize(*testing.product({"axis": axes}))
 @pytest.mark.usefixtures("allow_fall_back_on_numpy")
 # TODO: remove "type_check=False" once leveraged on dpctl call
-class TestCumsum(unittest.TestCase):
+class TestCumsum:
     @testing.for_all_dtypes()
     @testing.numpy_cupy_allclose(type_check=False)
     def test_cumsum(self, xp, dtype):
@@ -391,7 +412,7 @@ def test_invalid_axis_lower1(self, dtype):
     @testing.for_all_dtypes()
     def test_invalid_axis_lower2(self, dtype):
         a = testing.shaped_arange((4, 5), cupy, dtype)
-        with self.assertRaises(numpy.AxisError):
+        with pytest.raises(numpy.AxisError):
             return cupy.cumsum(a, axis=-a.ndim - 1)

     @testing.for_all_dtypes()
@@ -404,22 +425,21 @@ def test_invalid_axis_upper1(self, dtype):
     @testing.for_all_dtypes()
     def test_invalid_axis_upper2(self, dtype):
         a = testing.shaped_arange((4, 5), cupy, dtype)
-        with self.assertRaises(numpy.AxisError):
+        with pytest.raises(numpy.AxisError):
             return cupy.cumsum(a, axis=a.ndim + 1)

     def test_cumsum_arraylike(self):
-        with self.assertRaises(TypeError):
+        with pytest.raises(TypeError):
             return cupy.cumsum((1, 2, 3))

     @testing.for_float_dtypes()
     def test_cumsum_numpy_array(self, dtype):
         a_numpy = numpy.arange(8, dtype=dtype)
-        with self.assertRaises(TypeError):
+        with pytest.raises(TypeError):
             return cupy.cumsum(a_numpy)


-@testing.gpu
-class TestCumprod(unittest.TestCase):
+class TestCumprod:
     @testing.for_all_dtypes()
     @testing.numpy_cupy_allclose()
     def test_cumprod_1dim(self, xp, dtype):
@@ -503,17 +523,17 @@ def test_invalid_axis_upper1(self, dtype):
     @testing.for_all_dtypes()
     def test_invalid_axis_upper2(self, dtype):
         a = testing.shaped_arange((4, 5), cupy, dtype)
-        with self.assertRaises(numpy.AxisError):
+        with pytest.raises(numpy.AxisError):
             return cupy.cumprod(a, axis=a.ndim)

     def test_cumprod_arraylike(self):
-        with self.assertRaises(TypeError):
+        with pytest.raises(TypeError):
             return cupy.cumprod((1, 2, 3))

     @testing.for_float_dtypes()
     def test_cumprod_numpy_array(self, dtype):
         a_numpy = numpy.arange(1, 6, dtype=dtype)
-        with self.assertRaises(TypeError):
+        with pytest.raises(TypeError):
             return cupy.cumprod(a_numpy)
diff --git a/tests/third_party/cupy/statistics_tests/test_meanvar.py b/tests/third_party/cupy/statistics_tests/test_meanvar.py
index de2eb22604f..1537a57cbc0 100644
--- a/tests/third_party/cupy/statistics_tests/test_meanvar.py
+++ b/tests/third_party/cupy/statistics_tests/test_meanvar.py
@@ -1,5 +1,3 @@
-import unittest
-
 import numpy
 import pytest
@@ -12,8 +10,7 @@
 )


-@testing.gpu
-class TestMedian(unittest.TestCase):
+class TestMedian:
     @testing.for_all_dtypes()
     @testing.numpy_cupy_allclose(type_check=has_support_aspect64())
     def test_median_noaxis(self, xp, dtype):
@@ -89,8 +86,7 @@ def test_median_invalid_axis(self):
     )
 )
 @pytest.mark.usefixtures("allow_fall_back_on_numpy")
-@testing.gpu
-class TestMedianAxis(unittest.TestCase):
+class TestMedianAxis:
     @testing.for_all_dtypes()
     @testing.numpy_cupy_allclose(type_check=has_support_aspect64())
     def test_median_axis_sequence(self, xp, dtype):
@@ -98,61 +94,63 @@ def test_median_axis_sequence(self, xp, dtype):
         return xp.median(a, self.axis, keepdims=self.keepdims)


-@testing.gpu
-class TestAverage(unittest.TestCase):
+class TestAverage:
     _multiprocess_can_split_ = True

     @testing.for_all_dtypes()
-    @testing.numpy_cupy_allclose()
+    @testing.numpy_cupy_allclose(type_check=has_support_aspect64())
     def test_average_all(self, xp, dtype):
         a = testing.shaped_arange((2, 3), xp, dtype)
         return xp.average(a)

-    @pytest.mark.usefixtures("allow_fall_back_on_numpy")
     @testing.for_all_dtypes()
     @testing.numpy_cupy_allclose(type_check=has_support_aspect64())
     def test_average_axis(self, xp, dtype):
         a = testing.shaped_arange((2, 3, 4), xp, dtype)
         return xp.average(a, axis=1)

-    @pytest.mark.usefixtures("allow_fall_back_on_numpy")
     @testing.for_all_dtypes()
-    @testing.numpy_cupy_allclose()
+    @testing.numpy_cupy_allclose(type_check=has_support_aspect64())
     def test_average_weights(self, xp, dtype):
         a = testing.shaped_arange((2, 3), xp, dtype)
         w = testing.shaped_arange((2, 3), xp, dtype)
         return xp.average(a, weights=w)

-    @pytest.mark.usefixtures("allow_fall_back_on_numpy")
     @testing.for_all_dtypes()
-    @testing.numpy_cupy_allclose(type_check=has_support_aspect64())
-    def test_average_axis_weights(self, xp, dtype):
-        a = testing.shaped_arange((2, 3, 4), xp, dtype)
-        w = testing.shaped_arange((2, 3, 4), xp, dtype)
-        return xp.average(a, axis=2, weights=w)
-
-    def check_returned(self, a, axis, weights):
-        average_cpu, sum_weights_cpu = numpy.average(
-            a, axis, weights, returned=True
+    @testing.numpy_cupy_allclose(rtol=2e-7, type_check=has_support_aspect64())
+    @pytest.mark.parametrize(
+        "axis, weights", [(1, False), (None, True), (1, True)]
+    )
+    def test_returned(self, xp, dtype, axis, weights):
+        a = testing.shaped_arange((2, 3), xp, dtype)
+        if weights:
+            w = testing.shaped_arange((2, 3), xp, dtype)
+        else:
+            w = None
+        return xp.average(a, axis=axis, weights=w, returned=True)
+
+    @testing.for_all_dtypes()
+    @testing.numpy_cupy_allclose(rtol=5e-7, type_check=has_support_aspect64())
+    @pytest.mark.parametrize("returned", [True, False])
+    @testing.with_requires("numpy>=1.23.1")
+    def test_average_keepdims_axis1(self, xp, dtype, returned):
+        a = testing.shaped_random((2, 3), xp, dtype)
+        w = testing.shaped_random((2, 3), xp, dtype)
+        return xp.average(
+            a, axis=1, weights=w, returned=returned, keepdims=True
         )
-        result = cupy.average(cupy.asarray(a), axis, weights, returned=True)
-        self.assertTrue(isinstance(result, tuple))
-        self.assertEqual(len(result), 2)
-        average_gpu, sum_weights_gpu = result
-        testing.assert_allclose(average_cpu, average_gpu)
-        testing.assert_allclose(sum_weights_cpu, sum_weights_gpu)

-    @pytest.mark.usefixtures("allow_fall_back_on_numpy")
     @testing.for_all_dtypes()
-    def test_returned(self, dtype):
-        a = testing.shaped_arange((2, 3), numpy, dtype)
-        w = testing.shaped_arange((2, 3), numpy, dtype)
-        self.check_returned(a, axis=1, weights=None)
-        self.check_returned(a, axis=None, weights=w)
-        self.check_returned(a, axis=1, weights=w)
+    @testing.numpy_cupy_allclose(rtol=1e-7, type_check=has_support_aspect64())
+    @pytest.mark.parametrize("returned", [True, False])
+    @testing.with_requires("numpy>=1.23.1")
+    def test_average_keepdims_noaxis(self, xp, dtype, returned):
+        a = testing.shaped_random((2, 3), xp, dtype)
+        w = testing.shaped_random((2, 3), xp, dtype)
+        return xp.average(a, weights=w, returned=returned, keepdims=True)


-class TestMeanVar(unittest.TestCase):
+class TestMeanVar:
     @testing.for_all_dtypes()
     @testing.numpy_cupy_allclose(type_check=has_support_aspect64())
     def test_mean_all(self, xp, dtype):
@@ -301,17 +299,16 @@ def test_external_std_axis_ddof(self, xp, dtype):
         }
     )
 )
-@testing.gpu
-class TestNanMean(unittest.TestCase):
+class TestNanMean:
     @testing.for_all_dtypes(no_float16=True)
-    @testing.numpy_cupy_allclose(rtol=1e-6)
+    @testing.numpy_cupy_allclose(rtol=1e-6, type_check=has_support_aspect64())
     def test_nanmean_without_nan(self, xp, dtype):
         a = testing.shaped_random(self.shape, xp, dtype)
         return xp.nanmean(a, axis=self.axis, keepdims=self.keepdims)

-    @ignore_runtime_warnings
+    @pytest.mark.usefixtures("suppress_mean_empty_slice_numpy_warnings")
     @testing.for_all_dtypes(no_float16=True)
-    @testing.numpy_cupy_allclose(rtol=1e-6)
+    @testing.numpy_cupy_allclose(rtol=1e-6, type_check=has_support_aspect64())
     def test_nanmean_with_nan_float(self, xp, dtype):
         a = testing.shaped_random(self.shape, xp, dtype)
@@ -322,14 +319,18 @@ def test_nanmean_with_nan_float(self, xp, dtype):
         return xp.nanmean(a, axis=self.axis, keepdims=self.keepdims)


-@testing.gpu
-class TestNanMeanAdditional(unittest.TestCase):
-    @ignore_runtime_warnings
+class TestNanMeanAdditional:
+    @pytest.mark.usefixtures("suppress_mean_empty_slice_numpy_warnings")
     @testing.for_all_dtypes(no_float16=True)
-    @testing.numpy_cupy_allclose(rtol=1e-6)
+    @testing.numpy_cupy_allclose(rtol=1e-6, type_check=has_support_aspect64())
     def test_nanmean_out(self, xp, dtype):
         a = testing.shaped_random((10, 20, 30), xp, dtype)
-        z = xp.zeros((20, 30), dtype=dtype)
+        # `numpy.mean` allows ``unsafe`` casting while `dpnp.mean` does not.
+        # So, output data type cannot be the same as input.
+        out_dtype = (
+            cupy.default_float_type(a.device) if xp == cupy else numpy.float64
+        )
+        z = xp.zeros((20, 30), dtype=out_dtype)

         if a.dtype.kind not in "biu":
             a[1, :] = xp.nan
@@ -340,7 +341,7 @@ def test_nanmean_out(self, xp, dtype):

     @testing.slow
     @testing.for_all_dtypes(no_float16=True)
-    @testing.numpy_cupy_allclose(rtol=1e-6)
+    @testing.numpy_cupy_allclose(rtol=1e-6, type_check=has_support_aspect64())
     def test_nanmean_huge(self, xp, dtype):
         a = testing.shaped_random((1024, 512), xp, dtype)
@@ -349,14 +350,17 @@ def test_nanmean_huge(self, xp, dtype):

         return xp.nanmean(a, axis=1)

+    @pytest.mark.skipif(
+        not has_support_aspect16(), reason="No fp16 support by device"
+    )
     @testing.numpy_cupy_allclose(rtol=1e-4)
     def test_nanmean_float16(self, xp):
         a = testing.shaped_arange((2, 3), xp, numpy.float16)
         a[0][0] = xp.nan
         return xp.nanmean(a)

-    @ignore_runtime_warnings
-    @testing.numpy_cupy_allclose(rtol=1e-6)
+    @pytest.mark.usefixtures("suppress_mean_empty_slice_numpy_warnings")
+    @testing.numpy_cupy_allclose(rtol=1e-6, type_check=has_support_aspect64())
     def test_nanmean_all_nan(self, xp):
         a = xp.zeros((3, 4))
         a[:] = xp.nan
@@ -373,7 +377,7 @@ def test_nanmean_all_nan(self, xp):
         }
     )
 )
-class TestNanVarStd(unittest.TestCase):
+class TestNanVarStd:
     @pytest.mark.usefixtures("suppress_dof_numpy_warnings")
     @testing.for_all_dtypes(no_float16=True)
     @testing.numpy_cupy_allclose(rtol=1e-6, type_check=has_support_aspect64())
@@ -385,8 +389,9 @@ def test_nanvar(self, xp, dtype):
             a, axis=self.axis, ddof=self.ddof, keepdims=self.keepdims
         )

+    @pytest.mark.usefixtures("suppress_dof_numpy_warnings")
     @testing.for_all_dtypes(no_float16=True)
-    @testing.numpy_cupy_allclose(rtol=1e-6)
+    @testing.numpy_cupy_allclose(rtol=1e-6, type_check=has_support_aspect64())
     def test_nanstd(self, xp, dtype):
         a = testing.shaped_random(self.shape, xp, dtype=dtype)
         if a.dtype.kind not in "biu":
@@ -396,7 +401,7 @@ def test_nanstd(self, xp, dtype):
         )


-class TestNanVarStdAdditional(unittest.TestCase):
+class TestNanVarStdAdditional:
     @pytest.mark.usefixtures("suppress_dof_numpy_warnings")
     @testing.for_all_dtypes(no_float16=True)
     @testing.numpy_cupy_allclose(rtol=1e-6, type_check=has_support_aspect64())
@@ -431,8 +436,9 @@ def test_nanvar_float16(self, xp):
         a[0][0] = xp.nan
         return xp.nanvar(a, axis=0)

+    @pytest.mark.usefixtures("suppress_dof_numpy_warnings")
     @testing.for_all_dtypes(no_float16=True)
-    @testing.numpy_cupy_allclose(rtol=1e-6)
+    @testing.numpy_cupy_allclose(rtol=1e-6, type_check=has_support_aspect64())
     def test_nanstd_out(self, xp, dtype):
         a = testing.shaped_random((10, 20, 30), xp, dtype)
         z = xp.zeros((20, 30))
@@ -446,7 +452,7 @@ def test_nanstd_out(self, xp, dtype):

     @testing.slow
     @testing.for_all_dtypes(no_float16=True)
-    @testing.numpy_cupy_allclose(rtol=1e-6)
+    @testing.numpy_cupy_allclose(rtol=1e-6, type_check=has_support_aspect64())
     def test_nanstd_huge(self, xp, dtype):
         a = testing.shaped_random((1024, 512), xp, dtype)
@@ -455,6 +461,9 @@ def test_nanstd_huge(self, xp, dtype):

         return xp.nanstd(a, axis=1)

+    @pytest.mark.skipif(
+        not has_support_aspect16(), reason="No fp16 support by device"
+    )
     @testing.numpy_cupy_allclose(rtol=1e-4)
     def test_nanstd_float16(self, xp):
         a = testing.shaped_arange((4, 5), xp, numpy.float16)
@@ -482,7 +491,7 @@ def test_nanstd_float16(self, xp):
     "suppress_dof_numpy_warnings",
     "suppress_mean_empty_slice_numpy_warnings",
 )
-class TestProductZeroLength(unittest.TestCase):
+class TestProductZeroLength:
     @testing.for_all_dtypes(no_complex=True)
     @testing.numpy_cupy_allclose(type_check=has_support_aspect64())
     def test_external_mean_zero_len(self, xp, dtype):
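
Note (illustrative sketch, not part of the patch above): the test hunks repeat two mechanical changes — classes drop the unittest.TestCase base and use pytest.raises() instead of self.assertRaises(), and dtype comparisons are only enforced when the device supports double precision (type_check=has_support_aspect64()). The minimal example below mirrors those two patterns with plain NumPy and pytest; device_has_fp64() and TestCumsumSketch are hypothetical stand-ins, not names taken from dpnp.

    import numpy
    import pytest


    def device_has_fp64():
        # Hypothetical capability probe; dpnp instead queries the SYCL
        # device (has_support_aspect64()) to decide whether fp64 results
        # can be compared dtype-for-dtype against NumPy.
        return True


    class TestCumsumSketch:
        def test_invalid_axis(self):
            a = numpy.arange(20).reshape(4, 5)
            # pytest-style exception check, as used throughout the patch.
            with pytest.raises(numpy.AxisError):
                numpy.cumsum(a, axis=a.ndim + 1)

        def test_sum_matches_reference(self):
            a = numpy.arange(6, dtype=numpy.float32).reshape(2, 3)
            result = a.sum(axis=1)
            expected = numpy.array([3.0, 12.0], dtype=numpy.float32)
            numpy.testing.assert_allclose(result, expected)
            if device_has_fp64():
                # Only compare dtypes when the device supports fp64,
                # mirroring type_check=has_support_aspect64().
                assert result.dtype == expected.dtype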